Compare commits

...

3 Commits

Author SHA1 Message Date
Wilco Dijkstra 85c5b504aa malloc: Remove dumped heap support
Remove support for obsolete dumped heaps.  Dumping heaps was discontinued
8 years ago; however, loading a dumped heap is still supported.  This blocks
changes and improvements of the malloc data structures - hence it is time
to remove this.  Ancient binaries that still call malloc_set_state will now
get the -1 error code.  Update tst-mallocstate.c to just check for this.

Reviewed-by: Adhemerval Zanella  <adhemerval.zanella@linaro.org>
2025-09-19 13:11:56 +00:00
Wilco Dijkstra aebaeb2c33 AArch64: Update math-vector-fortran.h
Update math-vector-fortran.h with the latest set of math functions
and sort by name.

Reviewed-by: Yury Khrustalev <yury.khrustalev@arm.com>
2025-09-19 12:57:47 +00:00
Yury Khrustalev 20446e3502 manual: fix typo in tunables.texi 2025-09-19 12:03:38 +01:00
4 changed files with 60 additions and 586 deletions

malloc/malloc-debug.c

@@ -150,19 +150,6 @@ memalign_hook_ini (size_t alignment, size_t sz, const void *caller)
 static size_t pagesize;
 
-/* These variables are used for undumping support.  Chunked are marked
-   as using mmap, but we leave them alone if they fall into this
-   range.  NB: The chunk size for these chunks only includes the
-   initial size field (of SIZE_SZ bytes), there is no trailing size
-   field (unlike with regular mmapped chunks).  */
-static mchunkptr dumped_main_arena_start; /* Inclusive.  */
-static mchunkptr dumped_main_arena_end;   /* Exclusive.  */
-
-/* True if the pointer falls into the dumped arena.  Use this after
-   chunk_is_mmapped indicates a chunk is mmapped.  */
-#define DUMPED_MAIN_ARENA_CHUNK(p) \
-  ((p) >= dumped_main_arena_start && (p) < dumped_main_arena_end)
-
 /* The allocator functions.  */
 
 static void *
@@ -202,9 +189,7 @@ __debug_free (void *mem)
   if (__is_malloc_debug_enabled (MALLOC_MCHECK_HOOK))
     mem = free_mcheck (mem);
 
-  if (DUMPED_MAIN_ARENA_CHUNK (mem2chunk (mem)))
-    /* Do nothing.  */;
-  else if (__is_malloc_debug_enabled (MALLOC_CHECK_HOOK))
+  if (__is_malloc_debug_enabled (MALLOC_CHECK_HOOK))
     free_check (mem);
   else
     __libc_free (mem);
@@ -227,32 +212,7 @@ __debug_realloc (void *oldmem, size_t bytes)
   if ((!__is_malloc_debug_enabled (MALLOC_MCHECK_HOOK)
       || !realloc_mcheck_before (&oldmem, &bytes, &oldsize, &victim)))
     {
-      mchunkptr oldp = mem2chunk (oldmem);
-
-      /* If this is a faked mmapped chunk from the dumped main arena,
-         always make a copy (and do not free the old chunk).  */
-      if (DUMPED_MAIN_ARENA_CHUNK (oldp))
-        {
-          if (bytes == 0 && oldmem != NULL)
-            victim = NULL;
-          else
-            {
-              const INTERNAL_SIZE_T osize = chunksize (oldp);
-              /* Must alloc, copy, free.  */
-              victim = __debug_malloc (bytes);
-              /* Copy as many bytes as are available from the old chunk
-                 and fit into the new size.  NB: The overhead for faked
-                 mmapped chunks is only SIZE_SZ, not CHUNK_HDR_SZ as for
-                 regular mmapped chunks.  */
-              if (victim != NULL)
-                {
-                  if (bytes > osize - SIZE_SZ)
-                    bytes = osize - SIZE_SZ;
-                  memcpy (victim, oldmem, bytes);
-                }
-            }
-        }
-      else if (__is_malloc_debug_enabled (MALLOC_CHECK_HOOK))
+      if (__is_malloc_debug_enabled (MALLOC_CHECK_HOOK))
         victim = realloc_check (oldmem, bytes);
       else
         victim = __libc_realloc (oldmem, bytes);
@@ -414,10 +374,6 @@ malloc_usable_size (void *mem)
   if (__is_malloc_debug_enabled (MALLOC_CHECK_HOOK))
     return malloc_check_get_size (mem);
 
-  mchunkptr p = mem2chunk (mem);
-  if (DUMPED_MAIN_ARENA_CHUNK (p))
-    return chunksize (p) - SIZE_SZ;
-
   return musable (mem);
 }
@@ -517,43 +473,10 @@ malloc_trim (size_t s)
 #if SHLIB_COMPAT (libc_malloc_debug, GLIBC_2_0, GLIBC_2_25)
 
-/* Support for restoring dumped heaps contained in historic Emacs
-   executables.  The heap saving feature (malloc_get_state) is no
-   longer implemented in this version of glibc, but we have a heap
-   rewriter in malloc_set_state which transforms the heap into a
-   version compatible with current malloc.  */
-
-#define MALLOC_STATE_MAGIC   0x444c4541l
-#define MALLOC_STATE_VERSION (0 * 0x100l + 5l) /* major*0x100 + minor */
-
-struct malloc_save_state
-{
-  long magic;
-  long version;
-  mbinptr av[NBINS * 2 + 2];
-  char *sbrk_base;
-  int sbrked_mem_bytes;
-  unsigned long trim_threshold;
-  unsigned long top_pad;
-  unsigned int n_mmaps_max;
-  unsigned long mmap_threshold;
-  int check_action;
-  unsigned long max_sbrked_mem;
-  unsigned long max_total_mem;  /* Always 0, for backwards compatibility.  */
-  unsigned int n_mmaps;
-  unsigned int max_n_mmaps;
-  unsigned long mmapped_mem;
-  unsigned long max_mmapped_mem;
-  int using_malloc_checking;
-  unsigned long max_fast;
-  unsigned long arena_test;
-  unsigned long arena_max;
-  unsigned long narenas;
-};
-
-/* Dummy implementation which always fails.  We need to provide this
-   symbol so that existing Emacs binaries continue to work with
-   BIND_NOW.  */
+/* Support for saving/restoring dumped heaps in old GLIBCs is no
+   longer implemented - instead we provide dummy implementations
+   which always fail.  We need to provide these symbol so that
+   existing Emacs binaries continue to work with BIND_NOW.  */
 void *
 malloc_get_state (void)
 {
@@ -566,81 +489,7 @@ compat_symbol (libc_malloc_debug, malloc_get_state, malloc_get_state,
 int
 malloc_set_state (void *msptr)
 {
-  struct malloc_save_state *ms = (struct malloc_save_state *) msptr;
-
-  if (ms->magic != MALLOC_STATE_MAGIC)
-    return -1;
-
-  /* Must fail if the major version is too high.  */
-  if ((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl))
-    return -2;
-
-  if (debug_initialized == 1)
-    return -1;
-
-  bool check_was_enabled = __is_malloc_debug_enabled (MALLOC_CHECK_HOOK);
-
-  /* It's not too late, so disable MALLOC_CHECK_ and all of the hooks.  */
-  __malloc_hook = NULL;
-  __realloc_hook = NULL;
-  __free_hook = NULL;
-  __memalign_hook = NULL;
-  __malloc_debug_disable (MALLOC_CHECK_HOOK);
-
-  /* We do not need to perform locking here because malloc_set_state
-     must be called before the first call into the malloc subsystem (usually via
-     __malloc_initialize_hook).  pthread_create always calls calloc and thus
-     must be called only afterwards, so there cannot be more than one thread
-     when we reach this point.  Also handle initialization if either we ended
-     up being called before the first malloc or through the hook when
-     malloc-check was enabled.  */
-  if (debug_initialized < 0)
-    generic_hook_ini ();
-  else if (check_was_enabled)
-    __libc_free (__libc_malloc (0));
-
-  /* Patch the dumped heap.  We no longer try to integrate into the
-     existing heap.  Instead, we mark the existing chunks as mmapped.
-     Together with the update to dumped_main_arena_start and
-     dumped_main_arena_end, realloc and free will recognize these
-     chunks as dumped fake mmapped chunks and never free them.  */
-
-  /* Find the chunk with the lowest address with the heap.  */
-  mchunkptr chunk = NULL;
-  {
-    size_t *candidate = (size_t *) ms->sbrk_base;
-    size_t *end = (size_t *) (ms->sbrk_base + ms->sbrked_mem_bytes);
-    while (candidate < end)
-      if (*candidate != 0)
-        {
-          chunk = mem2chunk ((void *) (candidate + 1));
-          break;
-        }
-      else
-        ++candidate;
-  }
-  if (chunk == NULL)
-    return 0;
-
-  /* Iterate over the dumped heap and patch the chunks so that they
-     are treated as fake mmapped chunks.  */
-  mchunkptr top = ms->av[2];
-  while (chunk < top)
-    {
-      if (inuse (chunk))
-        {
-          /* Mark chunk as mmapped, to trigger the fallback path.  */
-          size_t size = chunksize (chunk);
-          set_head (chunk, size | IS_MMAPPED);
-        }
-      chunk = next_chunk (chunk);
-    }
-
-  /* The dumped fake mmapped chunks all lie in this address range.  */
-  dumped_main_arena_start = (mchunkptr) ms->sbrk_base;
-  dumped_main_arena_end = top;
-
-  return 0;
+  return -1;
 }
 compat_symbol (libc_malloc_debug, malloc_set_state, malloc_set_state,
                GLIBC_2_0);
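The stubs above are the whole of the remaining compatibility surface: malloc_get_state keeps returning NULL with errno set to ENOSYS, and malloc_set_state now unconditionally returns -1. A minimal sketch of what a legacy caller observes (hypothetical example, not part of the patch; the prototypes are declared by hand because current headers no longer expose these GLIBC_2_0 compat symbols, so a freshly built program can only reach them through libc_malloc_debug or as an old binary):

#include <errno.h>
#include <stdio.h>

/* Compat symbols from the GLIBC_2_0 era; declare them by hand, as
   malloc/tst-mallocstate.c does.  */
void *malloc_get_state (void);
int malloc_set_state (void *);

int
main (void)
{
  /* Saving a heap has been a stub for a long time: NULL + ENOSYS.  */
  errno = 0;
  if (malloc_get_state () == NULL && errno == ENOSYS)
    puts ("malloc_get_state: unsupported (ENOSYS)");

  /* Restoring a dumped heap now always fails with -1 as well.  */
  long fake_state[64] = { 0 };
  if (malloc_set_state (fake_state) == -1)
    puts ("malloc_set_state: unsupported (-1)");
  return 0;
}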

malloc/tst-mallocstate.c

@@ -17,7 +17,6 @@
    <https://www.gnu.org/licenses/>.  */
 
 #include <errno.h>
-#include <stdbool.h>
 #include <stdio.h>
 #include <string.h>
 #include <libc-symbols.h>
@@ -34,36 +33,8 @@ compat_symbol_reference (libc, malloc_get_state, malloc_get_state, GLIBC_2_0);
 int malloc_set_state (void *);
 compat_symbol_reference (libc, malloc_set_state, malloc_set_state, GLIBC_2_0);
 
-/* Maximum object size in the fake heap.  */
-enum { max_size = 64 };
-
-/* Allocation actions.  These are randomized actions executed on the
-   dumped heap (see allocation_tasks below).  They are interspersed
-   with operations on the new heap (see heap_activity).  */
-enum allocation_action
-{
-  action_free,                  /* Dumped and freed.  */
-  action_realloc,               /* Dumped and realloc'ed.  */
-  action_realloc_same,          /* Dumped and realloc'ed, same size.  */
-  action_realloc_smaller,       /* Dumped and realloc'ed, shrunk.  */
-  action_count
-};
-
-/* Dumped heap.  Initialize it, so that the object is placed into the
-   .data section, for increased realism.  The size is an upper bound;
-   we use about half of the space.  */
-static size_t dumped_heap[action_count * max_size * max_size
-                          / sizeof (size_t)] = {1};
-
-/* Next free space in the dumped heap.  Also top of the heap at the
-   end of the initialization procedure.  */
-static size_t *next_heap_chunk;
-
-/* Copied from malloc.c and hooks.c.  The version is deliberately
-   lower than the final version of malloc_set_state.  */
-# define NBINS 128
-# define MALLOC_STATE_MAGIC   0x444c4541l
-# define MALLOC_STATE_VERSION (0 * 0x100l + 4l)
+#define NBINS 128
 
 static struct
 {
   long magic;
@@ -87,407 +58,20 @@ static struct
   unsigned long arena_test;
   unsigned long arena_max;
   unsigned long narenas;
-} save_state =
-  {
-    .magic = MALLOC_STATE_MAGIC,
-    .version = MALLOC_STATE_VERSION,
-  };
-
-/* Allocate a blob in the fake heap.  */
-static void *
-dumped_heap_alloc (size_t length)
-{
-  /* malloc needs three state bits in the size field, so the minimum
-     alignment is 8 even on 32-bit architectures.  malloc_set_state
-     should be compatible with such heaps even if it currently
-     provides more alignment to applications.  */
-  enum
-  {
-    heap_alignment = 8,
-    heap_alignment_mask = heap_alignment - 1
-  };
-  _Static_assert (sizeof (size_t) <= heap_alignment,
-                  "size_t compatible with heap alignment");
-
-  /* Need at least this many bytes for metadata and application
-     data.  */
-  size_t chunk_size = sizeof (size_t) + length;
-  /* Round up the allocation size to the heap alignment.  */
-  chunk_size += heap_alignment_mask;
-  chunk_size &= ~heap_alignment_mask;
-  TEST_VERIFY_EXIT ((chunk_size & 3) == 0);
-
-  if (next_heap_chunk == NULL)
-    /* Initialize the top of the heap.  Add one word of zero padding,
-       to match existing practice.  */
-    {
-      dumped_heap[0] = 0;
-      next_heap_chunk = dumped_heap + 1;
-    }
-  else
-    /* The previous chunk is allocated.  */
-    chunk_size |= 1;
-
-  *next_heap_chunk = chunk_size;
-
-  /* User data starts after the chunk header.  */
-  void *result = next_heap_chunk + 1;
-  next_heap_chunk += chunk_size / sizeof (size_t);
-
-  /* Mark the previous chunk as used.  */
-  *next_heap_chunk = 1;
-  return result;
-}
-
-/* Global seed variable for the random number generator.  */
-static unsigned long long global_seed;
-
-/* Simple random number generator.  The numbers are in the range from
-   0 to UINT_MAX (inclusive).  */
-static unsigned int
-rand_next (unsigned long long *seed)
-{
-  /* Linear congruential generated as used for MMIX.  */
-  *seed = *seed * 6364136223846793005ULL + 1442695040888963407ULL;
-  return *seed >> 32;
-}
-
-/* Fill LENGTH bytes at BUFFER with random contents, as determined by
-   SEED.  */
-static void
-randomize_buffer (unsigned char *buffer, size_t length,
-                  unsigned long long seed)
-{
-  for (size_t i = 0; i < length; ++i)
-    buffer[i] = rand_next (&seed);
-}
-
-/* Dumps the buffer to standard output, in hexadecimal.  */
-static void
-dump_hex (unsigned char *buffer, size_t length)
-{
-  for (int i = 0; i < length; ++i)
-    printf (" %02X", buffer[i]);
-}
-
-/* Set to true if an error is encountered.  */
-static bool errors = false;
-
-/* Keep track of object allocations.  */
-struct allocation
-{
-  unsigned char *data;
-  unsigned int size;
-  unsigned int seed;
-};
-
-/* Check that the allocation task allocation has the expected
-   contents.  */
-static void
-check_allocation (const struct allocation *alloc, int index)
-{
-  size_t size = alloc->size;
-  if (alloc->data == NULL)
-    {
-      printf ("error: NULL pointer for allocation of size %zu at %d, seed %u\n",
-              size, index, alloc->seed);
-      errors = true;
-      return;
-    }
-
-  unsigned char expected[4096];
-  if (size > sizeof (expected))
-    {
-      printf ("error: invalid allocation size %zu at %d, seed %u\n",
-              size, index, alloc->seed);
-      errors = true;
-      return;
-    }
-  randomize_buffer (expected, size, alloc->seed);
-  if (memcmp (alloc->data, expected, size) != 0)
-    {
-      printf ("error: allocation %d data mismatch, size %zu, seed %u\n",
-              index, size, alloc->seed);
-      printf (" expected:");
-      dump_hex (expected, size);
-      putc ('\n', stdout);
-      printf (" actual:");
-      dump_hex (alloc->data, size);
-      putc ('\n', stdout);
-      errors = true;
-    }
-}
-
-/* A heap allocation combined with pending actions on it.  */
-struct allocation_task
-{
-  struct allocation allocation;
-  enum allocation_action action;
-};
-
-/* Allocation tasks.  Initialized by init_allocation_tasks and used by
-   perform_allocations.  */
-enum { allocation_task_count = action_count * max_size };
-static struct allocation_task allocation_tasks[allocation_task_count];
-
-/* Fisher-Yates shuffle of allocation_tasks.  */
-static void
-shuffle_allocation_tasks (void)
-{
-  for (int i = 0; i < allocation_task_count - 1; ++i)
-    {
-      /* Pick pair in the tail of the array.  */
-      int j = i + (rand_next (&global_seed)
-                   % ((unsigned) (allocation_task_count - i)));
-      TEST_VERIFY_EXIT (j >= 0 && j < allocation_task_count);
-      /* Exchange.  */
-      struct allocation_task tmp = allocation_tasks[i];
-      allocation_tasks[i] = allocation_tasks[j];
-      allocation_tasks[j] = tmp;
-    }
-}
-
-/* Set up the allocation tasks and the dumped heap.  */
-static void
-initial_allocations (void)
-{
-  /* Initialize in a position-dependent way.  */
-  for (int i = 0; i < allocation_task_count; ++i)
-    allocation_tasks[i] = (struct allocation_task)
-      {
-        .allocation =
-          {
-            .size = 1 + (i / action_count),
-            .seed = i,
-          },
-        .action = i % action_count
-      };
-
-  /* Execute the tasks in a random order.  */
-  shuffle_allocation_tasks ();
-
-  /* Initialize the contents of the dumped heap.  */
-  for (int i = 0; i < allocation_task_count; ++i)
-    {
-      struct allocation_task *task = allocation_tasks + i;
-      task->allocation.data = dumped_heap_alloc (task->allocation.size);
-      randomize_buffer (task->allocation.data, task->allocation.size,
-                        task->allocation.seed);
-    }
-
-  for (int i = 0; i < allocation_task_count; ++i)
-    check_allocation (&allocation_tasks[i].allocation, i);
-}
-
-/* Indicates whether init_heap has run.  This variable needs to be
-   volatile because malloc is declared __THROW, which implies it is a
-   leaf function, but we expect it to run our hooks.  */
-static volatile bool heap_initialized;
-
-/* Executed by glibc malloc, through __malloc_initialize_hook
-   below.  */
-static void
-init_heap (void)
-{
-  if (test_verbose)
-    printf ("info: performing heap initialization\n");
-  heap_initialized = true;
-
-  /* Populate the dumped heap.  */
-  initial_allocations ();
-
-  /* Complete initialization of the saved heap data structure.  */
-  save_state.sbrk_base = (void *) dumped_heap;
-  save_state.sbrked_mem_bytes = sizeof (dumped_heap);
-  /* Top pointer.  Adjust so that it points to the start of struct
-     malloc_chunk.  */
-  save_state.av[2] = (void *) (next_heap_chunk - 1);
-
-  /* Integrate the dumped heap into the process heap.  */
-  TEST_VERIFY_EXIT (malloc_set_state (&save_state) == 0);
-}
-
-/* Interpose the initialization callback.  */
-void (*volatile __malloc_initialize_hook) (void) = init_heap;
-compat_symbol_reference (libc, __malloc_initialize_hook,
-                         __malloc_initialize_hook, GLIBC_2_0);
-
-/* Simulate occasional unrelated heap activity in the non-dumped
-   heap.  */
-enum { heap_activity_allocations_count = 32 };
-static struct allocation heap_activity_allocations
-  [heap_activity_allocations_count] = {};
-static int heap_activity_seed_counter = 1000 * 1000;
-
-static void
-heap_activity (void)
-{
-  /* Only do this from time to time.  */
-  if ((rand_next (&global_seed) % 4) == 0)
-    {
-      int slot = rand_next (&global_seed) % heap_activity_allocations_count;
-      struct allocation *alloc = heap_activity_allocations + slot;
-      if (alloc->data == NULL)
-        {
-          alloc->size = rand_next (&global_seed) % (4096U + 1);
-          alloc->data = xmalloc (alloc->size);
-          alloc->seed = heap_activity_seed_counter++;
-          randomize_buffer (alloc->data, alloc->size, alloc->seed);
-          check_allocation (alloc, 1000 + slot);
-        }
-      else
-        {
-          check_allocation (alloc, 1000 + slot);
-          free (alloc->data);
-          alloc->data = NULL;
-        }
-    }
-}
-
-static void
-heap_activity_deallocate (void)
-{
-  for (int i = 0; i < heap_activity_allocations_count; ++i)
-    free (heap_activity_allocations[i].data);
-}
-
-/* Perform a full heap check across the dumped heap allocation tasks,
-   and the simulated heap activity directly above.  */
-static void
-full_heap_check (void)
-{
-  /* Dumped heap.  */
-  for (int i = 0; i < allocation_task_count; ++i)
-    if (allocation_tasks[i].allocation.data != NULL)
-      check_allocation (&allocation_tasks[i].allocation, i);
-
-  /* Heap activity allocations.  */
-  for (int i = 0; i < heap_activity_allocations_count; ++i)
-    if (heap_activity_allocations[i].data != NULL)
-      check_allocation (heap_activity_allocations + i, i);
-}
-
-/* Used as an optimization barrier to force a heap allocation.  */
-__attribute_optimization_barrier__
-static void
-my_free (void *ptr)
-{
-  free (ptr);
-}
+} save_state;
 
 static int
 do_test (void)
 {
-  my_free (malloc (1));
-  TEST_VERIFY_EXIT (heap_initialized);
-
-  /* The first pass performs the randomly generated allocation
-     tasks.  */
-  if (test_verbose)
-    printf ("info: first pass through allocation tasks\n");
-  full_heap_check ();
-
-  /* Execute the post-undump tasks in a random order.  */
-  shuffle_allocation_tasks ();
-
-  for (int i = 0; i < allocation_task_count; ++i)
-    {
-      heap_activity ();
-      struct allocation_task *task = allocation_tasks + i;
-      switch (task->action)
-        {
-        case action_free:
-          check_allocation (&task->allocation, i);
-          free (task->allocation.data);
-          task->allocation.data = NULL;
-          break;
-
-        case action_realloc:
-          check_allocation (&task->allocation, i);
-          task->allocation.data = xrealloc
-            (task->allocation.data, task->allocation.size + max_size);
-          check_allocation (&task->allocation, i);
-          break;
-
-        case action_realloc_same:
-          check_allocation (&task->allocation, i);
-          task->allocation.data = xrealloc
-            (task->allocation.data, task->allocation.size);
-          check_allocation (&task->allocation, i);
-          break;
-
-        case action_realloc_smaller:
-          check_allocation (&task->allocation, i);
-          size_t new_size = task->allocation.size - 1;
-          task->allocation.data = xrealloc (task->allocation.data, new_size);
-          if (new_size == 0)
-            {
-              if (task->allocation.data != NULL)
-                {
-                  printf ("error: realloc with size zero did not deallocate\n");
-                  errors = true;
-                }
-              /* No further action on this task.  */
-              task->action = action_free;
-            }
-          else
-            {
-              task->allocation.size = new_size;
-              check_allocation (&task->allocation, i);
-            }
-          break;
-
-        case action_count:
-          FAIL_EXIT1 ("task->action should never be action_count");
-        }
-      full_heap_check ();
-    }
-
-  /* The second pass frees the objects which were allocated during the
-     first pass.  */
-  if (test_verbose)
-    printf ("info: second pass through allocation tasks\n");
-
-  shuffle_allocation_tasks ();
-  for (int i = 0; i < allocation_task_count; ++i)
-    {
-      heap_activity ();
-      struct allocation_task *task = allocation_tasks + i;
-      switch (task->action)
-        {
-        case action_free:
-          /* Already freed, nothing to do.  */
-          break;
-
-        case action_realloc:
-        case action_realloc_same:
-        case action_realloc_smaller:
-          check_allocation (&task->allocation, i);
-          free (task->allocation.data);
-          task->allocation.data = NULL;
-          break;
-
-        case action_count:
-          FAIL_EXIT1 ("task->action should never be action_count");
-        }
-      full_heap_check ();
-    }
-
-  heap_activity_deallocate ();
-
-  /* Check that the malloc_get_state stub behaves in the intended
-     way.  */
+  /* Check the dummy implementations always fail.  */
+  TEST_VERIFY_EXIT (malloc_set_state (&save_state) == -1);
+
   errno = 0;
-  if (malloc_get_state () != NULL)
-    {
-      printf ("error: malloc_get_state succeeded\n");
-      errors = true;
-    }
-  if (errno != ENOSYS)
-    {
-      printf ("error: malloc_get_state: %m\n");
-      errors = true;
-    }
-
-  return errors;
+  TEST_VERIFY_EXIT (malloc_get_state () == NULL);
+  TEST_VERIFY_EXIT (errno == ENOSYS);
+  return 0;
 }
 
 #include <support/test-driver.c>

manual/tunables.texi

@@ -759,7 +759,7 @@ Linux version 5.17, although not always enabled by some kernel
 configurations), this tunable can be used to control whether
 @theglibc{} decorates the underlying memory obtained from operating
 system with a string describing its usage (for instance, on the thread
-stack created by @code{ptthread_create} or memory allocated by
+stack created by @code{pthread_create} or memory allocated by
 @code{malloc}).
 
 The process mappings can be obtained by reading the @code{/proc/<pid>maps}
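The paragraph being fixed documents glibc's map-decoration tunable (glibc.mem.decorate_maps, going by the surrounding section). A rough way to see the effect, assuming a kernel with anonymous-VMA naming (Linux 5.17 or later) and a glibc that supports the tunable, is to enable it and dump the process's own mappings; the exact label text varies by glibc version, so treat this only as a sketch:

#include <stdio.h>
#include <stdlib.h>

/* Run as:  GLIBC_TUNABLES=glibc.mem.decorate_maps=1 ./a.out
   and look for glibc-labelled anonymous mappings in the output.  */
int
main (void)
{
  void *p = malloc (1 << 20);   /* force some malloc/mmap activity */

  FILE *f = fopen ("/proc/self/maps", "r");
  if (f == NULL)
    return 1;
  char line[512];
  while (fgets (line, sizeof line, f) != NULL)
    fputs (line, stdout);
  fclose (f);

  free (p);
  return 0;
}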

math-vector-fortran.h

@@ -15,33 +15,74 @@
 ! You should have received a copy of the GNU Lesser General Public
 ! License along with the GNU C Library; if not, see
 ! <https://www.gnu.org/licenses/>.
 !GCC$ builtin (acos) attributes simd (notinbranch)
 !GCC$ builtin (acosf) attributes simd (notinbranch)
+!GCC$ builtin (acosh) attributes simd (notinbranch)
+!GCC$ builtin (acoshf) attributes simd (notinbranch)
+!GCC$ builtin (acospi) attributes simd (notinbranch)
+!GCC$ builtin (acospif) attributes simd (notinbranch)
 !GCC$ builtin (asin) attributes simd (notinbranch)
 !GCC$ builtin (asinf) attributes simd (notinbranch)
+!GCC$ builtin (asinh) attributes simd (notinbranch)
+!GCC$ builtin (asinhf) attributes simd (notinbranch)
+!GCC$ builtin (asinpi) attributes simd (notinbranch)
+!GCC$ builtin (asinpif) attributes simd (notinbranch)
 !GCC$ builtin (atan) attributes simd (notinbranch)
-!GCC$ builtin (atanf) attributes simd (notinbranch)
 !GCC$ builtin (atan2) attributes simd (notinbranch)
 !GCC$ builtin (atan2f) attributes simd (notinbranch)
+!GCC$ builtin (atan2pi) attributes simd (notinbranch)
+!GCC$ builtin (atan2pif) attributes simd (notinbranch)
+!GCC$ builtin (atanf) attributes simd (notinbranch)
+!GCC$ builtin (atanh) attributes simd (notinbranch)
+!GCC$ builtin (atanhf) attributes simd (notinbranch)
+!GCC$ builtin (atanpi) attributes simd (notinbranch)
+!GCC$ builtin (atanpif) attributes simd (notinbranch)
+!GCC$ builtin (cbrt) attributes simd (notinbranch)
+!GCC$ builtin (cbrtf) attributes simd (notinbranch)
 !GCC$ builtin (cos) attributes simd (notinbranch)
 !GCC$ builtin (cosf) attributes simd (notinbranch)
+!GCC$ builtin (cosh) attributes simd (notinbranch)
+!GCC$ builtin (coshf) attributes simd (notinbranch)
+!GCC$ builtin (cospi) attributes simd (notinbranch)
+!GCC$ builtin (cospif) attributes simd (notinbranch)
+!GCC$ builtin (erf) attributes simd (notinbranch)
+!GCC$ builtin (erfc) attributes simd (notinbranch)
+!GCC$ builtin (erfcf) attributes simd (notinbranch)
+!GCC$ builtin (erff) attributes simd (notinbranch)
 !GCC$ builtin (exp) attributes simd (notinbranch)
-!GCC$ builtin (expf) attributes simd (notinbranch)
 !GCC$ builtin (exp10) attributes simd (notinbranch)
 !GCC$ builtin (exp10f) attributes simd (notinbranch)
+!GCC$ builtin (exp10m1) attributes simd (notinbranch)
+!GCC$ builtin (exp10m1f) attributes simd (notinbranch)
 !GCC$ builtin (exp2) attributes simd (notinbranch)
 !GCC$ builtin (exp2f) attributes simd (notinbranch)
+!GCC$ builtin (exp2m1) attributes simd (notinbranch)
+!GCC$ builtin (exp2m1f) attributes simd (notinbranch)
+!GCC$ builtin (expf) attributes simd (notinbranch)
 !GCC$ builtin (expm1) attributes simd (notinbranch)
 !GCC$ builtin (expm1f) attributes simd (notinbranch)
+!GCC$ builtin (hypot) attributes simd (notinbranch)
+!GCC$ builtin (hypotf) attributes simd (notinbranch)
 !GCC$ builtin (log) attributes simd (notinbranch)
-!GCC$ builtin (logf) attributes simd (notinbranch)
 !GCC$ builtin (log10) attributes simd (notinbranch)
 !GCC$ builtin (log10f) attributes simd (notinbranch)
 !GCC$ builtin (log1p) attributes simd (notinbranch)
 !GCC$ builtin (log1pf) attributes simd (notinbranch)
 !GCC$ builtin (log2) attributes simd (notinbranch)
 !GCC$ builtin (log2f) attributes simd (notinbranch)
+!GCC$ builtin (logf) attributes simd (notinbranch)
+!GCC$ builtin (pow) attributes simd (notinbranch)
+!GCC$ builtin (powf) attributes simd (notinbranch)
 !GCC$ builtin (sin) attributes simd (notinbranch)
 !GCC$ builtin (sinf) attributes simd (notinbranch)
+!GCC$ builtin (sinh) attributes simd (notinbranch)
+!GCC$ builtin (sinhf) attributes simd (notinbranch)
+!GCC$ builtin (sinpi) attributes simd (notinbranch)
+!GCC$ builtin (sinpif) attributes simd (notinbranch)
 !GCC$ builtin (tan) attributes simd (notinbranch)
 !GCC$ builtin (tanf) attributes simd (notinbranch)
+!GCC$ builtin (tanh) attributes simd (notinbranch)
+!GCC$ builtin (tanhf) attributes simd (notinbranch)
+!GCC$ builtin (tanpi) attributes simd (notinbranch)
+!GCC$ builtin (tanpif) attributes simd (notinbranch)
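These !GCC$ builtin directives tell gfortran which scalar math functions have vector counterparts in the AArch64 libmvec, mirroring, roughly, what the C-side vector math declarations provide. As a C-language analogue (illustration only, not part of this change; whether the loop really becomes a call to a vector routine such as _ZGVnN2v_cos depends on the compiler version and on flags along the lines of -O2 -ftree-vectorize -ffast-math):

#include <math.h>

/* With vector math declarations in effect, the compiler may replace
   the scalar cos calls in this loop with an AArch64 libmvec vector
   routine (e.g. _ZGVnN2v_cos for Advanced SIMD).  */
void
fill_cosines (double *out, const double *in, int n)
{
  for (int i = 0; i < n; i++)
    out[i] = cos (in[i]);
}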