Change mmap representation

Wilco Dijkstra 2025-07-10 15:59:37 +00:00
parent 4d059af105
commit 4b74591022
2 changed files with 24 additions and 28 deletions

View File

@@ -151,8 +151,8 @@ mem2chunk_check (void *mem, unsigned char **magic_p)
offset != 0x200 && offset != 0x400 && offset != 0x800 && offset != 0x1000 &&
offset < 0x2000) ||
!chunk_is_mmapped (p) || prev_inuse (p) ||
- ((((unsigned long) p - prev_size (p)) & page_mask) != 0) ||
- ((prev_size (p) + sz) & page_mask) != 0)
+ ((((uintptr_t) mmap_base (p)) & page_mask) != 0) ||
+ (mmap_size (p) & page_mask) != 0)
return NULL;
for (sz = CHUNK_HDR_SZ + memsize (p) - 1;
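Note (not part of the patch): with the new accessors the debug-hook check no longer reconstructs the mapping by hand; it only asks whether the recovered base and total length are page aligned. A standalone sketch of that precondition, using made-up stand-ins for the glibc macros (the struct layout, HDR_SZ and the page-mask idiom are assumptions for illustration):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Illustrative chunk header: prev_size = padding back to the mmap base,
   size = chunk size excluding the trailing header (the new layout).  */
typedef struct { size_t prev_size; size_t size; } chunk;
enum { HDR_SZ = sizeof (chunk) };

static bool
mmapped_chunk_plausible (const chunk *p, size_t pagesize)
{
  uintptr_t base = (uintptr_t) p - p->prev_size;       /* like mmap_base (p) */
  size_t total = p->prev_size + p->size + HDR_SZ;      /* like mmap_size (p) */
  /* Both the start of the mapping and its length must be whole pages.  */
  return (base & (pagesize - 1)) == 0 && (total & (pagesize - 1)) == 0;
}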
@@ -273,7 +273,6 @@ realloc_check (void *oldmem, size_t bytes)
__libc_lock_unlock (main_arena.mutex);
if (!oldp)
malloc_printerr ("realloc(): invalid pointer");
- const INTERNAL_SIZE_T oldsize = chunksize (oldp);
if (rb > PTRDIFF_MAX)
{
@@ -293,8 +292,8 @@ realloc_check (void *oldmem, size_t bytes)
else
#endif
{
- /* Note the extra SIZE_SZ overhead. */
- if (oldsize - SIZE_SZ >= chnb)
+ size_t oldsize = memsize (oldp);
+ if (oldsize >= rb)
newmem = oldmem; /* do nothing */
else
{
@@ -303,7 +302,7 @@ realloc_check (void *oldmem, size_t bytes)
newmem = _int_malloc (&main_arena, rb);
if (newmem)
{
- memcpy (newmem, oldmem, oldsize - CHUNK_HDR_SZ);
+ memcpy (newmem, oldmem, oldsize);
munmap_chunk (oldp);
}
}
@@ -312,7 +311,7 @@ realloc_check (void *oldmem, size_t bytes)
else
{
top_check ();
- newmem = _int_realloc (&main_arena, oldp, oldsize, chnb);
+ newmem = _int_realloc (&main_arena, oldp, chunksize (oldp), chnb);
}
DIAG_PUSH_NEEDS_COMMENT;

View File

@@ -1412,6 +1412,10 @@ checked_request2size (size_t req) __nonnull (1)
/* Ptr to previous physical malloc_chunk. Only valid if !prev_inuse (P). */
#define prev_chunk(p) ((mchunkptr) (((char *) (p)) - prev_size (p)))
+ /* Return mmap base pointer/size of a chunk with IS_MMAPPED set. */
+ #define mmap_base(p) ((void*)(((char *) (p)) - prev_size (p)))
+ #define mmap_size(p) (prev_size (p) + chunksize (p) + CHUNK_HDR_SZ)
/* Treat space at ptr + offset as a chunk */
#define chunk_at_offset(p, s) ((mchunkptr) (((char *) (p)) + (s)))
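Note (not part of the patch): the two new macros are the heart of the change. For a chunk with IS_MMAPPED set, prev_size now holds the distance back to the start of the mapping, and the stored chunk size deliberately leaves out the trailing CHUNK_HDR_SZ, so prev_size + chunksize + CHUNK_HDR_SZ is exactly the mapped length. A self-contained toy that mimics the scheme outside of malloc; the simplified chunk struct, the zero padding, the fixed 4 KiB page size and the mmap flags are assumptions for illustration:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

typedef struct { size_t prev_size; size_t size; } chunk;   /* toy header */
enum { HDR_SZ = sizeof (chunk) };

#define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((size_t) ((a) - 1)))
#define MMAP_BASE(p)     ((void *) ((char *) (p) - (p)->prev_size))
#define MMAP_SIZE(p)     ((p)->prev_size + (p)->size + HDR_SZ)

int
main (void)
{
  size_t pagesize = 4096;     /* assume 4 KiB pages for the example */
  size_t padding = 0;         /* the MALLOC_ALIGNMENT == CHUNK_HDR_SZ case */
  size_t nb = 100000;         /* requested chunk size */
  size_t total = ALIGN_UP (nb + padding + HDR_SZ, pagesize);

  char *mm = mmap (NULL, total, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert (mm != MAP_FAILED);

  /* Establish the invariant the way the patched sysmalloc_mmap does.  */
  chunk *p = (chunk *) (mm + padding);
  p->prev_size = padding;                 /* offset back to the mmap base */
  p->size = total - padding - HDR_SZ;     /* excludes the trailing header */

  /* ... and recover the whole mapping from the chunk header alone.  */
  assert (MMAP_BASE (p) == (void *) mm);
  assert (MMAP_SIZE (p) == total);
  return munmap (MMAP_BASE (p), MMAP_SIZE (p));
}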
@@ -1455,7 +1459,7 @@ checked_request2size (size_t req) __nonnull (1)
#define memsize(p) \
(__MTAG_GRANULE_SIZE > SIZE_SZ && __glibc_unlikely (mtag_enabled) ? \
chunksize (p) - CHUNK_HDR_SZ : \
- chunksize (p) - CHUNK_HDR_SZ + (chunk_is_mmapped (p) ? 0 : SIZE_SZ))
+ chunksize (p) - CHUNK_HDR_SZ + SIZE_SZ)
/* If memory tagging is enabled the layout changes to accommodate the granule
size, this is wasteful for small allocations so not done by default.
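Note (not part of the patch): with the trailing CHUNK_HDR_SZ folded out of the stored size, memsize no longer needs an mmapped special case; in the non-MTE branch the usable size is chunksize - CHUNK_HDR_SZ + SIZE_SZ for every chunk. A one-line worked instance under the usual 64-bit values (SIZE_SZ = 8 and CHUNK_HDR_SZ = 16 are assumptions of the example):

#include <stddef.h>

enum { SIZE_SZ_EX = 8, CHUNK_HDR_SZ_EX = 16 };   /* illustrative 64-bit sizes */

/* Same expression for heap chunks and mmapped chunks alike.  */
#define MEMSIZE_EX(csz)  ((csz) - CHUNK_HDR_SZ_EX + SIZE_SZ_EX)

_Static_assert (MEMSIZE_EX (4096) == 4088,
                "a chunk whose chunksize() is 4 KiB exposes 4088 usable bytes");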
@@ -2102,7 +2106,7 @@ do_check_chunk (mstate av, mchunkptr p)
else
{
/* chunk is page-aligned */
- assert (((prev_size (p) + sz) & (GLRO (dl_pagesize) - 1)) == 0);
+ assert ((mmap_size (p) & (GLRO (dl_pagesize) - 1)) == 0);
/* mem is aligned */
assert (!misaligned_chunk (p));
}
@@ -2402,14 +2406,14 @@ do_check_malloc_state (mstate av)
/* Allocate a mmap chunk - used for large block sizes or as a fallback.
Round up size to nearest page. Add padding if MALLOC_ALIGNMENT is
- larger than CHUNK_HDR_SZ. Add SIZE_SZ at the end since there is no
- following chunk whose prev_size field could be used. */
+ larger than CHUNK_HDR_SZ. Add CHUNK_HDR_SZ at the end so that mmap
+ chunks have the same layout as regular chunks. */
static void *
sysmalloc_mmap (INTERNAL_SIZE_T nb, size_t pagesize, int extra_flags)
{
size_t padding = MALLOC_ALIGNMENT - CHUNK_HDR_SZ;
- size_t size = ALIGN_UP (nb + padding + SIZE_SZ, pagesize);
+ size_t size = ALIGN_UP (nb + padding + CHUNK_HDR_SZ, pagesize);
char *mm = (char *) MMAP (NULL, size,
mtag_mmap_flags | PROT_READ | PROT_WRITE,
@@ -2424,7 +2428,7 @@ sysmalloc_mmap (INTERNAL_SIZE_T nb, size_t pagesize, int extra_flags)
/* Store offset to start of mmap in prev_size. */
mchunkptr p = (mchunkptr) (mm + padding);
set_prev_size (p, padding);
- set_head (p, (size - padding) | IS_MMAPPED);
+ set_head (p, (size - padding - CHUNK_HDR_SZ) | IS_MMAPPED);
/* update statistics */
int new = atomic_fetch_add_relaxed (&mp_.n_mmaps, 1) + 1;
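Note (not part of the patch): the padding term is why prev_size is the natural place to record the mmap base; when MALLOC_ALIGNMENT exceeds CHUNK_HDR_SZ the chunk header does not sit at the start of the mapping. A small worked example, assuming a hypothetical 64-byte MALLOC_ALIGNMENT, a 16-byte CHUNK_HDR_SZ and 4 KiB pages:

#include <stddef.h>

enum
{
  PAGESIZE_EX = 4096,
  CHUNK_HDR_SZ_EX = 16,
  MALLOC_ALIGNMENT_EX = 64,                            /* hypothetical alignment */
  PADDING_EX = MALLOC_ALIGNMENT_EX - CHUNK_HDR_SZ_EX,  /* 48 */
  NB_EX = 100000,                                      /* chunk-size request */

  /* size = ALIGN_UP (nb + padding + CHUNK_HDR_SZ, pagesize)  */
  SIZE_EX = ((NB_EX + PADDING_EX + CHUNK_HDR_SZ_EX + PAGESIZE_EX - 1)
             / PAGESIZE_EX) * PAGESIZE_EX,             /* 102400 */

  /* set_prev_size (p, padding); set_head (p, size - padding - CHUNK_HDR_SZ)  */
  HEAD_EX = SIZE_EX - PADDING_EX - CHUNK_HDR_SZ_EX     /* 102336 */
};

/* mmap_size (p) = prev_size + chunksize + CHUNK_HDR_SZ recovers the mapping.  */
_Static_assert (PADDING_EX + HEAD_EX + CHUNK_HDR_SZ_EX == SIZE_EX,
                "the chunk header alone describes the whole mapping");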
@@ -2976,13 +2980,12 @@ static void
munmap_chunk (mchunkptr p)
{
size_t pagesize = GLRO (dl_pagesize);
- INTERNAL_SIZE_T size = chunksize (p);
assert (chunk_is_mmapped (p));
uintptr_t mem = (uintptr_t) chunk2mem (p);
- uintptr_t block = (uintptr_t) p - prev_size (p);
- size_t total_size = prev_size (p) + size;
+ uintptr_t block = (uintptr_t) mmap_base (p);
+ size_t total_size = mmap_size (p);
/* Unfortunately we have to do the compilers job by hand here. Normally
we would test BLOCK and TOTAL-SIZE separately for compliance with the
page size. But gcc does not recognize the optimization possibility
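Note (not part of the patch): the check this comment introduces folds two page-alignment tests into one expression, since OR-ing the base address and the length can only keep low bits set, never clear them. A tiny standalone illustration; the addresses and sizes are made up:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* One branch instead of two: the combined value is page aligned exactly
   when both inputs are.  */
static int
both_page_aligned (uintptr_t block, size_t total_size, size_t pagesize)
{
  return ((block | total_size) & (pagesize - 1)) == 0;
}

int
main (void)
{
  assert (both_page_aligned (0x7f000000, 0x41000, 4096));    /* both ok  */
  assert (!both_page_aligned (0x7f000010, 0x41000, 4096));   /* bad base */
  assert (!both_page_aligned (0x7f000000, 0x40ff0, 4096));   /* bad size */
  return 0;
}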
@@ -3013,15 +3016,15 @@ mremap_chunk (mchunkptr p, size_t new_size)
assert (chunk_is_mmapped (p));
- uintptr_t block = (uintptr_t) p - offset;
+ uintptr_t block = (uintptr_t) mmap_base (p);
uintptr_t mem = (uintptr_t) chunk2mem(p);
- size_t total_size = offset + size;
+ size_t total_size = mmap_size (p);
if (__glibc_unlikely ((block | total_size) & (pagesize - 1)) != 0
|| __glibc_unlikely (!powerof2 (mem & (pagesize - 1))))
malloc_printerr("mremap_chunk(): invalid pointer");
- /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
- new_size = ALIGN_UP (new_size + offset + SIZE_SZ, pagesize);
+ /* Note the extra CHUNK_HDR_SZ overhead as in mmap_chunk(). */
+ new_size = ALIGN_UP (new_size + offset + CHUNK_HDR_SZ, pagesize);
/* No need to remap if the number of pages does not change. */
if (total_size == new_size)
@@ -3040,7 +3043,7 @@ mremap_chunk (mchunkptr p, size_t new_size)
assert (!misaligned_chunk (p));
assert (prev_size (p) == offset);
- set_head (p, (new_size - offset) | IS_MMAPPED);
+ set_head (p, (new_size - offset - CHUNK_HDR_SZ) | IS_MMAPPED);
INTERNAL_SIZE_T new;
new = atomic_fetch_add_relaxed (&mp_.mmapped_mem, new_size - size - offset)
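Note (not part of the patch): after a successful mremap only the size word changes. The padding recorded in prev_size still describes the same front gap, so rewriting the head as new_size - offset - CHUNK_HDR_SZ is enough for mmap_size() to report the remapped length. A worked instance with illustrative numbers (48 bytes of padding, a mapping grown to 0x80000 bytes):

#include <stddef.h>

enum
{
  CHUNK_HDR_SZ_EX = 16,
  OFFSET_EX = 48,                 /* prev_size: unchanged by mremap  */
  NEW_SIZE_EX = 0x80000,          /* length of the remapped region   */
  NEW_HEAD_EX = NEW_SIZE_EX - OFFSET_EX - CHUNK_HDR_SZ_EX
};

/* mmap_size (p) = prev_size + chunksize + CHUNK_HDR_SZ == new mapping size.  */
_Static_assert (OFFSET_EX + NEW_HEAD_EX + CHUNK_HDR_SZ_EX == NEW_SIZE_EX,
                "the updated head keeps mmap_size() in sync with the mapping");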
@@ -3326,11 +3329,6 @@ tcache_init (void)
if (tcache_shutting_down)
return;
- /* Check minimum mmap chunk is larger than max tcache size. This means
- mmap chunks with their different layout are never added to tcache. */
- if (MAX_TCACHE_SMALL_SIZE >= GLRO (dl_pagesize) / 2)
- malloc_printerr ("max tcache size too large");
size_t bytes = sizeof (tcache_perthread_struct);
tcache = (tcache_perthread_struct *) __libc_malloc2 (bytes);
@@ -3480,7 +3478,6 @@ __libc_free (void *mem)
{
tc_idx = large_csize2tidx (size);
if (size >= MINSIZE
- && !chunk_is_mmapped (p)
&& __glibc_likely (tcache->num_slots[tc_idx] != 0))
return tcache_put_large (p, tc_idx);
}
@@ -5265,7 +5262,7 @@ musable (void *mem)
mchunkptr p = mem2chunk (mem);
if (chunk_is_mmapped (p))
- return chunksize (p) - CHUNK_HDR_SZ;
+ return memsize (p);
else if (inuse (p))
return memsize (p);