Remove mmap_unaligned.
Remove mmap_unaligned, which was used to heuristically decide whether to optimistically call mmap() in such a way that could reduce the total number of system calls. If I remember correctly, the intention of mmap_unaligned was to avoid always executing the slow path in the presence of ASLR. However, that reasoning seems to have been based on a flawed understanding of how ASLR actually works. Although ASLR apparently causes mmap() to ignore address requests, it does not cause total placement randomness, so there is a reasonable expectation that iterative mmap() calls will start returning chunk-aligned mappings once the first chunk has been properly aligned.
commit a8f8d7540d
parent 7ad54c1c30
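The reasoning above comes down to two ways of getting a chunk-aligned mapping out of mmap(): an optimistic fast path that maps exactly the requested size and hopes the result is aligned (or can be extended to an aligned boundary), and a reliable slow path that over-allocates by the alignment and trims the excess. The sketch below is not jemalloc code; it is a minimal illustration of the slow, always-correct variant, assuming POSIX mmap()/munmap(), an anonymous private mapping, a power-of-two alignment, and size/alignment that are multiples of the page size. The name aligned_map_slow is made up for this example.

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

/* Hypothetical helper, not part of jemalloc: return a size-byte mapping whose
 * address is a multiple of alignment (alignment must be a power of two, and
 * size/alignment multiples of the page size so partial munmap() is legal). */
static void *
aligned_map_slow(size_t size, size_t alignment)
{
    size_t alloc_size = size + alignment;    /* enough slack to realign */
    char *addr = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (addr == MAP_FAILED)
        return (NULL);

    /* Distance from the previous alignment boundary. */
    size_t offset = (size_t)((uintptr_t)addr & (alignment - 1));
    size_t lead = (offset == 0) ? 0 : alignment - offset;
    size_t trail = alloc_size - lead - size;

    if (lead != 0)
        munmap(addr, lead);                  /* trim unaligned head */
    if (trail != 0)
        munmap(addr + lead + size, trail);   /* trim excess tail */
    return (addr + lead);
}

jemalloc's chunk_alloc_mmap_slow() follows this same over-allocate-and-trim pattern; the commit's point is that once one chunk has been aligned this way, subsequent plain mmap() calls tend to return aligned addresses even with ASLR enabled, so the per-thread mmap_unaligned hint adds bookkeeping without buying much.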
include/jemalloc/internal/chunk.h

@@ -44,8 +44,7 @@ extern size_t arena_maxclass; /* Max size class for arenas. */
 
 void *chunk_alloc(size_t size, size_t alignment, bool base, bool *zero);
 void chunk_dealloc(void *chunk, size_t size, bool unmap);
-bool chunk_boot0(void);
-bool chunk_boot1(void);
+bool chunk_boot(void);
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
include/jemalloc/internal/chunk_mmap.h

@@ -14,8 +14,6 @@ void pages_purge(void *addr, size_t length);
 void *chunk_alloc_mmap(size_t size, size_t alignment, bool *zero);
 bool chunk_dealloc_mmap(void *chunk, size_t size);
 
-bool chunk_mmap_boot(void);
-
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
 #ifdef JEMALLOC_H_INLINES
include/jemalloc/internal/private_namespace.h

@@ -74,8 +74,7 @@
 #define chunk_alloc JEMALLOC_N(chunk_alloc)
 #define chunk_alloc_dss JEMALLOC_N(chunk_alloc_dss)
 #define chunk_alloc_mmap JEMALLOC_N(chunk_alloc_mmap)
-#define chunk_boot0 JEMALLOC_N(chunk_boot0)
-#define chunk_boot1 JEMALLOC_N(chunk_boot1)
+#define chunk_boot JEMALLOC_N(chunk_boot)
 #define chunk_dealloc JEMALLOC_N(chunk_dealloc)
 #define chunk_dealloc_mmap JEMALLOC_N(chunk_dealloc_mmap)
 #define chunk_dss_boot JEMALLOC_N(chunk_dss_boot)
@@ -83,7 +82,6 @@
 #define chunk_dss_postfork_parent JEMALLOC_N(chunk_dss_postfork_parent)
 #define chunk_dss_prefork JEMALLOC_N(chunk_dss_prefork)
 #define chunk_in_dss JEMALLOC_N(chunk_in_dss)
-#define chunk_mmap_boot JEMALLOC_N(chunk_mmap_boot)
 #define chunk_npages JEMALLOC_N(chunk_npages)
 #define chunks_mtx JEMALLOC_N(chunks_mtx)
 #define chunks_rtree JEMALLOC_N(chunks_rtree)
@@ -192,10 +190,6 @@
 #define malloc_write JEMALLOC_N(malloc_write)
 #define map_bias JEMALLOC_N(map_bias)
 #define mb_write JEMALLOC_N(mb_write)
-#define mmap_unaligned_tsd_boot JEMALLOC_N(mmap_unaligned_tsd_boot)
-#define mmap_unaligned_tsd_cleanup_wrapper JEMALLOC_N(mmap_unaligned_tsd_cleanup_wrapper)
-#define mmap_unaligned_tsd_get JEMALLOC_N(mmap_unaligned_tsd_get)
-#define mmap_unaligned_tsd_set JEMALLOC_N(mmap_unaligned_tsd_set)
 #define mutex_boot JEMALLOC_N(mutex_boot)
 #define narenas JEMALLOC_N(narenas)
 #define ncpus JEMALLOC_N(ncpus)
src/chunk.c (12 changed lines)

@@ -274,7 +274,7 @@ chunk_dealloc(void *chunk, size_t size, bool unmap)
 }
 
 bool
-chunk_boot0(void)
+chunk_boot(void)
 {
 
     /* Set variables according to the value of opt_lg_chunk. */
@@ -301,13 +301,3 @@ chunk_boot0(void)
 
     return (false);
 }
-
-bool
-chunk_boot1(void)
-{
-
-    if (chunk_mmap_boot())
-        return (true);
-
-    return (false);
-}
src/chunk_mmap.c

@@ -1,17 +1,6 @@
 #define JEMALLOC_CHUNK_MMAP_C_
 #include "jemalloc/internal/jemalloc_internal.h"
 
-/******************************************************************************/
-/* Data. */
-
-/*
- * Used by chunk_alloc_mmap() to decide whether to attempt the fast path and
- * potentially avoid some system calls.
- */
-malloc_tsd_data(static, mmap_unaligned, bool, false)
-malloc_tsd_funcs(JEMALLOC_INLINE, mmap_unaligned, bool, false,
-    malloc_tsd_no_cleanup)
-
 /******************************************************************************/
 /* Function prototypes for non-inline static functions. */
 
@@ -112,16 +101,6 @@ chunk_alloc_mmap_slow(size_t size, size_t alignment, bool unaligned, bool *zero)
     if (trailsize != 0)
         pages_unmap((void *)((uintptr_t)ret + size), trailsize);
 
-    /*
-     * If mmap() returned an aligned mapping, reset mmap_unaligned so that
-     * the next chunk_alloc_mmap() execution tries the fast allocation
-     * method.
-     */
-    if (unaligned == false && mmap_unaligned_booted) {
-        bool mu = false;
-        mmap_unaligned_tsd_set(&mu);
-    }
-
     assert(ret != NULL);
     *zero = true;
     return (ret);
@@ -131,6 +110,7 @@ void *
 chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
 {
     void *ret;
+    size_t offset;
 
     /*
      * Ideally, there would be a way to specify alignment to mmap() (like
@@ -152,44 +132,34 @@ chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
      *
      * Another possible confounding factor is address space layout
      * randomization (ASLR), which causes mmap(2) to disregard the
-     * requested address.  mmap_unaligned tracks whether the previous
-     * chunk_alloc_mmap() execution received any unaligned or relocated
-     * mappings, and if so, the current execution will immediately fall
-     * back to the slow method.  However, we keep track of whether the fast
-     * method would have succeeded, and if so, we make a note to try the
-     * fast method next time.
+     * requested address.  As such, repeatedly trying to extend unaligned
+     * mappings could result in an infinite loop, so if extension fails,
+     * immediately fall back to the reliable method of over-allocation
+     * followed by trimming.
      */
 
-    if (mmap_unaligned_booted && *mmap_unaligned_tsd_get() == false) {
-        size_t offset;
-
-        ret = pages_map(NULL, size);
-        if (ret == NULL)
-            return (NULL);
-
-        offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
-        if (offset != 0) {
-            bool mu = true;
-            mmap_unaligned_tsd_set(&mu);
-            /* Try to extend chunk boundary. */
-            if (pages_map((void *)((uintptr_t)ret + size),
-                chunksize - offset) == NULL) {
-                /*
-                 * Extension failed.  Clean up, then revert to
-                 * the reliable-but-expensive method.
-                 */
-                pages_unmap(ret, size);
-                return (chunk_alloc_mmap_slow(size, alignment,
-                    true, zero));
-            } else {
-                /* Clean up unneeded leading space. */
-                pages_unmap(ret, chunksize - offset);
-                ret = (void *)((uintptr_t)ret + (chunksize -
-                    offset));
-            }
+    ret = pages_map(NULL, size);
+    if (ret == NULL)
+        return (NULL);
+
+    offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
+    if (offset != 0) {
+        /* Try to extend chunk boundary. */
+        if (pages_map((void *)((uintptr_t)ret + size), chunksize -
+            offset) == NULL) {
+            /*
+             * Extension failed.  Clean up, then fall back to the
+             * reliable-but-expensive method.
+             */
+            pages_unmap(ret, size);
+            return (chunk_alloc_mmap_slow(size, alignment, true,
+                zero));
+        } else {
+            /* Clean up unneeded leading space. */
+            pages_unmap(ret, chunksize - offset);
+            ret = (void *)((uintptr_t)ret + (chunksize - offset));
         }
-    } else
-        return (chunk_alloc_mmap_slow(size, alignment, false, zero));
+    }
 
     assert(ret != NULL);
     *zero = true;
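A side note on the fast path above: ALIGNMENT_ADDR2OFFSET() reports how far the returned address sits past the previous chunk boundary, and that value drives the extend-or-trim decision. A self-contained sketch of that computation for power-of-two alignments follows; chunk_offset() is a made-up name for illustration, not jemalloc's macro.

#include <stddef.h>
#include <stdint.h>

/* How far addr lies past the previous alignment boundary (alignment must be
 * a power of two).  offset == 0 means the mapping is already chunk-aligned;
 * otherwise the new code either extends the mapping by (chunksize - offset)
 * and unmaps the misaligned head, or falls back to over-allocate-and-trim. */
static inline size_t
chunk_offset(const void *addr, size_t alignment)
{
    return ((size_t)((uintptr_t)addr & (alignment - 1)));
}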
@@ -205,21 +175,3 @@ chunk_dealloc_mmap(void *chunk, size_t size)
 
     return (config_munmap == false);
 }
-
-bool
-chunk_mmap_boot(void)
-{
-
-    /*
-     * XXX For the non-TLS implementation of tsd, the first access from
-     * each thread causes memory allocation.  The result is a bootstrapping
-     * problem for this particular use case, so for now just disable it by
-     * leaving it in an unbooted state.
-     */
-#ifdef JEMALLOC_TLS
-    if (mmap_unaligned_tsd_boot())
-        return (true);
-#endif
-
-    return (false);
-}
src/jemalloc.c

@@ -634,7 +634,7 @@ malloc_init_hard(void)
         return (true);
     }
 
-    if (chunk_boot0()) {
+    if (chunk_boot()) {
         malloc_mutex_unlock(&init_lock);
         return (true);
     }
@@ -711,11 +711,6 @@ malloc_init_hard(void)
     ncpus = malloc_ncpus();
     malloc_mutex_lock(&init_lock);
 
-    if (chunk_boot1()) {
-        malloc_mutex_unlock(&init_lock);
-        return (true);
-    }
-
     if (mutex_boot()) {
         malloc_mutex_unlock(&init_lock);
         return (true);