Remove mmap_unaligned.

Remove mmap_unaligned, which was used to heuristically decide whether to
optimistically call mmap() in such a way that could reduce the total
number of system calls.  If I remember correctly, the intention of
mmap_unaligned was to avoid always executing the slow path in the
presence of ASLR.  However, that reasoning seems to have been based on a
flawed understanding of how ASLR actually works.  Although ASLR
apparently causes mmap() to ignore address requests, it does not cause
total placement randomness, so there is a reasonable expectation that
iterative mmap() calls will start returning chunk-aligned mappings once
the first chunk has been properly aligned.
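
As a rough illustration of the trade-off described above (this sketch is not jemalloc code; CHUNK_SIZE, alloc_chunk_slow(), and alloc_chunk_fast() are made-up names, and Linux-style mmap(2) flags are assumed): the reliable method over-allocates and trims both ends, while the optimistic method maps exactly one chunk and, if the result is unaligned, tries to extend it past the next chunk boundary before falling back.

    #define _DEFAULT_SOURCE
    #include <stdint.h>
    #include <sys/mman.h>

    #define CHUNK_SIZE ((size_t)(4 << 20))  /* assumed chunk size (4 MiB) */

    /* Reliable but slower: over-allocate, then unmap the misaligned ends. */
    static void *
    alloc_chunk_slow(void)
    {
        size_t alloc_size = CHUNK_SIZE * 2;
        void *p = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return (NULL);
        uintptr_t addr = (uintptr_t)p;
        size_t lead = (CHUNK_SIZE - (addr & (CHUNK_SIZE - 1))) &
            (CHUNK_SIZE - 1);
        size_t trail = alloc_size - lead - CHUNK_SIZE;
        if (lead != 0)
            munmap(p, lead);                        /* trim leading space */
        if (trail != 0)
            munmap((void *)(addr + lead + CHUNK_SIZE), trail); /* trailing */
        return ((void *)(addr + lead));
    }

    /*
     * Optimistic: map exactly one chunk; if it is unaligned, try to extend
     * the mapping past the next chunk boundary and trim the front instead.
     */
    static void *
    alloc_chunk_fast(void)
    {
        void *p = mmap(NULL, CHUNK_SIZE, PROT_READ | PROT_WRITE,
            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return (NULL);
        uintptr_t addr = (uintptr_t)p;
        size_t offset = addr & (CHUNK_SIZE - 1);    /* distance past boundary */
        if (offset == 0)
            return (p);                             /* already chunk-aligned */
        /* Request the missing tail at a specific address (kernel may refuse). */
        void *tail = mmap((void *)(addr + CHUNK_SIZE), CHUNK_SIZE - offset,
            PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (tail != (void *)(addr + CHUNK_SIZE)) {
            /* Extension failed: clean up and take the reliable path. */
            if (tail != MAP_FAILED)
                munmap(tail, CHUNK_SIZE - offset);
            munmap(p, CHUNK_SIZE);
            return (alloc_chunk_slow());
        }
        munmap(p, CHUNK_SIZE - offset);             /* trim leading space */
        return ((void *)(addr + (CHUNK_SIZE - offset)));
    }

The optimistic path costs a single mmap() whenever the kernel happens to return an aligned or extendable mapping, which is the common case that chunk_alloc_mmap() now relies on unconditionally.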
Jason Evans
2012-04-21 19:17:21 -07:00
parent 7ad54c1c30
commit a8f8d7540d
6 changed files with 29 additions and 101 deletions

src/chunk.c

@@ -274,7 +274,7 @@ chunk_dealloc(void *chunk, size_t size, bool unmap)
 }

 bool
-chunk_boot0(void)
+chunk_boot(void)
 {

     /* Set variables according to the value of opt_lg_chunk. */
@@ -301,13 +301,3 @@ chunk_boot0(void)

     return (false);
 }
-
-bool
-chunk_boot1(void)
-{
-
-    if (chunk_mmap_boot())
-        return (true);
-
-    return (false);
-}

src/chunk_mmap.c

@@ -1,17 +1,6 @@
 #define JEMALLOC_CHUNK_MMAP_C_
 #include "jemalloc/internal/jemalloc_internal.h"

 /******************************************************************************/
-/* Data. */
-
-/*
- * Used by chunk_alloc_mmap() to decide whether to attempt the fast path and
- * potentially avoid some system calls.
- */
-malloc_tsd_data(static, mmap_unaligned, bool, false)
-malloc_tsd_funcs(JEMALLOC_INLINE, mmap_unaligned, bool, false,
-    malloc_tsd_no_cleanup)
-
-/******************************************************************************/
 /* Function prototypes for non-inline static functions. */
@@ -112,16 +101,6 @@ chunk_alloc_mmap_slow(size_t size, size_t alignment, bool unaligned, bool *zero)
     if (trailsize != 0)
         pages_unmap((void *)((uintptr_t)ret + size), trailsize);

-    /*
-     * If mmap() returned an aligned mapping, reset mmap_unaligned so that
-     * the next chunk_alloc_mmap() execution tries the fast allocation
-     * method.
-     */
-    if (unaligned == false && mmap_unaligned_booted) {
-        bool mu = false;
-        mmap_unaligned_tsd_set(&mu);
-    }
-
     assert(ret != NULL);
     *zero = true;
     return (ret);
@@ -131,6 +110,7 @@ void *
 chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
 {
     void *ret;
+    size_t offset;

     /*
      * Ideally, there would be a way to specify alignment to mmap() (like
@@ -152,44 +132,34 @@ chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
      *
      * Another possible confounding factor is address space layout
      * randomization (ASLR), which causes mmap(2) to disregard the
-     * requested address.  mmap_unaligned tracks whether the previous
-     * chunk_alloc_mmap() execution received any unaligned or relocated
-     * mappings, and if so, the current execution will immediately fall
-     * back to the slow method.  However, we keep track of whether the fast
-     * method would have succeeded, and if so, we make a note to try the
-     * fast method next time.
+     * requested address.  As such, repeatedly trying to extend unaligned
+     * mappings could result in an infinite loop, so if extension fails,
+     * immediately fall back to the reliable method of over-allocation
+     * followed by trimming.
      */

-    if (mmap_unaligned_booted && *mmap_unaligned_tsd_get() == false) {
-        size_t offset;
-
-        ret = pages_map(NULL, size);
-        if (ret == NULL)
-            return (NULL);
-
-        offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
-        if (offset != 0) {
-            bool mu = true;
-            mmap_unaligned_tsd_set(&mu);
-            /* Try to extend chunk boundary. */
-            if (pages_map((void *)((uintptr_t)ret + size),
-                chunksize - offset) == NULL) {
-                /*
-                 * Extension failed.  Clean up, then revert to
-                 * the reliable-but-expensive method.
-                 */
-                pages_unmap(ret, size);
-                return (chunk_alloc_mmap_slow(size, alignment,
-                    true, zero));
-            } else {
-                /* Clean up unneeded leading space. */
-                pages_unmap(ret, chunksize - offset);
-                ret = (void *)((uintptr_t)ret + (chunksize -
-                    offset));
-            }
-        }
-    } else
-        return (chunk_alloc_mmap_slow(size, alignment, false, zero));
+    ret = pages_map(NULL, size);
+    if (ret == NULL)
+        return (NULL);
+
+    offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
+    if (offset != 0) {
+        /* Try to extend chunk boundary. */
+        if (pages_map((void *)((uintptr_t)ret + size), chunksize -
+            offset) == NULL) {
+            /*
+             * Extension failed.  Clean up, then fall back to the
+             * reliable-but-expensive method.
+             */
+            pages_unmap(ret, size);
+            return (chunk_alloc_mmap_slow(size, alignment, true,
+                zero));
+        } else {
+            /* Clean up unneeded leading space. */
+            pages_unmap(ret, chunksize - offset);
+            ret = (void *)((uintptr_t)ret + (chunksize - offset));
+        }
+    }

     assert(ret != NULL);
     *zero = true;
@@ -205,21 +175,3 @@ chunk_dealloc_mmap(void *chunk, size_t size)

     return (config_munmap == false);
 }
-
-bool
-chunk_mmap_boot(void)
-{
-
-    /*
-     * XXX For the non-TLS implementation of tsd, the first access from
-     * each thread causes memory allocation.  The result is a bootstrapping
-     * problem for this particular use case, so for now just disable it by
-     * leaving it in an unbooted state.
-     */
-#ifdef JEMALLOC_TLS
-    if (mmap_unaligned_tsd_boot())
-        return (true);
-#endif
-
-    return (false);
-}
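
For context on the XXX note in the removed chunk_mmap_boot(): a minimal sketch, assuming a pthread_key_t-based fallback for builds where TLS (__thread) is unavailable (this is not jemalloc's malloc_tsd implementation; the names are hypothetical), of why the first tsd access from a thread allocates memory and therefore cannot be relied on while the allocator itself is still bootstrapping.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdlib.h>

    static pthread_key_t mmap_unaligned_key;    /* hypothetical key */
    static bool mmap_unaligned_key_booted = false;

    static bool
    mmap_unaligned_tsd_boot_sketch(void)
    {
        if (pthread_key_create(&mmap_unaligned_key, free) != 0)
            return (true);                      /* error */
        mmap_unaligned_key_booted = true;
        return (false);
    }

    static bool *
    mmap_unaligned_tsd_get_sketch(void)
    {
        bool *val;

        /* Left unbooted: callers must treat the fast path as unavailable. */
        if (mmap_unaligned_key_booted == false)
            return (NULL);
        val = pthread_getspecific(mmap_unaligned_key);
        if (val == NULL) {
            /*
             * First access from this thread allocates backing storage.
             * Inside a malloc implementation this re-enters the allocator
             * before initialization is complete, which is the bootstrapping
             * problem the removed comment describes.
             */
            val = malloc(sizeof(bool));
            if (val == NULL)
                return (NULL);
            *val = false;
            pthread_setspecific(mmap_unaligned_key, val);
        }
        return (val);
    }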

src/jemalloc.c

@@ -634,7 +634,7 @@ malloc_init_hard(void)
         return (true);
     }

-    if (chunk_boot0()) {
+    if (chunk_boot()) {
         malloc_mutex_unlock(&init_lock);
         return (true);
     }
@@ -711,11 +711,6 @@ malloc_init_hard(void)
     ncpus = malloc_ncpus();
     malloc_mutex_lock(&init_lock);

-    if (chunk_boot1()) {
-        malloc_mutex_unlock(&init_lock);
-        return (true);
-    }
-
     if (mutex_boot()) {
         malloc_mutex_unlock(&init_lock);
         return (true);