diff --git a/include/jemalloc/internal/chunk.h b/include/jemalloc/internal/chunk.h
index e047c2b1..8fb1fe6d 100644
--- a/include/jemalloc/internal/chunk.h
+++ b/include/jemalloc/internal/chunk.h
@@ -44,8 +44,7 @@ extern size_t arena_maxclass; /* Max size class for arenas. */
 
 void *chunk_alloc(size_t size, size_t alignment, bool base, bool *zero);
 void chunk_dealloc(void *chunk, size_t size, bool unmap);
-bool chunk_boot0(void);
-bool chunk_boot1(void);
+bool chunk_boot(void);
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
diff --git a/include/jemalloc/internal/chunk_mmap.h b/include/jemalloc/internal/chunk_mmap.h
index 8224430a..b29f39e9 100644
--- a/include/jemalloc/internal/chunk_mmap.h
+++ b/include/jemalloc/internal/chunk_mmap.h
@@ -14,8 +14,6 @@ void pages_purge(void *addr, size_t length);
 
 void *chunk_alloc_mmap(size_t size, size_t alignment, bool *zero);
 bool chunk_dealloc_mmap(void *chunk, size_t size);
-bool chunk_mmap_boot(void);
-
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
 #ifdef JEMALLOC_H_INLINES
diff --git a/include/jemalloc/internal/private_namespace.h b/include/jemalloc/internal/private_namespace.h
index 15fe3c51..bb1b63e9 100644
--- a/include/jemalloc/internal/private_namespace.h
+++ b/include/jemalloc/internal/private_namespace.h
@@ -74,8 +74,7 @@
 #define chunk_alloc JEMALLOC_N(chunk_alloc)
 #define chunk_alloc_dss JEMALLOC_N(chunk_alloc_dss)
 #define chunk_alloc_mmap JEMALLOC_N(chunk_alloc_mmap)
-#define chunk_boot0 JEMALLOC_N(chunk_boot0)
-#define chunk_boot1 JEMALLOC_N(chunk_boot1)
+#define chunk_boot JEMALLOC_N(chunk_boot)
 #define chunk_dealloc JEMALLOC_N(chunk_dealloc)
 #define chunk_dealloc_mmap JEMALLOC_N(chunk_dealloc_mmap)
 #define chunk_dss_boot JEMALLOC_N(chunk_dss_boot)
@@ -83,7 +82,6 @@
 #define chunk_dss_postfork_parent JEMALLOC_N(chunk_dss_postfork_parent)
 #define chunk_dss_prefork JEMALLOC_N(chunk_dss_prefork)
 #define chunk_in_dss JEMALLOC_N(chunk_in_dss)
-#define chunk_mmap_boot JEMALLOC_N(chunk_mmap_boot)
 #define chunk_npages JEMALLOC_N(chunk_npages)
 #define chunks_mtx JEMALLOC_N(chunks_mtx)
 #define chunks_rtree JEMALLOC_N(chunks_rtree)
@@ -192,10 +190,6 @@
 #define malloc_write JEMALLOC_N(malloc_write)
 #define map_bias JEMALLOC_N(map_bias)
 #define mb_write JEMALLOC_N(mb_write)
-#define mmap_unaligned_tsd_boot JEMALLOC_N(mmap_unaligned_tsd_boot)
-#define mmap_unaligned_tsd_cleanup_wrapper JEMALLOC_N(mmap_unaligned_tsd_cleanup_wrapper)
-#define mmap_unaligned_tsd_get JEMALLOC_N(mmap_unaligned_tsd_get)
-#define mmap_unaligned_tsd_set JEMALLOC_N(mmap_unaligned_tsd_set)
 #define mutex_boot JEMALLOC_N(mutex_boot)
 #define narenas JEMALLOC_N(narenas)
 #define ncpus JEMALLOC_N(ncpus)
diff --git a/src/chunk.c b/src/chunk.c
index 0fccd0ce..5426b027 100644
--- a/src/chunk.c
+++ b/src/chunk.c
@@ -274,7 +274,7 @@ chunk_dealloc(void *chunk, size_t size, bool unmap)
 }
 
 bool
-chunk_boot0(void)
+chunk_boot(void)
 {
 
 	/* Set variables according to the value of opt_lg_chunk. */
@@ -301,13 +301,3 @@ chunk_boot0(void)
 
 	return (false);
 }
-
-bool
-chunk_boot1(void)
-{
-
-	if (chunk_mmap_boot())
-		return (true);
-
-	return (false);
-}
diff --git a/src/chunk_mmap.c b/src/chunk_mmap.c
index 126406ae..9ff7480a 100644
--- a/src/chunk_mmap.c
+++ b/src/chunk_mmap.c
@@ -1,17 +1,6 @@
 #define JEMALLOC_CHUNK_MMAP_C_
 #include "jemalloc/internal/jemalloc_internal.h"
 
-/******************************************************************************/
-/* Data. */
-
-/*
- * Used by chunk_alloc_mmap() to decide whether to attempt the fast path and
- * potentially avoid some system calls.
- */
-malloc_tsd_data(static, mmap_unaligned, bool, false)
-malloc_tsd_funcs(JEMALLOC_INLINE, mmap_unaligned, bool, false,
-    malloc_tsd_no_cleanup)
-
 /******************************************************************************/
 /* Function prototypes for non-inline static functions. */
 
@@ -112,16 +101,6 @@ chunk_alloc_mmap_slow(size_t size, size_t alignment, bool unaligned, bool *zero)
 	if (trailsize != 0)
 		pages_unmap((void *)((uintptr_t)ret + size), trailsize);
 
-	/*
-	 * If mmap() returned an aligned mapping, reset mmap_unaligned so that
-	 * the next chunk_alloc_mmap() execution tries the fast allocation
-	 * method.
-	 */
-	if (unaligned == false && mmap_unaligned_booted) {
-		bool mu = false;
-		mmap_unaligned_tsd_set(&mu);
-	}
-
 	assert(ret != NULL);
 	*zero = true;
 	return (ret);
@@ -131,6 +110,7 @@ void *
 chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
 {
 	void *ret;
+	size_t offset;
 
 	/*
 	 * Ideally, there would be a way to specify alignment to mmap() (like
@@ -152,44 +132,34 @@ chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
 	 *
 	 * Another possible confounding factor is address space layout
	 * randomization (ASLR), which causes mmap(2) to disregard the
-	 * requested address. mmap_unaligned tracks whether the previous
-	 * chunk_alloc_mmap() execution received any unaligned or relocated
-	 * mappings, and if so, the current execution will immediately fall
-	 * back to the slow method. However, we keep track of whether the fast
-	 * method would have succeeded, and if so, we make a note to try the
-	 * fast method next time.
+	 * requested address. As such, repeatedly trying to extend unaligned
+	 * mappings could result in an infinite loop, so if extension fails,
+	 * immediately fall back to the reliable method of over-allocation
+	 * followed by trimming.
 	 */
 
-	if (mmap_unaligned_booted && *mmap_unaligned_tsd_get() == false) {
-		size_t offset;
+	ret = pages_map(NULL, size);
+	if (ret == NULL)
+		return (NULL);
 
-		ret = pages_map(NULL, size);
-		if (ret == NULL)
-			return (NULL);
-
-		offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
-		if (offset != 0) {
-			bool mu = true;
-			mmap_unaligned_tsd_set(&mu);
-			/* Try to extend chunk boundary. */
-			if (pages_map((void *)((uintptr_t)ret + size),
-			    chunksize - offset) == NULL) {
-				/*
-				 * Extension failed. Clean up, then revert to
-				 * the reliable-but-expensive method.
-				 */
-				pages_unmap(ret, size);
-				return (chunk_alloc_mmap_slow(size, alignment,
-				    true, zero));
-			} else {
-				/* Clean up unneeded leading space. */
-				pages_unmap(ret, chunksize - offset);
-				ret = (void *)((uintptr_t)ret + (chunksize -
-				    offset));
-			}
+	offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
+	if (offset != 0) {
+		/* Try to extend chunk boundary. */
+		if (pages_map((void *)((uintptr_t)ret + size), chunksize -
+		    offset) == NULL) {
+			/*
+			 * Extension failed. Clean up, then fall back to the
+			 * reliable-but-expensive method.
+			 */
+			pages_unmap(ret, size);
+			return (chunk_alloc_mmap_slow(size, alignment, true,
+			    zero));
+		} else {
+			/* Clean up unneeded leading space. */
+			pages_unmap(ret, chunksize - offset);
+			ret = (void *)((uintptr_t)ret + (chunksize - offset));
 		}
-	} else
-		return (chunk_alloc_mmap_slow(size, alignment, false, zero));
+	}
 
 	assert(ret != NULL);
 	*zero = true;
@@ -205,21 +175,3 @@ chunk_dealloc_mmap(void *chunk, size_t size)
 
 	return (config_munmap == false);
 }
-
-bool
-chunk_mmap_boot(void)
-{
-
-	/*
-	 * XXX For the non-TLS implementation of tsd, the first access from
-	 * each thread causes memory allocation. The result is a bootstrapping
-	 * problem for this particular use case, so for now just disable it by
-	 * leaving it in an unbooted state.
-	 */
-#ifdef JEMALLOC_TLS
-	if (mmap_unaligned_tsd_boot())
-		return (true);
-#endif
-
-	return (false);
-}
diff --git a/src/jemalloc.c b/src/jemalloc.c
index 00c2b23c..f9c89168 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -634,7 +634,7 @@ malloc_init_hard(void)
 		return (true);
 	}
 
-	if (chunk_boot0()) {
+	if (chunk_boot()) {
 		malloc_mutex_unlock(&init_lock);
 		return (true);
 	}
@@ -711,11 +711,6 @@ malloc_init_hard(void)
 	ncpus = malloc_ncpus();
 	malloc_mutex_lock(&init_lock);
 
-	if (chunk_boot1()) {
-		malloc_mutex_unlock(&init_lock);
-		return (true);
-	}
-
 	if (mutex_boot()) {
 		malloc_mutex_unlock(&init_lock);
 		return (true);
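
The following standalone sketch (not part of the patch and not jemalloc code) illustrates the allocation strategy the patch leaves in chunk_alloc_mmap(): optimistically mmap() exactly size bytes, and if the result is misaligned, try to extend the mapping so an aligned boundary falls inside it and trim the leading slack; if the extension cannot be placed contiguously, fall back to the reliable-but-expensive over-allocate-and-trim method that chunk_alloc_mmap_slow() provides. The helper names (map_pages, alloc_aligned, alloc_aligned_slow) are invented for illustration, raw mmap()/munmap() stand in for jemalloc's pages_map()/pages_unmap(), and "alignment - offset" plays the role of "chunksize - offset", assuming (as in the chunk path) that the requested alignment equals the chunk size and is a multiple of the page size.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

/*
 * Illustrative sketch only; helper names are invented and error handling is
 * minimal. Assumes a POSIX system with MAP_ANONYMOUS and an alignment that
 * is a power of two and a multiple of the page size.
 */

static void *
map_pages(void *hint, size_t len)
{
	void *p = mmap(hint, len, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return (NULL);
	if (hint != NULL && p != hint) {
		/* The kernel ignored the hint; give the mapping back. */
		munmap(p, len);
		return (NULL);
	}
	return (p);
}

/* Reliable-but-expensive method: over-allocate, then trim both ends. */
static void *
alloc_aligned_slow(size_t size, size_t alignment)
{
	size_t offset;
	void *ret = map_pages(NULL, size + alignment);

	if (ret == NULL)
		return (NULL);
	offset = (uintptr_t)ret & (alignment - 1);
	if (offset != 0) {
		/* Trim leading space up to the aligned boundary... */
		munmap(ret, alignment - offset);
		ret = (void *)((uintptr_t)ret + (alignment - offset));
		/* ...and whatever is left past ret + size. */
		munmap((void *)((uintptr_t)ret + size), offset);
	} else {
		/* Already aligned; the whole tail is excess. */
		munmap((void *)((uintptr_t)ret + size), alignment);
	}
	return (ret);
}

/* Optimistic method, mirroring the simplified chunk_alloc_mmap() flow. */
static void *
alloc_aligned(size_t size, size_t alignment)
{
	size_t offset;
	void *ret = map_pages(NULL, size);

	if (ret == NULL)
		return (NULL);
	offset = (uintptr_t)ret & (alignment - 1);
	if (offset != 0) {
		/* Try to extend the mapping past the next aligned boundary. */
		if (map_pages((void *)((uintptr_t)ret + size),
		    alignment - offset) == NULL) {
			/* Extension failed; clean up and over-allocate. */
			munmap(ret, size);
			return (alloc_aligned_slow(size, alignment));
		}
		/* Clean up unneeded leading space. */
		munmap(ret, alignment - offset);
		ret = (void *)((uintptr_t)ret + (alignment - offset));
	}
	return (ret);
}

int
main(void)
{
	size_t size = 4 << 20, alignment = 4 << 20; /* 4 MiB "chunks" */
	void *p = alloc_aligned(size, alignment);

	assert(p != NULL && ((uintptr_t)p & (alignment - 1)) == 0);
	memset(p, 0xa5, size);
	printf("aligned mapping at %p\n", p);
	munmap(p, size);
	return (0);
}

The hint check in map_pages() reflects why extension can fail even when address space is plentiful: without MAP_FIXED the kernel may place the mapping elsewhere (for example under ASLR), in which case the sketch releases the mapping and falls back to over-allocation, matching the single-fallback behavior the patch adopts in place of the removed mmap_unaligned thread-specific state.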