diff --git a/include/jemalloc/internal/arena.h b/include/jemalloc/internal/arena.h
index 41df11fd..03e3f3ce 100644
--- a/include/jemalloc/internal/arena.h
+++ b/include/jemalloc/internal/arena.h
@@ -373,7 +373,7 @@ void	arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
 void	*arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
     size_t extra, bool zero);
 void	*arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
-    size_t alignment, bool zero);
+    size_t alignment, bool zero, bool try_tcache);
 bool	arena_new(arena_t *arena, unsigned ind);
 void	arena_boot(void);
 void	arena_prefork(arena_t *arena);
@@ -390,9 +390,10 @@ unsigned	arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
     const void *ptr);
 prof_ctx_t	*arena_prof_ctx_get(const void *ptr);
 void	arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
-void	*arena_malloc(size_t size, bool zero);
+void	*arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache);
 void	*arena_malloc_prechosen(arena_t *arena, size_t size, bool zero);
-void	arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr);
+void	arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+    bool try_tcache);
 #endif
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
@@ -548,7 +549,7 @@ arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
 }
 
 JEMALLOC_INLINE void *
-arena_malloc(size_t size, bool zero)
+arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache)
 {
 	tcache_t *tcache;
 
@@ -556,20 +557,24 @@ arena_malloc(size_t size, bool zero)
 	assert(size <= arena_maxclass);
 
 	if (size <= SMALL_MAXCLASS) {
-		if ((tcache = tcache_get(true)) != NULL)
+		if (try_tcache && (tcache = tcache_get(true)) != NULL)
 			return (tcache_alloc_small(tcache, size, zero));
-		else
-			return (arena_malloc_small(choose_arena(), size, zero));
+		else {
+			return (arena_malloc_small(choose_arena(arena), size,
+			    zero));
+		}
 	} else {
 		/*
 		 * Initialize tcache after checking size in order to avoid
 		 * infinite recursion during tcache initialization.
 		 */
-		if (size <= tcache_maxclass && (tcache = tcache_get(true)) !=
-		    NULL)
+		if (try_tcache && size <= tcache_maxclass && (tcache =
+		    tcache_get(true)) != NULL)
 			return (tcache_alloc_large(tcache, size, zero));
-		else
-			return (arena_malloc_large(choose_arena(), size, zero));
+		else {
+			return (arena_malloc_large(choose_arena(arena), size,
+			    zero));
+		}
 	}
 }
 
@@ -587,11 +592,11 @@ arena_malloc_prechosen(arena_t *arena, size_t size, bool zero)
 }
 
 JEMALLOC_INLINE void
-arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
+arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr, bool try_tcache)
 {
 	size_t pageind;
 	arena_chunk_map_t *mapelm;
-	tcache_t *tcache = tcache_get(false);
+	tcache_t *tcache;
 
 	assert(arena != NULL);
 	assert(chunk->arena == arena);
@@ -603,7 +608,7 @@ arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
 	assert((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0);
 	if ((mapelm->bits & CHUNK_MAP_LARGE) == 0) {
 		/* Small allocation. */
-		if (tcache != NULL)
+		if (try_tcache && (tcache = tcache_get(false)) != NULL)
 			tcache_dalloc_small(tcache, ptr);
 		else {
 			arena_run_t *run;
@@ -630,7 +635,8 @@ arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
 
 		assert(((uintptr_t)ptr & PAGE_MASK) == 0);
 
-		if (size <= tcache_maxclass && tcache != NULL) {
+		if (try_tcache && size <= tcache_maxclass && (tcache =
+		    tcache_get(false)) != NULL) {
 			tcache_dalloc_large(tcache, ptr, size);
 		} else {
 			malloc_mutex_lock(&arena->lock);
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
index 0c22bfb7..c8e40198 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -399,7 +399,7 @@ malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, arena_t *)
 
 size_t	s2u(size_t size);
 size_t	sa2u(size_t size, size_t alignment, size_t *run_size_p);
-arena_t	*choose_arena(void);
+arena_t	*choose_arena(arena_t *arena);
 #endif
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
@@ -517,10 +517,13 @@ sa2u(size_t size, size_t alignment, size_t *run_size_p)
 
 /* Choose an arena based on a per-thread value. */
 JEMALLOC_INLINE arena_t *
-choose_arena(void)
+choose_arena(arena_t *arena)
 {
 	arena_t *ret;
 
+	if (arena != NULL)
+		return (arena);
+
 	if ((ret = *arenas_tsd_get()) == NULL) {
 		ret = choose_arena_hard();
 		assert(ret != NULL);
@@ -556,7 +559,7 @@ imalloc(size_t size)
 	assert(size != 0);
 
 	if (size <= arena_maxclass)
-		return (arena_malloc(size, false));
+		return (arena_malloc(NULL, size, false, true));
 	else
 		return (huge_malloc(size, false));
 }
@@ -566,7 +569,7 @@ icalloc(size_t size)
 {
 
 	if (size <= arena_maxclass)
-		return (arena_malloc(size, true));
+		return (arena_malloc(NULL, size, true, true));
 	else
 		return (huge_malloc(size, true));
 }
@@ -580,7 +583,7 @@ ipalloc(size_t usize, size_t alignment, bool zero)
 	assert(usize == sa2u(usize, alignment, NULL));
 
 	if (usize <= arena_maxclass && alignment <= PAGE)
-		ret = arena_malloc(usize, zero);
+		ret = arena_malloc(NULL, usize, zero, true);
 	else {
 		size_t run_size JEMALLOC_CC_SILENCE_INIT(0);
 
@@ -594,7 +597,7 @@ ipalloc(size_t usize, size_t alignment, bool zero)
 		 */
 		sa2u(usize, alignment, &run_size);
 		if (run_size <= arena_maxclass) {
-			ret = arena_palloc(choose_arena(), usize, run_size,
+			ret = arena_palloc(choose_arena(NULL), usize, run_size,
 			    alignment, zero);
 		} else if (alignment <= chunksize)
 			ret = huge_malloc(usize, zero);
@@ -647,7 +650,7 @@ idalloc(void *ptr)
 
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
 	if (chunk != ptr)
-		arena_dalloc(chunk->arena, chunk, ptr);
+		arena_dalloc(chunk->arena, chunk, ptr, true);
 	else
 		huge_dalloc(ptr, true);
 }
@@ -711,7 +714,7 @@ iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
 	} else {
 		if (size + extra <= arena_maxclass) {
 			return (arena_ralloc(ptr, oldsize, size, extra,
-			    alignment, zero));
+			    alignment, zero, true));
 		} else {
 			return (huge_ralloc(ptr, oldsize, size, extra,
 			    alignment, zero));
diff --git a/include/jemalloc/internal/tcache.h b/include/jemalloc/internal/tcache.h
index a1d9aae3..93e721d5 100644
--- a/include/jemalloc/internal/tcache.h
+++ b/include/jemalloc/internal/tcache.h
@@ -237,7 +237,7 @@ tcache_get(bool create)
 			tcache_enabled_set(false); /* Memoize. */
 			return (NULL);
 		}
-		return (tcache_create(choose_arena()));
+		return (tcache_create(choose_arena(NULL)));
 	}
 	if (tcache == TCACHE_STATE_PURGATORY) {
 		/*
diff --git a/src/arena.c b/src/arena.c
index b7e14228..64440996 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -1888,7 +1888,7 @@ arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
 
 void *
 arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
-    size_t alignment, bool zero)
+    size_t alignment, bool zero, bool try_tcache)
 {
 	void *ret;
 	size_t copysize;
@@ -1909,7 +1909,7 @@ arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
 			return (NULL);
 		ret = ipalloc(usize, alignment, zero);
 	} else
-		ret = arena_malloc(size + extra, zero);
+		ret = arena_malloc(NULL, size + extra, zero, try_tcache);
 
 	if (ret == NULL) {
 		if (extra == 0)
@@ -1921,7 +1921,7 @@ arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
 			return (NULL);
 		ret = ipalloc(usize, alignment, zero);
 	} else
-		ret = arena_malloc(size, zero);
+		ret = arena_malloc(NULL, size, zero, try_tcache);
 
 	if (ret == NULL)
 		return (NULL);
diff --git a/src/ctl.c b/src/ctl.c
index 2afca51a..6777688a 100644
--- a/src/ctl.c
+++ b/src/ctl.c
@@ -1016,7 +1016,7 @@ thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
 	int ret;
 	unsigned newind, oldind;
 
-	newind = oldind = choose_arena()->ind;
+	newind = oldind = choose_arena(NULL)->ind;
 	WRITE(newind, unsigned);
 	READ(oldind, unsigned);
 	if (newind != oldind) {
diff --git a/src/jemalloc.c b/src/jemalloc.c
index a6d2df57..690cf082 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -1487,7 +1487,6 @@ je_nallocm(size_t *rsize, size_t size, int flags)
  * End experimental functions.
  */
 /******************************************************************************/
-
 /*
  * The following functions are used by threading libraries for protection of
  * malloc during fork().
@@ -1552,3 +1551,55 @@ jemalloc_postfork_child(void)
 }
 
 /******************************************************************************/
+/*
+ * The following functions are used for TLS allocation/deallocation in static
+ * binaries on FreeBSD.  The primary difference between these and i[mcd]alloc()
+ * is that these avoid accessing TLS variables.
+ */
+
+static void *
+a0alloc(size_t size, bool zero)
+{
+
+	if (malloc_init())
+		return (NULL);
+
+	if (size == 0)
+		size = 1;
+
+	if (size <= arena_maxclass)
+		return (arena_malloc(arenas[0], size, zero, false));
+	else
+		return (huge_malloc(size, zero));
+}
+
+void *
+a0malloc(size_t size)
+{
+
+	return (a0alloc(size, false));
+}
+
+void *
+a0calloc(size_t num, size_t size)
+{
+
+	return (a0alloc(num * size, true));
+}
+
+void
+a0free(void *ptr)
+{
+	arena_chunk_t *chunk;
+
+	if (ptr == NULL)
+		return;
+
+	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+	if (chunk != ptr)
+		arena_dalloc(chunk->arena, chunk, ptr, false);
+	else
+		huge_dalloc(ptr, true);
+}
+
+/******************************************************************************/
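Note (illustrative sketch, not part of the patch): the a0*() entry points added above exist so that code running before thread-specific data is usable can still allocate; they pin arenas[0] and pass try_tcache=false, so nothing on their path reads TLS. The fragment below shows how a consumer such as FreeBSD's libc TLS bootstrap might call them. Only a0malloc(), a0calloc(), and a0free() come from this diff; the tls_block_* helper names are hypothetical.

#include <stddef.h>

/* Entry points added by this patch (exported by jemalloc). */
void	*a0malloc(size_t size);
void	*a0calloc(size_t num, size_t size);
void	a0free(void *ptr);

/*
 * Hypothetical TLS bootstrap helpers.  Because a0*() never touch TLS, they
 * are safe to call while the calling thread's TLS block is itself being
 * set up, where malloc()/free() could recurse into tcache_get().
 */
static void *
tls_block_alloc(size_t nelem, size_t elsize)
{

	/* Zeroed allocation; no thread cache, arena 0 only. */
	return (a0calloc(nelem, elsize));
}

static void
tls_block_free(void *block)
{

	/* a0free() tolerates NULL, mirroring free() semantics. */
	a0free(block);
}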