Add a0malloc(), a0calloc(), and a0free().

Add a0malloc(), a0calloc(), and a0free(), which are used by FreeBSD's
libc to allocate/deallocate TLS in static binaries.
This commit is contained in:
Jason Evans 2012-04-03 09:28:00 -07:00
parent 633aaff967
commit 01b3fe55ff
6 changed files with 89 additions and 29 deletions

View File

@ -373,7 +373,7 @@ void arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
void *arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, void *arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
size_t extra, bool zero); size_t extra, bool zero);
void *arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra, void *arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
size_t alignment, bool zero); size_t alignment, bool zero, bool try_tcache);
bool arena_new(arena_t *arena, unsigned ind); bool arena_new(arena_t *arena, unsigned ind);
void arena_boot(void); void arena_boot(void);
void arena_prefork(arena_t *arena); void arena_prefork(arena_t *arena);
@ -390,9 +390,10 @@ unsigned arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
const void *ptr); const void *ptr);
prof_ctx_t *arena_prof_ctx_get(const void *ptr); prof_ctx_t *arena_prof_ctx_get(const void *ptr);
void arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx); void arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
void *arena_malloc(size_t size, bool zero); void *arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache);
void *arena_malloc_prechosen(arena_t *arena, size_t size, bool zero); void *arena_malloc_prechosen(arena_t *arena, size_t size, bool zero);
void arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr); void arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr,
bool try_tcache);
#endif #endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_)) #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
@ -548,7 +549,7 @@ arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
} }
JEMALLOC_INLINE void * JEMALLOC_INLINE void *
arena_malloc(size_t size, bool zero) arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache)
{ {
tcache_t *tcache; tcache_t *tcache;
@ -556,20 +557,24 @@ arena_malloc(size_t size, bool zero)
assert(size <= arena_maxclass); assert(size <= arena_maxclass);
if (size <= SMALL_MAXCLASS) { if (size <= SMALL_MAXCLASS) {
if ((tcache = tcache_get(true)) != NULL) if (try_tcache && (tcache = tcache_get(true)) != NULL)
return (tcache_alloc_small(tcache, size, zero)); return (tcache_alloc_small(tcache, size, zero));
else else {
return (arena_malloc_small(choose_arena(), size, zero)); return (arena_malloc_small(choose_arena(arena), size,
zero));
}
} else { } else {
/* /*
* Initialize tcache after checking size in order to avoid * Initialize tcache after checking size in order to avoid
* infinite recursion during tcache initialization. * infinite recursion during tcache initialization.
*/ */
if (size <= tcache_maxclass && (tcache = tcache_get(true)) != if (try_tcache && size <= tcache_maxclass && (tcache =
NULL) tcache_get(true)) != NULL)
return (tcache_alloc_large(tcache, size, zero)); return (tcache_alloc_large(tcache, size, zero));
else else {
return (arena_malloc_large(choose_arena(), size, zero)); return (arena_malloc_large(choose_arena(arena), size,
zero));
}
} }
} }
@ -587,11 +592,11 @@ arena_malloc_prechosen(arena_t *arena, size_t size, bool zero)
} }
JEMALLOC_INLINE void JEMALLOC_INLINE void
arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr) arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr, bool try_tcache)
{ {
size_t pageind; size_t pageind;
arena_chunk_map_t *mapelm; arena_chunk_map_t *mapelm;
tcache_t *tcache = tcache_get(false); tcache_t *tcache;
assert(arena != NULL); assert(arena != NULL);
assert(chunk->arena == arena); assert(chunk->arena == arena);
@ -603,7 +608,7 @@ arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
assert((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0); assert((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0);
if ((mapelm->bits & CHUNK_MAP_LARGE) == 0) { if ((mapelm->bits & CHUNK_MAP_LARGE) == 0) {
/* Small allocation. */ /* Small allocation. */
if (tcache != NULL) if (try_tcache && (tcache = tcache_get(false)) != NULL)
tcache_dalloc_small(tcache, ptr); tcache_dalloc_small(tcache, ptr);
else { else {
arena_run_t *run; arena_run_t *run;
@ -630,7 +635,8 @@ arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
assert(((uintptr_t)ptr & PAGE_MASK) == 0); assert(((uintptr_t)ptr & PAGE_MASK) == 0);
if (size <= tcache_maxclass && tcache != NULL) { if (try_tcache && size <= tcache_maxclass && (tcache =
tcache_get(false)) != NULL) {
tcache_dalloc_large(tcache, ptr, size); tcache_dalloc_large(tcache, ptr, size);
} else { } else {
malloc_mutex_lock(&arena->lock); malloc_mutex_lock(&arena->lock);

View File

@ -399,7 +399,7 @@ malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, arena_t *)
size_t s2u(size_t size); size_t s2u(size_t size);
size_t sa2u(size_t size, size_t alignment, size_t *run_size_p); size_t sa2u(size_t size, size_t alignment, size_t *run_size_p);
arena_t *choose_arena(void); arena_t *choose_arena(arena_t *arena);
#endif #endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_)) #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
@ -517,10 +517,13 @@ sa2u(size_t size, size_t alignment, size_t *run_size_p)
/* Choose an arena based on a per-thread value. */ /* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t * JEMALLOC_INLINE arena_t *
choose_arena(void) choose_arena(arena_t *arena)
{ {
arena_t *ret; arena_t *ret;
if (arena != NULL)
return (arena);
if ((ret = *arenas_tsd_get()) == NULL) { if ((ret = *arenas_tsd_get()) == NULL) {
ret = choose_arena_hard(); ret = choose_arena_hard();
assert(ret != NULL); assert(ret != NULL);
@ -556,7 +559,7 @@ imalloc(size_t size)
assert(size != 0); assert(size != 0);
if (size <= arena_maxclass) if (size <= arena_maxclass)
return (arena_malloc(size, false)); return (arena_malloc(NULL, size, false, true));
else else
return (huge_malloc(size, false)); return (huge_malloc(size, false));
} }
@ -566,7 +569,7 @@ icalloc(size_t size)
{ {
if (size <= arena_maxclass) if (size <= arena_maxclass)
return (arena_malloc(size, true)); return (arena_malloc(NULL, size, true, true));
else else
return (huge_malloc(size, true)); return (huge_malloc(size, true));
} }
@ -580,7 +583,7 @@ ipalloc(size_t usize, size_t alignment, bool zero)
assert(usize == sa2u(usize, alignment, NULL)); assert(usize == sa2u(usize, alignment, NULL));
if (usize <= arena_maxclass && alignment <= PAGE) if (usize <= arena_maxclass && alignment <= PAGE)
ret = arena_malloc(usize, zero); ret = arena_malloc(NULL, usize, zero, true);
else { else {
size_t run_size JEMALLOC_CC_SILENCE_INIT(0); size_t run_size JEMALLOC_CC_SILENCE_INIT(0);
@ -594,7 +597,7 @@ ipalloc(size_t usize, size_t alignment, bool zero)
*/ */
sa2u(usize, alignment, &run_size); sa2u(usize, alignment, &run_size);
if (run_size <= arena_maxclass) { if (run_size <= arena_maxclass) {
ret = arena_palloc(choose_arena(), usize, run_size, ret = arena_palloc(choose_arena(NULL), usize, run_size,
alignment, zero); alignment, zero);
} else if (alignment <= chunksize) } else if (alignment <= chunksize)
ret = huge_malloc(usize, zero); ret = huge_malloc(usize, zero);
@ -647,7 +650,7 @@ idalloc(void *ptr)
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk != ptr) if (chunk != ptr)
arena_dalloc(chunk->arena, chunk, ptr); arena_dalloc(chunk->arena, chunk, ptr, true);
else else
huge_dalloc(ptr, true); huge_dalloc(ptr, true);
} }
@ -711,7 +714,7 @@ iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
} else { } else {
if (size + extra <= arena_maxclass) { if (size + extra <= arena_maxclass) {
return (arena_ralloc(ptr, oldsize, size, extra, return (arena_ralloc(ptr, oldsize, size, extra,
alignment, zero)); alignment, zero, true));
} else { } else {
return (huge_ralloc(ptr, oldsize, size, extra, return (huge_ralloc(ptr, oldsize, size, extra,
alignment, zero)); alignment, zero));

View File

@ -237,7 +237,7 @@ tcache_get(bool create)
tcache_enabled_set(false); /* Memoize. */ tcache_enabled_set(false); /* Memoize. */
return (NULL); return (NULL);
} }
return (tcache_create(choose_arena())); return (tcache_create(choose_arena(NULL)));
} }
if (tcache == TCACHE_STATE_PURGATORY) { if (tcache == TCACHE_STATE_PURGATORY) {
/* /*

View File

@ -1888,7 +1888,7 @@ arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
void * void *
arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra, arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
size_t alignment, bool zero) size_t alignment, bool zero, bool try_tcache)
{ {
void *ret; void *ret;
size_t copysize; size_t copysize;
@ -1909,7 +1909,7 @@ arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
return (NULL); return (NULL);
ret = ipalloc(usize, alignment, zero); ret = ipalloc(usize, alignment, zero);
} else } else
ret = arena_malloc(size + extra, zero); ret = arena_malloc(NULL, size + extra, zero, try_tcache);
if (ret == NULL) { if (ret == NULL) {
if (extra == 0) if (extra == 0)
@ -1921,7 +1921,7 @@ arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
return (NULL); return (NULL);
ret = ipalloc(usize, alignment, zero); ret = ipalloc(usize, alignment, zero);
} else } else
ret = arena_malloc(size, zero); ret = arena_malloc(NULL, size, zero, try_tcache);
if (ret == NULL) if (ret == NULL)
return (NULL); return (NULL);

View File

@ -1016,7 +1016,7 @@ thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
int ret; int ret;
unsigned newind, oldind; unsigned newind, oldind;
newind = oldind = choose_arena()->ind; newind = oldind = choose_arena(NULL)->ind;
WRITE(newind, unsigned); WRITE(newind, unsigned);
READ(oldind, unsigned); READ(oldind, unsigned);
if (newind != oldind) { if (newind != oldind) {

View File

@ -1487,7 +1487,6 @@ je_nallocm(size_t *rsize, size_t size, int flags)
* End experimental functions. * End experimental functions.
*/ */
/******************************************************************************/ /******************************************************************************/
/* /*
* The following functions are used by threading libraries for protection of * The following functions are used by threading libraries for protection of
* malloc during fork(). * malloc during fork().
@ -1552,3 +1551,55 @@ jemalloc_postfork_child(void)
} }
/******************************************************************************/ /******************************************************************************/
/*
* The following functions are used for TLS allocation/deallocation in static
* binaries on FreeBSD. The primary difference between these and i[mcd]alloc()
* is that these avoid accessing TLS variables.
*/
/*
 * Common backend for a0malloc()/a0calloc().  Routes the request to arena 0
 * (bypassing the per-thread arena/tcache TLS machinery) so it is safe to use
 * before/without TLS, as needed by FreeBSD's static-binary TLS bootstrap.
 */
static void *
a0alloc(size_t size, bool zero)
{
	size_t usize;

	/* Lazily bootstrap the allocator; bail out on failure. */
	if (malloc_init())
		return (NULL);

	/* malloc(0) must return a unique pointer, so allocate one byte. */
	usize = (size == 0) ? 1 : size;

	/* Large requests go to the huge-allocation path. */
	if (usize > arena_maxclass)
		return (huge_malloc(usize, zero));

	/* Explicitly pin arena 0 and disable the tcache (try_tcache=false). */
	return (arena_malloc(arenas[0], usize, zero, false));
}
/*
 * TLS-safe malloc() replacement for FreeBSD's static-binary bootstrap.
 * Returns non-zeroed memory from arena 0, or NULL on failure.
 */
void *
a0malloc(size_t size)
{
	void *ret;

	ret = a0alloc(size, false);
	return (ret);
}
/*
 * TLS-safe calloc() replacement for FreeBSD's static-binary bootstrap.
 * Returns zeroed memory for num elements of the given size from arena 0,
 * or NULL on failure.
 */
void *
a0calloc(size_t num, size_t size)
{

	/*
	 * Guard against size_t overflow in num * size; a wrapped product
	 * would silently allocate a too-small buffer (CERT MEM07-C).
	 */
	if (num != 0 && size > SIZE_MAX / num)
		return (NULL);
	return (a0alloc(num * size, true));
}
/*
 * TLS-safe free() replacement for memory obtained via a0malloc()/a0calloc().
 * Deallocates without consulting the thread cache (try_tcache=false), so no
 * TLS access occurs.
 */
void
a0free(void *ptr)
{
	arena_chunk_t *chunk;

	/* free(NULL) is a no-op. */
	if (ptr == NULL)
		return;

	/*
	 * If ptr lies at the start of its chunk, it is a huge allocation;
	 * otherwise it belongs to an arena within the chunk.
	 */
	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk == ptr)
		huge_dalloc(ptr, true);
	else
		arena_dalloc(chunk->arena, chunk, ptr, false);
}
/******************************************************************************/