Add support for user-specified chunk allocators/deallocators.

Add new mallctl endpoints "arena<i>.chunk.alloc" and
"arena<i>.chunk.dealloc" to allow userspace to configure
jemalloc's chunk allocator and deallocator on a per-arena
basis.
Author: aravind
Date: 2014-05-05 15:16:56 -07:00
Committed by: Jason Evans
Parent: 4bbd11b789
Commit: fb7fe50a88
16 changed files with 283 additions and 45 deletions

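Usage sketch (illustration only, not part of this commit): with these endpoints an application can point one arena at its own mapping layer. The prototypes below are inferred from the call sites in this diff (chunk_alloc_default and the arena->chunk_dealloc call); the void return on the deallocator, the visibility of the chunk_alloc_t/chunk_dealloc_t typedefs to callers, and the literal mallctl names "arena.0.chunk.alloc" / "arena.0.chunk.dealloc" are assumptions, and my_chunk_alloc/my_chunk_dealloc are hypothetical.

#include <stdbool.h>
#include <stddef.h>
#include <sys/mman.h>
#include <jemalloc/jemalloc.h>

/* Hypothetical allocator; same shape as chunk_alloc_default in this diff. */
static void *
my_chunk_alloc(size_t size, size_t alignment, bool *zero, unsigned arena_ind)
{
	/*
	 * mmap returns page-aligned, zero-filled pages; a real hook must also
	 * satisfy the requested (chunksize) alignment, which is elided here.
	 */
	void *ret = mmap(NULL, size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);

	(void)alignment;
	(void)arena_ind;
	if (ret == MAP_FAILED)
		return (NULL);
	*zero = true;
	return (ret);
}

/* Hypothetical deallocator; same shape as the arena->chunk_dealloc call. */
static void
my_chunk_dealloc(void *chunk, size_t size, unsigned arena_ind)
{
	(void)arena_ind;
	munmap(chunk, size);
}

static int
install_chunk_hooks(void)
{
	chunk_alloc_t *alloc_hook = my_chunk_alloc;
	chunk_dealloc_t *dealloc_hook = my_chunk_dealloc;

	/* Write the function pointers through the new mallctl endpoints. */
	if (mallctl("arena.0.chunk.alloc", NULL, NULL, &alloc_hook,
	    sizeof(chunk_alloc_t *)) != 0)
		return (-1);
	return (mallctl("arena.0.chunk.dealloc", NULL, NULL, &dealloc_hook,
	    sizeof(chunk_dealloc_t *)));
}

Because the hooks receive the arena index, one callback pair can serve several arenas.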
src/arena.c

@@ -570,8 +570,8 @@ arena_chunk_init_hard(arena_t *arena)
 	zero = false;
 	malloc_mutex_unlock(&arena->lock);
-	chunk = (arena_chunk_t *)chunk_alloc(chunksize, chunksize, false,
-	    &zero, arena->dss_prec);
+	chunk = (arena_chunk_t *)chunk_alloc(arena, chunksize, chunksize,
+	    false, &zero, arena->dss_prec);
 	malloc_mutex_lock(&arena->lock);
 	if (chunk == NULL)
 		return (NULL);
@@ -668,7 +668,7 @@ arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
 		arena->spare = chunk;
 		malloc_mutex_unlock(&arena->lock);
-		chunk_dealloc((void *)spare, chunksize, true);
+		chunk_dealloc(arena, (void *)spare, chunksize, true);
 		malloc_mutex_lock(&arena->lock);
 		if (config_stats)
 			arena->stats.mapped -= chunksize;
@@ -2319,6 +2319,8 @@ arena_new(arena_t *arena, unsigned ind)
 	arena->ind = ind;
 	arena->nthreads = 0;
+	arena->chunk_alloc = chunk_alloc_default;
+	arena->chunk_dealloc = (chunk_dealloc_t *)chunk_unmap;
 
 	if (malloc_mutex_init(&arena->lock))
 		return (true);

src/base.c

@@ -32,7 +32,7 @@ base_pages_alloc(size_t minsize)
 	assert(minsize != 0);
 	csize = CHUNK_CEILING(minsize);
 	zero = false;
-	base_pages = chunk_alloc(csize, chunksize, true, &zero,
+	base_pages = chunk_alloc(NULL, csize, chunksize, true, &zero,
 	    chunk_dss_prec_get());
 	if (base_pages == NULL)
 		return (true);

src/chunk.c

@@ -104,7 +104,7 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
 			malloc_mutex_unlock(&chunks_mtx);
 			node = base_node_alloc();
 			if (node == NULL) {
-				chunk_dealloc(ret, size, true);
+				chunk_dealloc(NULL, ret, size, true);
 				return (NULL);
 			}
 			malloc_mutex_lock(&chunks_mtx);
@@ -141,8 +141,8 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
  * takes advantage of this to avoid demanding zeroed chunks, but taking
  * advantage of them if they are returned.
  */
-void *
-chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
+static void *
+chunk_alloc_core(size_t size, size_t alignment, bool base, bool *zero,
     dss_prec_t dss_prec)
 {
 	void *ret;
@@ -156,32 +156,56 @@ chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
 	if (have_dss && dss_prec == dss_prec_primary) {
 		if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
 		    alignment, base, zero)) != NULL)
-			goto label_return;
+			return (ret);
 		if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
-			goto label_return;
+			return (ret);
 	}
 	/* mmap. */
 	if ((ret = chunk_recycle(&chunks_szad_mmap, &chunks_ad_mmap, size,
 	    alignment, base, zero)) != NULL)
-		goto label_return;
+		return (ret);
 	if ((ret = chunk_alloc_mmap(size, alignment, zero)) != NULL)
-		goto label_return;
+		return (ret);
 	/* "secondary" dss. */
 	if (have_dss && dss_prec == dss_prec_secondary) {
 		if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
 		    alignment, base, zero)) != NULL)
-			goto label_return;
+			return (ret);
 		if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
-			goto label_return;
+			return (ret);
 	}
 
 	/* All strategies for allocation failed. */
-	ret = NULL;
-label_return:
+	return (NULL);
+}
+
+/*
+ * Default arena chunk allocation routine in the absence of user-override.
+ */
+void *
+chunk_alloc_default(size_t size, size_t alignment, bool *zero,
+    unsigned arena_ind)
+{
+
+	return (chunk_alloc_core(size, alignment, false, zero,
+	    arenas[arena_ind]->dss_prec));
+}
+
+void *
+chunk_alloc(arena_t *arena, size_t size, size_t alignment, bool base,
+    bool *zero, dss_prec_t dss_prec)
+{
+	void *ret;
+
+	if (arena)
+		ret = arena->chunk_alloc(size, alignment, zero, arena->ind);
+	else
+		ret = chunk_alloc_core(size, alignment, base, zero, dss_prec);
+
 	if (ret != NULL) {
 		if (config_ivsalloc && base == false) {
 			if (rtree_set(chunks_rtree, (uintptr_t)ret, 1)) {
-				chunk_dealloc(ret, size, true);
+				chunk_dealloc(arena, ret, size, true);
 				return (NULL);
 			}
 		}
@@ -312,7 +336,7 @@ chunk_unmap(void *chunk, size_t size)
 }
 
 void
-chunk_dealloc(void *chunk, size_t size, bool unmap)
+chunk_dealloc(arena_t *arena, void *chunk, size_t size, bool unmap)
 {
 
 	assert(chunk != NULL);
@@ -329,8 +353,12 @@ chunk_dealloc(void *chunk, size_t size, bool unmap)
 		malloc_mutex_unlock(&chunks_mtx);
 	}
 
-	if (unmap)
-		chunk_unmap(chunk, size);
+	if (unmap) {
+		if (arena)
+			arena->chunk_dealloc(chunk, size, arena->ind);
+		else
+			chunk_unmap(chunk, size);
+	}
 }
 
 bool

src/ctl.c

@@ -113,6 +113,8 @@ CTL_PROTO(opt_prof_accum)
 CTL_PROTO(arena_i_purge)
 static void arena_purge(unsigned arena_ind);
 CTL_PROTO(arena_i_dss)
+CTL_PROTO(arena_i_chunk_alloc)
+CTL_PROTO(arena_i_chunk_dealloc)
 INDEX_PROTO(arena_i)
 CTL_PROTO(arenas_bin_i_size)
 CTL_PROTO(arenas_bin_i_nregs)
@@ -251,9 +253,15 @@ static const ctl_named_node_t opt_node[] = {
 	{NAME("prof_accum"), CTL(opt_prof_accum)}
 };
 
+static const ctl_named_node_t chunk_node[] = {
+	{NAME("alloc"), CTL(arena_i_chunk_alloc)},
+	{NAME("dealloc"), CTL(arena_i_chunk_dealloc)}
+};
+
 static const ctl_named_node_t arena_i_node[] = {
 	{NAME("purge"), CTL(arena_i_purge)},
-	{NAME("dss"), CTL(arena_i_dss)}
+	{NAME("dss"), CTL(arena_i_dss)},
+	{NAME("chunk"), CHILD(named, chunk)},
 };
 static const ctl_named_node_t super_arena_i_node[] = {
 	{NAME(""), CHILD(named, arena_i)}
@@ -1368,6 +1376,57 @@ label_return:
 	return (ret);
 }
 
+static int
+arena_i_chunk_alloc_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+    void *newp, size_t newlen)
+{
+	int ret;
+	unsigned arena_ind = mib[1];
+	arena_t *arena;
+
+	malloc_mutex_lock(&ctl_mtx);
+	if (arena_ind < narenas_total && (arena = arenas[arena_ind]) != NULL) {
+		malloc_mutex_lock(&arena->lock);
+		READ(arena->chunk_alloc, chunk_alloc_t *);
+		WRITE(arena->chunk_alloc, chunk_alloc_t *);
+	} else {
+		ret = EFAULT;
+		goto label_outer_return;
+	}
+	ret = 0;
+label_return:
+	malloc_mutex_unlock(&arena->lock);
+label_outer_return:
+	malloc_mutex_unlock(&ctl_mtx);
+	return (ret);
+}
+
+static int
+arena_i_chunk_dealloc_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+    void *newp, size_t newlen)
+{
+	int ret;
+	unsigned arena_ind = mib[1];
+	arena_t *arena;
+
+	malloc_mutex_lock(&ctl_mtx);
+	if (arena_ind < narenas_total && (arena = arenas[arena_ind]) != NULL) {
+		malloc_mutex_lock(&arena->lock);
+		READ(arena->chunk_dealloc, chunk_dealloc_t *);
+		WRITE(arena->chunk_dealloc, chunk_dealloc_t *);
+	} else {
+		ret = EFAULT;
+		goto label_outer_return;
+	}
+	ret = 0;
+label_return:
+	malloc_mutex_unlock(&arena->lock);
+label_outer_return:
+	malloc_mutex_unlock(&ctl_mtx);
+	return (ret);
+}
+
 static const ctl_named_node_t *
 arena_i_index(const size_t *mib, size_t miblen, size_t i)
 {

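Because the handlers above READ the current pointer before they WRITE the new one, both under arena->lock, a single mallctl call can capture the outgoing allocator while installing its replacement. A hedged sketch, reusing the assumptions from the earlier example (counting_chunk_alloc and prev_chunk_alloc are hypothetical):

#include <stdbool.h>
#include <stddef.h>
#include <jemalloc/jemalloc.h>

static chunk_alloc_t *prev_chunk_alloc;	/* filled in by the swap below */

/* Hypothetical wrapper that delegates to whatever allocator was installed
 * before it (e.g. chunk_alloc_default). */
static void *
counting_chunk_alloc(size_t size, size_t alignment, bool *zero,
    unsigned arena_ind)
{
	/* ...account for the request here, then delegate... */
	return (prev_chunk_alloc(size, alignment, zero, arena_ind));
}

static int
swap_chunk_alloc(void)
{
	chunk_alloc_t *new_alloc = counting_chunk_alloc;
	size_t sz = sizeof(chunk_alloc_t *);

	/* oldp receives the prior hook before newp is installed, so
	 * prev_chunk_alloc is valid by the time the wrapper can run. */
	return (mallctl("arena.0.chunk.alloc", &prev_chunk_alloc, &sz,
	    &new_alloc, sizeof(chunk_alloc_t *)));
}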
src/huge.c

@@ -16,14 +16,15 @@ malloc_mutex_t huge_mtx;
 static extent_tree_t huge;
 
 void *
-huge_malloc(size_t size, bool zero, dss_prec_t dss_prec)
+huge_malloc(arena_t *arena, size_t size, bool zero, dss_prec_t dss_prec)
 {
 
-	return (huge_palloc(size, chunksize, zero, dss_prec));
+	return (huge_palloc(arena, size, chunksize, zero, dss_prec));
 }
 
 void *
-huge_palloc(size_t size, size_t alignment, bool zero, dss_prec_t dss_prec)
+huge_palloc(arena_t *arena, size_t size, size_t alignment, bool zero,
+    dss_prec_t dss_prec)
 {
 	void *ret;
 	size_t csize;
@@ -48,7 +49,7 @@ huge_palloc(size_t size, size_t alignment, bool zero, dss_prec_t dss_prec)
 	 * it is possible to make correct junk/zero fill decisions below.
 	 */
 	is_zeroed = zero;
-	ret = chunk_alloc(csize, alignment, false, &is_zeroed, dss_prec);
+	ret = chunk_alloc(arena, csize, alignment, false, &is_zeroed, dss_prec);
 	if (ret == NULL) {
 		base_node_dealloc(node);
 		return (NULL);
@@ -57,6 +58,7 @@ huge_palloc(size_t size, size_t alignment, bool zero, dss_prec_t dss_prec)
 	/* Insert node into huge. */
 	node->addr = ret;
 	node->size = csize;
+	node->arena = arena;
 
 	malloc_mutex_lock(&huge_mtx);
 	extent_tree_ad_insert(&huge, node);
@@ -96,8 +98,9 @@ huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
 }
 
 void *
-huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
-    size_t alignment, bool zero, bool try_tcache_dalloc, dss_prec_t dss_prec)
+huge_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
+    size_t extra, size_t alignment, bool zero, bool try_tcache_dalloc,
+    dss_prec_t dss_prec)
 {
 	void *ret;
 	size_t copysize;
@@ -112,18 +115,18 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
 	 * space and copying.
 	 */
 	if (alignment > chunksize)
-		ret = huge_palloc(size + extra, alignment, zero, dss_prec);
+		ret = huge_palloc(arena, size + extra, alignment, zero, dss_prec);
 	else
-		ret = huge_malloc(size + extra, zero, dss_prec);
+		ret = huge_malloc(arena, size + extra, zero, dss_prec);
 
 	if (ret == NULL) {
 		if (extra == 0)
 			return (NULL);
 		/* Try again, this time without extra. */
 		if (alignment > chunksize)
-			ret = huge_palloc(size, alignment, zero, dss_prec);
+			ret = huge_palloc(arena, size, alignment, zero, dss_prec);
 		else
-			ret = huge_malloc(size, zero, dss_prec);
+			ret = huge_malloc(arena, size, zero, dss_prec);
 		if (ret == NULL)
 			return (NULL);
 	}
@@ -238,7 +241,7 @@ huge_dalloc(void *ptr, bool unmap)
 	if (unmap)
 		huge_dalloc_junk(node->addr, node->size);
 
-	chunk_dealloc(node->addr, node->size, unmap);
+	chunk_dealloc(node->arena, node->addr, node->size, unmap);
 
 	base_node_dealloc(node);
 }

src/jemalloc.c

@@ -1983,7 +1983,7 @@ a0alloc(size_t size, bool zero)
 	if (size <= arena_maxclass)
 		return (arena_malloc(arenas[0], size, zero, false));
 	else
-		return (huge_malloc(size, zero, huge_dss_prec_get(arenas[0])));
+		return (huge_malloc(NULL, size, zero, huge_dss_prec_get(arenas[0])));
 }
 
 void *