Move centralized chunk management into arenas.
Migrate all centralized data structures related to huge allocations and recyclable chunks into arena_t, so that each arena can manage huge allocations and recyclable virtual memory completely independently of other arenas.

Add chunk node caching to arenas, in order to avoid contention on the base allocator.

Use chunks_rtree to look up huge allocations rather than a red-black tree. Maintain a per arena unsorted list of huge allocations (which will be needed to enumerate huge allocations during arena reset).

Remove the --enable-ivsalloc option, make ivsalloc() always available, and use it for size queries if --enable-debug is enabled. The only practical implications of this removal are that 1) ivsalloc() is now always available during live debugging (and the underlying radix tree is available during core-based debugging), and 2) size query validation can no longer be enabled independently of --enable-debug.

Remove the stats.chunks.{current,total,high} mallctls, and replace their underlying statistics with simpler atomically updated counters used exclusively for gdump triggering. These statistics are no longer very useful because each arena manages chunks independently, and per arena statistics provide similar information.

Simplify chunk synchronization code, now that base chunk allocation cannot cause recursive lock acquisition.
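The per arena unsorted list of huge allocations mentioned above is what will make enumeration during arena reset possible. As a rough illustration only (not part of this commit; arena_huge_foreach and its callback parameter are hypothetical names), walking that list could look like the sketch below, assuming the ql_foreach() macro from jemalloc/internal/ql.h and the link_ql field this change adds to extent_node_t:

static void
arena_huge_foreach(arena_t *arena, void (*cb)(extent_node_t *node))
{
        extent_node_t *node;

        /* arena->huge and arena->huge_mtx are introduced by this commit. */
        malloc_mutex_lock(&arena->huge_mtx);
        ql_foreach(node, &arena->huge, link_ql) {
                /* Each node records one huge allocation (addr/size/arena). */
                cb(node);
        }
        malloc_mutex_unlock(&arena->huge_mtx);
}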
src/arena.c (74 changed lines)

@@ -20,6 +20,7 @@ unsigned nhclasses; /* Number of huge size classes. */
* definition.
*/
static void arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk);
static void arena_purge(arena_t *arena, bool all);
static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty,
bool cleaned);

@@ -392,8 +393,7 @@ arena_chunk_init_spare(arena_t *arena)
}
static arena_chunk_t *
arena_chunk_alloc_internal(arena_t *arena, size_t size, size_t alignment,
bool *zero)
arena_chunk_alloc_internal(arena_t *arena, bool *zero)
{
arena_chunk_t *chunk;
chunk_alloc_t *chunk_alloc;

@@ -403,7 +403,16 @@ arena_chunk_alloc_internal(arena_t *arena, size_t size, size_t alignment,
chunk_dalloc = arena->chunk_dalloc;
malloc_mutex_unlock(&arena->lock);
chunk = (arena_chunk_t *)chunk_alloc_arena(chunk_alloc, chunk_dalloc,
arena->ind, NULL, size, alignment, zero);
arena->ind, NULL, chunksize, chunksize, zero);
if (chunk != NULL) {
chunk->node.arena = arena;
chunk->node.addr = chunk;
chunk->node.size = 0; /* Indicates this is an arena chunk. */
if (chunk_register(chunk, &chunk->node)) {
chunk_dalloc((void *)chunk, chunksize, arena->ind);
chunk = NULL;
}
}
malloc_mutex_lock(&arena->lock);
if (config_stats && chunk != NULL) {
arena->stats.mapped += chunksize;

@@ -423,12 +432,10 @@ arena_chunk_init_hard(arena_t *arena)
assert(arena->spare == NULL);
zero = false;
chunk = arena_chunk_alloc_internal(arena, chunksize, chunksize, &zero);
chunk = arena_chunk_alloc_internal(arena, &zero);
if (chunk == NULL)
return (NULL);
chunk->arena = arena;
/*
* Initialize the map to contain one maximal free untouched run. Mark
* the pages as zeroed iff chunk_alloc() returned a zeroed chunk.

@@ -514,6 +521,7 @@ arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk)
}
chunk_dalloc = arena->chunk_dalloc;
malloc_mutex_unlock(&arena->lock);
chunk_deregister(spare, &spare->node);
chunk_dalloc((void *)spare, chunksize, arena->ind);
malloc_mutex_lock(&arena->lock);
if (config_stats) {

@@ -593,6 +601,32 @@ arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize,
arena_huge_malloc_stats_update_undo(arena, usize);
}
extent_node_t *
arena_node_alloc(arena_t *arena)
{
extent_node_t *node;
malloc_mutex_lock(&arena->node_cache_mtx);
node = ql_last(&arena->node_cache, link_ql);
if (node == NULL) {
malloc_mutex_unlock(&arena->node_cache_mtx);
return (base_alloc(sizeof(extent_node_t)));
}
ql_tail_remove(&arena->node_cache, extent_node_t, link_ql);
malloc_mutex_unlock(&arena->node_cache_mtx);
return (node);
}
void
arena_node_dalloc(arena_t *arena, extent_node_t *node)
{
malloc_mutex_lock(&arena->node_cache_mtx);
ql_elm_new(node, link_ql);
ql_tail_insert(&arena->node_cache, node, link_ql);
malloc_mutex_unlock(&arena->node_cache_mtx);
}
void *
arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
bool *zero)

@@ -1782,7 +1816,7 @@ arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
if (run == bin->runcur)
bin->runcur = NULL;
else {
index_t binind = arena_bin_index(chunk->arena, bin);
index_t binind = arena_bin_index(chunk->node.arena, bin);
arena_bin_info_t *bin_info = &arena_bin_info[binind];
if (bin_info->nregs != 1) {

@@ -2123,7 +2157,7 @@ arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
arena_t *arena;
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
arena = chunk->arena;
arena = chunk->node.arena;
if (usize < oldsize) {
/* Fill before shrinking in order avoid a race. */

@@ -2338,10 +2372,21 @@ arena_new(unsigned ind)
arena->ind = ind;
arena->nthreads = 0;
if (malloc_mutex_init(&arena->lock))
return (NULL);
arena->chunk_alloc = chunk_alloc_default;
arena->chunk_dalloc = chunk_dalloc_default;
if (malloc_mutex_init(&arena->lock))
ql_new(&arena->huge);
if (malloc_mutex_init(&arena->huge_mtx))
return (NULL);
extent_tree_szad_new(&arena->chunks_szad_mmap);
extent_tree_ad_new(&arena->chunks_ad_mmap);
extent_tree_szad_new(&arena->chunks_szad_dss);
extent_tree_ad_new(&arena->chunks_ad_dss);
ql_new(&arena->node_cache);
if (malloc_mutex_init(&arena->chunks_mtx))
return (NULL);
if (malloc_mutex_init(&arena->node_cache_mtx))
return (NULL);
if (config_stats) {

@@ -2551,6 +2596,9 @@ arena_prefork(arena_t *arena)
unsigned i;
malloc_mutex_prefork(&arena->lock);
malloc_mutex_prefork(&arena->huge_mtx);
malloc_mutex_prefork(&arena->chunks_mtx);
malloc_mutex_prefork(&arena->node_cache_mtx);
for (i = 0; i < NBINS; i++)
malloc_mutex_prefork(&arena->bins[i].lock);
}

@@ -2562,6 +2610,9 @@ arena_postfork_parent(arena_t *arena)
for (i = 0; i < NBINS; i++)
malloc_mutex_postfork_parent(&arena->bins[i].lock);
malloc_mutex_postfork_parent(&arena->node_cache_mtx);
malloc_mutex_postfork_parent(&arena->chunks_mtx);
malloc_mutex_postfork_parent(&arena->huge_mtx);
malloc_mutex_postfork_parent(&arena->lock);
}

@@ -2572,5 +2623,8 @@ arena_postfork_child(arena_t *arena)
for (i = 0; i < NBINS; i++)
malloc_mutex_postfork_child(&arena->bins[i].lock);
malloc_mutex_postfork_child(&arena->node_cache_mtx);
malloc_mutex_postfork_child(&arena->chunks_mtx);
malloc_mutex_postfork_child(&arena->huge_mtx);
malloc_mutex_postfork_child(&arena->lock);
}
src/base.c (65 changed lines)

@@ -11,8 +11,9 @@ static size_t base_allocated;
/******************************************************************************/
/* base_mtx must be held. */
static extent_node_t *
base_node_try_alloc_locked(void)
base_node_try_alloc(void)
{
extent_node_t *node;

@@ -24,8 +25,9 @@ base_node_try_alloc_locked(void)
return (node);
}
/* base_mtx must be held. */
static void
base_node_dalloc_locked(extent_node_t *node)
base_node_dalloc(extent_node_t *node)
{
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));

@@ -42,14 +44,14 @@ base_chunk_alloc(size_t minsize)
void *addr;
assert(minsize != 0);
node = base_node_try_alloc_locked();
node = base_node_try_alloc();
/* Allocate enough space to also carve a node out if necessary. */
nsize = (node == NULL) ? CACHELINE_CEILING(sizeof(extent_node_t)) : 0;
csize = CHUNK_CEILING(minsize + nsize);
addr = chunk_alloc_base(csize);
if (addr == NULL) {
if (node != NULL)
base_node_dalloc_locked(node);
base_node_dalloc(node);
return (NULL);
}
if (node == NULL) {

@@ -63,8 +65,13 @@ base_chunk_alloc(size_t minsize)
return (node);
}
static void *
base_alloc_locked(size_t size)
/*
* base_alloc() guarantees demand-zeroed memory, in order to make multi-page
* sparse data structures such as radix tree nodes efficient with respect to
* physical memory usage.
*/
void *
base_alloc(size_t size)
{
void *ret;
size_t csize;

@@ -79,6 +86,7 @@ base_alloc_locked(size_t size)
key.addr = NULL;
key.size = csize;
malloc_mutex_lock(&base_mtx);
node = extent_tree_szad_nsearch(&base_avail_szad, &key);
if (node != NULL) {
/* Use existing space. */

@@ -87,8 +95,10 @@ base_alloc_locked(size_t size)
/* Try to allocate more space. */
node = base_chunk_alloc(csize);
}
if (node == NULL)
return (NULL);
if (node == NULL) {
ret = NULL;
goto label_return;
}
ret = node->addr;
if (node->size > csize) {

@@ -96,50 +106,15 @@ base_alloc_locked(size_t size)
node->size -= csize;
extent_tree_szad_insert(&base_avail_szad, node);
} else
base_node_dalloc_locked(node);
base_node_dalloc(node);
if (config_stats)
base_allocated += csize;
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, csize);
return (ret);
}
/*
* base_alloc() guarantees demand-zeroed memory, in order to make multi-page
* sparse data structures such as radix tree nodes efficient with respect to
* physical memory usage.
*/
void *
base_alloc(size_t size)
{
void *ret;
malloc_mutex_lock(&base_mtx);
ret = base_alloc_locked(size);
label_return:
malloc_mutex_unlock(&base_mtx);
return (ret);
}
extent_node_t *
base_node_alloc(void)
{
extent_node_t *ret;
malloc_mutex_lock(&base_mtx);
if ((ret = base_node_try_alloc_locked()) == NULL)
ret = (extent_node_t *)base_alloc_locked(sizeof(extent_node_t));
malloc_mutex_unlock(&base_mtx);
return (ret);
}
void
base_node_dalloc(extent_node_t *node)
{
malloc_mutex_lock(&base_mtx);
base_node_dalloc_locked(node);
malloc_mutex_unlock(&base_mtx);
}
size_t
base_allocated_get(void)
{
src/chunk.c (275 changed lines)

@@ -7,19 +7,9 @@
const char *opt_dss = DSS_DEFAULT;
size_t opt_lg_chunk = LG_CHUNK_DEFAULT;
malloc_mutex_t chunks_mtx;
chunk_stats_t stats_chunks;
/*
* Trees of chunks that were previously allocated (trees differ only in node
* ordering). These are used when allocating chunks, in an attempt to re-use
* address space. Depending on function, different tree orderings are needed,
* which is why there are two trees with the same contents.
*/
static extent_tree_t chunks_szad_mmap;
static extent_tree_t chunks_ad_mmap;
static extent_tree_t chunks_szad_dss;
static extent_tree_t chunks_ad_dss;
/* Used exclusively for gdump triggering. */
static size_t curchunks;
static size_t highchunks;
rtree_t chunks_rtree;

@@ -29,18 +19,51 @@ size_t chunksize_mask; /* (chunksize - 1). */
size_t chunk_npages;
/******************************************************************************/
/*
* Function prototypes for static functions that are referenced prior to
* definition.
*/
static void chunk_dalloc_core(void *chunk, size_t size);
bool
chunk_register(const void *chunk, const extent_node_t *node)
{
/******************************************************************************/
assert(node->addr == chunk);
if (rtree_set(&chunks_rtree, (uintptr_t)chunk, node))
return (true);
if (config_prof && opt_prof) {
size_t nadd = (node->size == 0) ? 1 : node->size / chunksize;
size_t cur = atomic_add_z(&curchunks, nadd);
size_t high = atomic_read_z(&highchunks);
while (cur > high && atomic_cas_z(&highchunks, high, cur)) {
/*
* Don't refresh cur, because it may have decreased
* since this thread lost the highchunks update race.
*/
high = atomic_read_z(&highchunks);
}
if (cur > high && prof_gdump_get_unlocked())
prof_gdump();
}
return (false);
}
void
chunk_deregister(const void *chunk, const extent_node_t *node)
{
bool err;
err = rtree_set(&chunks_rtree, (uintptr_t)chunk, NULL);
assert(!err);
if (config_prof && opt_prof) {
size_t nsub = (node->size == 0) ? 1 : node->size / chunksize;
assert(atomic_read_z(&curchunks) >= nsub);
atomic_sub_z(&curchunks, nsub);
}
}
static void *
chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad,
void *new_addr, size_t size, size_t alignment, bool base, bool *zero)
chunk_recycle(arena_t *arena, extent_tree_t *chunks_szad,
extent_tree_t *chunks_ad, void *new_addr, size_t size, size_t alignment,
bool *zero)
{
void *ret;
extent_node_t *node;

@@ -50,27 +73,17 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad,
assert(new_addr == NULL || alignment == chunksize);
if (base) {
/*
* This function may need to call base_node_{,de}alloc(), but
* the current chunk allocation request is on behalf of the
* base allocator. Avoid deadlock (and if that weren't an
* issue, potential for infinite recursion) by returning NULL.
*/
return (NULL);
}
alloc_size = size + alignment - chunksize;
/* Beware size_t wrap-around. */
if (alloc_size < size)
return (NULL);
key.addr = new_addr;
key.size = alloc_size;
malloc_mutex_lock(&chunks_mtx);
malloc_mutex_lock(&arena->chunks_mtx);
node = (new_addr != NULL) ? extent_tree_ad_search(chunks_ad, &key) :
extent_tree_szad_nsearch(chunks_szad, &key);
if (node == NULL) {
malloc_mutex_unlock(&chunks_mtx);
malloc_mutex_unlock(&arena->chunks_mtx);
return (NULL);
}
leadsize = ALIGNMENT_CEILING((uintptr_t)node->addr, alignment) -

@@ -95,20 +108,12 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad,
if (trailsize != 0) {
/* Insert the trailing space as a smaller chunk. */
if (node == NULL) {
/*
* An additional node is required, but
* base_node_alloc() can cause a new base chunk to be
* allocated. Drop chunks_mtx in order to avoid
* deadlock, and if node allocation fails, deallocate
* the result before returning an error.
*/
malloc_mutex_unlock(&chunks_mtx);
node = base_node_alloc();
node = arena_node_alloc(arena);
if (node == NULL) {
chunk_dalloc_core(ret, size);
malloc_mutex_unlock(&arena->chunks_mtx);
chunk_unmap(arena, ret, size);
return (NULL);
}
malloc_mutex_lock(&chunks_mtx);
}
node->addr = (void *)((uintptr_t)(ret) + size);
node->size = trailsize;

@@ -117,10 +122,10 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad,
extent_tree_ad_insert(chunks_ad, node);
node = NULL;
}
malloc_mutex_unlock(&chunks_mtx);
malloc_mutex_unlock(&arena->chunks_mtx);
if (node != NULL)
base_node_dalloc(node);
arena_node_dalloc(arena, node);
if (*zero) {
if (!zeroed)
memset(ret, 0, size);

@@ -137,15 +142,15 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad,
}
static void *
chunk_alloc_core_dss(void *new_addr, size_t size, size_t alignment, bool base,
bool *zero)
chunk_alloc_core_dss(arena_t *arena, void *new_addr, size_t size,
size_t alignment, bool *zero)
{
void *ret;
if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss,
new_addr, size, alignment, base, zero)) != NULL)
if ((ret = chunk_recycle(arena, &arena->chunks_szad_dss,
&arena->chunks_ad_dss, new_addr, size, alignment, zero)) != NULL)
return (ret);
ret = chunk_alloc_dss(new_addr, size, alignment, zero);
ret = chunk_alloc_dss(arena, new_addr, size, alignment, zero);
return (ret);
}

@@ -156,7 +161,7 @@ chunk_alloc_core_dss(void *new_addr, size_t size, size_t alignment, bool base,
* them if they are returned.
*/
static void *
chunk_alloc_core(void *new_addr, size_t size, size_t alignment, bool base,
chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment,
bool *zero, dss_prec_t dss_prec)
{
void *ret;

@@ -168,12 +173,13 @@ chunk_alloc_core(void *new_addr, size_t size, size_t alignment, bool base,
/* "primary" dss. */
if (have_dss && dss_prec == dss_prec_primary && (ret =
chunk_alloc_core_dss(new_addr, size, alignment, base, zero)) !=
chunk_alloc_core_dss(arena, new_addr, size, alignment, zero)) !=
NULL)
return (ret);
/* mmap. */
if (!config_munmap && (ret = chunk_recycle(&chunks_szad_mmap,
&chunks_ad_mmap, new_addr, size, alignment, base, zero)) != NULL)
if (!config_munmap && (ret = chunk_recycle(arena,
&arena->chunks_szad_mmap, &arena->chunks_ad_mmap, new_addr, size,
alignment, zero)) != NULL)
return (ret);
/*
* Requesting an address is not implemented for chunk_alloc_mmap(), so

@@ -184,7 +190,7 @@ chunk_alloc_core(void *new_addr, size_t size, size_t alignment, bool base,
return (ret);
/* "secondary" dss. */
if (have_dss && dss_prec == dss_prec_secondary && (ret =
chunk_alloc_core_dss(new_addr, size, alignment, base, zero)) !=
chunk_alloc_core_dss(arena, new_addr, size, alignment, zero)) !=
NULL)
return (ret);

@@ -192,40 +198,6 @@ chunk_alloc_core(void *new_addr, size_t size, size_t alignment, bool base,
return (NULL);
}
static bool
chunk_register(void *chunk, size_t size, bool base)
{
assert(chunk != NULL);
assert(CHUNK_ADDR2BASE(chunk) == chunk);
if (config_ivsalloc && !base) {
if (rtree_set(&chunks_rtree, (uintptr_t)chunk, chunk))
return (true);
}
if (config_stats || config_prof) {
bool gdump;
malloc_mutex_lock(&chunks_mtx);
if (config_stats)
stats_chunks.nchunks += (size / chunksize);
stats_chunks.curchunks += (size / chunksize);
if (stats_chunks.curchunks > stats_chunks.highchunks) {
stats_chunks.highchunks =
stats_chunks.curchunks;
if (config_prof)
gdump = true;
} else if (config_prof)
gdump = false;
malloc_mutex_unlock(&chunks_mtx);
if (config_prof && opt_prof && prof_gdump_get_unlocked() &&
gdump)
prof_gdump();
}
if (config_valgrind)
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(chunk, size);
return (false);
}
void *
chunk_alloc_base(size_t size)
{

@@ -239,10 +211,10 @@ chunk_alloc_base(size_t size)
*/
zero = true;
ret = chunk_alloc_mmap(size, chunksize, &zero);
if (ret != NULL && chunk_register(ret, size, true)) {
chunk_dalloc_core(ret, size);
ret = NULL;
}
if (ret == NULL)
return (NULL);
if (config_valgrind)
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
return (ret);
}

@@ -255,18 +227,16 @@ chunk_alloc_arena(chunk_alloc_t *chunk_alloc, chunk_dalloc_t *chunk_dalloc,
void *ret;
ret = chunk_alloc(new_addr, size, alignment, zero, arena_ind);
if (ret != NULL && chunk_register(ret, size, false)) {
chunk_dalloc(ret, size, arena_ind);
ret = NULL;
}
if (ret == NULL)
return (NULL);
if (config_valgrind)
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
return (ret);
}
/* Default arena chunk allocation routine in the absence of user override. */
void *
chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
unsigned arena_ind)
static arena_t *
chunk_arena_get(unsigned arena_ind)
{
arena_t *arena;

@@ -278,32 +248,32 @@ chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
* already.
*/
assert(arena != NULL);
return (arena);
}
return (chunk_alloc_core(new_addr, size, alignment, false, zero,
/* Default arena chunk allocation routine in the absence of user override. */
void *
chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
unsigned arena_ind)
{
arena_t *arena;
arena = chunk_arena_get(arena_ind);
return (chunk_alloc_core(arena, new_addr, size, alignment, zero,
arena->dss_prec));
}
static void
chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
size_t size)
chunk_record(arena_t *arena, extent_tree_t *chunks_szad,
extent_tree_t *chunks_ad, void *chunk, size_t size)
{
bool unzeroed;
extent_node_t *xnode, *node, *prev, *xprev, key;
extent_node_t *node, *prev, key;
unzeroed = pages_purge(chunk, size);
JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
/*
* Allocate a node before acquiring chunks_mtx even though it might not
* be needed, because base_node_alloc() may cause a new base chunk to
* be allocated, which could cause deadlock if chunks_mtx were already
* held.
*/
xnode = base_node_alloc();
/* Use xprev to implement conditional deferred deallocation of prev. */
xprev = NULL;
malloc_mutex_lock(&chunks_mtx);
malloc_mutex_lock(&arena->chunks_mtx);
key.addr = (void *)((uintptr_t)chunk + size);
node = extent_tree_ad_nsearch(chunks_ad, &key);
/* Try to coalesce forward. */

@@ -320,17 +290,16 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
extent_tree_szad_insert(chunks_szad, node);
} else {
/* Coalescing forward failed, so insert a new node. */
if (xnode == NULL) {
node = arena_node_alloc(arena);
if (node == NULL) {
/*
* base_node_alloc() failed, which is an exceedingly
* Node allocation failed, which is an exceedingly
* unlikely failure. Leak chunk; its pages have
* already been purged, so this is only a virtual
* memory leak.
*/
goto label_return;
}
node = xnode;
xnode = NULL; /* Prevent deallocation below. */
node->addr = chunk;
node->size = size;
node->zeroed = !unzeroed;

@@ -356,37 +325,15 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
node->zeroed = (node->zeroed && prev->zeroed);
extent_tree_szad_insert(chunks_szad, node);
xprev = prev;
arena_node_dalloc(arena, prev);
}
label_return:
malloc_mutex_unlock(&chunks_mtx);
/*
* Deallocate xnode and/or xprev after unlocking chunks_mtx in order to
* avoid potential deadlock.
*/
if (xnode != NULL)
base_node_dalloc(xnode);
if (xprev != NULL)
base_node_dalloc(xprev);
malloc_mutex_unlock(&arena->chunks_mtx);
}
void
chunk_unmap(void *chunk, size_t size)
{
assert(chunk != NULL);
assert(CHUNK_ADDR2BASE(chunk) == chunk);
assert(size != 0);
assert((size & chunksize_mask) == 0);
if (have_dss && chunk_in_dss(chunk))
chunk_record(&chunks_szad_dss, &chunks_ad_dss, chunk, size);
else if (chunk_dalloc_mmap(chunk, size))
chunk_record(&chunks_szad_mmap, &chunks_ad_mmap, chunk, size);
}
static void
chunk_dalloc_core(void *chunk, size_t size)
chunk_unmap(arena_t *arena, void *chunk, size_t size)
{
assert(chunk != NULL);

@@ -394,16 +341,13 @@ chunk_dalloc_core(void *chunk, size_t size)
assert(size != 0);
assert((size & chunksize_mask) == 0);
if (config_ivsalloc)
rtree_set(&chunks_rtree, (uintptr_t)chunk, NULL);
if (config_stats || config_prof) {
malloc_mutex_lock(&chunks_mtx);
assert(stats_chunks.curchunks >= (size / chunksize));
stats_chunks.curchunks -= (size / chunksize);
malloc_mutex_unlock(&chunks_mtx);
if (have_dss && chunk_in_dss(chunk)) {
chunk_record(arena, &arena->chunks_szad_dss,
&arena->chunks_ad_dss, chunk, size);
} else if (chunk_dalloc_mmap(chunk, size)) {
chunk_record(arena, &arena->chunks_szad_mmap,
&arena->chunks_ad_mmap, chunk, size);
}
chunk_unmap(chunk, size);
}
/* Default arena chunk deallocation routine in the absence of user override. */

@@ -411,7 +355,7 @@ bool
chunk_dalloc_default(void *chunk, size_t size, unsigned arena_ind)
{
chunk_dalloc_core(chunk, size);
chunk_unmap(chunk_arena_get(arena_ind), chunk, size);
return (false);
}

@@ -433,21 +377,11 @@ chunk_boot(void)
chunksize_mask = chunksize - 1;
chunk_npages = (chunksize >> LG_PAGE);
if (malloc_mutex_init(&chunks_mtx))
return (true);
if (config_stats || config_prof)
memset(&stats_chunks, 0, sizeof(chunk_stats_t));
if (have_dss && chunk_dss_boot())
return (true);
extent_tree_szad_new(&chunks_szad_mmap);
extent_tree_ad_new(&chunks_ad_mmap);
extent_tree_szad_new(&chunks_szad_dss);
extent_tree_ad_new(&chunks_ad_dss);
if (config_ivsalloc) {
if (rtree_new(&chunks_rtree, (ZU(1) << (LG_SIZEOF_PTR+3)) -
opt_lg_chunk, chunks_rtree_node_alloc, NULL))
return (true);
}
if (rtree_new(&chunks_rtree, (ZU(1) << (LG_SIZEOF_PTR+3)) -
opt_lg_chunk, chunks_rtree_node_alloc, NULL))
return (true);
return (false);
}

@@ -456,7 +390,6 @@ void
chunk_prefork(void)
{
malloc_mutex_prefork(&chunks_mtx);
chunk_dss_prefork();
}

@@ -465,7 +398,6 @@ chunk_postfork_parent(void)
{
chunk_dss_postfork_parent();
malloc_mutex_postfork_parent(&chunks_mtx);
}
void

@@ -473,5 +405,4 @@ chunk_postfork_child(void)
{
chunk_dss_postfork_child();
malloc_mutex_postfork_child(&chunks_mtx);
}
src/chunk_dss.c

@@ -66,7 +66,8 @@ chunk_dss_prec_set(dss_prec_t dss_prec)
}
void *
chunk_alloc_dss(void *new_addr, size_t size, size_t alignment, bool *zero)
chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
bool *zero)
{
void *ret;

@@ -133,7 +134,7 @@ chunk_alloc_dss(void *new_addr, size_t size, size_t alignment, bool *zero)
dss_max = dss_next;
malloc_mutex_unlock(&dss_mtx);
if (cpad_size != 0)
chunk_unmap(cpad, cpad_size);
chunk_unmap(arena, cpad, cpad_size);
if (*zero) {
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
ret, size);
src/ctl.c (26 changed lines)

@@ -144,9 +144,6 @@ CTL_PROTO(prof_gdump)
CTL_PROTO(prof_reset)
CTL_PROTO(prof_interval)
CTL_PROTO(lg_prof_sample)
CTL_PROTO(stats_chunks_current)
CTL_PROTO(stats_chunks_total)
CTL_PROTO(stats_chunks_high)
CTL_PROTO(stats_arenas_i_small_allocated)
CTL_PROTO(stats_arenas_i_small_nmalloc)
CTL_PROTO(stats_arenas_i_small_ndalloc)

@@ -363,12 +360,6 @@ static const ctl_named_node_t prof_node[] = {
{NAME("lg_sample"), CTL(lg_prof_sample)}
};
static const ctl_named_node_t stats_chunks_node[] = {
{NAME("current"), CTL(stats_chunks_current)},
{NAME("total"), CTL(stats_chunks_total)},
{NAME("high"), CTL(stats_chunks_high)}
};
static const ctl_named_node_t stats_arenas_i_metadata_node[] = {
{NAME("mapped"), CTL(stats_arenas_i_metadata_mapped)},
{NAME("allocated"), CTL(stats_arenas_i_metadata_allocated)}

@@ -473,7 +464,6 @@ static const ctl_named_node_t stats_node[] = {
{NAME("active"), CTL(stats_active)},
{NAME("metadata"), CTL(stats_metadata)},
{NAME("mapped"), CTL(stats_mapped)},
{NAME("chunks"), CHILD(named, stats_chunks)},
{NAME("arenas"), CHILD(indexed, stats_arenas)}
};

@@ -688,14 +678,6 @@ ctl_refresh(void)
unsigned i;
VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);
if (config_stats) {
malloc_mutex_lock(&chunks_mtx);
ctl_stats.chunks.current = stats_chunks.curchunks;
ctl_stats.chunks.total = stats_chunks.nchunks;
ctl_stats.chunks.high = stats_chunks.highchunks;
malloc_mutex_unlock(&chunks_mtx);
}
/*
* Clear sum stats, since they will be merged into by
* ctl_arena_refresh().

@@ -733,7 +715,8 @@ ctl_refresh(void)
+ ctl_stats.arenas[ctl_stats.narenas].astats.metadata_mapped
+ ctl_stats.arenas[ctl_stats.narenas].astats
.metadata_allocated;
ctl_stats.mapped = (ctl_stats.chunks.current << opt_lg_chunk);
ctl_stats.mapped =
ctl_stats.arenas[ctl_stats.narenas].astats.mapped;
}
ctl_epoch++;

@@ -1950,11 +1933,6 @@ CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t)
CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats.metadata, size_t)
CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t)
CTL_RO_CGEN(config_stats, stats_chunks_current, ctl_stats.chunks.current,
size_t)
CTL_RO_CGEN(config_stats, stats_chunks_total, ctl_stats.chunks.total, uint64_t)
CTL_RO_CGEN(config_stats, stats_chunks_high, ctl_stats.chunks.high, size_t)
CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *)
CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
src/huge.c (169 changed lines)

@@ -2,15 +2,33 @@
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */
/* Protects chunk-related data structures. */
static malloc_mutex_t huge_mtx;
static extent_node_t *
huge_node_get(const void *ptr)
{
extent_node_t *node;
/******************************************************************************/
node = chunk_lookup(ptr);
assert(node->size != 0);
/* Tree of chunks that are stand-alone huge allocations. */
static extent_tree_t huge;
return (node);
}
static bool
huge_node_set(const void *ptr, extent_node_t *node)
{
assert(node->addr == ptr);
assert(node->size != 0);
return (chunk_register(ptr, node));
}
static void
huge_node_unset(const void *ptr, const extent_node_t *node)
{
chunk_deregister(ptr, node);
}
void *
huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,

@@ -55,15 +73,22 @@ huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
return (NULL);
}
/* Insert node into huge. */
node->addr = ret;
node->size = usize;
node->zeroed = is_zeroed;
node->arena = arena;
malloc_mutex_lock(&huge_mtx);
extent_tree_ad_insert(&huge, node);
malloc_mutex_unlock(&huge_mtx);
if (huge_node_set(ret, node)) {
arena_chunk_dalloc_huge(arena, ret, usize);
idalloctm(tsd, node, tcache, true);
return (NULL);
}
/* Insert node into huge. */
malloc_mutex_lock(&arena->huge_mtx);
ql_elm_new(node, link_ql);
ql_tail_insert(&arena->huge, node, link_ql);
malloc_mutex_unlock(&arena->huge_mtx);
if (zero || (config_fill && unlikely(opt_zero))) {
if (!is_zeroed)

@@ -74,32 +99,6 @@ huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
return (ret);
}
static extent_node_t *
huge_node_locked(const void *ptr)
{
extent_node_t *node, key;
/* Extract from tree of huge allocations. */
key.addr = __DECONST(void *, ptr);
node = extent_tree_ad_search(&huge, &key);
assert(node != NULL);
assert(node->addr == ptr);
return (node);
}
static extent_node_t *
huge_node(const void *ptr)
{
extent_node_t *node;
malloc_mutex_lock(&huge_mtx);
node = huge_node_locked(ptr);
malloc_mutex_unlock(&huge_mtx);
return (node);
}
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)

@@ -152,15 +151,15 @@ huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize,
} else
zeroed = true;
malloc_mutex_lock(&huge_mtx);
node = huge_node_locked(ptr);
node = huge_node_get(ptr);
arena = node->arena;
malloc_mutex_lock(&arena->huge_mtx);
/* Update the size of the huge allocation. */
assert(node->size != usize);
node->size = usize;
/* Clear node->zeroed if zeroing failed above. */
node->zeroed = (node->zeroed && zeroed);
malloc_mutex_unlock(&huge_mtx);
malloc_mutex_unlock(&arena->huge_mtx);
arena_chunk_ralloc_huge_similar(arena, ptr, oldsize, usize);

@@ -195,14 +194,14 @@ huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
zeroed = false;
}
malloc_mutex_lock(&huge_mtx);
node = huge_node_locked(ptr);
node = huge_node_get(ptr);
arena = node->arena;
malloc_mutex_lock(&arena->huge_mtx);
/* Update the size of the huge allocation. */
node->size = usize;
/* Clear node->zeroed if zeroing failed above. */
node->zeroed = (node->zeroed && zeroed);
malloc_mutex_unlock(&huge_mtx);
malloc_mutex_unlock(&arena->huge_mtx);
/* Zap the excess chunks. */
arena_chunk_ralloc_huge_shrink(arena, ptr, oldsize, usize);

@@ -221,11 +220,11 @@ huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t size, bool zero) {
return (true);
}
malloc_mutex_lock(&huge_mtx);
node = huge_node_locked(ptr);
node = huge_node_get(ptr);
arena = node->arena;
malloc_mutex_lock(&arena->huge_mtx);
is_zeroed_subchunk = node->zeroed;
malloc_mutex_unlock(&huge_mtx);
malloc_mutex_unlock(&arena->huge_mtx);
/*
* Copy zero into is_zeroed_chunk and pass the copy to chunk_alloc(), so

@@ -237,10 +236,10 @@ huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t size, bool zero) {
&is_zeroed_chunk))
return (true);
malloc_mutex_lock(&huge_mtx);
malloc_mutex_lock(&arena->huge_mtx);
/* Update the size of the huge allocation. */
node->size = usize;
malloc_mutex_unlock(&huge_mtx);
malloc_mutex_unlock(&arena->huge_mtx);
if (zero || (config_fill && unlikely(opt_zero))) {
if (!is_zeroed_subchunk) {

@@ -356,11 +355,14 @@ void
huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
{
extent_node_t *node;
arena_t *arena;
malloc_mutex_lock(&huge_mtx);
node = huge_node_locked(ptr);
extent_tree_ad_remove(&huge, node);
malloc_mutex_unlock(&huge_mtx);
node = huge_node_get(ptr);
arena = node->arena;
huge_node_unset(ptr, node);
malloc_mutex_lock(&arena->huge_mtx);
ql_remove(&arena->huge, node, link_ql);
malloc_mutex_unlock(&arena->huge_mtx);
huge_dalloc_junk(node->addr, node->size);
arena_chunk_dalloc_huge(node->arena, node->addr, node->size);

@@ -371,59 +373,50 @@ arena_t *
huge_aalloc(const void *ptr)
{
return (huge_node(ptr)->arena);
return (huge_node_get(ptr)->arena);
}
size_t
huge_salloc(const void *ptr)
{
size_t size;
extent_node_t *node;
arena_t *arena;
return (huge_node(ptr)->size);
node = huge_node_get(ptr);
arena = node->arena;
malloc_mutex_lock(&arena->huge_mtx);
size = node->size;
malloc_mutex_unlock(&arena->huge_mtx);
return (size);
}
prof_tctx_t *
huge_prof_tctx_get(const void *ptr)
{
prof_tctx_t *tctx;
extent_node_t *node;
arena_t *arena;
return (huge_node(ptr)->prof_tctx);
node = huge_node_get(ptr);
arena = node->arena;
malloc_mutex_lock(&arena->huge_mtx);
tctx = node->prof_tctx;
malloc_mutex_unlock(&arena->huge_mtx);
return (tctx);
}
void
huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
{
extent_node_t *node;
arena_t *arena;
huge_node(ptr)->prof_tctx = tctx;
}
bool
huge_boot(void)
{
/* Initialize chunks data. */
if (malloc_mutex_init(&huge_mtx))
return (true);
extent_tree_ad_new(&huge);
return (false);
}
void
huge_prefork(void)
{
malloc_mutex_prefork(&huge_mtx);
}
void
huge_postfork_parent(void)
{
malloc_mutex_postfork_parent(&huge_mtx);
}
void
huge_postfork_child(void)
{
malloc_mutex_postfork_child(&huge_mtx);
node = huge_node_get(ptr);
arena = node->arena;
malloc_mutex_lock(&arena->huge_mtx);
node->prof_tctx = tctx;
malloc_mutex_unlock(&arena->huge_mtx);
}
src/jemalloc.c

@@ -1195,8 +1195,6 @@ malloc_init_hard_a0_locked(void)
return (true);
if (config_tcache && tcache_boot())
malloc_mutex_unlock(&init_lock);
if (huge_boot())
return (true);
if (malloc_mutex_init(&arenas_lock))
return (true);
/*

@@ -2310,12 +2308,10 @@ je_sallocx(const void *ptr, int flags)
assert(malloc_initialized() || IS_INITIALIZER);
malloc_thread_init();
if (config_ivsalloc)
if (config_debug)
usize = ivsalloc(ptr, config_prof);
else {
assert(ptr != NULL);
else
usize = isalloc(ptr, config_prof);
}
return (usize);
}

@@ -2440,10 +2436,10 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
assert(malloc_initialized() || IS_INITIALIZER);
malloc_thread_init();
if (config_ivsalloc)
if (config_debug)
ret = ivsalloc(ptr, config_prof);
else
ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;
ret = (ptr == NULL) ? 0 : isalloc(ptr, config_prof);
return (ret);
}

@@ -2504,7 +2500,6 @@ _malloc_prefork(void)
}
chunk_prefork();
base_prefork();
huge_prefork();
}
#ifndef JEMALLOC_MUTEX_INIT_CB

@@ -2524,7 +2519,6 @@ _malloc_postfork(void)
assert(malloc_initialized());
/* Release all mutexes, now that fork() has completed. */
huge_postfork_parent();
base_postfork_parent();
chunk_postfork_parent();
for (i = 0; i < narenas_total; i++) {

@@ -2544,7 +2538,6 @@ jemalloc_postfork_child(void)
assert(malloc_initialized());
/* Release all mutexes, now that fork() has completed. */
huge_postfork_child();
base_postfork_child();
chunk_postfork_child();
for (i = 0; i < narenas_total; i++) {
src/stats.c (12 changed lines)

@@ -547,8 +547,6 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
if (config_stats) {
size_t *cactive;
size_t allocated, active, metadata, mapped;
size_t chunks_current, chunks_high;
uint64_t chunks_total;
CTL_GET("stats.cactive", &cactive, size_t *);
CTL_GET("stats.allocated", &allocated, size_t);

@@ -561,16 +559,6 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
malloc_cprintf(write_cb, cbopaque,
"Current active ceiling: %zu\n", atomic_read_z(cactive));
/* Print chunk stats. */
CTL_GET("stats.chunks.total", &chunks_total, uint64_t);
CTL_GET("stats.chunks.high", &chunks_high, size_t);
CTL_GET("stats.chunks.current", &chunks_current, size_t);
malloc_cprintf(write_cb, cbopaque, "chunks: nchunks "
"highchunks curchunks\n");
malloc_cprintf(write_cb, cbopaque,
" %13"PRIu64" %12zu %12zu\n",
chunks_total, chunks_high, chunks_current);
if (merged) {
unsigned narenas;
src/tcache.c

@@ -102,7 +102,7 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_bin_t *tbin, index_t binind,
/* Lock the arena bin associated with the first object. */
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
tbin->avail[0]);
arena_t *bin_arena = chunk->arena;
arena_t *bin_arena = chunk->node.arena;
arena_bin_t *bin = &bin_arena->bins[binind];
if (config_prof && bin_arena == arena) {

@@ -124,7 +124,7 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_bin_t *tbin, index_t binind,
ptr = tbin->avail[i];
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk->arena == bin_arena) {
if (chunk->node.arena == bin_arena) {
size_t pageind = ((uintptr_t)ptr -
(uintptr_t)chunk) >> LG_PAGE;
arena_chunk_map_bits_t *bitselm =

@@ -182,7 +182,7 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, index_t binind,
/* Lock the arena associated with the first object. */
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
tbin->avail[0]);
arena_t *locked_arena = chunk->arena;
arena_t *locked_arena = chunk->node.arena;
UNUSED bool idump;
if (config_prof)

@@ -208,7 +208,7 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, index_t binind,
ptr = tbin->avail[i];
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk->arena == locked_arena) {
if (chunk->node.arena == locked_arena) {
arena_dalloc_large_junked_locked(locked_arena,
chunk, ptr);
} else {