#define JEMALLOC_CHUNK_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

const char *opt_dss = DSS_DEFAULT;
size_t opt_lg_chunk = LG_CHUNK_DEFAULT;

malloc_mutex_t chunks_mtx;
chunk_stats_t stats_chunks;

/*
 * Trees of chunks that were previously allocated (trees differ only in node
 * ordering).  These are used when allocating chunks, in an attempt to re-use
 * address space.  Depending on function, different tree orderings are needed,
 * which is why there are two trees with the same contents.
 */
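/*
 * The szad trees order nodes by size, then address, which supports the
 * best-fit searches performed during allocation; the ad trees order nodes by
 * address only, which supports coalescing of adjacent extents during
 * deallocation.
 */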
static extent_tree_t chunks_szad_mmap;
static extent_tree_t chunks_ad_mmap;
static extent_tree_t chunks_szad_dss;
static extent_tree_t chunks_ad_dss;

rtree_t *chunks_rtree;

/* Various chunk-related settings. */
size_t chunksize;
size_t chunksize_mask; /* (chunksize - 1). */
size_t chunk_npages;
size_t map_bias;
size_t arena_maxclass; /* Max size class for arenas. */

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void *chunk_recycle(extent_tree_t *chunks_szad,
    extent_tree_t *chunks_ad, size_t size, size_t alignment, bool base,
    bool *zero);
static void chunk_record(extent_tree_t *chunks_szad,
    extent_tree_t *chunks_ad, void *chunk, size_t size);

/******************************************************************************/

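/*
 * Try to satisfy a chunk request from the given pair of recycling trees: find
 * the smallest previously discarded extent that can hold a suitably aligned
 * allocation of the requested size, return any leading/trailing space to the
 * trees as smaller extents, and zero the result only if the caller demands it
 * and the recycled pages are not already known to be zeroed.
 */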
static void *
chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
    size_t alignment, bool base, bool *zero)
{
    void *ret;
    extent_node_t *node;
    extent_node_t key;
    size_t alloc_size, leadsize, trailsize;
    bool zeroed;

    if (base) {
        /*
         * This function may need to call base_node_{,de}alloc(), but
         * the current chunk allocation request is on behalf of the
         * base allocator.  Avoid deadlock (and if that weren't an
         * issue, potential for infinite recursion) by returning NULL.
         */
        return (NULL);
    }

    alloc_size = size + alignment - chunksize;
    /* Beware size_t wrap-around. */
    if (alloc_size < size)
        return (NULL);
    key.addr = NULL;
    key.size = alloc_size;
    malloc_mutex_lock(&chunks_mtx);
    node = extent_tree_szad_nsearch(chunks_szad, &key);
    if (node == NULL) {
        malloc_mutex_unlock(&chunks_mtx);
        return (NULL);
    }
    leadsize = ALIGNMENT_CEILING((uintptr_t)node->addr, alignment) -
        (uintptr_t)node->addr;
    assert(node->size >= leadsize + size);
    trailsize = node->size - leadsize - size;
    ret = (void *)((uintptr_t)node->addr + leadsize);
    /* Remove node from the tree. */
    extent_tree_szad_remove(chunks_szad, node);
    extent_tree_ad_remove(chunks_ad, node);
    if (leadsize != 0) {
        /* Insert the leading space as a smaller chunk. */
        node->size = leadsize;
        extent_tree_szad_insert(chunks_szad, node);
        extent_tree_ad_insert(chunks_ad, node);
        node = NULL;
    }
    if (trailsize != 0) {
        /* Insert the trailing space as a smaller chunk. */
        if (node == NULL) {
            /*
             * An additional node is required, but
             * base_node_alloc() can cause a new base chunk to be
             * allocated.  Drop chunks_mtx in order to avoid
             * deadlock, and if node allocation fails, deallocate
             * the result before returning an error.
             */
            malloc_mutex_unlock(&chunks_mtx);
            node = base_node_alloc();
            if (node == NULL) {
                chunk_dealloc(ret, size, true);
                return (NULL);
            }
            malloc_mutex_lock(&chunks_mtx);
        }
        node->addr = (void *)((uintptr_t)(ret) + size);
        node->size = trailsize;
        extent_tree_szad_insert(chunks_szad, node);
        extent_tree_ad_insert(chunks_ad, node);
        node = NULL;
    }
    malloc_mutex_unlock(&chunks_mtx);

    zeroed = false;
    if (node != NULL) {
        if (node->zeroed) {
            zeroed = true;
            *zero = true;
        }
        base_node_dealloc(node);
    }
    VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
    if (zeroed == false && *zero)
        memset(ret, 0, size);
    return (ret);
}

/*
 * If the caller specifies (*zero == false), it is still possible to receive
 * zeroed memory, in which case *zero is toggled to true.  arena_chunk_alloc()
 * takes advantage of this to avoid demanding zeroed chunks, but taking
 * advantage of them if they are returned.
 */
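/*
 * Allocation is attempted in an order determined by dss_prec: recycled dss
 * extents and then newly allocated dss memory when dss is "primary", followed
 * by recycled and then newly mmap'ed memory, followed by dss when it is
 * "secondary".  An illustrative (not normative) caller that merely prefers
 * zeroed memory, assuming the chunk_dss_prec_get() accessor from chunk_dss.c,
 * might look like:
 *
 *     bool zero = false;
 *     void *chunk = chunk_alloc(chunksize, chunksize, false, &zero,
 *         chunk_dss_prec_get());
 *     if (chunk != NULL && zero) {
 *         ... skip explicit initialization ...
 *     }
 */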
void *
chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
    dss_prec_t dss_prec)
{
    void *ret;

    assert(size != 0);
    assert((size & chunksize_mask) == 0);
    assert(alignment != 0);
    assert((alignment & chunksize_mask) == 0);

    /* "primary" dss. */
    if (config_dss && dss_prec == dss_prec_primary) {
        if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
            alignment, base, zero)) != NULL)
            goto label_return;
        if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
            goto label_return;
    }
    /* mmap. */
    if ((ret = chunk_recycle(&chunks_szad_mmap, &chunks_ad_mmap, size,
        alignment, base, zero)) != NULL)
        goto label_return;
    if ((ret = chunk_alloc_mmap(size, alignment, zero)) != NULL)
        goto label_return;
    /* "secondary" dss. */
    if (config_dss && dss_prec == dss_prec_secondary) {
        if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
            alignment, base, zero)) != NULL)
            goto label_return;
        if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
            goto label_return;
    }

    /* All strategies for allocation failed. */
    ret = NULL;
label_return:
    if (config_ivsalloc && base == false && ret != NULL) {
        if (rtree_set(chunks_rtree, (uintptr_t)ret, ret)) {
            chunk_dealloc(ret, size, true);
            return (NULL);
        }
    }
    if ((config_stats || config_prof) && ret != NULL) {
        bool gdump;
        malloc_mutex_lock(&chunks_mtx);
        if (config_stats)
            stats_chunks.nchunks += (size / chunksize);
        stats_chunks.curchunks += (size / chunksize);
        if (stats_chunks.curchunks > stats_chunks.highchunks) {
            stats_chunks.highchunks = stats_chunks.curchunks;
            if (config_prof)
                gdump = true;
        } else if (config_prof)
            gdump = false;
        malloc_mutex_unlock(&chunks_mtx);
        if (config_prof && opt_prof && opt_prof_gdump && gdump)
            prof_gdump();
    }
    if (config_debug && *zero && ret != NULL) {
        size_t i;
        size_t *p = (size_t *)(uintptr_t)ret;

        VALGRIND_MAKE_MEM_DEFINED(ret, size);
        for (i = 0; i < size / sizeof(size_t); i++)
            assert(p[i] == 0);
    }
    assert(CHUNK_ADDR2BASE(ret) == ret);
    return (ret);
}

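/*
 * Record a chunk that is being discarded in the given pair of recycling trees,
 * coalescing it with any adjacent recorded extents.  The pages are purged
 * before being recorded, and each node's zeroed flag tracks whether the pages
 * are still known to be zeroed.
 */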
static void
chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
    size_t size)
{
    bool unzeroed;
    extent_node_t *xnode, *node, *prev, key;

    unzeroed = pages_purge(chunk, size);

    /*
     * Allocate a node before acquiring chunks_mtx even though it might not
     * be needed, because base_node_alloc() may cause a new base chunk to
     * be allocated, which could cause deadlock if chunks_mtx were already
     * held.
     */
    xnode = base_node_alloc();

    malloc_mutex_lock(&chunks_mtx);
    key.addr = (void *)((uintptr_t)chunk + size);
    node = extent_tree_ad_nsearch(chunks_ad, &key);
    /* Try to coalesce forward. */
    if (node != NULL && node->addr == key.addr) {
        /*
         * Coalesce chunk with the following address range.  This does
         * not change the position within chunks_ad, so only
         * remove/insert from/into chunks_szad.
         */
        extent_tree_szad_remove(chunks_szad, node);
        node->addr = chunk;
        node->size += size;
        node->zeroed = (node->zeroed && (unzeroed == false));
        extent_tree_szad_insert(chunks_szad, node);
        if (xnode != NULL)
            base_node_dealloc(xnode);
    } else {
        /* Coalescing forward failed, so insert a new node. */
        if (xnode == NULL) {
            /*
             * base_node_alloc() failed, which is an exceedingly
             * unlikely failure.  Leak chunk; its pages have
             * already been purged, so this is only a virtual
             * memory leak.
             */
            malloc_mutex_unlock(&chunks_mtx);
            return;
        }
        node = xnode;
        node->addr = chunk;
        node->size = size;
        node->zeroed = (unzeroed == false);
        extent_tree_ad_insert(chunks_ad, node);
        extent_tree_szad_insert(chunks_szad, node);
    }

    /* Try to coalesce backward. */
    prev = extent_tree_ad_prev(chunks_ad, node);
    if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
        chunk) {
        /*
         * Coalesce chunk with the previous address range.  This does
         * not change the position within chunks_ad, so only
         * remove/insert node from/into chunks_szad.
         */
        extent_tree_szad_remove(chunks_szad, prev);
        extent_tree_ad_remove(chunks_ad, prev);

        extent_tree_szad_remove(chunks_szad, node);
        node->addr = prev->addr;
        node->size += prev->size;
        node->zeroed = (node->zeroed && prev->zeroed);
        extent_tree_szad_insert(chunks_szad, node);

        base_node_dealloc(prev);
    }
    malloc_mutex_unlock(&chunks_mtx);
}

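/*
 * Return a chunk's pages to the recycling trees: dss memory can never be
 * unmapped, so it is always recorded, while mmap'ed memory is recorded only
 * when chunk_dealloc_mmap() reports that it did not unmap the pages.
 */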
void
chunk_unmap(void *chunk, size_t size)
{
    assert(chunk != NULL);
    assert(CHUNK_ADDR2BASE(chunk) == chunk);
    assert(size != 0);
    assert((size & chunksize_mask) == 0);

    if (config_dss && chunk_in_dss(chunk))
        chunk_record(&chunks_szad_dss, &chunks_ad_dss, chunk, size);
    else if (chunk_dealloc_mmap(chunk, size))
        chunk_record(&chunks_szad_mmap, &chunks_ad_mmap, chunk, size);
}

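/*
 * Discard a chunk: drop it from the ivsalloc radix tree, update chunk
 * statistics, and optionally return its pages via chunk_unmap().
 */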
void
chunk_dealloc(void *chunk, size_t size, bool unmap)
{

    assert(chunk != NULL);
    assert(CHUNK_ADDR2BASE(chunk) == chunk);
    assert(size != 0);
    assert((size & chunksize_mask) == 0);

    if (config_ivsalloc)
        rtree_set(chunks_rtree, (uintptr_t)chunk, NULL);
    if (config_stats || config_prof) {
        malloc_mutex_lock(&chunks_mtx);
        assert(stats_chunks.curchunks >= (size / chunksize));
        stats_chunks.curchunks -= (size / chunksize);
        malloc_mutex_unlock(&chunks_mtx);
    }

    if (unmap)
        chunk_unmap(chunk, size);
}

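/*
 * Initialize chunk bookkeeping at startup: derive chunksize, chunksize_mask,
 * and chunk_npages from opt_lg_chunk, set up chunk statistics and the
 * recycling trees, and, when ivsalloc is enabled, create a radix tree keyed
 * by the high-order pointer bits that identify a chunk.  Returns true on
 * error.
 */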
bool
chunk_boot(void)
{

    /* Set variables according to the value of opt_lg_chunk. */
    chunksize = (ZU(1) << opt_lg_chunk);
    assert(chunksize >= PAGE);
    chunksize_mask = chunksize - 1;
    chunk_npages = (chunksize >> LG_PAGE);

    if (config_stats || config_prof) {
        if (malloc_mutex_init(&chunks_mtx))
            return (true);
        memset(&stats_chunks, 0, sizeof(chunk_stats_t));
    }
    if (config_dss && chunk_dss_boot())
        return (true);
    extent_tree_szad_new(&chunks_szad_mmap);
    extent_tree_ad_new(&chunks_ad_mmap);
    extent_tree_szad_new(&chunks_szad_dss);
    extent_tree_ad_new(&chunks_ad_dss);
    if (config_ivsalloc) {
        chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) -
            opt_lg_chunk);
        if (chunks_rtree == NULL)
            return (true);
    }

    return (false);
}

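/*
 * Fork handlers: acquire chunk-related locks (and radix tree resources, when
 * ivsalloc is enabled) before fork(), and release them in the parent and
 * child afterward, so that a child created while another thread holds
 * chunks_mtx does not inherit a permanently locked mutex.
 */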
void
chunk_prefork(void)
{

    malloc_mutex_lock(&chunks_mtx);
    if (config_ivsalloc)
        rtree_prefork(chunks_rtree);
    chunk_dss_prefork();
}

void
chunk_postfork_parent(void)
{

    chunk_dss_postfork_parent();
    if (config_ivsalloc)
        rtree_postfork_parent(chunks_rtree);
    malloc_mutex_postfork_parent(&chunks_mtx);
}

void
chunk_postfork_child(void)
{

    chunk_dss_postfork_child();
    if (config_ivsalloc)
        rtree_postfork_child(chunks_rtree);
    malloc_mutex_postfork_child(&chunks_mtx);
}