#define	JEMALLOC_CHUNK_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

size_t	opt_lg_chunk = LG_CHUNK_DEFAULT;
#ifdef JEMALLOC_SWAP
bool	opt_overcommit = true;
#endif

#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
malloc_mutex_t	chunks_mtx;
chunk_stats_t	stats_chunks;
#endif

#ifdef JEMALLOC_IVSALLOC
rtree_t		*chunks_rtree;
#endif

/* Various chunk-related settings. */
size_t	chunksize;
size_t	chunksize_mask; /* (chunksize - 1). */
size_t	chunk_npages;
size_t	map_bias;
size_t	arena_maxclass; /* Max size class for arenas. */

/******************************************************************************/

/*
 * If the caller specifies (*zero == false), it is still possible to receive
 * zeroed memory, in which case *zero is toggled to true.  arena_chunk_alloc()
 * takes advantage of this to avoid demanding zeroed chunks, but taking
 * advantage of them if they are returned.
 */
void *
chunk_alloc(size_t size, bool base, bool *zero)
{
	void *ret;

	assert(size != 0);
	assert((size & chunksize_mask) == 0);

#ifdef JEMALLOC_SWAP
	if (swap_enabled) {
		ret = chunk_alloc_swap(size, zero);
		if (ret != NULL)
			goto RETURN;
	}

	if (swap_enabled == false || opt_overcommit) {
#endif
#ifdef JEMALLOC_DSS
		ret = chunk_alloc_dss(size, zero);
		if (ret != NULL)
			goto RETURN;
#endif
		ret = chunk_alloc_mmap(size);
		if (ret != NULL) {
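			/*
			 * Fresh anonymous mappings are demand-zeroed by the
			 * kernel, so a successful mmap always satisfies a
			 * request for zeroed memory, whether or not the
			 * caller asked for it.
			 */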
			*zero = true;
			goto RETURN;
		}
#ifdef JEMALLOC_SWAP
	}
#endif

	/* All strategies for allocation failed. */
	ret = NULL;
RETURN:
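	/*
	 * Track the chunk in chunks_rtree so that ivsalloc() can tell whether
	 * a pointer refers to a live allocation; base (internal) allocations
	 * are never looked up this way, so they are skipped.
	 */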
#ifdef JEMALLOC_IVSALLOC
	if (base == false && ret != NULL) {
		if (rtree_set(chunks_rtree, (uintptr_t)ret, ret)) {
			chunk_dealloc(ret, size, true);
			return (NULL);
		}
	}
#endif
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
	if (ret != NULL) {
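		/*
		 * Record whether a new chunk high water mark was reached, but
		 * defer the (expensive) profile dump until chunks_mtx has
		 * been released.
		 */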
# ifdef JEMALLOC_PROF
		bool gdump;
# endif
		malloc_mutex_lock(&chunks_mtx);
# ifdef JEMALLOC_STATS
		stats_chunks.nchunks += (size / chunksize);
# endif
		stats_chunks.curchunks += (size / chunksize);
		if (stats_chunks.curchunks > stats_chunks.highchunks) {
			stats_chunks.highchunks = stats_chunks.curchunks;
# ifdef JEMALLOC_PROF
			gdump = true;
# endif
		}
# ifdef JEMALLOC_PROF
		else
			gdump = false;
# endif
		malloc_mutex_unlock(&chunks_mtx);
# ifdef JEMALLOC_PROF
		if (opt_prof && opt_prof_gdump && gdump)
			prof_gdump();
# endif
	}
#endif

	assert(CHUNK_ADDR2BASE(ret) == ret);
	return (ret);
}
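
/*
 * Usage sketch (illustrative only, not part of the source): a caller that
 * does not require zeroed memory passes *zero == false, then inspects *zero
 * afterward to learn whether it received zeroed pages anyway:
 *
 *	bool zero = false;
 *	void *chunk = chunk_alloc(chunksize, false, &zero);
 *	if (chunk != NULL && zero == false) {
 *		(Zero whatever portions of the chunk must start out zeroed.)
 *	}
 */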

void
chunk_dealloc(void *chunk, size_t size, bool unmap)
{
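
	/*
	 * When unmap is false, only bookkeeping is updated; the mapping
	 * itself is left intact.
	 */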
	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
	assert(size != 0);
	assert((size & chunksize_mask) == 0);

#ifdef JEMALLOC_IVSALLOC
	rtree_set(chunks_rtree, (uintptr_t)chunk, NULL);
#endif
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
	malloc_mutex_lock(&chunks_mtx);
	stats_chunks.curchunks -= (size / chunksize);
	malloc_mutex_unlock(&chunks_mtx);
#endif

	if (unmap) {
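		/*
		 * Try each backend in turn; chunk_dealloc_swap() and
		 * chunk_dealloc_dss() return false on success, per the usual
		 * jemalloc error-return convention.
		 */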
#ifdef JEMALLOC_SWAP
		if (swap_enabled && chunk_dealloc_swap(chunk, size) == false)
			return;
#endif
#ifdef JEMALLOC_DSS
		if (chunk_dealloc_dss(chunk, size) == false)
			return;
#endif
		chunk_dealloc_mmap(chunk, size);
	}
}

bool
chunk_boot(void)
{

	/* Set variables according to the value of opt_lg_chunk. */
	chunksize = (ZU(1) << opt_lg_chunk);
	assert(chunksize >= PAGE_SIZE);
	chunksize_mask = chunksize - 1;
	chunk_npages = (chunksize >> PAGE_SHIFT);

#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
	if (malloc_mutex_init(&chunks_mtx))
		return (true);
	memset(&stats_chunks, 0, sizeof(chunk_stats_t));
#endif
#ifdef JEMALLOC_SWAP
	if (chunk_swap_boot())
		return (true);
#endif
	if (chunk_mmap_boot())
		return (true);
#ifdef JEMALLOC_DSS
	if (chunk_dss_boot())
		return (true);
#endif
#ifdef JEMALLOC_IVSALLOC
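	/*
	 * Key the rtree on the high-order address bits: a pointer is
	 * (1 << (LG_SIZEOF_PTR+3)) bits wide, and the low opt_lg_chunk bits
	 * of a chunk-aligned address are always zero.
	 */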
	chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) - opt_lg_chunk);
	if (chunks_rtree == NULL)
		return (true);
#endif

	return (false);
}