#define JEMALLOC_HUGE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

/* Protects chunk-related data structures. */
static malloc_mutex_t huge_mtx;

/******************************************************************************/

/* Tree of chunks that are stand-alone huge allocations. */
static extent_tree_t huge;

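/*
 * Allocate a huge region of at least size bytes; equivalent to huge_palloc()
 * with the minimum (chunksize) alignment.
 */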
void *
huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero)
{

	return (huge_palloc(tsd, arena, size, chunksize, zero));
}

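/*
 * Allocate a chunk-aligned huge region, rounding size up to a multiple of the
 * chunk size, and record it in the huge extent tree.
 */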
void *
huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
    bool zero)
{
	void *ret;
	size_t csize;
	extent_node_t *node;
	bool is_zeroed;

	/* Allocate one or more contiguous chunks for this request. */
	csize = CHUNK_CEILING(size);
	if (csize == 0) {
		/* size is large enough to cause size_t wrap-around. */
		return (NULL);
	}

	/* Allocate an extent node with which to track the chunk. */
	node = base_node_alloc();
	if (node == NULL)
		return (NULL);

	/*
	 * Copy zero into is_zeroed and pass the copy to
	 * arena_chunk_alloc_huge(), so that it is possible to make correct
	 * junk/zero fill decisions below.
	 */
	is_zeroed = zero;
	arena = choose_arena(tsd, arena);
	ret = arena_chunk_alloc_huge(arena, csize, alignment, &is_zeroed);
	if (ret == NULL) {
		base_node_dalloc(node);
		return (NULL);
	}

	/* Insert node into huge. */
	node->addr = ret;
	node->size = csize;
	node->arena = arena;

	malloc_mutex_lock(&huge_mtx);
	extent_tree_ad_insert(&huge, node);
	malloc_mutex_unlock(&huge_mtx);

	if (config_fill && zero == false) {
		if (unlikely(opt_junk))
			memset(ret, 0xa5, csize);
		else if (unlikely(opt_zero) && is_zeroed == false)
			memset(ret, 0, csize);
	}

	return (ret);
}

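/*
 * Try to satisfy a resize request without moving the allocation.  Returns
 * false on success (the existing allocation suffices), true if reallocation
 * would require a move.
 */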
bool
huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
{

	/*
	 * Avoid moving the allocation if the size class can be left the same.
	 */
	if (oldsize > arena_maxclass
	    && CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
		assert(CHUNK_CEILING(oldsize) == oldsize);
		return (false);
	}

	/* Reallocation would require a move. */
	return (true);
}

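/*
 * Resize the huge allocation at ptr.  If the request cannot be satisfied in
 * place, allocate new space (first trying size+extra, then falling back to
 * size), copy at most size bytes, and free the old allocation.
 */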
void *
huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
    size_t extra, size_t alignment, bool zero, bool try_tcache_dalloc)
{
	void *ret;
	size_t copysize;

	/* Try to avoid moving the allocation. */
	if (huge_ralloc_no_move(ptr, oldsize, size, extra) == false)
		return (ptr);

	/*
	 * size and oldsize are different enough that we need to use a
	 * different size class.  In that case, fall back to allocating new
	 * space and copying.
	 */
	if (alignment > chunksize)
		ret = huge_palloc(tsd, arena, size + extra, alignment, zero);
	else
		ret = huge_malloc(tsd, arena, size + extra, zero);

	if (ret == NULL) {
		if (extra == 0)
			return (NULL);
		/* Try again, this time without extra. */
		if (alignment > chunksize)
			ret = huge_palloc(tsd, arena, size, alignment, zero);
		else
			ret = huge_malloc(tsd, arena, size, zero);

		if (ret == NULL)
			return (NULL);
	}

	/*
	 * Copy at most size bytes (not size+extra), since the caller has no
	 * expectation that the extra bytes will be reliably preserved.
	 */
	copysize = (size < oldsize) ? size : oldsize;
	memcpy(ret, ptr, copysize);
	iqalloc(tsd, ptr, try_tcache_dalloc);
	return (ret);
}

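/*
 * Under JEMALLOC_JET the name mangling below exposes the static
 * implementation through a function pointer, so that test code can interpose
 * its own huge_dalloc_junk().
 */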
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
#endif
static void
huge_dalloc_junk(void *ptr, size_t usize)
{

	if (config_fill && have_dss && unlikely(opt_junk)) {
		/*
		 * Only bother junk filling if the chunk isn't about to be
		 * unmapped.
		 */
		if (config_munmap == false || (have_dss && chunk_in_dss(ptr)))
			memset(ptr, 0x5a, usize);
	}
}
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
#endif

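/*
 * Deallocate the huge region at ptr: remove its node from the huge tree,
 * optionally junk-fill it, return the chunk(s) to the owning arena, and
 * release the extent node.
 */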
void
huge_dalloc(void *ptr)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = ptr;
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);
	assert(node->addr == ptr);
	extent_tree_ad_remove(&huge, node);

	malloc_mutex_unlock(&huge_mtx);

	huge_dalloc_junk(node->addr, node->size);
	arena_chunk_dalloc_huge(node->arena, node->addr, node->size);
	base_node_dalloc(node);
}

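/* Return the usable (chunk-rounded) size of the huge allocation at ptr. */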
size_t
huge_salloc(const void *ptr)
{
	size_t ret;
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	ret = node->size;

	malloc_mutex_unlock(&huge_mtx);

	return (ret);
}

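/*
 * Accessors for the profiling context associated with a huge allocation.
 * Both look up the extent node in the huge tree under huge_mtx.
 */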
prof_tctx_t *
huge_prof_tctx_get(const void *ptr)
{
	prof_tctx_t *ret;
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	ret = node->prof_tctx;

	malloc_mutex_unlock(&huge_mtx);

	return (ret);
}

void
huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	node->prof_tctx = tctx;

	malloc_mutex_unlock(&huge_mtx);
}

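/* Initialize huge allocation data structures; returns true on error. */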
bool
huge_boot(void)
{

	/* Initialize chunks data. */
	if (malloc_mutex_init(&huge_mtx))
		return (true);
	extent_tree_ad_new(&huge);

	return (false);
}

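/* Fork handlers that keep huge_mtx consistent across fork(2). */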
void
huge_prefork(void)
{

	malloc_mutex_prefork(&huge_mtx);
}

void
huge_postfork_parent(void)
{

	malloc_mutex_postfork_parent(&huge_mtx);
}

void
huge_postfork_child(void)
{

	malloc_mutex_postfork_child(&huge_mtx);
}