Resolve bootstrapping issues when embedded in FreeBSD libc.

b2c0d6322d (Add witness, a simple online
locking validator.) caused a broad propagation of tsd throughout the
internal API, but tsd_fetch() was designed to fail prior to tsd
bootstrapping.  Fix this by splitting tsd_t into non-nullable tsd_t and
nullable tsdn_t, and modifying all internal APIs that do not critically
rely on tsd to take nullable pointers.  Furthermore, add the
tsd_booted_get() function so that tsdn_fetch() can probe whether tsd
bootstrapping is complete and return NULL if not.  All dangerous
conversions of nullable pointers are tsdn_tsd() calls that assert-fail
on invalid conversion.
commit c1e00ef2a6
parent 0c12dcabc5
Author: Jason Evans
Date:   2016-05-10 22:21:10 -07:00
34 changed files with 1709 additions and 1556 deletions
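
For readers new to the split, here is a minimal sketch of the resulting API shape. The stand-in definitions below are illustrative only; the real types and functions live in jemalloc's internal tsd.h.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

/* Simplified stand-ins, not jemalloc's actual internals. */
typedef struct tsd_s { int state; } tsd_t;	/* Non-nullable: tsd is live. */
typedef struct tsdn_s { tsd_t tsd; } tsdn_t;	/* Nullable view of tsd_t. */

static bool tsd_booted = false;	/* Flipped once tsd bootstrapping completes. */
static tsd_t tsd_storage;	/* Stand-in for the real TLS storage. */

static bool
tsd_booted_get(void)
{
	return (tsd_booted);
}

static tsd_t *
tsd_fetch(void)
{
	/* The real tsd_fetch() fails if called before bootstrapping. */
	assert(tsd_booted);
	return (&tsd_storage);
}

/* Widening conversion: always safe. */
static tsdn_t *
tsd_tsdn(tsd_t *tsd)
{
	return ((tsdn_t *)tsd);
}

static bool
tsdn_null(const tsdn_t *tsdn)
{
	return (tsdn == NULL);
}

/* The only narrowing conversion; assert-fails on invalid (NULL) input. */
static tsd_t *
tsdn_tsd(tsdn_t *tsdn)
{
	assert(!tsdn_null(tsdn));
	return (&tsdn->tsd);
}

/* Unlike tsd_fetch(), safe to call at any time; NULL means "not booted". */
static tsdn_t *
tsdn_fetch(void)
{
	if (!tsd_booted_get())
		return (NULL);
	return (tsd_tsdn(tsd_fetch()));
}

The rule the rest of the diff follows: internal APIs that only need incidental thread state (witness lock-order checks, stats) now take tsdn_t * and tolerate NULL, while APIs that critically rely on tsd keep tsd_t *.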

File diff suppressed because it is too large.

src/base.c

@@ -14,11 +14,11 @@ static size_t base_mapped;
/******************************************************************************/
static extent_node_t *
base_node_try_alloc(tsd_t *tsd)
base_node_try_alloc(tsdn_t *tsdn)
{
extent_node_t *node;
malloc_mutex_assert_owner(tsd, &base_mtx);
malloc_mutex_assert_owner(tsdn, &base_mtx);
if (base_nodes == NULL)
return (NULL);
@@ -29,10 +29,10 @@ base_node_try_alloc(tsd_t *tsd)
}
static void
base_node_dalloc(tsd_t *tsd, extent_node_t *node)
base_node_dalloc(tsdn_t *tsdn, extent_node_t *node)
{
malloc_mutex_assert_owner(tsd, &base_mtx);
malloc_mutex_assert_owner(tsdn, &base_mtx);
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
*(extent_node_t **)node = base_nodes;
@@ -40,22 +40,22 @@ base_node_dalloc(tsd_t *tsd, extent_node_t *node)
}
static extent_node_t *
base_chunk_alloc(tsd_t *tsd, size_t minsize)
base_chunk_alloc(tsdn_t *tsdn, size_t minsize)
{
extent_node_t *node;
size_t csize, nsize;
void *addr;
malloc_mutex_assert_owner(tsd, &base_mtx);
malloc_mutex_assert_owner(tsdn, &base_mtx);
assert(minsize != 0);
node = base_node_try_alloc(tsd);
node = base_node_try_alloc(tsdn);
/* Allocate enough space to also carve a node out if necessary. */
nsize = (node == NULL) ? CACHELINE_CEILING(sizeof(extent_node_t)) : 0;
csize = CHUNK_CEILING(minsize + nsize);
addr = chunk_alloc_base(csize);
if (addr == NULL) {
if (node != NULL)
base_node_dalloc(tsd, node);
base_node_dalloc(tsdn, node);
return (NULL);
}
base_mapped += csize;
@@ -78,7 +78,7 @@ base_chunk_alloc(tsd_t *tsd, size_t minsize)
* physical memory usage.
*/
void *
base_alloc(tsd_t *tsd, size_t size)
base_alloc(tsdn_t *tsdn, size_t size)
{
void *ret;
size_t csize, usize;
@@ -93,14 +93,14 @@ base_alloc(tsd_t *tsd, size_t size)
usize = s2u(csize);
extent_node_init(&key, NULL, NULL, usize, false, false);
malloc_mutex_lock(tsd, &base_mtx);
malloc_mutex_lock(tsdn, &base_mtx);
node = extent_tree_szad_nsearch(&base_avail_szad, &key);
if (node != NULL) {
/* Use existing space. */
extent_tree_szad_remove(&base_avail_szad, node);
} else {
/* Try to allocate more space. */
node = base_chunk_alloc(tsd, csize);
node = base_chunk_alloc(tsdn, csize);
}
if (node == NULL) {
ret = NULL;
@@ -113,7 +113,7 @@ base_alloc(tsd_t *tsd, size_t size)
extent_node_size_set(node, extent_node_size_get(node) - csize);
extent_tree_szad_insert(&base_avail_szad, node);
} else
base_node_dalloc(tsd, node);
base_node_dalloc(tsdn, node);
if (config_stats) {
base_allocated += csize;
/*
@@ -125,21 +125,22 @@ base_alloc(tsd_t *tsd, size_t size)
}
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, csize);
label_return:
malloc_mutex_unlock(tsd, &base_mtx);
malloc_mutex_unlock(tsdn, &base_mtx);
return (ret);
}
void
base_stats_get(tsd_t *tsd, size_t *allocated, size_t *resident, size_t *mapped)
base_stats_get(tsdn_t *tsdn, size_t *allocated, size_t *resident,
size_t *mapped)
{
malloc_mutex_lock(tsd, &base_mtx);
malloc_mutex_lock(tsdn, &base_mtx);
assert(base_allocated <= base_resident);
assert(base_resident <= base_mapped);
*allocated = base_allocated;
*resident = base_resident;
*mapped = base_mapped;
malloc_mutex_unlock(tsd, &base_mtx);
malloc_mutex_unlock(tsdn, &base_mtx);
}
bool
@@ -155,22 +156,22 @@ base_boot(void)
}
void
base_prefork(tsd_t *tsd)
base_prefork(tsdn_t *tsdn)
{
malloc_mutex_prefork(tsd, &base_mtx);
malloc_mutex_prefork(tsdn, &base_mtx);
}
void
base_postfork_parent(tsd_t *tsd)
base_postfork_parent(tsdn_t *tsdn)
{
malloc_mutex_postfork_parent(tsd, &base_mtx);
malloc_mutex_postfork_parent(tsdn, &base_mtx);
}
void
base_postfork_child(tsd_t *tsd)
base_postfork_child(tsdn_t *tsdn)
{
malloc_mutex_postfork_child(tsd, &base_mtx);
malloc_mutex_postfork_child(tsdn, &base_mtx);
}

src/chunk.c

@@ -49,7 +49,7 @@ const chunk_hooks_t chunk_hooks_default = {
* definition.
*/
static void chunk_record(tsd_t *tsd, arena_t *arena,
static void chunk_record(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, extent_tree_t *chunks_szad,
extent_tree_t *chunks_ad, bool cache, void *chunk, size_t size, bool zeroed,
bool committed);
@@ -64,23 +64,23 @@ chunk_hooks_get_locked(arena_t *arena)
}
chunk_hooks_t
chunk_hooks_get(tsd_t *tsd, arena_t *arena)
chunk_hooks_get(tsdn_t *tsdn, arena_t *arena)
{
chunk_hooks_t chunk_hooks;
malloc_mutex_lock(tsd, &arena->chunks_mtx);
malloc_mutex_lock(tsdn, &arena->chunks_mtx);
chunk_hooks = chunk_hooks_get_locked(arena);
malloc_mutex_unlock(tsd, &arena->chunks_mtx);
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
return (chunk_hooks);
}
chunk_hooks_t
chunk_hooks_set(tsd_t *tsd, arena_t *arena, const chunk_hooks_t *chunk_hooks)
chunk_hooks_set(tsdn_t *tsdn, arena_t *arena, const chunk_hooks_t *chunk_hooks)
{
chunk_hooks_t old_chunk_hooks;
malloc_mutex_lock(tsd, &arena->chunks_mtx);
malloc_mutex_lock(tsdn, &arena->chunks_mtx);
old_chunk_hooks = arena->chunk_hooks;
/*
* Copy each field atomically so that it is impossible for readers to
@@ -105,13 +105,13 @@ chunk_hooks_set(tsd_t *tsd, arena_t *arena, const chunk_hooks_t *chunk_hooks)
ATOMIC_COPY_HOOK(split);
ATOMIC_COPY_HOOK(merge);
#undef ATOMIC_COPY_HOOK
malloc_mutex_unlock(tsd, &arena->chunks_mtx);
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
return (old_chunk_hooks);
}
static void
chunk_hooks_assure_initialized_impl(tsd_t *tsd, arena_t *arena,
chunk_hooks_assure_initialized_impl(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, bool locked)
{
static const chunk_hooks_t uninitialized_hooks =
@@ -120,28 +120,28 @@ chunk_hooks_assure_initialized_impl(tsd_t *tsd, arena_t *arena,
if (memcmp(chunk_hooks, &uninitialized_hooks, sizeof(chunk_hooks_t)) ==
0) {
*chunk_hooks = locked ? chunk_hooks_get_locked(arena) :
chunk_hooks_get(tsd, arena);
chunk_hooks_get(tsdn, arena);
}
}
static void
chunk_hooks_assure_initialized_locked(tsd_t *tsd, arena_t *arena,
chunk_hooks_assure_initialized_locked(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks)
{
chunk_hooks_assure_initialized_impl(tsd, arena, chunk_hooks, true);
chunk_hooks_assure_initialized_impl(tsdn, arena, chunk_hooks, true);
}
static void
chunk_hooks_assure_initialized(tsd_t *tsd, arena_t *arena,
chunk_hooks_assure_initialized(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks)
{
chunk_hooks_assure_initialized_impl(tsd, arena, chunk_hooks, false);
chunk_hooks_assure_initialized_impl(tsdn, arena, chunk_hooks, false);
}
bool
chunk_register(tsd_t *tsd, const void *chunk, const extent_node_t *node)
chunk_register(tsdn_t *tsdn, const void *chunk, const extent_node_t *node)
{
assert(extent_node_addr_get(node) == chunk);
@@ -161,7 +161,7 @@ chunk_register(tsd_t *tsd, const void *chunk, const extent_node_t *node)
high = atomic_read_z(&highchunks);
}
if (cur > high && prof_gdump_get_unlocked())
prof_gdump(tsd);
prof_gdump(tsdn);
}
return (false);
@@ -199,7 +199,7 @@ chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad,
}
static void *
chunk_recycle(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
bool dalloc_node)
@@ -221,8 +221,8 @@ chunk_recycle(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
/* Beware size_t wrap-around. */
if (alloc_size < size)
return (NULL);
malloc_mutex_lock(tsd, &arena->chunks_mtx);
chunk_hooks_assure_initialized_locked(tsd, arena, chunk_hooks);
malloc_mutex_lock(tsdn, &arena->chunks_mtx);
chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
if (new_addr != NULL) {
extent_node_t key;
extent_node_init(&key, arena, new_addr, alloc_size, false,
@@ -234,7 +234,7 @@ chunk_recycle(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
}
if (node == NULL || (new_addr != NULL && extent_node_size_get(node) <
size)) {
malloc_mutex_unlock(tsd, &arena->chunks_mtx);
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
return (NULL);
}
leadsize = ALIGNMENT_CEILING((uintptr_t)extent_node_addr_get(node),
@@ -253,7 +253,7 @@ chunk_recycle(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
if (leadsize != 0 &&
chunk_hooks->split(extent_node_addr_get(node),
extent_node_size_get(node), leadsize, size, false, arena->ind)) {
malloc_mutex_unlock(tsd, &arena->chunks_mtx);
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
return (NULL);
}
/* Remove node from the tree. */
@@ -273,19 +273,19 @@ chunk_recycle(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
if (chunk_hooks->split(ret, size + trailsize, size,
trailsize, false, arena->ind)) {
if (dalloc_node && node != NULL)
arena_node_dalloc(tsd, arena, node);
malloc_mutex_unlock(tsd, &arena->chunks_mtx);
chunk_record(tsd, arena, chunk_hooks, chunks_szad,
arena_node_dalloc(tsdn, arena, node);
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
chunk_record(tsdn, arena, chunk_hooks, chunks_szad,
chunks_ad, cache, ret, size + trailsize, zeroed,
committed);
return (NULL);
}
/* Insert the trailing space as a smaller chunk. */
if (node == NULL) {
node = arena_node_alloc(tsd, arena);
node = arena_node_alloc(tsdn, arena);
if (node == NULL) {
malloc_mutex_unlock(tsd, &arena->chunks_mtx);
chunk_record(tsd, arena, chunk_hooks,
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
chunk_record(tsdn, arena, chunk_hooks,
chunks_szad, chunks_ad, cache, ret, size +
trailsize, zeroed, committed);
return (NULL);
@@ -299,16 +299,16 @@ chunk_recycle(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
node = NULL;
}
if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) {
malloc_mutex_unlock(tsd, &arena->chunks_mtx);
chunk_record(tsd, arena, chunk_hooks, chunks_szad, chunks_ad,
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
chunk_record(tsdn, arena, chunk_hooks, chunks_szad, chunks_ad,
cache, ret, size, zeroed, committed);
return (NULL);
}
malloc_mutex_unlock(tsd, &arena->chunks_mtx);
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
assert(dalloc_node || node != NULL);
if (dalloc_node && node != NULL)
arena_node_dalloc(tsd, arena, node);
arena_node_dalloc(tsdn, arena, node);
if (*zero) {
if (!zeroed)
memset(ret, 0, size);
@@ -331,7 +331,7 @@ chunk_recycle(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
* them if they are returned.
*/
static void *
chunk_alloc_core(tsd_t *tsd, arena_t *arena, void *new_addr, size_t size,
chunk_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec)
{
void *ret;
@@ -343,7 +343,7 @@ chunk_alloc_core(tsd_t *tsd, arena_t *arena, void *new_addr, size_t size,
/* "primary" dss. */
if (have_dss && dss_prec == dss_prec_primary && (ret =
chunk_alloc_dss(tsd, arena, new_addr, size, alignment, zero,
chunk_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
commit)) != NULL)
return (ret);
/* mmap. */
@@ -352,7 +352,7 @@ chunk_alloc_core(tsd_t *tsd, arena_t *arena, void *new_addr, size_t size,
return (ret);
/* "secondary" dss. */
if (have_dss && dss_prec == dss_prec_secondary && (ret =
chunk_alloc_dss(tsd, arena, new_addr, size, alignment, zero,
chunk_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
commit)) != NULL)
return (ret);
@@ -383,7 +383,7 @@ chunk_alloc_base(size_t size)
}
void *
chunk_alloc_cache(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
void *new_addr, size_t size, size_t alignment, bool *zero, bool dalloc_node)
{
void *ret;
@@ -395,9 +395,9 @@ chunk_alloc_cache(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
assert((alignment & chunksize_mask) == 0);
commit = true;
ret = chunk_recycle(tsd, arena, chunk_hooks, &arena->chunks_szad_cached,
&arena->chunks_ad_cached, true, new_addr, size, alignment, zero,
&commit, dalloc_node);
ret = chunk_recycle(tsdn, arena, chunk_hooks,
&arena->chunks_szad_cached, &arena->chunks_ad_cached, true,
new_addr, size, alignment, zero, &commit, dalloc_node);
if (ret == NULL)
return (NULL);
assert(commit);
@@ -407,11 +407,11 @@ chunk_alloc_cache(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
}
static arena_t *
chunk_arena_get(tsd_t *tsd, unsigned arena_ind)
chunk_arena_get(tsdn_t *tsdn, unsigned arena_ind)
{
arena_t *arena;
arena = arena_get(tsd, arena_ind, false);
arena = arena_get(tsdn, arena_ind, false);
/*
* The arena we're allocating on behalf of must have been initialized
* already.
@@ -425,12 +425,12 @@ chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
bool *commit, unsigned arena_ind)
{
void *ret;
tsd_t *tsd;
tsdn_t *tsdn;
arena_t *arena;
tsd = tsd_fetch();
arena = chunk_arena_get(tsd, arena_ind);
ret = chunk_alloc_core(tsd, arena, new_addr, size, alignment, zero,
tsdn = tsdn_fetch();
arena = chunk_arena_get(tsdn, arena_ind);
ret = chunk_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
commit, arena->dss_prec);
if (ret == NULL)
return (NULL);
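
chunk_alloc_default() above is a representative case: chunk hooks have a fixed public signature with no thread-state parameter, and the default hook can run while tsd is still bootstrapping, so it must re-derive thread state itself. A hedged sketch of the pattern, reusing the stand-in definitions above ("_sketch" names are hypothetical):

static void *
chunk_alloc_default_sketch(void *new_addr, size_t size, size_t alignment,
    bool *zero, bool *commit, unsigned arena_ind)
{
	/*
	 * tsdn_fetch() returns NULL before tsd bootstraps; internal callees
	 * accept the possibly-NULL tsdn and skip tsd-dependent work rather
	 * than failing the way tsd_fetch() would have.
	 */
	tsdn_t *tsdn = tsdn_fetch();

	(void)tsdn; (void)new_addr; (void)size; (void)alignment;
	(void)zero; (void)commit; (void)arena_ind;
	return (NULL);	/* Real code delegates to chunk_alloc_core(). */
}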
@@ -441,7 +441,7 @@ chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
}
static void *
chunk_alloc_retained(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit)
{
void *ret;
@@ -451,7 +451,7 @@ chunk_alloc_retained(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
assert(alignment != 0);
assert((alignment & chunksize_mask) == 0);
ret = chunk_recycle(tsd, arena, chunk_hooks,
ret = chunk_recycle(tsdn, arena, chunk_hooks,
&arena->chunks_szad_retained, &arena->chunks_ad_retained, false,
new_addr, size, alignment, zero, commit, true);
@@ -462,14 +462,14 @@ chunk_alloc_retained(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
}
void *
chunk_alloc_wrapper(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit)
{
void *ret;
chunk_hooks_assure_initialized(tsd, arena, chunk_hooks);
chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
ret = chunk_alloc_retained(tsd, arena, chunk_hooks, new_addr, size,
ret = chunk_alloc_retained(tsdn, arena, chunk_hooks, new_addr, size,
alignment, zero, commit);
if (ret == NULL) {
ret = chunk_hooks->alloc(new_addr, size, alignment, zero,
@@ -484,7 +484,7 @@ chunk_alloc_wrapper(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
}
static void
chunk_record(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
void *chunk, size_t size, bool zeroed, bool committed)
{
@@ -496,8 +496,8 @@ chunk_record(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
unzeroed = cache || !zeroed;
JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
malloc_mutex_lock(tsd, &arena->chunks_mtx);
chunk_hooks_assure_initialized_locked(tsd, arena, chunk_hooks);
malloc_mutex_lock(tsdn, &arena->chunks_mtx);
chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0,
false, false);
node = extent_tree_ad_nsearch(chunks_ad, &key);
@@ -522,7 +522,7 @@ chunk_record(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
arena_chunk_cache_maybe_insert(arena, node, cache);
} else {
/* Coalescing forward failed, so insert a new node. */
node = arena_node_alloc(tsd, arena);
node = arena_node_alloc(tsdn, arena);
if (node == NULL) {
/*
* Node allocation failed, which is an exceedingly
@@ -531,7 +531,7 @@ chunk_record(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
* a virtual memory leak.
*/
if (cache) {
chunk_purge_wrapper(tsd, arena, chunk_hooks,
chunk_purge_wrapper(tsdn, arena, chunk_hooks,
chunk, size, 0, size);
}
goto label_return;
@@ -568,15 +568,15 @@ chunk_record(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
extent_tree_szad_insert(chunks_szad, node);
arena_chunk_cache_maybe_insert(arena, node, cache);
arena_node_dalloc(tsd, arena, prev);
arena_node_dalloc(tsdn, arena, prev);
}
label_return:
malloc_mutex_unlock(tsd, &arena->chunks_mtx);
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
}
void
chunk_dalloc_cache(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
void *chunk, size_t size, bool committed)
{
@@ -585,9 +585,9 @@ chunk_dalloc_cache(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
assert(size != 0);
assert((size & chunksize_mask) == 0);
chunk_record(tsd, arena, chunk_hooks, &arena->chunks_szad_cached,
chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szad_cached,
&arena->chunks_ad_cached, true, chunk, size, false, committed);
arena_maybe_purge(tsd, arena);
arena_maybe_purge(tsdn, arena);
}
static bool
@@ -595,13 +595,13 @@ chunk_dalloc_default(void *chunk, size_t size, bool committed,
unsigned arena_ind)
{
if (!have_dss || !chunk_in_dss(tsd_fetch(), chunk))
if (!have_dss || !chunk_in_dss(tsdn_fetch(), chunk))
return (chunk_dalloc_mmap(chunk, size));
return (true);
}
void
chunk_dalloc_wrapper(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
void *chunk, size_t size, bool zeroed, bool committed)
{
@@ -610,7 +610,7 @@ chunk_dalloc_wrapper(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
assert(size != 0);
assert((size & chunksize_mask) == 0);
chunk_hooks_assure_initialized(tsd, arena, chunk_hooks);
chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
/* Try to deallocate. */
if (!chunk_hooks->dalloc(chunk, size, committed, arena->ind))
return;
@@ -621,7 +621,7 @@ chunk_dalloc_wrapper(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
}
zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size,
arena->ind);
chunk_record(tsd, arena, chunk_hooks, &arena->chunks_szad_retained,
chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szad_retained,
&arena->chunks_ad_retained, false, chunk, size, zeroed, committed);
if (config_stats)
@@ -662,11 +662,11 @@ chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
}
bool
chunk_purge_wrapper(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
void *chunk, size_t size, size_t offset, size_t length)
{
chunk_hooks_assure_initialized(tsd, arena, chunk_hooks);
chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
return (chunk_hooks->purge(chunk, size, offset, length, arena->ind));
}
@@ -688,8 +688,8 @@ chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
if (!maps_coalesce)
return (true);
if (have_dss) {
tsd_t *tsd = tsd_fetch();
if (chunk_in_dss(tsd, chunk_a) != chunk_in_dss(tsd, chunk_b))
tsdn_t *tsdn = tsdn_fetch();
if (chunk_in_dss(tsdn, chunk_a) != chunk_in_dss(tsdn, chunk_b))
return (true);
}
@@ -700,7 +700,7 @@ static rtree_node_elm_t *
chunks_rtree_node_alloc(size_t nelms)
{
return ((rtree_node_elm_t *)base_alloc(tsd_fetch(), nelms *
return ((rtree_node_elm_t *)base_alloc(tsdn_fetch(), nelms *
sizeof(rtree_node_elm_t)));
}
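
chunks_rtree_node_alloc() above is the clearest bootstrap case: it can be reached from chunk_boot(), while jemalloc is still initializing, so base_alloc() and the mutexes under it must be callable with a NULL tsdn. A plausible sketch of how a tsdn-aware lock wrapper tolerates that, on the assumption that witness lock-order checking is the only tsd consumer in the lock path (simplified, not the real mutex code):

typedef struct { int locked; } mutex_sketch_t;

static void
mutex_lock_sketch(tsdn_t *tsdn, mutex_sketch_t *mtx)
{
	/*
	 * Witness lock-order validation needs a live tsd, so it is simply
	 * skipped when the caller could not obtain one (bootstrap).
	 */
	if (!tsdn_null(tsdn)) {
		/* witness checks via tsdn_tsd(tsdn) would go here */
	}
	mtx->locked = 1;	/* Stand-in for the real lock acquisition. */
}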
@@ -747,22 +747,22 @@ chunk_boot(void)
}
void
chunk_prefork(tsd_t *tsd)
chunk_prefork(tsdn_t *tsdn)
{
chunk_dss_prefork(tsd);
chunk_dss_prefork(tsdn);
}
void
chunk_postfork_parent(tsd_t *tsd)
chunk_postfork_parent(tsdn_t *tsdn)
{
chunk_dss_postfork_parent(tsd);
chunk_dss_postfork_parent(tsdn);
}
void
chunk_postfork_child(tsd_t *tsd)
chunk_postfork_child(tsdn_t *tsdn)
{
chunk_dss_postfork_child(tsd);
chunk_dss_postfork_child(tsdn);
}

src/chunk_dss.c

@@ -41,32 +41,32 @@ chunk_dss_sbrk(intptr_t increment)
}
dss_prec_t
chunk_dss_prec_get(tsd_t *tsd)
chunk_dss_prec_get(tsdn_t *tsdn)
{
dss_prec_t ret;
if (!have_dss)
return (dss_prec_disabled);
malloc_mutex_lock(tsd, &dss_mtx);
malloc_mutex_lock(tsdn, &dss_mtx);
ret = dss_prec_default;
malloc_mutex_unlock(tsd, &dss_mtx);
malloc_mutex_unlock(tsdn, &dss_mtx);
return (ret);
}
bool
chunk_dss_prec_set(tsd_t *tsd, dss_prec_t dss_prec)
chunk_dss_prec_set(tsdn_t *tsdn, dss_prec_t dss_prec)
{
if (!have_dss)
return (dss_prec != dss_prec_disabled);
malloc_mutex_lock(tsd, &dss_mtx);
malloc_mutex_lock(tsdn, &dss_mtx);
dss_prec_default = dss_prec;
malloc_mutex_unlock(tsd, &dss_mtx);
malloc_mutex_unlock(tsdn, &dss_mtx);
return (false);
}
void *
chunk_alloc_dss(tsd_t *tsd, arena_t *arena, void *new_addr, size_t size,
chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
size_t alignment, bool *zero, bool *commit)
{
cassert(have_dss);
@@ -80,7 +80,7 @@ chunk_alloc_dss(tsd_t *tsd, arena_t *arena, void *new_addr, size_t size,
if ((intptr_t)size < 0)
return (NULL);
malloc_mutex_lock(tsd, &dss_mtx);
malloc_mutex_lock(tsdn, &dss_mtx);
if (dss_prev != (void *)-1) {
/*
@@ -122,7 +122,7 @@ chunk_alloc_dss(tsd_t *tsd, arena_t *arena, void *new_addr, size_t size,
if ((uintptr_t)ret < (uintptr_t)dss_max ||
(uintptr_t)dss_next < (uintptr_t)dss_max) {
/* Wrap-around. */
malloc_mutex_unlock(tsd, &dss_mtx);
malloc_mutex_unlock(tsdn, &dss_mtx);
return (NULL);
}
incr = gap_size + cpad_size + size;
@@ -130,11 +130,11 @@ chunk_alloc_dss(tsd_t *tsd, arena_t *arena, void *new_addr, size_t size,
if (dss_prev == dss_max) {
/* Success. */
dss_max = dss_next;
malloc_mutex_unlock(tsd, &dss_mtx);
malloc_mutex_unlock(tsdn, &dss_mtx);
if (cpad_size != 0) {
chunk_hooks_t chunk_hooks =
CHUNK_HOOKS_INITIALIZER;
chunk_dalloc_wrapper(tsd, arena,
chunk_dalloc_wrapper(tsdn, arena,
&chunk_hooks, cpad, cpad_size,
false, true);
}
@@ -149,25 +149,25 @@ chunk_alloc_dss(tsd_t *tsd, arena_t *arena, void *new_addr, size_t size,
}
} while (dss_prev != (void *)-1);
}
malloc_mutex_unlock(tsd, &dss_mtx);
malloc_mutex_unlock(tsdn, &dss_mtx);
return (NULL);
}
bool
chunk_in_dss(tsd_t *tsd, void *chunk)
chunk_in_dss(tsdn_t *tsdn, void *chunk)
{
bool ret;
cassert(have_dss);
malloc_mutex_lock(tsd, &dss_mtx);
malloc_mutex_lock(tsdn, &dss_mtx);
if ((uintptr_t)chunk >= (uintptr_t)dss_base
&& (uintptr_t)chunk < (uintptr_t)dss_max)
ret = true;
else
ret = false;
malloc_mutex_unlock(tsd, &dss_mtx);
malloc_mutex_unlock(tsdn, &dss_mtx);
return (ret);
}
@@ -188,27 +188,27 @@ chunk_dss_boot(void)
}
void
chunk_dss_prefork(tsd_t *tsd)
chunk_dss_prefork(tsdn_t *tsdn)
{
if (have_dss)
malloc_mutex_prefork(tsd, &dss_mtx);
malloc_mutex_prefork(tsdn, &dss_mtx);
}
void
chunk_dss_postfork_parent(tsd_t *tsd)
chunk_dss_postfork_parent(tsdn_t *tsdn)
{
if (have_dss)
malloc_mutex_postfork_parent(tsd, &dss_mtx);
malloc_mutex_postfork_parent(tsdn, &dss_mtx);
}
void
chunk_dss_postfork_child(tsd_t *tsd)
chunk_dss_postfork_child(tsdn_t *tsdn)
{
if (have_dss)
malloc_mutex_postfork_child(tsd, &dss_mtx);
malloc_mutex_postfork_child(tsdn, &dss_mtx);
}
/******************************************************************************/

src/ckh.c

@@ -40,8 +40,8 @@
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static bool ckh_grow(tsd_t *tsd, ckh_t *ckh);
static void ckh_shrink(tsd_t *tsd, ckh_t *ckh);
static bool ckh_grow(tsdn_t *tsdn, ckh_t *ckh);
static void ckh_shrink(tsdn_t *tsdn, ckh_t *ckh);
/******************************************************************************/
@@ -244,7 +244,7 @@ ckh_rebuild(ckh_t *ckh, ckhc_t *aTab)
}
static bool
ckh_grow(tsd_t *tsd, ckh_t *ckh)
ckh_grow(tsdn_t *tsdn, ckh_t *ckh)
{
bool ret;
ckhc_t *tab, *ttab;
@@ -270,8 +270,8 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh)
ret = true;
goto label_return;
}
tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL,
true, arena_ichoose(tsd, NULL));
tab = (ckhc_t *)ipallocztm(tsdn, usize, CACHELINE, true, NULL,
true, arena_ichoose(tsdn, NULL));
if (tab == NULL) {
ret = true;
goto label_return;
@@ -283,12 +283,12 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh)
ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
if (!ckh_rebuild(ckh, tab)) {
idalloctm(tsd, tab, NULL, true, true);
idalloctm(tsdn, tab, NULL, true, true);
break;
}
/* Rebuilding failed, so back out partially rebuilt table. */
idalloctm(tsd, ckh->tab, NULL, true, true);
idalloctm(tsdn, ckh->tab, NULL, true, true);
ckh->tab = tab;
ckh->lg_curbuckets = lg_prevbuckets;
}
@@ -299,7 +299,7 @@ label_return:
}
static void
ckh_shrink(tsd_t *tsd, ckh_t *ckh)
ckh_shrink(tsdn_t *tsdn, ckh_t *ckh)
{
ckhc_t *tab, *ttab;
size_t usize;
@@ -314,8 +314,8 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
return;
tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true,
arena_ichoose(tsd, NULL));
tab = (ckhc_t *)ipallocztm(tsdn, usize, CACHELINE, true, NULL, true,
arena_ichoose(tsdn, NULL));
if (tab == NULL) {
/*
* An OOM error isn't worth propagating, since it doesn't
@@ -330,7 +330,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
if (!ckh_rebuild(ckh, tab)) {
idalloctm(tsd, tab, NULL, true, true);
idalloctm(tsdn, tab, NULL, true, true);
#ifdef CKH_COUNT
ckh->nshrinks++;
#endif
@@ -338,7 +338,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
}
/* Rebuilding failed, so back out partially rebuilt table. */
idalloctm(tsd, ckh->tab, NULL, true, true);
idalloctm(tsdn, ckh->tab, NULL, true, true);
ckh->tab = tab;
ckh->lg_curbuckets = lg_prevbuckets;
#ifdef CKH_COUNT
@@ -347,7 +347,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
}
bool
ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
ckh_new(tsdn_t *tsdn, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
ckh_keycomp_t *keycomp)
{
bool ret;
@@ -391,8 +391,8 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
ret = true;
goto label_return;
}
ckh->tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true,
arena_ichoose(tsd, NULL));
ckh->tab = (ckhc_t *)ipallocztm(tsdn, usize, CACHELINE, true, NULL,
true, arena_ichoose(tsdn, NULL));
if (ckh->tab == NULL) {
ret = true;
goto label_return;
@@ -404,7 +404,7 @@ label_return:
}
void
ckh_delete(tsd_t *tsd, ckh_t *ckh)
ckh_delete(tsdn_t *tsdn, ckh_t *ckh)
{
assert(ckh != NULL);
@@ -421,7 +421,7 @@ ckh_delete(tsd_t *tsd, ckh_t *ckh)
(unsigned long long)ckh->nrelocs);
#endif
idalloctm(tsd, ckh->tab, NULL, true, true);
idalloctm(tsdn, ckh->tab, NULL, true, true);
if (config_debug)
memset(ckh, JEMALLOC_FREE_JUNK, sizeof(ckh_t));
}
@@ -456,7 +456,7 @@ ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data)
}
bool
ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data)
ckh_insert(tsdn_t *tsdn, ckh_t *ckh, const void *key, const void *data)
{
bool ret;
@@ -468,7 +468,7 @@ ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data)
#endif
while (ckh_try_insert(ckh, &key, &data)) {
if (ckh_grow(tsd, ckh)) {
if (ckh_grow(tsdn, ckh)) {
ret = true;
goto label_return;
}
@@ -480,7 +480,7 @@ label_return:
}
bool
ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
ckh_remove(tsdn_t *tsdn, ckh_t *ckh, const void *searchkey, void **key,
void **data)
{
size_t cell;
@@ -502,7 +502,7 @@ ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
+ LG_CKH_BUCKET_CELLS - 2)) && ckh->lg_curbuckets
> ckh->lg_minbuckets) {
/* Ignore error due to OOM. */
ckh_shrink(tsd, ckh);
ckh_shrink(tsdn, ckh);
}
return (false);

src/ctl.c

@@ -46,20 +46,20 @@ static int n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
void *oldp, size_t *oldlenp, void *newp, size_t newlen);
#define INDEX_PROTO(n) \
static const ctl_named_node_t *n##_index(tsd_t *tsd, \
static const ctl_named_node_t *n##_index(tsdn_t *tsdn, \
const size_t *mib, size_t miblen, size_t i);
static bool ctl_arena_init(ctl_arena_stats_t *astats);
static void ctl_arena_clear(ctl_arena_stats_t *astats);
static void ctl_arena_stats_amerge(tsd_t *tsd, ctl_arena_stats_t *cstats,
static void ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_stats_t *cstats,
arena_t *arena);
static void ctl_arena_stats_smerge(ctl_arena_stats_t *sstats,
ctl_arena_stats_t *astats);
static void ctl_arena_refresh(tsd_t *tsd, arena_t *arena, unsigned i);
static bool ctl_grow(tsd_t *tsd);
static void ctl_refresh(tsd_t *tsd);
static bool ctl_init(tsd_t *tsd);
static int ctl_lookup(tsd_t *tsd, const char *name,
static void ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, unsigned i);
static bool ctl_grow(tsdn_t *tsdn);
static void ctl_refresh(tsdn_t *tsdn);
static bool ctl_init(tsdn_t *tsdn);
static int ctl_lookup(tsdn_t *tsdn, const char *name,
ctl_node_t const **nodesp, size_t *mibp, size_t *depthp);
CTL_PROTO(version)
@@ -117,7 +117,7 @@ CTL_PROTO(opt_prof_accum)
CTL_PROTO(tcache_create)
CTL_PROTO(tcache_flush)
CTL_PROTO(tcache_destroy)
static void arena_i_purge(tsd_t *tsd, unsigned arena_ind, bool all);
static void arena_i_purge(tsdn_t *tsdn, unsigned arena_ind, bool all);
CTL_PROTO(arena_i_purge)
CTL_PROTO(arena_i_decay)
CTL_PROTO(arena_i_reset)
@@ -560,12 +560,12 @@ ctl_arena_clear(ctl_arena_stats_t *astats)
}
static void
ctl_arena_stats_amerge(tsd_t *tsd, ctl_arena_stats_t *cstats, arena_t *arena)
ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_stats_t *cstats, arena_t *arena)
{
unsigned i;
if (config_stats) {
arena_stats_merge(tsd, arena, &cstats->nthreads, &cstats->dss,
arena_stats_merge(tsdn, arena, &cstats->nthreads, &cstats->dss,
&cstats->lg_dirty_mult, &cstats->decay_time,
&cstats->pactive, &cstats->pdirty, &cstats->astats,
cstats->bstats, cstats->lstats, cstats->hstats);
@@ -578,7 +578,7 @@ ctl_arena_stats_amerge(tsd_t *tsd, ctl_arena_stats_t *cstats, arena_t *arena)
cstats->nrequests_small += cstats->bstats[i].nrequests;
}
} else {
arena_basic_stats_merge(tsd, arena, &cstats->nthreads,
arena_basic_stats_merge(tsdn, arena, &cstats->nthreads,
&cstats->dss, &cstats->lg_dirty_mult, &cstats->decay_time,
&cstats->pactive, &cstats->pdirty);
}
@@ -656,24 +656,24 @@ ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
}
static void
ctl_arena_refresh(tsd_t *tsd, arena_t *arena, unsigned i)
ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, unsigned i)
{
ctl_arena_stats_t *astats = &ctl_stats.arenas[i];
ctl_arena_stats_t *sstats = &ctl_stats.arenas[ctl_stats.narenas];
ctl_arena_clear(astats);
ctl_arena_stats_amerge(tsd, astats, arena);
ctl_arena_stats_amerge(tsdn, astats, arena);
/* Merge into sum stats as well. */
ctl_arena_stats_smerge(sstats, astats);
}
static bool
ctl_grow(tsd_t *tsd)
ctl_grow(tsdn_t *tsdn)
{
ctl_arena_stats_t *astats;
/* Initialize new arena. */
if (arena_init(tsd, ctl_stats.narenas) == NULL)
if (arena_init(tsdn, ctl_stats.narenas) == NULL)
return (true);
/* Allocate extended arena stats. */
@@ -708,7 +708,7 @@ ctl_grow(tsd_t *tsd)
}
static void
ctl_refresh(tsd_t *tsd)
ctl_refresh(tsdn_t *tsdn)
{
unsigned i;
VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);
@@ -720,19 +720,19 @@ ctl_refresh(tsd_t *tsd)
ctl_arena_clear(&ctl_stats.arenas[ctl_stats.narenas]);
for (i = 0; i < ctl_stats.narenas; i++)
tarenas[i] = arena_get(tsd, i, false);
tarenas[i] = arena_get(tsdn, i, false);
for (i = 0; i < ctl_stats.narenas; i++) {
bool initialized = (tarenas[i] != NULL);
ctl_stats.arenas[i].initialized = initialized;
if (initialized)
ctl_arena_refresh(tsd, tarenas[i], i);
ctl_arena_refresh(tsdn, tarenas[i], i);
}
if (config_stats) {
size_t base_allocated, base_resident, base_mapped;
base_stats_get(tsd, &base_allocated, &base_resident,
base_stats_get(tsdn, &base_allocated, &base_resident,
&base_mapped);
ctl_stats.allocated =
ctl_stats.arenas[ctl_stats.narenas].allocated_small +
@@ -758,11 +758,11 @@ ctl_refresh(tsd_t *tsd)
}
static bool
ctl_init(tsd_t *tsd)
ctl_init(tsdn_t *tsdn)
{
bool ret;
malloc_mutex_lock(tsd, &ctl_mtx);
malloc_mutex_lock(tsdn, &ctl_mtx);
if (!ctl_initialized) {
/*
* Allocate space for one extra arena stats element, which
@@ -804,18 +804,18 @@ ctl_init(tsd_t *tsd)
ctl_stats.arenas[ctl_stats.narenas].initialized = true;
ctl_epoch = 0;
ctl_refresh(tsd);
ctl_refresh(tsdn);
ctl_initialized = true;
}
ret = false;
label_return:
malloc_mutex_unlock(tsd, &ctl_mtx);
malloc_mutex_unlock(tsdn, &ctl_mtx);
return (ret);
}
static int
ctl_lookup(tsd_t *tsd, const char *name, ctl_node_t const **nodesp,
ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
size_t *mibp, size_t *depthp)
{
int ret;
@@ -868,7 +868,7 @@ ctl_lookup(tsd_t *tsd, const char *name, ctl_node_t const **nodesp,
}
inode = ctl_indexed_node(node->children);
node = inode->index(tsd, mibp, *depthp, (size_t)index);
node = inode->index(tsdn, mibp, *depthp, (size_t)index);
if (node == NULL) {
ret = ENOENT;
goto label_return;
@@ -921,13 +921,13 @@ ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
size_t mib[CTL_MAX_DEPTH];
const ctl_named_node_t *node;
if (!ctl_initialized && ctl_init(tsd)) {
if (!ctl_initialized && ctl_init(tsd_tsdn(tsd))) {
ret = EAGAIN;
goto label_return;
}
depth = CTL_MAX_DEPTH;
ret = ctl_lookup(tsd, name, nodes, mib, &depth);
ret = ctl_lookup(tsd_tsdn(tsd), name, nodes, mib, &depth);
if (ret != 0)
goto label_return;
@@ -944,16 +944,16 @@ label_return:
}
int
ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp)
ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp, size_t *miblenp)
{
int ret;
if (!ctl_initialized && ctl_init(tsd)) {
if (!ctl_initialized && ctl_init(tsdn)) {
ret = EAGAIN;
goto label_return;
}
ret = ctl_lookup(tsd, name, NULL, mibp, miblenp);
ret = ctl_lookup(tsdn, name, NULL, mibp, miblenp);
label_return:
return(ret);
}
@@ -966,7 +966,7 @@ ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
const ctl_named_node_t *node;
size_t i;
if (!ctl_initialized && ctl_init(tsd)) {
if (!ctl_initialized && ctl_init(tsd_tsdn(tsd))) {
ret = EAGAIN;
goto label_return;
}
@@ -988,7 +988,7 @@ ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
/* Indexed element. */
inode = ctl_indexed_node(node->children);
node = inode->index(tsd, mib, miblen, mib[i]);
node = inode->index(tsd_tsdn(tsd), mib, miblen, mib[i]);
if (node == NULL) {
ret = ENOENT;
goto label_return;
@@ -1021,24 +1021,24 @@ ctl_boot(void)
}
void
ctl_prefork(tsd_t *tsd)
ctl_prefork(tsdn_t *tsdn)
{
malloc_mutex_prefork(tsd, &ctl_mtx);
malloc_mutex_prefork(tsdn, &ctl_mtx);
}
void
ctl_postfork_parent(tsd_t *tsd)
ctl_postfork_parent(tsdn_t *tsdn)
{
malloc_mutex_postfork_parent(tsd, &ctl_mtx);
malloc_mutex_postfork_parent(tsdn, &ctl_mtx);
}
void
ctl_postfork_child(tsd_t *tsd)
ctl_postfork_child(tsdn_t *tsdn)
{
malloc_mutex_postfork_child(tsd, &ctl_mtx);
malloc_mutex_postfork_child(tsdn, &ctl_mtx);
}
/******************************************************************************/
@@ -1104,7 +1104,7 @@ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
if (!(c)) \
return (ENOENT); \
if (l) \
malloc_mutex_lock(tsd, &ctl_mtx); \
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
READONLY(); \
oldval = (v); \
READ(oldval, t); \
@@ -1112,7 +1112,7 @@ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
ret = 0; \
label_return: \
if (l) \
malloc_mutex_unlock(tsd, &ctl_mtx); \
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
return (ret); \
}
@@ -1126,14 +1126,14 @@ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
\
if (!(c)) \
return (ENOENT); \
malloc_mutex_lock(tsd, &ctl_mtx); \
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
READONLY(); \
oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
label_return: \
malloc_mutex_unlock(tsd, &ctl_mtx); \
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
return (ret); \
}
@@ -1145,14 +1145,14 @@ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
int ret; \
t oldval; \
\
malloc_mutex_lock(tsd, &ctl_mtx); \
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
READONLY(); \
oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
label_return: \
malloc_mutex_unlock(tsd, &ctl_mtx); \
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
return (ret); \
}
@@ -1243,15 +1243,15 @@ epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
int ret;
UNUSED uint64_t newval;
malloc_mutex_lock(tsd, &ctl_mtx);
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
WRITE(newval, uint64_t);
if (newp != NULL)
ctl_refresh(tsd);
ctl_refresh(tsd_tsdn(tsd));
READ(ctl_epoch, uint64_t);
ret = 0;
label_return:
malloc_mutex_unlock(tsd, &ctl_mtx);
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return (ret);
}
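
Note the conversion direction throughout ctl.c: mallctl entry points always run on an application thread whose tsd is live, so the n##_ctl handlers keep the non-nullable tsd_t and widen it at each internal call with tsd_tsdn(tsd); only the context-free n##_index callbacks change their signatures to tsdn_t. A tiny sketch of the widening idiom, reusing the stand-ins above:

/* Widening never fails, so no NULL checks accompany tsd_tsdn(). */
static void
ctl_handler_sketch(tsd_t *tsd)
{
	tsdn_t *tsdn = tsd_tsdn(tsd);	/* always valid */

	(void)tsdn;	/* ... e.g. lock ctl_mtx, call internal APIs ... */
}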
@@ -1317,7 +1317,7 @@ thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
if (oldarena == NULL)
return (EAGAIN);
malloc_mutex_lock(tsd, &ctl_mtx);
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
newind = oldind = oldarena->ind;
WRITE(newind, unsigned);
READ(oldind, unsigned);
@@ -1331,7 +1331,7 @@ thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
}
/* Initialize arena if necessary. */
newarena = arena_get(tsd, newind, true);
newarena = arena_get(tsd_tsdn(tsd), newind, true);
if (newarena == NULL) {
ret = EAGAIN;
goto label_return;
@@ -1341,15 +1341,15 @@ thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
if (config_tcache) {
tcache_t *tcache = tsd_tcache_get(tsd);
if (tcache != NULL) {
tcache_arena_reassociate(tsd, tcache, oldarena,
newarena);
tcache_arena_reassociate(tsd_tsdn(tsd), tcache,
oldarena, newarena);
}
}
}
ret = 0;
label_return:
malloc_mutex_unlock(tsd, &ctl_mtx);
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return (ret);
}
@@ -1476,9 +1476,9 @@ tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
if (!config_tcache)
return (ENOENT);
malloc_mutex_lock(tsd, &ctl_mtx);
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
READONLY();
if (tcaches_create(tsd, &tcache_ind)) {
if (tcaches_create(tsd_tsdn(tsd), &tcache_ind)) {
ret = EFAULT;
goto label_return;
}
@@ -1486,7 +1486,7 @@ tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = 0;
label_return:
malloc_mutex_unlock(tsd, &ctl_mtx);
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return (ret);
}
@@ -1541,10 +1541,10 @@ label_return:
/******************************************************************************/
static void
arena_i_purge(tsd_t *tsd, unsigned arena_ind, bool all)
arena_i_purge(tsdn_t *tsdn, unsigned arena_ind, bool all)
{
malloc_mutex_lock(tsd, &ctl_mtx);
malloc_mutex_lock(tsdn, &ctl_mtx);
{
unsigned narenas = ctl_stats.narenas;
@@ -1553,30 +1553,30 @@ arena_i_purge(tsd_t *tsd, unsigned arena_ind, bool all)
VARIABLE_ARRAY(arena_t *, tarenas, narenas);
for (i = 0; i < narenas; i++)
tarenas[i] = arena_get(tsd, i, false);
tarenas[i] = arena_get(tsdn, i, false);
/*
* No further need to hold ctl_mtx, since narenas and
* tarenas contain everything needed below.
*/
malloc_mutex_unlock(tsd, &ctl_mtx);
malloc_mutex_unlock(tsdn, &ctl_mtx);
for (i = 0; i < narenas; i++) {
if (tarenas[i] != NULL)
arena_purge(tsd, tarenas[i], all);
arena_purge(tsdn, tarenas[i], all);
}
} else {
arena_t *tarena;
assert(arena_ind < narenas);
tarena = arena_get(tsd, arena_ind, false);
tarena = arena_get(tsdn, arena_ind, false);
/* No further need to hold ctl_mtx. */
malloc_mutex_unlock(tsd, &ctl_mtx);
malloc_mutex_unlock(tsdn, &ctl_mtx);
if (tarena != NULL)
arena_purge(tsd, tarena, all);
arena_purge(tsdn, tarena, all);
}
}
}
@@ -1589,7 +1589,7 @@ arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
READONLY();
WRITEONLY();
arena_i_purge(tsd, (unsigned)mib[1], true);
arena_i_purge(tsd_tsdn(tsd), (unsigned)mib[1], true);
ret = 0;
label_return:
@@ -1604,7 +1604,7 @@ arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
READONLY();
WRITEONLY();
arena_i_purge(tsd, (unsigned)mib[1], false);
arena_i_purge(tsd_tsdn(tsd), (unsigned)mib[1], false);
ret = 0;
label_return:
@@ -1630,13 +1630,13 @@ arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
arena_ind = (unsigned)mib[1];
if (config_debug) {
malloc_mutex_lock(tsd, &ctl_mtx);
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
assert(arena_ind < ctl_stats.narenas);
malloc_mutex_unlock(tsd, &ctl_mtx);
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
}
assert(arena_ind >= opt_narenas);
arena = arena_get(tsd, arena_ind, false);
arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
arena_reset(tsd, arena);
@@ -1655,7 +1655,7 @@ arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
dss_prec_t dss_prec_old = dss_prec_limit;
dss_prec_t dss_prec = dss_prec_limit;
malloc_mutex_lock(tsd, &ctl_mtx);
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
WRITE(dss, const char *);
if (dss != NULL) {
int i;
@@ -1676,20 +1676,20 @@ arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
}
if (arena_ind < ctl_stats.narenas) {
arena_t *arena = arena_get(tsd, arena_ind, false);
arena_t *arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
if (arena == NULL || (dss_prec != dss_prec_limit &&
arena_dss_prec_set(tsd, arena, dss_prec))) {
arena_dss_prec_set(tsd_tsdn(tsd), arena, dss_prec))) {
ret = EFAULT;
goto label_return;
}
dss_prec_old = arena_dss_prec_get(tsd, arena);
dss_prec_old = arena_dss_prec_get(tsd_tsdn(tsd), arena);
} else {
if (dss_prec != dss_prec_limit &&
chunk_dss_prec_set(tsd, dss_prec)) {
chunk_dss_prec_set(tsd_tsdn(tsd), dss_prec)) {
ret = EFAULT;
goto label_return;
}
dss_prec_old = chunk_dss_prec_get(tsd);
dss_prec_old = chunk_dss_prec_get(tsd_tsdn(tsd));
}
dss = dss_prec_names[dss_prec_old];
@@ -1697,7 +1697,7 @@ arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = 0;
label_return:
malloc_mutex_unlock(tsd, &ctl_mtx);
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return (ret);
}
@@ -1709,14 +1709,14 @@ arena_i_lg_dirty_mult_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
unsigned arena_ind = (unsigned)mib[1];
arena_t *arena;
arena = arena_get(tsd, arena_ind, false);
arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
if (arena == NULL) {
ret = EFAULT;
goto label_return;
}
if (oldp != NULL && oldlenp != NULL) {
size_t oldval = arena_lg_dirty_mult_get(tsd, arena);
size_t oldval = arena_lg_dirty_mult_get(tsd_tsdn(tsd), arena);
READ(oldval, ssize_t);
}
if (newp != NULL) {
@@ -1724,7 +1724,8 @@ arena_i_lg_dirty_mult_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
ret = EINVAL;
goto label_return;
}
if (arena_lg_dirty_mult_set(tsd, arena, *(ssize_t *)newp)) {
if (arena_lg_dirty_mult_set(tsd_tsdn(tsd), arena,
*(ssize_t *)newp)) {
ret = EFAULT;
goto label_return;
}
@@ -1743,14 +1744,14 @@ arena_i_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
unsigned arena_ind = (unsigned)mib[1];
arena_t *arena;
arena = arena_get(tsd, arena_ind, false);
arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
if (arena == NULL) {
ret = EFAULT;
goto label_return;
}
if (oldp != NULL && oldlenp != NULL) {
size_t oldval = arena_decay_time_get(tsd, arena);
size_t oldval = arena_decay_time_get(tsd_tsdn(tsd), arena);
READ(oldval, ssize_t);
}
if (newp != NULL) {
@@ -1758,7 +1759,8 @@ arena_i_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = EINVAL;
goto label_return;
}
if (arena_decay_time_set(tsd, arena, *(ssize_t *)newp)) {
if (arena_decay_time_set(tsd_tsdn(tsd), arena,
*(ssize_t *)newp)) {
ret = EFAULT;
goto label_return;
}
@@ -1777,18 +1779,18 @@ arena_i_chunk_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
unsigned arena_ind = (unsigned)mib[1];
arena_t *arena;
malloc_mutex_lock(tsd, &ctl_mtx);
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
if (arena_ind < narenas_total_get() && (arena =
arena_get(tsd, arena_ind, false)) != NULL) {
arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) {
if (newp != NULL) {
chunk_hooks_t old_chunk_hooks, new_chunk_hooks;
WRITE(new_chunk_hooks, chunk_hooks_t);
old_chunk_hooks = chunk_hooks_set(tsd, arena,
old_chunk_hooks = chunk_hooks_set(tsd_tsdn(tsd), arena,
&new_chunk_hooks);
READ(old_chunk_hooks, chunk_hooks_t);
} else {
chunk_hooks_t old_chunk_hooks = chunk_hooks_get(tsd,
arena);
chunk_hooks_t old_chunk_hooks =
chunk_hooks_get(tsd_tsdn(tsd), arena);
READ(old_chunk_hooks, chunk_hooks_t);
}
} else {
@@ -1797,16 +1799,16 @@ arena_i_chunk_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
}
ret = 0;
label_return:
malloc_mutex_unlock(tsd, &ctl_mtx);
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return (ret);
}
static const ctl_named_node_t *
arena_i_index(tsd_t *tsd, const size_t *mib, size_t miblen, size_t i)
arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
{
const ctl_named_node_t *ret;
malloc_mutex_lock(tsd, &ctl_mtx);
malloc_mutex_lock(tsdn, &ctl_mtx);
if (i > ctl_stats.narenas) {
ret = NULL;
goto label_return;
@@ -1814,7 +1816,7 @@ arena_i_index(tsd_t *tsd, const size_t *mib, size_t miblen, size_t i)
ret = super_arena_i_node;
label_return:
malloc_mutex_unlock(tsd, &ctl_mtx);
malloc_mutex_unlock(tsdn, &ctl_mtx);
return (ret);
}
@@ -1827,7 +1829,7 @@ arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
int ret;
unsigned narenas;
malloc_mutex_lock(tsd, &ctl_mtx);
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
READONLY();
if (*oldlenp != sizeof(unsigned)) {
ret = EINVAL;
@@ -1838,7 +1840,7 @@ arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = 0;
label_return:
malloc_mutex_unlock(tsd, &ctl_mtx);
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return (ret);
}
@@ -1849,7 +1851,7 @@ arenas_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
int ret;
unsigned nread, i;
malloc_mutex_lock(tsd, &ctl_mtx);
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
READONLY();
if (*oldlenp != ctl_stats.narenas * sizeof(bool)) {
ret = EINVAL;
@@ -1864,7 +1866,7 @@ arenas_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
((bool *)oldp)[i] = ctl_stats.arenas[i].initialized;
label_return:
malloc_mutex_unlock(tsd, &ctl_mtx);
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return (ret);
}
@@ -1929,7 +1931,7 @@ CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t)
CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t)
CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t)
static const ctl_named_node_t *
arenas_bin_i_index(tsd_t *tsd, const size_t *mib, size_t miblen, size_t i)
arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
{
if (i > NBINS)
@@ -1940,7 +1942,7 @@ arenas_bin_i_index(tsd_t *tsd, const size_t *mib, size_t miblen, size_t i)
CTL_RO_NL_GEN(arenas_nlruns, nlclasses, unsigned)
CTL_RO_NL_GEN(arenas_lrun_i_size, index2size(NBINS+(szind_t)mib[2]), size_t)
static const ctl_named_node_t *
arenas_lrun_i_index(tsd_t *tsd, const size_t *mib, size_t miblen, size_t i)
arenas_lrun_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
{
if (i > nlclasses)
@@ -1952,7 +1954,7 @@ CTL_RO_NL_GEN(arenas_nhchunks, nhclasses, unsigned)
CTL_RO_NL_GEN(arenas_hchunk_i_size, index2size(NBINS+nlclasses+(szind_t)mib[2]),
size_t)
static const ctl_named_node_t *
arenas_hchunk_i_index(tsd_t *tsd, const size_t *mib, size_t miblen, size_t i)
arenas_hchunk_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
{
if (i > nhclasses)
@@ -1967,9 +1969,9 @@ arenas_extend_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
int ret;
unsigned narenas;
malloc_mutex_lock(tsd, &ctl_mtx);
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
READONLY();
if (ctl_grow(tsd)) {
if (ctl_grow(tsd_tsdn(tsd))) {
ret = EAGAIN;
goto label_return;
}
@@ -1978,7 +1980,7 @@ arenas_extend_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = 0;
label_return:
malloc_mutex_unlock(tsd, &ctl_mtx);
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return (ret);
}
@@ -1999,9 +2001,10 @@ prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
ret = EINVAL;
goto label_return;
}
oldval = prof_thread_active_init_set(tsd, *(bool *)newp);
oldval = prof_thread_active_init_set(tsd_tsdn(tsd),
*(bool *)newp);
} else
oldval = prof_thread_active_init_get(tsd);
oldval = prof_thread_active_init_get(tsd_tsdn(tsd));
READ(oldval, bool);
ret = 0;
@@ -2024,9 +2027,9 @@ prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = EINVAL;
goto label_return;
}
oldval = prof_active_set(tsd, *(bool *)newp);
oldval = prof_active_set(tsd_tsdn(tsd), *(bool *)newp);
} else
oldval = prof_active_get(tsd);
oldval = prof_active_get(tsd_tsdn(tsd));
READ(oldval, bool);
ret = 0;
@@ -2072,9 +2075,9 @@ prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = EINVAL;
goto label_return;
}
oldval = prof_gdump_set(tsd, *(bool *)newp);
oldval = prof_gdump_set(tsd_tsdn(tsd), *(bool *)newp);
} else
oldval = prof_gdump_get(tsd);
oldval = prof_gdump_get(tsd_tsdn(tsd));
READ(oldval, bool);
ret = 0;
@@ -2097,7 +2100,7 @@ prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
if (lg_sample >= (sizeof(uint64_t) << 3))
lg_sample = (sizeof(uint64_t) << 3) - 1;
prof_reset(tsd, lg_sample);
prof_reset(tsd_tsdn(tsd), lg_sample);
ret = 0;
label_return:
@@ -2185,7 +2188,7 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns,
ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t)
static const ctl_named_node_t *
stats_arenas_i_bins_j_index(tsd_t *tsd, const size_t *mib, size_t miblen,
stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
size_t j)
{
@@ -2204,7 +2207,7 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns,
ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t)
static const ctl_named_node_t *
stats_arenas_i_lruns_j_index(tsd_t *tsd, const size_t *mib, size_t miblen,
stats_arenas_i_lruns_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
size_t j)
{
@@ -2224,7 +2227,7 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_curhchunks,
ctl_stats.arenas[mib[2]].hstats[mib[4]].curhchunks, size_t)
static const ctl_named_node_t *
stats_arenas_i_hchunks_j_index(tsd_t *tsd, const size_t *mib, size_t miblen,
stats_arenas_i_hchunks_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
size_t j)
{
@@ -2234,11 +2237,11 @@ stats_arenas_i_hchunks_j_index(tsd_t *tsd, const size_t *mib, size_t miblen,
}
static const ctl_named_node_t *
stats_arenas_i_index(tsd_t *tsd, const size_t *mib, size_t miblen, size_t i)
stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
{
const ctl_named_node_t * ret;
malloc_mutex_lock(tsd, &ctl_mtx);
malloc_mutex_lock(tsdn, &ctl_mtx);
if (i > ctl_stats.narenas || !ctl_stats.arenas[i].initialized) {
ret = NULL;
goto label_return;
@@ -2246,6 +2249,6 @@ stats_arenas_i_index(tsd_t *tsd, const size_t *mib, size_t miblen, size_t i)
ret = super_stats_arenas_i_node;
label_return:
malloc_mutex_unlock(tsd, &ctl_mtx);
malloc_mutex_unlock(tsdn, &ctl_mtx);
return (ret);
}

src/huge.c

@@ -15,12 +15,12 @@ huge_node_get(const void *ptr)
}
static bool
huge_node_set(tsd_t *tsd, const void *ptr, extent_node_t *node)
huge_node_set(tsdn_t *tsdn, const void *ptr, extent_node_t *node)
{
assert(extent_node_addr_get(node) == ptr);
assert(!extent_node_achunk_get(node));
return (chunk_register(tsd, ptr, node));
return (chunk_register(tsdn, ptr, node));
}
static void
@@ -31,16 +31,16 @@ huge_node_unset(const void *ptr, const extent_node_t *node)
}
void *
huge_malloc(tsd_t *tsd, arena_t *arena, size_t usize, bool zero)
huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero)
{
assert(usize == s2u(usize));
return (huge_palloc(tsd, arena, usize, chunksize, zero));
return (huge_palloc(tsdn, arena, usize, chunksize, zero));
}
void *
huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
bool zero)
{
void *ret;
@@ -50,15 +50,17 @@ huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
/* Allocate one or more contiguous chunks for this request. */
assert(!tsdn_null(tsdn) || arena != NULL);
ausize = sa2u(usize, alignment);
if (unlikely(ausize == 0 || ausize > HUGE_MAXCLASS))
return (NULL);
assert(ausize >= chunksize);
/* Allocate an extent node with which to track the chunk. */
assert(tsd != NULL || arena != NULL);
node = ipallocztm(tsd, CACHELINE_CEILING(sizeof(extent_node_t)),
CACHELINE, false, NULL, true, arena_ichoose(tsd, arena));
assert(tsdn != NULL || arena != NULL);
node = ipallocztm(tsdn, CACHELINE_CEILING(sizeof(extent_node_t)),
CACHELINE, false, NULL, true, arena_ichoose(tsdn, arena));
if (node == NULL)
return (NULL);
@@ -67,26 +69,26 @@ huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
* it is possible to make correct junk/zero fill decisions below.
*/
is_zeroed = zero;
arena = arena_choose(tsd, arena);
if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(tsd, arena,
usize, alignment, &is_zeroed)) == NULL) {
idalloctm(tsd, node, NULL, true, true);
arena = arena_choose(tsdn_tsd(tsdn), arena);
if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(tsdn,
arena, usize, alignment, &is_zeroed)) == NULL) {
idalloctm(tsdn, node, NULL, true, true);
return (NULL);
}
extent_node_init(node, arena, ret, usize, is_zeroed, true);
if (huge_node_set(tsd, ret, node)) {
arena_chunk_dalloc_huge(tsd, arena, ret, usize);
idalloctm(tsd, node, NULL, true, true);
if (huge_node_set(tsdn, ret, node)) {
arena_chunk_dalloc_huge(tsdn, arena, ret, usize);
idalloctm(tsdn, node, NULL, true, true);
return (NULL);
}
/* Insert node into huge. */
malloc_mutex_lock(tsd, &arena->huge_mtx);
malloc_mutex_lock(tsdn, &arena->huge_mtx);
ql_elm_new(node, ql_link);
ql_tail_insert(&arena->huge, node, ql_link);
malloc_mutex_unlock(tsd, &arena->huge_mtx);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
if (zero || (config_fill && unlikely(opt_zero))) {
if (!is_zeroed)
@@ -94,7 +96,7 @@ huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
} else if (config_fill && unlikely(opt_junk_alloc))
memset(ret, JEMALLOC_ALLOC_JUNK, usize);
arena_decay_tick(tsd, arena);
arena_decay_tick(tsdn, arena);
return (ret);
}
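
huge_palloc() above contains one of the "dangerous conversions" the commit message mentions: arena_choose() genuinely needs tsd, so the call narrows with tsdn_tsd(tsdn). The assert(!tsdn_null(tsdn) || arena != NULL) near the top of the function records the corresponding contract: a caller without tsd must pin an arena explicitly. A hedged sketch of that invariant, reusing the stand-ins above (the real call narrows unconditionally; this version guards explicitly to make the contract visible, and the "_sketch" names are hypothetical):

/* Stand-in: returns the caller-pinned arena, or picks one using tsd. */
static void *
arena_choose_sketch(tsd_t *tsd, void *arena)
{
	if (arena != NULL)
		return (arena);	/* tsd never dereferenced on this path */
	assert(tsd != NULL);
	return (NULL);		/* Real code selects a per-thread arena. */
}

static void *
huge_palloc_contract_sketch(tsdn_t *tsdn, void *arena)
{
	assert(!tsdn_null(tsdn) || arena != NULL);
	return (arena_choose_sketch(tsdn_null(tsdn) ? NULL : tsdn_tsd(tsdn),
	    arena));
}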
@@ -103,7 +105,7 @@ huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
#endif
static void
huge_dalloc_junk(tsd_t *tsd, void *ptr, size_t usize)
huge_dalloc_junk(tsdn_t *tsdn, void *ptr, size_t usize)
{
if (config_fill && have_dss && unlikely(opt_junk_free)) {
@@ -111,7 +113,7 @@ huge_dalloc_junk(tsd_t *tsd, void *ptr, size_t usize)
* Only bother junk filling if the chunk isn't about to be
* unmapped.
*/
if (!config_munmap || (have_dss && chunk_in_dss(tsd, ptr)))
if (!config_munmap || (have_dss && chunk_in_dss(tsdn, ptr)))
memset(ptr, JEMALLOC_FREE_JUNK, usize);
}
}
@@ -122,7 +124,7 @@ huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
#endif
static void
huge_ralloc_no_move_similar(tsd_t *tsd, void *ptr, size_t oldsize,
huge_ralloc_no_move_similar(tsdn_t *tsdn, void *ptr, size_t oldsize,
size_t usize_min, size_t usize_max, bool zero)
{
size_t usize, usize_next;
@@ -151,22 +153,22 @@ huge_ralloc_no_move_similar(tsd_t *tsd, void *ptr, size_t oldsize,
JEMALLOC_FREE_JUNK, sdiff);
post_zeroed = false;
} else {
post_zeroed = !chunk_purge_wrapper(tsd, arena,
post_zeroed = !chunk_purge_wrapper(tsdn, arena,
&chunk_hooks, ptr, CHUNK_CEILING(oldsize), usize,
sdiff);
}
} else
post_zeroed = pre_zeroed;
malloc_mutex_lock(tsd, &arena->huge_mtx);
malloc_mutex_lock(tsdn, &arena->huge_mtx);
/* Update the size of the huge allocation. */
assert(extent_node_size_get(node) != usize);
extent_node_size_set(node, usize);
/* Update zeroed. */
extent_node_zeroed_set(node, post_zeroed);
malloc_mutex_unlock(tsd, &arena->huge_mtx);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
arena_chunk_ralloc_huge_similar(tsd, arena, ptr, oldsize, usize);
arena_chunk_ralloc_huge_similar(tsdn, arena, ptr, oldsize, usize);
/* Fill if necessary (growing). */
if (oldsize < usize) {
@@ -183,7 +185,8 @@ huge_ralloc_no_move_similar(tsd_t *tsd, void *ptr, size_t oldsize,
}
static bool
huge_ralloc_no_move_shrink(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize)
huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize,
size_t usize)
{
extent_node_t *node;
arena_t *arena;
@@ -194,7 +197,7 @@ huge_ralloc_no_move_shrink(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize)
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
pre_zeroed = extent_node_zeroed_get(node);
chunk_hooks = chunk_hooks_get(tsd, arena);
chunk_hooks = chunk_hooks_get(tsdn, arena);
assert(oldsize > usize);
@@ -207,11 +210,11 @@ huge_ralloc_no_move_shrink(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize)
if (oldsize > usize) {
size_t sdiff = oldsize - usize;
if (config_fill && unlikely(opt_junk_free)) {
huge_dalloc_junk(tsd, (void *)((uintptr_t)ptr + usize),
huge_dalloc_junk(tsdn, (void *)((uintptr_t)ptr + usize),
sdiff);
post_zeroed = false;
} else {
post_zeroed = !chunk_purge_wrapper(tsd, arena,
post_zeroed = !chunk_purge_wrapper(tsdn, arena,
&chunk_hooks, CHUNK_ADDR2BASE((uintptr_t)ptr +
usize), CHUNK_CEILING(oldsize),
CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff);
@@ -219,31 +222,31 @@ huge_ralloc_no_move_shrink(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize)
} else
post_zeroed = pre_zeroed;
malloc_mutex_lock(tsd, &arena->huge_mtx);
malloc_mutex_lock(tsdn, &arena->huge_mtx);
/* Update the size of the huge allocation. */
extent_node_size_set(node, usize);
/* Update zeroed. */
extent_node_zeroed_set(node, post_zeroed);
malloc_mutex_unlock(tsd, &arena->huge_mtx);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
/* Zap the excess chunks. */
arena_chunk_ralloc_huge_shrink(tsd, arena, ptr, oldsize, usize);
arena_chunk_ralloc_huge_shrink(tsdn, arena, ptr, oldsize, usize);
return (false);
}
static bool
huge_ralloc_no_move_expand(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize,
bool zero)
huge_ralloc_no_move_expand(tsdn_t *tsdn, void *ptr, size_t oldsize,
size_t usize, bool zero)
{
extent_node_t *node;
arena_t *arena;
bool is_zeroed_subchunk, is_zeroed_chunk;
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
malloc_mutex_lock(tsd, &arena->huge_mtx);
malloc_mutex_lock(tsdn, &arena->huge_mtx);
is_zeroed_subchunk = extent_node_zeroed_get(node);
malloc_mutex_unlock(tsd, &arena->huge_mtx);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
/*
* Copy zero into is_zeroed_chunk and pass the copy to chunk_alloc(), so
@@ -251,14 +254,14 @@ huge_ralloc_no_move_expand(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize,
*/
is_zeroed_chunk = zero;
if (arena_chunk_ralloc_huge_expand(tsd, arena, ptr, oldsize, usize,
if (arena_chunk_ralloc_huge_expand(tsdn, arena, ptr, oldsize, usize,
&is_zeroed_chunk))
return (true);
malloc_mutex_lock(tsd, &arena->huge_mtx);
malloc_mutex_lock(tsdn, &arena->huge_mtx);
/* Update the size of the huge allocation. */
extent_node_size_set(node, usize);
malloc_mutex_unlock(tsd, &arena->huge_mtx);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
if (zero || (config_fill && unlikely(opt_zero))) {
if (!is_zeroed_subchunk) {
@@ -279,7 +282,7 @@ huge_ralloc_no_move_expand(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize,
}
bool
huge_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize_min,
huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
size_t usize_max, bool zero)
{
@@ -293,16 +296,16 @@ huge_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize_min,
if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(oldsize)) {
/* Attempt to expand the allocation in-place. */
if (!huge_ralloc_no_move_expand(tsd, ptr, oldsize, usize_max,
if (!huge_ralloc_no_move_expand(tsdn, ptr, oldsize, usize_max,
zero)) {
arena_decay_tick(tsd, huge_aalloc(ptr));
arena_decay_tick(tsdn, huge_aalloc(ptr));
return (false);
}
/* Try again, this time with usize_min. */
if (usize_min < usize_max && CHUNK_CEILING(usize_min) >
CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(tsd,
CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(tsdn,
ptr, oldsize, usize_min, zero)) {
arena_decay_tick(tsd, huge_aalloc(ptr));
arena_decay_tick(tsdn, huge_aalloc(ptr));
return (false);
}
}
@@ -313,16 +316,17 @@ huge_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize_min,
*/
if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize_min)
&& CHUNK_CEILING(oldsize) <= CHUNK_CEILING(usize_max)) {
huge_ralloc_no_move_similar(tsd, ptr, oldsize, usize_min,
huge_ralloc_no_move_similar(tsdn, ptr, oldsize, usize_min,
usize_max, zero);
arena_decay_tick(tsd, huge_aalloc(ptr));
arena_decay_tick(tsdn, huge_aalloc(ptr));
return (false);
}
/* Attempt to shrink the allocation in-place. */
if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max)) {
if (!huge_ralloc_no_move_shrink(tsd, ptr, oldsize, usize_max)) {
arena_decay_tick(tsd, huge_aalloc(ptr));
if (!huge_ralloc_no_move_shrink(tsdn, ptr, oldsize,
usize_max)) {
arena_decay_tick(tsdn, huge_aalloc(ptr));
return (false);
}
}
@@ -330,18 +334,18 @@ huge_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize_min,
}
static void *
huge_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize,
huge_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
size_t alignment, bool zero)
{
if (alignment <= chunksize)
return (huge_malloc(tsd, arena, usize, zero));
return (huge_palloc(tsd, arena, usize, alignment, zero));
return (huge_malloc(tsdn, arena, usize, zero));
return (huge_palloc(tsdn, arena, usize, alignment, zero));
}
void *
huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t usize,
size_t alignment, bool zero, tcache_t *tcache)
huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
size_t usize, size_t alignment, bool zero, tcache_t *tcache)
{
void *ret;
size_t copysize;
@@ -350,7 +354,8 @@ huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t usize,
assert(usize > 0 && usize <= HUGE_MAXCLASS);
/* Try to avoid moving the allocation. */
if (!huge_ralloc_no_move(tsd, ptr, oldsize, usize, usize, zero))
if (!huge_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, usize,
zero))
return (ptr);
/*
@@ -358,7 +363,8 @@ huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t usize,
* different size class. In that case, fall back to allocating new
* space and copying.
*/
ret = huge_ralloc_move_helper(tsd, arena, usize, alignment, zero);
ret = huge_ralloc_move_helper(tsd_tsdn(tsd), arena, usize, alignment,
zero);
if (ret == NULL)
return (NULL);
@@ -369,7 +375,7 @@ huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t usize,
}
void
huge_dalloc(tsd_t *tsd, void *ptr)
huge_dalloc(tsdn_t *tsdn, void *ptr)
{
extent_node_t *node;
arena_t *arena;
@@ -377,17 +383,17 @@ huge_dalloc(tsd_t *tsd, void *ptr)
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
huge_node_unset(ptr, node);
malloc_mutex_lock(tsd, &arena->huge_mtx);
malloc_mutex_lock(tsdn, &arena->huge_mtx);
ql_remove(&arena->huge, node, ql_link);
malloc_mutex_unlock(tsd, &arena->huge_mtx);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
huge_dalloc_junk(tsd, extent_node_addr_get(node),
huge_dalloc_junk(tsdn, extent_node_addr_get(node),
extent_node_size_get(node));
arena_chunk_dalloc_huge(tsd, extent_node_arena_get(node),
arena_chunk_dalloc_huge(tsdn, extent_node_arena_get(node),
extent_node_addr_get(node), extent_node_size_get(node));
idalloctm(tsd, node, NULL, true, true);
idalloctm(tsdn, node, NULL, true, true);
arena_decay_tick(tsd, arena);
arena_decay_tick(tsdn, arena);
}
arena_t *
@@ -398,7 +404,7 @@ huge_aalloc(const void *ptr)
}
size_t
huge_salloc(tsd_t *tsd, const void *ptr)
huge_salloc(tsdn_t *tsdn, const void *ptr)
{
size_t size;
extent_node_t *node;
@@ -406,15 +412,15 @@ huge_salloc(tsd_t *tsd, const void *ptr)
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
malloc_mutex_lock(tsd, &arena->huge_mtx);
malloc_mutex_lock(tsdn, &arena->huge_mtx);
size = extent_node_size_get(node);
malloc_mutex_unlock(tsd, &arena->huge_mtx);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
return (size);
}
prof_tctx_t *
huge_prof_tctx_get(tsd_t *tsd, const void *ptr)
huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr)
{
prof_tctx_t *tctx;
extent_node_t *node;
@@ -422,29 +428,29 @@ huge_prof_tctx_get(tsd_t *tsd, const void *ptr)
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
malloc_mutex_lock(tsd, &arena->huge_mtx);
malloc_mutex_lock(tsdn, &arena->huge_mtx);
tctx = extent_node_prof_tctx_get(node);
malloc_mutex_unlock(tsd, &arena->huge_mtx);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
return (tctx);
}
void
huge_prof_tctx_set(tsd_t *tsd, const void *ptr, prof_tctx_t *tctx)
huge_prof_tctx_set(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx)
{
extent_node_t *node;
arena_t *arena;
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
malloc_mutex_lock(tsd, &arena->huge_mtx);
malloc_mutex_lock(tsdn, &arena->huge_mtx);
extent_node_prof_tctx_set(node, tctx);
malloc_mutex_unlock(tsd, &arena->huge_mtx);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
}
void
huge_prof_tctx_reset(tsd_t *tsd, const void *ptr)
huge_prof_tctx_reset(tsdn_t *tsdn, const void *ptr)
{
huge_prof_tctx_set(tsd, ptr, (prof_tctx_t *)(uintptr_t)1U);
huge_prof_tctx_set(tsdn, ptr, (prof_tctx_t *)(uintptr_t)1U);
}
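
The conversions that recur throughout this diff (tsd_tsdn(), tsdn_null(), tsdn_tsd(), and the TSDN_NULL sentinel) all follow one shape. Below is a minimal self-contained sketch, not jemalloc's verbatim definitions: the struct layout is an assumption made for illustration, and tsdn_tsd() is the only helper that asserts, since it is the one conversion that can be invalid.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct tsd_s tsd_t;	/* non-nullable thread-specific data */
typedef struct tsdn_s tsdn_t;	/* nullable counterpart */

/* Assumed layout for this sketch: tsdn_t transparently wraps tsd_t,
 * so the two pointer types convert by cast. */
struct tsd_s { int dummy_state; };
struct tsdn_s { tsd_t tsd; };

#define TSDN_NULL ((tsdn_t *)0)

/* Safe direction: every valid tsd_t * is also a valid tsdn_t *. */
static inline tsdn_t *
tsd_tsdn(tsd_t *tsd)
{
    return ((tsdn_t *)tsd);
}

static inline bool
tsdn_null(const tsdn_t *tsdn)
{
    return (tsdn == NULL);
}

/* Unsafe direction: callers must have checked tsdn_null() first. */
static inline tsd_t *
tsdn_tsd(tsdn_t *tsdn)
{
    assert(!tsdn_null(tsdn));
    return (&tsdn->tsd);
}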

File diff suppressed because it is too large


@@ -109,25 +109,25 @@ malloc_mutex_init(malloc_mutex_t *mutex, const char *name, witness_rank_t rank)
}
void
malloc_mutex_prefork(tsd_t *tsd, malloc_mutex_t *mutex)
malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
malloc_mutex_lock(tsd, mutex);
malloc_mutex_lock(tsdn, mutex);
}
void
malloc_mutex_postfork_parent(tsd_t *tsd, malloc_mutex_t *mutex)
malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
malloc_mutex_unlock(tsd, mutex);
malloc_mutex_unlock(tsdn, mutex);
}
void
malloc_mutex_postfork_child(tsd_t *tsd, malloc_mutex_t *mutex)
malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
#ifdef JEMALLOC_MUTEX_INIT_CB
malloc_mutex_unlock(tsd, mutex);
malloc_mutex_unlock(tsdn, mutex);
#else
if (malloc_mutex_init(mutex, mutex->witness.name,
mutex->witness.rank)) {
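
For context on the malloc_mutex_lock()/malloc_mutex_unlock() pairs threaded through every file here: the handle is forwarded to the witness layer so lock ordering can be checked whenever thread state exists. The sketch below shows that plumbing over a plain pthread mutex, reusing the helper types from the first sketch; the witness hooks are stubbed, and jemalloc's real locking is platform-specific rather than pthread-only.

#include <pthread.h>

typedef struct witness_s { unsigned rank; } witness_t;

typedef struct malloc_mutex_s {
    pthread_mutex_t lock;
    witness_t witness;
} malloc_mutex_t;

/* Stub witness hooks: the real ones validate lock ordering and do
 * nothing when tsdn is NULL, i.e. before tsd is bootstrapped. */
static void
witness_lock(tsdn_t *tsdn, witness_t *witness)
{
    if (tsdn_null(tsdn))
        return;
    (void)witness;	/* ... append to the per-thread owned list ... */
}

static void
witness_unlock(tsdn_t *tsdn, witness_t *witness)
{
    if (tsdn_null(tsdn))
        return;
    (void)witness;	/* ... remove from the per-thread owned list ... */
}

static void
malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
    pthread_mutex_lock(&mutex->lock);
    witness_lock(tsdn, &mutex->witness);
}

static void
malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
    witness_unlock(tsdn, &mutex->witness);
    pthread_mutex_unlock(&mutex->lock);
}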

File diff suppressed because it is too large


@@ -13,24 +13,22 @@
/* Function prototypes for non-inline static functions. */
static quarantine_t *quarantine_grow(tsd_t *tsd, quarantine_t *quarantine);
static void quarantine_drain_one(tsd_t *tsd, quarantine_t *quarantine);
static void quarantine_drain(tsd_t *tsd, quarantine_t *quarantine,
static void quarantine_drain_one(tsdn_t *tsdn, quarantine_t *quarantine);
static void quarantine_drain(tsdn_t *tsdn, quarantine_t *quarantine,
size_t upper_bound);
/******************************************************************************/
static quarantine_t *
quarantine_init(tsd_t *tsd, size_t lg_maxobjs)
quarantine_init(tsdn_t *tsdn, size_t lg_maxobjs)
{
quarantine_t *quarantine;
size_t size;
assert(tsd_nominal(tsd));
size = offsetof(quarantine_t, objs) + ((ZU(1) << lg_maxobjs) *
sizeof(quarantine_obj_t));
quarantine = (quarantine_t *)iallocztm(tsd, size, size2index(size),
false, NULL, true, arena_get(NULL, 0, true), true);
quarantine = (quarantine_t *)iallocztm(tsdn, size, size2index(size),
false, NULL, true, arena_get(TSDN_NULL, 0, true), true);
if (quarantine == NULL)
return (NULL);
quarantine->curbytes = 0;
@@ -49,7 +47,7 @@ quarantine_alloc_hook_work(tsd_t *tsd)
if (!tsd_nominal(tsd))
return;
quarantine = quarantine_init(tsd, LG_MAXOBJS_INIT);
quarantine = quarantine_init(tsd_tsdn(tsd), LG_MAXOBJS_INIT);
/*
* Check again whether quarantine has been initialized, because
* quarantine_init() may have triggered recursive initialization.
@@ -57,7 +55,7 @@ quarantine_alloc_hook_work(tsd_t *tsd)
if (tsd_quarantine_get(tsd) == NULL)
tsd_quarantine_set(tsd, quarantine);
else
idalloctm(tsd, quarantine, NULL, true, true);
idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true);
}
static quarantine_t *
@@ -65,9 +63,9 @@ quarantine_grow(tsd_t *tsd, quarantine_t *quarantine)
{
quarantine_t *ret;
ret = quarantine_init(tsd, quarantine->lg_maxobjs + 1);
ret = quarantine_init(tsd_tsdn(tsd), quarantine->lg_maxobjs + 1);
if (ret == NULL) {
quarantine_drain_one(tsd, quarantine);
quarantine_drain_one(tsd_tsdn(tsd), quarantine);
return (quarantine);
}
@@ -89,18 +87,18 @@ quarantine_grow(tsd_t *tsd, quarantine_t *quarantine)
memcpy(&ret->objs[ncopy_a], quarantine->objs, ncopy_b *
sizeof(quarantine_obj_t));
}
idalloctm(tsd, quarantine, NULL, true, true);
idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true);
tsd_quarantine_set(tsd, ret);
return (ret);
}
static void
quarantine_drain_one(tsd_t *tsd, quarantine_t *quarantine)
quarantine_drain_one(tsdn_t *tsdn, quarantine_t *quarantine)
{
quarantine_obj_t *obj = &quarantine->objs[quarantine->first];
assert(obj->usize == isalloc(tsd, obj->ptr, config_prof));
idalloctm(tsd, obj->ptr, NULL, false, true);
assert(obj->usize == isalloc(tsdn, obj->ptr, config_prof));
idalloctm(tsdn, obj->ptr, NULL, false, true);
quarantine->curbytes -= obj->usize;
quarantine->curobjs--;
quarantine->first = (quarantine->first + 1) & ((ZU(1) <<
@@ -108,24 +106,24 @@ quarantine_drain_one(tsd_t *tsd, quarantine_t *quarantine)
}
static void
quarantine_drain(tsd_t *tsd, quarantine_t *quarantine, size_t upper_bound)
quarantine_drain(tsdn_t *tsdn, quarantine_t *quarantine, size_t upper_bound)
{
while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0)
quarantine_drain_one(tsd, quarantine);
quarantine_drain_one(tsdn, quarantine);
}
void
quarantine(tsd_t *tsd, void *ptr)
{
quarantine_t *quarantine;
size_t usize = isalloc(tsd, ptr, config_prof);
size_t usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
cassert(config_fill);
assert(opt_quarantine);
if ((quarantine = tsd_quarantine_get(tsd)) == NULL) {
idalloctm(tsd, ptr, NULL, false, true);
idalloctm(tsd_tsdn(tsd), ptr, NULL, false, true);
return;
}
/*
@@ -135,7 +133,7 @@ quarantine(tsd_t *tsd, void *ptr)
if (quarantine->curbytes + usize > opt_quarantine) {
size_t upper_bound = (opt_quarantine >= usize) ? opt_quarantine
- usize : 0;
quarantine_drain(tsd, quarantine, upper_bound);
quarantine_drain(tsd_tsdn(tsd), quarantine, upper_bound);
}
/* Grow the quarantine ring buffer if it's full. */
if (quarantine->curobjs == (ZU(1) << quarantine->lg_maxobjs))
@@ -164,7 +162,7 @@ quarantine(tsd_t *tsd, void *ptr)
}
} else {
assert(quarantine->curbytes == 0);
idalloctm(tsd, ptr, NULL, false, true);
idalloctm(tsd_tsdn(tsd), ptr, NULL, false, true);
}
}
@@ -178,8 +176,8 @@ quarantine_cleanup(tsd_t *tsd)
quarantine = tsd_quarantine_get(tsd);
if (quarantine != NULL) {
quarantine_drain(tsd, quarantine, 0);
idalloctm(tsd, quarantine, NULL, true, true);
quarantine_drain(tsd_tsdn(tsd), quarantine, 0);
idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true);
tsd_quarantine_set(tsd, NULL);
}
}
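
The quarantine changes show where the boundary between the two handle types sits: entry points that actually read or write thread-local slots (tsd_quarantine_get()/tsd_quarantine_set()) keep taking tsd_t, and only downgrade via tsd_tsdn() when calling allocation internals that do not require thread state. A small sketch of that call-site shape, with hypothetical names and the helpers from the first sketch:

#include <stdlib.h>

/* Hypothetical tsdn-taking internal, standing in for idalloctm(). */
static void
internal_dalloc(tsdn_t *tsdn, void *ptr)
{
    (void)tsdn;	/* Thread state is optional down here. */
    free(ptr);
}

/* Hypothetical tsd-requiring entry point, standing in for the
 * quarantine functions: per-thread slot access forces a real tsd_t. */
static void
thread_cleanup(tsd_t *tsd, void *ptr)
{
    /* ... per-thread slot access would use tsd directly ... */
    internal_dalloc(tsd_tsdn(tsd), ptr);
}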


@@ -24,10 +24,10 @@ static tcaches_t *tcaches_avail;
/******************************************************************************/
size_t
tcache_salloc(tsd_t *tsd, const void *ptr)
tcache_salloc(tsdn_t *tsdn, const void *ptr)
{
return (arena_salloc(tsd, ptr, false));
return (arena_salloc(tsdn, ptr, false));
}
void
@@ -71,12 +71,12 @@ tcache_event_hard(tsd_t *tsd, tcache_t *tcache)
}
void *
tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
tcache_bin_t *tbin, szind_t binind, bool *tcache_success)
{
void *ret;
arena_tcache_fill_small(tsd, arena, tbin, binind, config_prof ?
arena_tcache_fill_small(tsdn, arena, tbin, binind, config_prof ?
tcache->prof_accumbytes : 0);
if (config_prof)
tcache->prof_accumbytes = 0;
@@ -107,13 +107,13 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
arena_bin_t *bin = &bin_arena->bins[binind];
if (config_prof && bin_arena == arena) {
if (arena_prof_accum(tsd, arena,
if (arena_prof_accum(tsd_tsdn(tsd), arena,
tcache->prof_accumbytes))
prof_idump(tsd);
prof_idump(tsd_tsdn(tsd));
tcache->prof_accumbytes = 0;
}
malloc_mutex_lock(tsd, &bin->lock);
malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
if (config_stats && bin_arena == arena) {
assert(!merged_stats);
merged_stats = true;
@@ -131,8 +131,8 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
(uintptr_t)chunk) >> LG_PAGE;
arena_chunk_map_bits_t *bitselm =
arena_bitselm_get_mutable(chunk, pageind);
arena_dalloc_bin_junked_locked(tsd, bin_arena,
chunk, ptr, bitselm);
arena_dalloc_bin_junked_locked(tsd_tsdn(tsd),
bin_arena, chunk, ptr, bitselm);
} else {
/*
* This object was allocated via a different
@@ -144,8 +144,8 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
ndeferred++;
}
}
malloc_mutex_unlock(tsd, &bin->lock);
arena_decay_ticks(tsd, bin_arena, nflush - ndeferred);
malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
arena_decay_ticks(tsd_tsdn(tsd), bin_arena, nflush - ndeferred);
}
if (config_stats && !merged_stats) {
/*
@@ -153,11 +153,11 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
* arena, so the stats didn't get merged. Manually do so now.
*/
arena_bin_t *bin = &arena->bins[binind];
malloc_mutex_lock(tsd, &bin->lock);
malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
bin->stats.nflushes++;
bin->stats.nrequests += tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
malloc_mutex_unlock(tsd, &bin->lock);
malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
}
memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
@@ -190,7 +190,7 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
if (config_prof)
idump = false;
malloc_mutex_lock(tsd, &locked_arena->lock);
malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->lock);
if ((config_prof || config_stats) && locked_arena == arena) {
if (config_prof) {
idump = arena_prof_accum_locked(arena,
@@ -213,7 +213,7 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (extent_node_arena_get(&chunk->node) ==
locked_arena) {
arena_dalloc_large_junked_locked(tsd,
arena_dalloc_large_junked_locked(tsd_tsdn(tsd),
locked_arena, chunk, ptr);
} else {
/*
@@ -226,22 +226,23 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
ndeferred++;
}
}
malloc_mutex_unlock(tsd, &locked_arena->lock);
malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->lock);
if (config_prof && idump)
prof_idump(tsd);
arena_decay_ticks(tsd, locked_arena, nflush - ndeferred);
prof_idump(tsd_tsdn(tsd));
arena_decay_ticks(tsd_tsdn(tsd), locked_arena, nflush -
ndeferred);
}
if (config_stats && !merged_stats) {
/*
* The flush loop didn't happen to flush to this thread's
* arena, so the stats didn't get merged. Manually do so now.
*/
malloc_mutex_lock(tsd, &arena->lock);
malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
arena->stats.nrequests_large += tbin->tstats.nrequests;
arena->stats.lstats[binind - NBINS].nrequests +=
tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
malloc_mutex_unlock(tsd, &arena->lock);
malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
}
memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
@@ -251,35 +252,26 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
tbin->low_water = tbin->ncached;
}
void
tcache_arena_associate(tsd_t *tsd, tcache_t *tcache, arena_t *arena)
static void
tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
{
if (config_stats) {
/* Link into list of extant tcaches. */
malloc_mutex_lock(tsd, &arena->lock);
malloc_mutex_lock(tsdn, &arena->lock);
ql_elm_new(tcache, link);
ql_tail_insert(&arena->tcache_ql, tcache, link);
malloc_mutex_unlock(tsd, &arena->lock);
malloc_mutex_unlock(tsdn, &arena->lock);
}
}
void
tcache_arena_reassociate(tsd_t *tsd, tcache_t *tcache, arena_t *oldarena,
arena_t *newarena)
{
tcache_arena_dissociate(tsd, tcache, oldarena);
tcache_arena_associate(tsd, tcache, newarena);
}
void
tcache_arena_dissociate(tsd_t *tsd, tcache_t *tcache, arena_t *arena)
static void
tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
{
if (config_stats) {
/* Unlink from list of extant tcaches. */
malloc_mutex_lock(tsd, &arena->lock);
malloc_mutex_lock(tsdn, &arena->lock);
if (config_debug) {
bool in_ql = false;
tcache_t *iter;
@@ -292,11 +284,20 @@ tcache_arena_dissociate(tsd_t *tsd, tcache_t *tcache, arena_t *arena)
assert(in_ql);
}
ql_remove(&arena->tcache_ql, tcache, link);
tcache_stats_merge(tsd, tcache, arena);
malloc_mutex_unlock(tsd, &arena->lock);
tcache_stats_merge(tsdn, tcache, arena);
malloc_mutex_unlock(tsdn, &arena->lock);
}
}
void
tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *oldarena,
arena_t *newarena)
{
tcache_arena_dissociate(tsdn, tcache, oldarena);
tcache_arena_associate(tsdn, tcache, newarena);
}
tcache_t *
tcache_get_hard(tsd_t *tsd)
{
@@ -310,11 +311,11 @@ tcache_get_hard(tsd_t *tsd)
arena = arena_choose(tsd, NULL);
if (unlikely(arena == NULL))
return (NULL);
return (tcache_create(tsd, arena));
return (tcache_create(tsd_tsdn(tsd), arena));
}
tcache_t *
tcache_create(tsd_t *tsd, arena_t *arena)
tcache_create(tsdn_t *tsdn, arena_t *arena)
{
tcache_t *tcache;
size_t size, stack_offset;
@@ -328,12 +329,12 @@ tcache_create(tsd_t *tsd, arena_t *arena)
/* Avoid false cacheline sharing. */
size = sa2u(size, CACHELINE);
tcache = ipallocztm(tsd, size, CACHELINE, true, NULL, true,
arena_get(NULL, 0, true));
tcache = ipallocztm(tsdn, size, CACHELINE, true, NULL, true,
arena_get(TSDN_NULL, 0, true));
if (tcache == NULL)
return (NULL);
tcache_arena_associate(tsd, tcache, arena);
tcache_arena_associate(tsdn, tcache, arena);
ticker_init(&tcache->gc_ticker, TCACHE_GC_INCR);
@@ -360,7 +361,7 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
unsigned i;
arena = arena_choose(tsd, NULL);
tcache_arena_dissociate(tsd, tcache, arena);
tcache_arena_dissociate(tsd_tsdn(tsd), tcache, arena);
for (i = 0; i < NBINS; i++) {
tcache_bin_t *tbin = &tcache->tbins[i];
@@ -368,9 +369,9 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
if (config_stats && tbin->tstats.nrequests != 0) {
arena_bin_t *bin = &arena->bins[i];
malloc_mutex_lock(tsd, &bin->lock);
malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
bin->stats.nrequests += tbin->tstats.nrequests;
malloc_mutex_unlock(tsd, &bin->lock);
malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
}
}
@@ -379,19 +380,19 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
tcache_bin_flush_large(tsd, tbin, i, 0, tcache);
if (config_stats && tbin->tstats.nrequests != 0) {
malloc_mutex_lock(tsd, &arena->lock);
malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
arena->stats.nrequests_large += tbin->tstats.nrequests;
arena->stats.lstats[i - NBINS].nrequests +=
tbin->tstats.nrequests;
malloc_mutex_unlock(tsd, &arena->lock);
malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
}
}
if (config_prof && tcache->prof_accumbytes > 0 &&
arena_prof_accum(tsd, arena, tcache->prof_accumbytes))
prof_idump(tsd);
arena_prof_accum(tsd_tsdn(tsd), arena, tcache->prof_accumbytes))
prof_idump(tsd_tsdn(tsd));
idalloctm(tsd, tcache, NULL, true, true);
idalloctm(tsd_tsdn(tsd), tcache, NULL, true, true);
}
void
@@ -416,21 +417,21 @@ tcache_enabled_cleanup(tsd_t *tsd)
}
void
tcache_stats_merge(tsd_t *tsd, tcache_t *tcache, arena_t *arena)
tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
{
unsigned i;
cassert(config_stats);
malloc_mutex_assert_owner(tsd, &arena->lock);
malloc_mutex_assert_owner(tsdn, &arena->lock);
/* Merge and reset tcache stats. */
for (i = 0; i < NBINS; i++) {
arena_bin_t *bin = &arena->bins[i];
tcache_bin_t *tbin = &tcache->tbins[i];
malloc_mutex_lock(tsd, &bin->lock);
malloc_mutex_lock(tsdn, &bin->lock);
bin->stats.nrequests += tbin->tstats.nrequests;
malloc_mutex_unlock(tsd, &bin->lock);
malloc_mutex_unlock(tsdn, &bin->lock);
tbin->tstats.nrequests = 0;
}
@@ -444,14 +445,14 @@ tcache_stats_merge(tsd_t *tsd, tcache_t *tcache, arena_t *arena)
}
bool
tcaches_create(tsd_t *tsd, unsigned *r_ind)
tcaches_create(tsdn_t *tsdn, unsigned *r_ind)
{
arena_t *arena;
tcache_t *tcache;
tcaches_t *elm;
if (tcaches == NULL) {
tcaches = base_alloc(tsd, sizeof(tcache_t *) *
tcaches = base_alloc(tsdn, sizeof(tcache_t *) *
(MALLOCX_TCACHE_MAX+1));
if (tcaches == NULL)
return (true);
@@ -459,10 +460,10 @@ tcaches_create(tsd_t *tsd, unsigned *r_ind)
if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX)
return (true);
arena = arena_ichoose(tsd, NULL);
arena = arena_ichoose(tsdn, NULL);
if (unlikely(arena == NULL))
return (true);
tcache = tcache_create(tsd, arena);
tcache = tcache_create(tsdn, arena);
if (tcache == NULL)
return (true);
@@ -508,7 +509,7 @@ tcaches_destroy(tsd_t *tsd, unsigned ind)
}
bool
tcache_boot(tsd_t *tsd)
tcache_boot(tsdn_t *tsdn)
{
unsigned i;
@@ -526,7 +527,7 @@ tcache_boot(tsd_t *tsd)
nhbins = size2index(tcache_maxclass) + 1;
/* Initialize tcache_bin_info. */
tcache_bin_info = (tcache_bin_info_t *)base_alloc(tsd, nhbins *
tcache_bin_info = (tcache_bin_info_t *)base_alloc(tsdn, nhbins *
sizeof(tcache_bin_info_t));
if (tcache_bin_info == NULL)
return (true);


@@ -34,17 +34,19 @@ witness_lock_error_t *witness_lock_error = JEMALLOC_N(witness_lock_error_impl);
#endif
void
witness_lock(tsd_t *tsd, witness_t *witness)
witness_lock(tsdn_t *tsdn, witness_t *witness)
{
tsd_t *tsd;
witness_list_t *witnesses;
witness_t *w;
if (tsd == NULL)
if (tsdn_null(tsdn))
return;
tsd = tsdn_tsd(tsdn);
if (witness->rank == WITNESS_RANK_OMIT)
return;
witness_assert_not_owner(tsd, witness);
witness_assert_not_owner(tsdn, witness);
witnesses = tsd_witnessesp_get(tsd);
w = ql_last(witnesses, link);
@@ -69,16 +71,18 @@ witness_lock(tsd_t *tsd, witness_t *witness)
}
void
witness_unlock(tsd_t *tsd, witness_t *witness)
witness_unlock(tsdn_t *tsdn, witness_t *witness)
{
tsd_t *tsd;
witness_list_t *witnesses;
if (tsd == NULL)
if (tsdn_null(tsdn))
return;
tsd = tsdn_tsd(tsdn);
if (witness->rank == WITNESS_RANK_OMIT)
return;
witness_assert_owner(tsd, witness);
witness_assert_owner(tsdn, witness);
witnesses = tsd_witnessesp_get(tsd);
ql_remove(witnesses, witness, link);
@@ -104,13 +108,15 @@ witness_owner_error_t *witness_owner_error =
#endif
void
witness_assert_owner(tsd_t *tsd, const witness_t *witness)
witness_assert_owner(tsdn_t *tsdn, const witness_t *witness)
{
tsd_t *tsd;
witness_list_t *witnesses;
witness_t *w;
if (tsd == NULL)
if (tsdn_null(tsdn))
return;
tsd = tsdn_tsd(tsdn);
if (witness->rank == WITNESS_RANK_OMIT)
return;
@@ -142,13 +148,15 @@ witness_not_owner_error_t *witness_not_owner_error =
#endif
void
witness_assert_not_owner(tsd_t *tsd, const witness_t *witness)
witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness)
{
tsd_t *tsd;
witness_list_t *witnesses;
witness_t *w;
if (tsd == NULL)
if (tsdn_null(tsdn))
return;
tsd = tsdn_tsd(tsdn);
if (witness->rank == WITNESS_RANK_OMIT)
return;
@@ -183,13 +191,15 @@ witness_lockless_error_t *witness_lockless_error =
#endif
void
witness_assert_lockless(tsd_t *tsd)
witness_assert_lockless(tsdn_t *tsdn)
{
tsd_t *tsd;
witness_list_t *witnesses;
witness_t *w;
if (tsd == NULL)
if (tsdn_null(tsdn))
return;
tsd = tsdn_tsd(tsdn);
witnesses = tsd_witnessesp_get(tsd);
w = ql_last(witnesses, link);
@@ -202,7 +212,7 @@ void
witnesses_cleanup(tsd_t *tsd)
{
witness_assert_lockless(tsd);
witness_assert_lockless(tsd_tsdn(tsd));
/* Do nothing. */
}
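
All of the witness entry points above now share the same prologue: return early on a null handle, then convert exactly once and operate on the per-thread witness list. Condensed below, using the helpers from the first sketch, with the list handling elided:

static void
witness_op_prologue(tsdn_t *tsdn)
{
    tsd_t *tsd;

    if (tsdn_null(tsdn))
        return;	/* tsd not bootstrapped yet: skip validation. */
    tsd = tsdn_tsd(tsdn);	/* Guaranteed non-NULL past this point. */
    (void)tsd;	/* ... walk/modify tsd_witnessesp_get(tsd) ... */
}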


@@ -56,7 +56,7 @@ zone_size(malloc_zone_t *zone, void *ptr)
* not work in practice, we must check all pointers to assure that they
* reside within a mapped chunk before determining size.
*/
return (ivsalloc(tsd_fetch(), ptr, config_prof));
return (ivsalloc(tsdn_fetch(), ptr, config_prof));
}
static void *
@@ -87,7 +87,7 @@ static void
zone_free(malloc_zone_t *zone, void *ptr)
{
if (ivsalloc(tsd_fetch(), ptr, config_prof) != 0) {
if (ivsalloc(tsdn_fetch(), ptr, config_prof) != 0) {
je_free(ptr);
return;
}
@@ -99,7 +99,7 @@ static void *
zone_realloc(malloc_zone_t *zone, void *ptr, size_t size)
{
if (ivsalloc(tsd_fetch(), ptr, config_prof) != 0)
if (ivsalloc(tsdn_fetch(), ptr, config_prof) != 0)
return (je_realloc(ptr, size));
return (realloc(ptr, size));
@@ -123,7 +123,7 @@ zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size)
{
size_t alloc_size;
alloc_size = ivsalloc(tsd_fetch(), ptr, config_prof);
alloc_size = ivsalloc(tsdn_fetch(), ptr, config_prof);
if (alloc_size != 0) {
assert(alloc_size == size);
je_free(ptr);
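
The zone hooks above can run before bootstrapping completes, hence the switch from tsd_fetch() to tsdn_fetch(). A sketch of such a probing fetch, reusing the first sketch's helpers; tsd_booted_get() and tsd_fetch() are real jemalloc names, but the flag and bodies below are stand-ins:

static bool tsd_booted = false;	/* Set once tsd init completes. */

static bool
tsd_booted_get(void)
{
    return (tsd_booted);
}

/* Stand-in for the non-nullable fetch, which is only legal once tsd
 * is bootstrapped. */
static tsd_t *
tsd_fetch(void)
{
    static tsd_t tsd;
    return (&tsd);
}

static tsdn_t *
tsdn_fetch(void)
{
    if (!tsd_booted_get())
        return (TSDN_NULL);	/* Callers degrade gracefully. */
    return (tsd_tsdn(tsd_fetch()));
}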