Rename huge to large.
src/arena.c | 143

@@ -256,71 +256,71 @@ arena_nactive_sub(arena_t *arena, size_t sub_pages)
 }
 
 static void
-arena_huge_malloc_stats_update(arena_t *arena, size_t usize)
+arena_large_malloc_stats_update(arena_t *arena, size_t usize)
 {
 	szind_t index = size2index(usize);
 	szind_t hindex = (index >= NBINS) ? index - NBINS : 0;
 
 	cassert(config_stats);
 
-	arena->stats.nmalloc_huge++;
-	arena->stats.allocated_huge += usize;
-	arena->stats.hstats[hindex].nmalloc++;
-	arena->stats.hstats[hindex].nrequests++;
-	arena->stats.hstats[hindex].curhchunks++;
+	arena->stats.nmalloc_large++;
+	arena->stats.allocated_large += usize;
+	arena->stats.lstats[hindex].nmalloc++;
+	arena->stats.lstats[hindex].nrequests++;
+	arena->stats.lstats[hindex].curlextents++;
 }
 
 static void
-arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize)
+arena_large_malloc_stats_update_undo(arena_t *arena, size_t usize)
 {
 	szind_t index = size2index(usize);
 	szind_t hindex = (index >= NBINS) ? index - NBINS : 0;
 
 	cassert(config_stats);
 
-	arena->stats.nmalloc_huge--;
-	arena->stats.allocated_huge -= usize;
-	arena->stats.hstats[hindex].nmalloc--;
-	arena->stats.hstats[hindex].nrequests--;
-	arena->stats.hstats[hindex].curhchunks--;
+	arena->stats.nmalloc_large--;
+	arena->stats.allocated_large -= usize;
+	arena->stats.lstats[hindex].nmalloc--;
+	arena->stats.lstats[hindex].nrequests--;
+	arena->stats.lstats[hindex].curlextents--;
 }
 
 static void
-arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
+arena_large_dalloc_stats_update(arena_t *arena, size_t usize)
 {
 	szind_t index = size2index(usize);
 	szind_t hindex = (index >= NBINS) ? index - NBINS : 0;
 
 	cassert(config_stats);
 
-	arena->stats.ndalloc_huge++;
-	arena->stats.allocated_huge -= usize;
-	arena->stats.hstats[hindex].ndalloc++;
-	arena->stats.hstats[hindex].curhchunks--;
+	arena->stats.ndalloc_large++;
+	arena->stats.allocated_large -= usize;
+	arena->stats.lstats[hindex].ndalloc++;
+	arena->stats.lstats[hindex].curlextents--;
 }
 
 static void
-arena_huge_reset_stats_cancel(arena_t *arena, size_t usize)
+arena_large_reset_stats_cancel(arena_t *arena, size_t usize)
 {
 	szind_t index = size2index(usize);
 	szind_t hindex = (index >= NBINS) ? index - NBINS : 0;
 
 	cassert(config_stats);
 
-	arena->stats.ndalloc_huge++;
-	arena->stats.hstats[hindex].ndalloc--;
+	arena->stats.ndalloc_large++;
+	arena->stats.lstats[hindex].ndalloc--;
 }
 
 static void
-arena_huge_ralloc_stats_update(arena_t *arena, size_t oldusize, size_t usize)
+arena_large_ralloc_stats_update(arena_t *arena, size_t oldusize, size_t usize)
 {
 
-	arena_huge_dalloc_stats_update(arena, oldusize);
-	arena_huge_malloc_stats_update(arena, usize);
+	arena_large_dalloc_stats_update(arena, oldusize);
+	arena_large_malloc_stats_update(arena, usize);
 }
 
 static extent_t *
-arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena,
+arena_chunk_alloc_large_hard(tsdn_t *tsdn, arena_t *arena,
     chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, bool *zero)
 {
 	extent_t *extent;
@@ -332,7 +332,7 @@ arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena,
 		/* Revert optimistic stats updates. */
 		malloc_mutex_lock(tsdn, &arena->lock);
 		if (config_stats) {
-			arena_huge_malloc_stats_update_undo(arena, usize);
+			arena_large_malloc_stats_update_undo(arena, usize);
 			arena->stats.mapped -= usize;
 		}
 		arena_nactive_sub(arena, (usize + large_pad) >> LG_PAGE);
@@ -343,7 +343,7 @@ arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena,
 }
 
 extent_t *
-arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
+arena_chunk_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
     size_t alignment, bool *zero)
 {
 	extent_t *extent;
@@ -353,7 +353,7 @@ arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
 
 	/* Optimistically update stats. */
 	if (config_stats) {
-		arena_huge_malloc_stats_update(arena, usize);
+		arena_large_malloc_stats_update(arena, usize);
 		arena->stats.mapped += usize;
 	}
 	arena_nactive_add(arena, (usize + large_pad) >> LG_PAGE);
@@ -362,7 +362,7 @@ arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
 	    usize, large_pad, alignment, zero, false);
 	malloc_mutex_unlock(tsdn, &arena->lock);
 	if (extent == NULL) {
-		extent = arena_chunk_alloc_huge_hard(tsdn, arena, &chunk_hooks,
+		extent = arena_chunk_alloc_large_hard(tsdn, arena, &chunk_hooks,
 		    usize, alignment, zero);
 	}
 
@@ -370,7 +370,7 @@ arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
 }
 
 void
-arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
+arena_chunk_dalloc_large(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
     bool locked)
 {
 	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
@@ -378,7 +378,8 @@ arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
 	if (!locked)
 		malloc_mutex_lock(tsdn, &arena->lock);
 	if (config_stats) {
-		arena_huge_dalloc_stats_update(arena, extent_usize_get(extent));
+		arena_large_dalloc_stats_update(arena,
+		    extent_usize_get(extent));
 		arena->stats.mapped -= extent_size_get(extent);
 	}
 	arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE);
@@ -389,7 +390,7 @@ arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
 }
 
 void
-arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
+arena_chunk_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
     size_t oldusize)
 {
 	size_t usize = extent_usize_get(extent);
@@ -397,7 +398,7 @@ arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
 
 	malloc_mutex_lock(tsdn, &arena->lock);
 	if (config_stats) {
-		arena_huge_ralloc_stats_update(arena, oldusize, usize);
+		arena_large_ralloc_stats_update(arena, oldusize, usize);
 		arena->stats.mapped -= udiff;
 	}
 	arena_nactive_sub(arena, udiff >> LG_PAGE);
@@ -405,7 +406,7 @@ arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
 }
 
 void
-arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
+arena_chunk_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
     size_t oldusize)
 {
 	size_t usize = extent_usize_get(extent);
@@ -413,7 +414,7 @@ arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
 
 	malloc_mutex_lock(tsdn, &arena->lock);
 	if (config_stats) {
-		arena_huge_ralloc_stats_update(arena, oldusize, usize);
+		arena_large_ralloc_stats_update(arena, oldusize, usize);
 		arena->stats.mapped += udiff;
 	}
 	arena_nactive_add(arena, udiff >> LG_PAGE);
@@ -891,26 +892,26 @@ arena_reset(tsd_t *tsd, arena_t *arena)
 	 * stats refreshes would impose an inconvenient burden.
 	 */
 
-	/* Huge allocations. */
-	malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
-	for (extent = ql_last(&arena->huge, ql_link); extent != NULL; extent =
-	    ql_last(&arena->huge, ql_link)) {
+	/* Large allocations. */
+	malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
+	for (extent = ql_last(&arena->large, ql_link); extent != NULL; extent =
+	    ql_last(&arena->large, ql_link)) {
 		void *ptr = extent_base_get(extent);
 		size_t usize;
 
-		malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);
+		malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
 		if (config_stats || (config_prof && opt_prof))
 			usize = isalloc(tsd_tsdn(tsd), extent, ptr);
-		/* Remove huge allocation from prof sample set. */
+		/* Remove large allocation from prof sample set. */
 		if (config_prof && opt_prof)
 			prof_free(tsd, extent, ptr, usize);
-		huge_dalloc(tsd_tsdn(tsd), extent);
-		malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
+		large_dalloc(tsd_tsdn(tsd), extent);
+		malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
 		/* Cancel out unwanted effects on stats. */
 		if (config_stats)
-			arena_huge_reset_stats_cancel(arena, usize);
+			arena_large_reset_stats_cancel(arena, usize);
 	}
-	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);
+	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
 
 	malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
 
@@ -1283,7 +1284,7 @@ arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
 
 	if (likely(size <= SMALL_MAXCLASS))
 		return (arena_malloc_small(tsdn, arena, ind, zero));
-	return (huge_malloc(tsdn, arena, index2size(ind), zero));
+	return (large_malloc(tsdn, arena, index2size(ind), zero));
 }
 
 void *
@@ -1299,9 +1300,9 @@ arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
 		    tcache, true);
 	} else {
 		if (likely(alignment <= CACHELINE))
-			ret = huge_malloc(tsdn, arena, usize, zero);
+			ret = large_malloc(tsdn, arena, usize, zero);
 		else
-			ret = huge_palloc(tsdn, arena, usize, alignment, zero);
+			ret = large_palloc(tsdn, arena, usize, alignment, zero);
 	}
 	return (ret);
 }
@@ -1360,10 +1361,10 @@ arena_dalloc_promoted(tsdn_t *tsdn, extent_t *extent, void *ptr,
 
 	usize = arena_prof_demote(tsdn, extent, ptr);
 	if (usize <= tcache_maxclass) {
-		tcache_dalloc_huge(tsdn_tsd(tsdn), tcache, ptr, usize,
+		tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, usize,
 		    slow_path);
 	} else
-		huge_dalloc(tsdn, extent);
+		large_dalloc(tsdn, extent);
 }
 
 static void
@@ -1493,9 +1494,9 @@ arena_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize,
 	size_t usize_min, usize_max;
 
 	/* Calls with non-zero extra had to clamp extra. */
-	assert(extra == 0 || size + extra <= HUGE_MAXCLASS);
+	assert(extra == 0 || size + extra <= LARGE_MAXCLASS);
 
-	if (unlikely(size > HUGE_MAXCLASS))
+	if (unlikely(size > LARGE_MAXCLASS))
 		return (true);
 
 	usize_min = s2u(size);
@@ -1515,7 +1516,7 @@ arena_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize,
 		arena_decay_tick(tsdn, extent_arena_get(extent));
 		return (false);
 	} else if (oldsize >= LARGE_MINCLASS && usize_max >= LARGE_MINCLASS) {
-		return (huge_ralloc_no_move(tsdn, extent, usize_min, usize_max,
+		return (large_ralloc_no_move(tsdn, extent, usize_min, usize_max,
 		    zero));
 	}
 
@@ -1531,7 +1532,7 @@ arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
 		return (arena_malloc(tsdn, arena, usize, size2index(usize),
 		    zero, tcache, true));
 	usize = sa2u(usize, alignment);
-	if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
+	if (unlikely(usize == 0 || usize > LARGE_MAXCLASS))
 		return (NULL);
 	return (ipalloct(tsdn, usize, alignment, zero, tcache, arena));
 }
@@ -1544,7 +1545,7 @@ arena_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr,
 	size_t usize, copysize;
 
 	usize = s2u(size);
-	if (unlikely(usize == 0 || size > HUGE_MAXCLASS))
+	if (unlikely(usize == 0 || size > LARGE_MAXCLASS))
 		return (NULL);
 
 	if (likely(usize <= SMALL_MAXCLASS)) {
@@ -1555,8 +1556,8 @@ arena_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr,
 	}
 
 	if (oldsize >= LARGE_MINCLASS && usize >= LARGE_MINCLASS) {
-		return (huge_ralloc(tsdn, arena, extent, usize, alignment, zero,
-		    tcache));
+		return (large_ralloc(tsdn, arena, extent, usize, alignment,
+		    zero, tcache));
 	}
 
 	/*
@@ -1670,7 +1671,7 @@ void
 arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
     const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
     size_t *nactive, size_t *ndirty, arena_stats_t *astats,
-    malloc_bin_stats_t *bstats, malloc_huge_stats_t *hstats)
+    malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats)
 {
 	unsigned i;
 
@@ -1687,16 +1688,16 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
 	astats->purged += arena->stats.purged;
 	astats->metadata_mapped += arena->stats.metadata_mapped;
 	astats->metadata_allocated += arena_metadata_allocated_get(arena);
-	astats->allocated_huge += arena->stats.allocated_huge;
-	astats->nmalloc_huge += arena->stats.nmalloc_huge;
-	astats->ndalloc_huge += arena->stats.ndalloc_huge;
-	astats->nrequests_huge += arena->stats.nrequests_huge;
+	astats->allocated_large += arena->stats.allocated_large;
+	astats->nmalloc_large += arena->stats.nmalloc_large;
+	astats->ndalloc_large += arena->stats.ndalloc_large;
+	astats->nrequests_large += arena->stats.nrequests_large;
 
 	for (i = 0; i < NSIZES - NBINS; i++) {
-		hstats[i].nmalloc += arena->stats.hstats[i].nmalloc;
-		hstats[i].ndalloc += arena->stats.hstats[i].ndalloc;
-		hstats[i].nrequests += arena->stats.hstats[i].nrequests;
-		hstats[i].curhchunks += arena->stats.hstats[i].curhchunks;
+		lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
+		lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
+		lstats[i].nrequests += arena->stats.lstats[i].nrequests;
+		lstats[i].curlextents += arena->stats.lstats[i].curlextents;
 	}
 	malloc_mutex_unlock(tsdn, &arena->lock);
 
@@ -1786,9 +1787,9 @@ arena_new(tsdn_t *tsdn, unsigned ind)
 	if (opt_purge == purge_mode_decay)
 		arena_decay_init(arena, arena_decay_time_default_get());
 
-	ql_new(&arena->huge);
-	if (malloc_mutex_init(&arena->huge_mtx, "arena_huge",
-	    WITNESS_RANK_ARENA_HUGE))
+	ql_new(&arena->large);
+	if (malloc_mutex_init(&arena->large_mtx, "arena_large",
+	    WITNESS_RANK_ARENA_LARGE))
 		return (NULL);
 
 	for (i = 0; i < NPSIZES; i++) {
@@ -1859,7 +1860,7 @@ arena_prefork3(tsdn_t *tsdn, arena_t *arena)
 
 	for (i = 0; i < NBINS; i++)
 		malloc_mutex_prefork(tsdn, &arena->bins[i].lock);
-	malloc_mutex_prefork(tsdn, &arena->huge_mtx);
+	malloc_mutex_prefork(tsdn, &arena->large_mtx);
 }
 
 void
@@ -1867,7 +1868,7 @@ arena_postfork_parent(tsdn_t *tsdn, arena_t *arena)
 {
 	unsigned i;
 
-	malloc_mutex_postfork_parent(tsdn, &arena->huge_mtx);
+	malloc_mutex_postfork_parent(tsdn, &arena->large_mtx);
 	for (i = 0; i < NBINS; i++)
 		malloc_mutex_postfork_parent(tsdn, &arena->bins[i].lock);
 	malloc_mutex_postfork_parent(tsdn, &arena->extent_cache_mtx);
@@ -1880,7 +1881,7 @@ arena_postfork_child(tsdn_t *tsdn, arena_t *arena)
 {
 	unsigned i;
 
-	malloc_mutex_postfork_child(tsdn, &arena->huge_mtx);
+	malloc_mutex_postfork_child(tsdn, &arena->large_mtx);
 	for (i = 0; i < NBINS; i++)
 		malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock);
 	malloc_mutex_postfork_child(tsdn, &arena->extent_cache_mtx);
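The allocation path above updates stats optimistically under the arena lock and reverts them via arena_large_malloc_stats_update_undo() if the hard chunk-allocation path fails. A self-contained model of that update/undo pattern, for illustration only (the types and helpers below are hypothetical stand-ins, not jemalloc internals):

#include <stdlib.h>

/* Hypothetical stand-in for the arena stats the diff manipulates. */
typedef struct {
	size_t nmalloc_large;
	size_t allocated_large;
} model_stats_t;

static void
model_stats_update(model_stats_t *s, size_t usize)
{
	s->nmalloc_large++;
	s->allocated_large += usize;
}

static void
model_stats_update_undo(model_stats_t *s, size_t usize)
{
	s->nmalloc_large--;
	s->allocated_large -= usize;
}

static void *
model_alloc_large(model_stats_t *s, size_t usize)
{
	/* Optimistically update stats before the expensive attempt... */
	model_stats_update(s, usize);
	void *ret = malloc(usize);	/* stands in for the hard path */
	if (ret == NULL) {
		/* ...and revert them on failure, as *_hard() does. */
		model_stats_update_undo(s, usize);
	}
	return ret;
}

int
main(void)
{
	model_stats_t stats = {0, 0};
	void *p = model_alloc_large(&stats, 4096);
	free(p);
	return 0;
}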
src/chunk_dss.c

@@ -78,7 +78,7 @@ chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
 
 		/*
 		 * sbrk() uses a signed increment argument, so take care not to
-		 * interpret a huge allocation request as a negative increment.
+		 * interpret a large allocation request as a negative increment.
 		 */
 		if ((intptr_t)size < 0)
 			return (NULL);
src/ckh.c

@@ -267,7 +267,7 @@ ckh_grow(tsdn_t *tsdn, ckh_t *ckh)
 
 		lg_curcells++;
 		usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
-		if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) {
+		if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
 			ret = true;
 			goto label_return;
 		}
@@ -315,7 +315,7 @@ ckh_shrink(tsdn_t *tsdn, ckh_t *ckh)
 	lg_prevbuckets = ckh->lg_curbuckets;
 	lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
 	usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
-	if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
+	if (unlikely(usize == 0 || usize > LARGE_MAXCLASS))
 		return;
 	tab = (ckhc_t *)ipallocztm(tsdn, usize, CACHELINE, true, NULL, true,
 	    arena_ichoose(tsdn, NULL));
@@ -390,7 +390,7 @@ ckh_new(tsdn_t *tsdn, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
 	ckh->keycomp = keycomp;
 
 	usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
-	if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) {
+	if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
 		ret = true;
 		goto label_return;
 	}
src/ctl.c | 146

@@ -126,8 +126,8 @@ CTL_PROTO(arenas_bin_i_size)
 CTL_PROTO(arenas_bin_i_nregs)
 CTL_PROTO(arenas_bin_i_slab_size)
 INDEX_PROTO(arenas_bin_i)
-CTL_PROTO(arenas_hchunk_i_size)
-INDEX_PROTO(arenas_hchunk_i)
+CTL_PROTO(arenas_lextent_i_size)
+INDEX_PROTO(arenas_lextent_i)
 CTL_PROTO(arenas_narenas)
 CTL_PROTO(arenas_initialized)
 CTL_PROTO(arenas_lg_dirty_mult)
@@ -137,7 +137,7 @@ CTL_PROTO(arenas_page)
 CTL_PROTO(arenas_tcache_max)
 CTL_PROTO(arenas_nbins)
 CTL_PROTO(arenas_nhbins)
-CTL_PROTO(arenas_nhchunks)
+CTL_PROTO(arenas_nlextents)
 CTL_PROTO(arenas_extend)
 CTL_PROTO(prof_thread_active_init)
 CTL_PROTO(prof_active)
@@ -150,10 +150,10 @@ CTL_PROTO(stats_arenas_i_small_allocated)
 CTL_PROTO(stats_arenas_i_small_nmalloc)
 CTL_PROTO(stats_arenas_i_small_ndalloc)
 CTL_PROTO(stats_arenas_i_small_nrequests)
-CTL_PROTO(stats_arenas_i_huge_allocated)
-CTL_PROTO(stats_arenas_i_huge_nmalloc)
-CTL_PROTO(stats_arenas_i_huge_ndalloc)
-CTL_PROTO(stats_arenas_i_huge_nrequests)
+CTL_PROTO(stats_arenas_i_large_allocated)
+CTL_PROTO(stats_arenas_i_large_nmalloc)
+CTL_PROTO(stats_arenas_i_large_ndalloc)
+CTL_PROTO(stats_arenas_i_large_nrequests)
 CTL_PROTO(stats_arenas_i_bins_j_nmalloc)
 CTL_PROTO(stats_arenas_i_bins_j_ndalloc)
 CTL_PROTO(stats_arenas_i_bins_j_nrequests)
@@ -164,11 +164,11 @@ CTL_PROTO(stats_arenas_i_bins_j_nslabs)
 CTL_PROTO(stats_arenas_i_bins_j_nreslabs)
 CTL_PROTO(stats_arenas_i_bins_j_curslabs)
 INDEX_PROTO(stats_arenas_i_bins_j)
-CTL_PROTO(stats_arenas_i_hchunks_j_nmalloc)
-CTL_PROTO(stats_arenas_i_hchunks_j_ndalloc)
-CTL_PROTO(stats_arenas_i_hchunks_j_nrequests)
-CTL_PROTO(stats_arenas_i_hchunks_j_curhchunks)
-INDEX_PROTO(stats_arenas_i_hchunks_j)
+CTL_PROTO(stats_arenas_i_lextents_j_nmalloc)
+CTL_PROTO(stats_arenas_i_lextents_j_ndalloc)
+CTL_PROTO(stats_arenas_i_lextents_j_nrequests)
+CTL_PROTO(stats_arenas_i_lextents_j_curlextents)
+INDEX_PROTO(stats_arenas_i_lextents_j)
 CTL_PROTO(stats_arenas_i_nthreads)
 CTL_PROTO(stats_arenas_i_dss)
 CTL_PROTO(stats_arenas_i_lg_dirty_mult)
@@ -310,15 +310,15 @@ static const ctl_indexed_node_t arenas_bin_node[] = {
 	{INDEX(arenas_bin_i)}
 };
 
-static const ctl_named_node_t arenas_hchunk_i_node[] = {
-	{NAME("size"), CTL(arenas_hchunk_i_size)}
+static const ctl_named_node_t arenas_lextent_i_node[] = {
+	{NAME("size"), CTL(arenas_lextent_i_size)}
 };
-static const ctl_named_node_t super_arenas_hchunk_i_node[] = {
-	{NAME(""), CHILD(named, arenas_hchunk_i)}
+static const ctl_named_node_t super_arenas_lextent_i_node[] = {
+	{NAME(""), CHILD(named, arenas_lextent_i)}
 };
 
-static const ctl_indexed_node_t arenas_hchunk_node[] = {
-	{INDEX(arenas_hchunk_i)}
+static const ctl_indexed_node_t arenas_lextent_node[] = {
+	{INDEX(arenas_lextent_i)}
 };
 
 static const ctl_named_node_t arenas_node[] = {
@@ -332,8 +332,8 @@ static const ctl_named_node_t arenas_node[] = {
 	{NAME("nbins"), CTL(arenas_nbins)},
 	{NAME("nhbins"), CTL(arenas_nhbins)},
 	{NAME("bin"), CHILD(indexed, arenas_bin)},
-	{NAME("nhchunks"), CTL(arenas_nhchunks)},
-	{NAME("hchunk"), CHILD(indexed, arenas_hchunk)},
+	{NAME("nlextents"), CTL(arenas_nlextents)},
+	{NAME("lextent"), CHILD(indexed, arenas_lextent)},
 	{NAME("extend"), CTL(arenas_extend)}
 };
 
@@ -359,11 +359,11 @@ static const ctl_named_node_t stats_arenas_i_small_node[] = {
 	{NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)}
 };
 
-static const ctl_named_node_t stats_arenas_i_huge_node[] = {
-	{NAME("allocated"), CTL(stats_arenas_i_huge_allocated)},
-	{NAME("nmalloc"), CTL(stats_arenas_i_huge_nmalloc)},
-	{NAME("ndalloc"), CTL(stats_arenas_i_huge_ndalloc)},
-	{NAME("nrequests"), CTL(stats_arenas_i_huge_nrequests)}
+static const ctl_named_node_t stats_arenas_i_large_node[] = {
+	{NAME("allocated"), CTL(stats_arenas_i_large_allocated)},
+	{NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)},
+	{NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)},
+	{NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)}
 };
 
 static const ctl_named_node_t stats_arenas_i_bins_j_node[] = {
@@ -385,18 +385,18 @@ static const ctl_indexed_node_t stats_arenas_i_bins_node[] = {
 	{INDEX(stats_arenas_i_bins_j)}
 };
 
-static const ctl_named_node_t stats_arenas_i_hchunks_j_node[] = {
-	{NAME("nmalloc"), CTL(stats_arenas_i_hchunks_j_nmalloc)},
-	{NAME("ndalloc"), CTL(stats_arenas_i_hchunks_j_ndalloc)},
-	{NAME("nrequests"), CTL(stats_arenas_i_hchunks_j_nrequests)},
-	{NAME("curhchunks"), CTL(stats_arenas_i_hchunks_j_curhchunks)}
+static const ctl_named_node_t stats_arenas_i_lextents_j_node[] = {
+	{NAME("nmalloc"), CTL(stats_arenas_i_lextents_j_nmalloc)},
+	{NAME("ndalloc"), CTL(stats_arenas_i_lextents_j_ndalloc)},
+	{NAME("nrequests"), CTL(stats_arenas_i_lextents_j_nrequests)},
+	{NAME("curlextents"), CTL(stats_arenas_i_lextents_j_curlextents)}
 };
-static const ctl_named_node_t super_stats_arenas_i_hchunks_j_node[] = {
-	{NAME(""), CHILD(named, stats_arenas_i_hchunks_j)}
+static const ctl_named_node_t super_stats_arenas_i_lextents_j_node[] = {
+	{NAME(""), CHILD(named, stats_arenas_i_lextents_j)}
 };
 
-static const ctl_indexed_node_t stats_arenas_i_hchunks_node[] = {
-	{INDEX(stats_arenas_i_hchunks_j)}
+static const ctl_indexed_node_t stats_arenas_i_lextents_node[] = {
+	{INDEX(stats_arenas_i_lextents_j)}
 };
 
 static const ctl_named_node_t stats_arenas_i_node[] = {
@@ -413,9 +413,9 @@ static const ctl_named_node_t stats_arenas_i_node[] = {
 	{NAME("purged"), CTL(stats_arenas_i_purged)},
 	{NAME("metadata"), CHILD(named, stats_arenas_i_metadata)},
 	{NAME("small"), CHILD(named, stats_arenas_i_small)},
-	{NAME("huge"), CHILD(named, stats_arenas_i_huge)},
+	{NAME("large"), CHILD(named, stats_arenas_i_large)},
 	{NAME("bins"), CHILD(indexed, stats_arenas_i_bins)},
-	{NAME("hchunks"), CHILD(indexed, stats_arenas_i_hchunks)}
+	{NAME("lextents"), CHILD(indexed, stats_arenas_i_lextents)}
 };
 static const ctl_named_node_t super_stats_arenas_i_node[] = {
 	{NAME(""), CHILD(named, stats_arenas_i)}
@@ -476,8 +476,8 @@ ctl_arena_clear(ctl_arena_stats_t *astats)
 		astats->ndalloc_small = 0;
 		astats->nrequests_small = 0;
 		memset(astats->bstats, 0, NBINS * sizeof(malloc_bin_stats_t));
-		memset(astats->hstats, 0, (NSIZES - NBINS) *
-		    sizeof(malloc_huge_stats_t));
+		memset(astats->lstats, 0, (NSIZES - NBINS) *
+		    sizeof(malloc_large_stats_t));
 	}
 }
 
@@ -490,7 +490,7 @@ ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_stats_t *cstats, arena_t *arena)
 	arena_stats_merge(tsdn, arena, &cstats->nthreads, &cstats->dss,
 	    &cstats->lg_dirty_mult, &cstats->decay_time,
 	    &cstats->pactive, &cstats->pdirty, &cstats->astats,
-	    cstats->bstats, cstats->hstats);
+	    cstats->bstats, cstats->lstats);
 
 	for (i = 0; i < NBINS; i++) {
 		cstats->allocated_small += cstats->bstats[i].curregs *
@@ -532,10 +532,12 @@ ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
 		sstats->ndalloc_small += astats->ndalloc_small;
 		sstats->nrequests_small += astats->nrequests_small;
 
-		sstats->astats.allocated_huge += astats->astats.allocated_huge;
-		sstats->astats.nmalloc_huge += astats->astats.nmalloc_huge;
-		sstats->astats.ndalloc_huge += astats->astats.ndalloc_huge;
-		sstats->astats.nrequests_huge += astats->astats.nrequests_huge;
+		sstats->astats.allocated_large +=
+		    astats->astats.allocated_large;
+		sstats->astats.nmalloc_large += astats->astats.nmalloc_large;
+		sstats->astats.ndalloc_large += astats->astats.ndalloc_large;
+		sstats->astats.nrequests_large +=
+		    astats->astats.nrequests_large;
 
 		for (i = 0; i < NBINS; i++) {
 			sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
@@ -556,12 +558,12 @@ ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
 		}
 
 		for (i = 0; i < NSIZES - NBINS; i++) {
-			sstats->hstats[i].nmalloc += astats->hstats[i].nmalloc;
-			sstats->hstats[i].ndalloc += astats->hstats[i].ndalloc;
-			sstats->hstats[i].nrequests +=
-			    astats->hstats[i].nrequests;
-			sstats->hstats[i].curhchunks +=
-			    astats->hstats[i].curhchunks;
+			sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc;
+			sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc;
+			sstats->lstats[i].nrequests +=
+			    astats->lstats[i].nrequests;
+			sstats->lstats[i].curlextents +=
+			    astats->lstats[i].curlextents;
 		}
 	}
 }
@@ -643,7 +645,7 @@ ctl_refresh(tsdn_t *tsdn)
 		    &base_mapped);
 		ctl_stats.allocated =
 		    ctl_stats.arenas[ctl_stats.narenas].allocated_small +
-		    ctl_stats.arenas[ctl_stats.narenas].astats.allocated_huge;
+		    ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large;
 		ctl_stats.active =
 		    (ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE);
 		ctl_stats.metadata = base_allocated +
@@ -1812,15 +1814,15 @@ arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
 	return (super_arenas_bin_i_node);
 }
 
-CTL_RO_NL_GEN(arenas_nhchunks, NSIZES - NBINS, unsigned)
-CTL_RO_NL_GEN(arenas_hchunk_i_size, index2size(NBINS+(szind_t)mib[2]), size_t)
+CTL_RO_NL_GEN(arenas_nlextents, NSIZES - NBINS, unsigned)
+CTL_RO_NL_GEN(arenas_lextent_i_size, index2size(NBINS+(szind_t)mib[2]), size_t)
 static const ctl_named_node_t *
-arenas_hchunk_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
+arenas_lextent_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
 {
 
 	if (i > NSIZES - NBINS)
 		return (NULL);
-	return (super_arenas_hchunk_i_node);
+	return (super_arenas_lextent_i_node);
 }
 
 static int
@@ -2012,14 +2014,14 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc,
     ctl_stats.arenas[mib[2]].ndalloc_small, uint64_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests,
     ctl_stats.arenas[mib[2]].nrequests_small, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_huge_allocated,
-    ctl_stats.arenas[mib[2]].astats.allocated_huge, size_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nmalloc,
-    ctl_stats.arenas[mib[2]].astats.nmalloc_huge, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_huge_ndalloc,
-    ctl_stats.arenas[mib[2]].astats.ndalloc_huge, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nrequests,
-    ctl_stats.arenas[mib[2]].astats.nmalloc_huge, uint64_t) /* Intentional. */
+CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated,
+    ctl_stats.arenas[mib[2]].astats.allocated_large, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc,
+    ctl_stats.arenas[mib[2]].astats.nmalloc_large, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
+    ctl_stats.arenas[mib[2]].astats.ndalloc_large, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
+    ctl_stats.arenas[mib[2]].astats.nmalloc_large, uint64_t) /* Intentional. */
 
 CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc,
     ctl_stats.arenas[mib[2]].bstats[mib[4]].nmalloc, uint64_t)
@@ -2050,23 +2052,23 @@ stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
 	return (super_stats_arenas_i_bins_j_node);
 }
 
-CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_nmalloc,
-    ctl_stats.arenas[mib[2]].hstats[mib[4]].nmalloc, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_ndalloc,
-    ctl_stats.arenas[mib[2]].hstats[mib[4]].ndalloc, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_nrequests,
-    ctl_stats.arenas[mib[2]].hstats[mib[4]].nrequests, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_curhchunks,
-    ctl_stats.arenas[mib[2]].hstats[mib[4]].curhchunks, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nmalloc,
+    ctl_stats.arenas[mib[2]].lstats[mib[4]].nmalloc, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_ndalloc,
+    ctl_stats.arenas[mib[2]].lstats[mib[4]].ndalloc, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nrequests,
+    ctl_stats.arenas[mib[2]].lstats[mib[4]].nrequests, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_curlextents,
+    ctl_stats.arenas[mib[2]].lstats[mib[4]].curlextents, size_t)
 
 static const ctl_named_node_t *
-stats_arenas_i_hchunks_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
+stats_arenas_i_lextents_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
     size_t j)
 {
 
 	if (j > NSIZES - NBINS)
 		return (NULL);
-	return (super_stats_arenas_i_hchunks_j_node);
+	return (super_stats_arenas_i_lextents_j_node);
 }
 
 static const ctl_named_node_t *
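For mallctl consumers, this rename is visible in the control namespace: arenas.nhchunks and arenas.hchunk.<j>.size become arenas.nlextents and arenas.lextent.<j>.size, and the stats.arenas.<i>.huge.* keys become stats.arenas.<i>.large.*. A minimal sketch of reading the renamed keys, assuming a jemalloc build that includes this rename and was configured with statistics enabled:

#include <stdio.h>
#include <stdint.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	uint64_t epoch = 1;
	size_t sz = sizeof(epoch);

	/* Refresh the stats snapshot before reading it. */
	mallctl("epoch", &epoch, &sz, &epoch, sz);

	unsigned nlextents;
	sz = sizeof(nlextents);
	if (mallctl("arenas.nlextents", &nlextents, &sz, NULL, 0) == 0)
		printf("large size classes: %u\n", nlextents);

	size_t allocated;
	sz = sizeof(allocated);
	if (mallctl("stats.arenas.0.large.allocated", &allocated, &sz,
	    NULL, 0) == 0)
		printf("arena 0 large allocated: %zu bytes\n", allocated);
	return (0);
}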
src/extent.c

@@ -40,7 +40,7 @@ extent_size_quantize_floor(size_t size)
 	pszind_t pind;
 
 	assert(size > 0);
-	assert(size - large_pad <= HUGE_MAXCLASS);
+	assert(size - large_pad <= LARGE_MAXCLASS);
 	assert((size & PAGE_MASK) == 0);
 
 	assert(size != 0);
@@ -77,7 +77,7 @@ extent_size_quantize_ceil(size_t size)
 	size_t ret;
 
 	assert(size > 0);
-	assert(size - large_pad <= HUGE_MAXCLASS);
+	assert(size - large_pad <= LARGE_MAXCLASS);
 	assert((size & PAGE_MASK) == 0);
 
 	ret = extent_size_quantize_floor(size);
src/jemalloc.c

@@ -1457,7 +1457,7 @@ ialloc_body(size_t size, bool zero, tsdn_t **tsdn, size_t *usize,
 
 	if (config_stats || (config_prof && opt_prof)) {
 		*usize = index2size(ind);
-		assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
+		assert(*usize > 0 && *usize <= LARGE_MAXCLASS);
 	}
 
 	if (config_prof && opt_prof)
@@ -1589,7 +1589,7 @@ imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
 	}
 
 	usize = sa2u(size, alignment);
-	if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) {
+	if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
 		result = NULL;
 		goto label_oom;
 	}
@@ -1663,7 +1663,7 @@ je_calloc(size_t num, size_t size)
 		if (num == 0 || size == 0)
 			num_size = 1;
 		else
-			num_size = HUGE_MAXCLASS + 1; /* Trigger OOM. */
+			num_size = LARGE_MAXCLASS + 1; /* Trigger OOM. */
 	/*
 	 * Try to avoid division here. We know that it isn't possible to
 	 * overflow during multiplication if neither operand uses any of the
@@ -1671,7 +1671,7 @@ je_calloc(size_t num, size_t size)
 	 */
 	} else if (unlikely(((num | size) & (SIZE_T_MAX << (sizeof(size_t) <<
 	    2))) && (num_size / size != num)))
-		num_size = HUGE_MAXCLASS + 1; /* size_t overflow. */
+		num_size = LARGE_MAXCLASS + 1; /* size_t overflow. */
 
 	if (likely(!malloc_slow)) {
 		ret = ialloc_body(num_size, true, &tsdn, &usize, false);
@@ -1819,7 +1819,7 @@ je_realloc(void *ptr, size_t size)
 		old_usize = isalloc(tsd_tsdn(tsd), extent, ptr);
 		if (config_prof && opt_prof) {
 			usize = s2u(size);
-			ret = unlikely(usize == 0 || usize > HUGE_MAXCLASS) ?
+			ret = unlikely(usize == 0 || usize > LARGE_MAXCLASS) ?
 			    NULL : irealloc_prof(tsd, extent, ptr, old_usize,
 			    usize);
 		} else {
@@ -1956,7 +1956,7 @@ imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
 		*alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
 		*usize = sa2u(size, *alignment);
 	}
-	if (unlikely(*usize == 0 || *usize > HUGE_MAXCLASS))
+	if (unlikely(*usize == 0 || *usize > LARGE_MAXCLASS))
 		return (true);
 	*zero = MALLOCX_ZERO_GET(flags);
 	if ((flags & MALLOCX_TCACHE_MASK) != 0) {
@@ -2084,7 +2084,7 @@ imallocx_body(size_t size, int flags, tsdn_t **tsdn, size_t *usize,
 		return (NULL);
 	if (config_stats || (config_prof && opt_prof)) {
 		*usize = index2size(ind);
-		assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
+		assert(*usize > 0 && *usize <= LARGE_MAXCLASS);
 	}
 
 	if (config_prof && opt_prof) {
@@ -2233,7 +2233,7 @@ je_rallocx(void *ptr, size_t size, int flags)
 
 	if (config_prof && opt_prof) {
 		usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
-		if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
+		if (unlikely(usize == 0 || usize > LARGE_MAXCLASS))
 			goto label_oom;
 		p = irallocx_prof(tsd, extent, ptr, old_usize, size, alignment,
 		    &usize, zero, tcache, arena);
@@ -2314,17 +2314,17 @@ ixallocx_prof(tsd_t *tsd, extent_t *extent, void *ptr, size_t old_usize,
 	 */
 	if (alignment == 0) {
 		usize_max = s2u(size+extra);
-		assert(usize_max > 0 && usize_max <= HUGE_MAXCLASS);
+		assert(usize_max > 0 && usize_max <= LARGE_MAXCLASS);
 	} else {
 		usize_max = sa2u(size+extra, alignment);
-		if (unlikely(usize_max == 0 || usize_max > HUGE_MAXCLASS)) {
+		if (unlikely(usize_max == 0 || usize_max > LARGE_MAXCLASS)) {
 			/*
 			 * usize_max is out of range, and chances are that
 			 * allocation will fail, but use the maximum possible
 			 * value and carry on with prof_alloc_prep(), just in
 			 * case allocation succeeds.
 			 */
-			usize_max = HUGE_MAXCLASS;
+			usize_max = LARGE_MAXCLASS;
 		}
 	}
 	tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);
@@ -2368,18 +2368,18 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
 	/*
 	 * The API explicitly absolves itself of protecting against (size +
 	 * extra) numerical overflow, but we may need to clamp extra to avoid
-	 * exceeding HUGE_MAXCLASS.
+	 * exceeding LARGE_MAXCLASS.
 	 *
 	 * Ordinarily, size limit checking is handled deeper down, but here we
 	 * have to check as part of (size + extra) clamping, since we need the
 	 * clamped value in the above helper functions.
 	 */
-	if (unlikely(size > HUGE_MAXCLASS)) {
+	if (unlikely(size > LARGE_MAXCLASS)) {
 		usize = old_usize;
 		goto label_not_resized;
 	}
-	if (unlikely(HUGE_MAXCLASS - size < extra))
-		extra = HUGE_MAXCLASS - size;
+	if (unlikely(LARGE_MAXCLASS - size < extra))
+		extra = LARGE_MAXCLASS - size;
 
 	if (config_prof && opt_prof) {
 		usize = ixallocx_prof(tsd, extent, ptr, old_usize, size, extra,
@@ -2512,7 +2512,7 @@ je_nallocx(size_t size, int flags)
 	witness_assert_lockless(tsdn);
 
 	usize = inallocx(tsdn, size, flags);
-	if (unlikely(usize > HUGE_MAXCLASS))
+	if (unlikely(usize > LARGE_MAXCLASS))
 		return (0);
 
 	witness_assert_lockless(tsdn);
src/huge.c → src/large.c

@@ -1,19 +1,19 @@
-#define JEMALLOC_HUGE_C_
+#define JEMALLOC_LARGE_C_
 #include "jemalloc/internal/jemalloc_internal.h"
 
 /******************************************************************************/
 
 void *
-huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero)
+large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero)
 {
 
 	assert(usize == s2u(usize));
 
-	return (huge_palloc(tsdn, arena, usize, CACHELINE, zero));
+	return (large_palloc(tsdn, arena, usize, CACHELINE, zero));
 }
 
 void *
-huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
+large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
     bool zero)
 {
 	size_t ausize;
@@ -24,7 +24,7 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
 	assert(!tsdn_null(tsdn) || arena != NULL);
 
 	ausize = sa2u(usize, alignment);
-	if (unlikely(ausize == 0 || ausize > HUGE_MAXCLASS))
+	if (unlikely(ausize == 0 || ausize > LARGE_MAXCLASS))
 		return (NULL);
 
 	/*
@@ -34,15 +34,15 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
 	is_zeroed = zero;
 	if (likely(!tsdn_null(tsdn)))
 		arena = arena_choose(tsdn_tsd(tsdn), arena);
-	if (unlikely(arena == NULL) || (extent = arena_chunk_alloc_huge(tsdn,
+	if (unlikely(arena == NULL) || (extent = arena_chunk_alloc_large(tsdn,
 	    arena, usize, alignment, &is_zeroed)) == NULL)
 		return (NULL);
 
-	/* Insert extent into huge. */
-	malloc_mutex_lock(tsdn, &arena->huge_mtx);
+	/* Insert extent into large. */
+	malloc_mutex_lock(tsdn, &arena->large_mtx);
 	ql_elm_new(extent, ql_link);
-	ql_tail_insert(&arena->huge, extent, ql_link);
-	malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+	ql_tail_insert(&arena->large, extent, ql_link);
+	malloc_mutex_unlock(tsdn, &arena->large_mtx);
 	if (config_prof && arena_prof_accum(tsdn, arena, usize))
 		prof_idump(tsdn);
 
@@ -61,23 +61,23 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
 }
 
 #ifdef JEMALLOC_JET
-#undef huge_dalloc_junk
-#define huge_dalloc_junk JEMALLOC_N(n_huge_dalloc_junk)
+#undef large_dalloc_junk
+#define large_dalloc_junk JEMALLOC_N(n_large_dalloc_junk)
 #endif
 void
-huge_dalloc_junk(void *ptr, size_t usize)
+large_dalloc_junk(void *ptr, size_t usize)
 {
 
 	memset(ptr, JEMALLOC_FREE_JUNK, usize);
 }
 #ifdef JEMALLOC_JET
-#undef huge_dalloc_junk
-#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
-huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(n_huge_dalloc_junk);
+#undef large_dalloc_junk
+#define large_dalloc_junk JEMALLOC_N(large_dalloc_junk)
+large_dalloc_junk_t *large_dalloc_junk = JEMALLOC_N(n_large_dalloc_junk);
 #endif
 
 static void
-huge_dalloc_maybe_junk(tsdn_t *tsdn, void *ptr, size_t usize)
+large_dalloc_maybe_junk(tsdn_t *tsdn, void *ptr, size_t usize)
 {
 
 	if (config_fill && have_dss && unlikely(opt_junk_free)) {
@@ -86,13 +86,13 @@ huge_dalloc_maybe_junk(tsdn_t *tsdn, void *ptr, size_t usize)
 		 * unmapped.
 		 */
 		if (!config_munmap || (have_dss && chunk_in_dss(tsdn, ptr)))
-			huge_dalloc_junk(ptr, usize);
+			large_dalloc_junk(ptr, usize);
 		else
 			memset(ptr, JEMALLOC_FREE_JUNK, usize);
 	}
 }
 
 static bool
-huge_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize)
+large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize)
 {
 	arena_t *arena = extent_arena_get(extent);
 	size_t oldusize = extent_usize_get(extent);
@@ -109,20 +109,20 @@ huge_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize)
 			return (true);
 
 		if (config_fill && unlikely(opt_junk_free)) {
-			huge_dalloc_maybe_junk(tsdn, extent_addr_get(trail),
+			large_dalloc_maybe_junk(tsdn, extent_addr_get(trail),
 			    extent_usize_get(trail));
 		}
 
 		arena_chunk_cache_dalloc(tsdn, arena, &chunk_hooks, trail);
 	}
 
-	arena_chunk_ralloc_huge_shrink(tsdn, arena, extent, oldusize);
+	arena_chunk_ralloc_large_shrink(tsdn, arena, extent, oldusize);
 
 	return (false);
 }
 
 static bool
-huge_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
+large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
     bool zero)
 {
 	arena_t *arena = extent_arena_get(extent);
@@ -173,34 +173,35 @@ huge_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
 		    JEMALLOC_ALLOC_JUNK, usize - oldusize);
 	}
 
-	arena_chunk_ralloc_huge_expand(tsdn, arena, extent, oldusize);
+	arena_chunk_ralloc_large_expand(tsdn, arena, extent, oldusize);
 
 	return (false);
 }
 
 bool
-huge_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
+large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
     size_t usize_max, bool zero)
 {
 
 	assert(s2u(extent_usize_get(extent)) == extent_usize_get(extent));
 	/* The following should have been caught by callers. */
-	assert(usize_min > 0 && usize_max <= HUGE_MAXCLASS);
-	/* Both allocation sizes must be huge to avoid a move. */
+	assert(usize_min > 0 && usize_max <= LARGE_MAXCLASS);
+	/* Both allocation sizes must be large to avoid a move. */
 	assert(extent_usize_get(extent) >= LARGE_MINCLASS && usize_max >=
 	    LARGE_MINCLASS);
 
 	if (usize_max > extent_usize_get(extent)) {
 		/* Attempt to expand the allocation in-place. */
-		if (!huge_ralloc_no_move_expand(tsdn, extent, usize_max,
+		if (!large_ralloc_no_move_expand(tsdn, extent, usize_max,
 		    zero)) {
 			arena_decay_tick(tsdn, extent_arena_get(extent));
 			return (false);
 		}
 		/* Try again, this time with usize_min. */
 		if (usize_min < usize_max && usize_min >
-		    extent_usize_get(extent) && huge_ralloc_no_move_expand(tsdn,
-		    extent, usize_min, zero)) {
+		    extent_usize_get(extent) &&
+		    large_ralloc_no_move_expand(tsdn, extent, usize_min,
+		    zero)) {
 			arena_decay_tick(tsdn, extent_arena_get(extent));
 			return (false);
 		}
@@ -218,7 +219,7 @@ huge_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
 
 	/* Attempt to shrink the allocation in-place. */
 	if (extent_usize_get(extent) > usize_max) {
-		if (!huge_ralloc_no_move_shrink(tsdn, extent, usize_max)) {
+		if (!large_ralloc_no_move_shrink(tsdn, extent, usize_max)) {
 			arena_decay_tick(tsdn, extent_arena_get(extent));
 			return (false);
 		}
@@ -227,30 +228,30 @@ huge_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
 }
 
 static void *
-huge_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
+large_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
     size_t alignment, bool zero)
 {
 
 	if (alignment <= CACHELINE)
-		return (huge_malloc(tsdn, arena, usize, zero));
-	return (huge_palloc(tsdn, arena, usize, alignment, zero));
+		return (large_malloc(tsdn, arena, usize, zero));
+	return (large_palloc(tsdn, arena, usize, alignment, zero));
 }
 
 void *
-huge_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize,
+large_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize,
     size_t alignment, bool zero, tcache_t *tcache)
 {
 	void *ret;
 	size_t copysize;
 
 	/* The following should have been caught by callers. */
-	assert(usize > 0 && usize <= HUGE_MAXCLASS);
-	/* Both allocation sizes must be huge to avoid a move. */
+	assert(usize > 0 && usize <= LARGE_MAXCLASS);
+	/* Both allocation sizes must be large to avoid a move. */
 	assert(extent_usize_get(extent) >= LARGE_MINCLASS && usize >=
 	    LARGE_MINCLASS);
 
 	/* Try to avoid moving the allocation. */
-	if (!huge_ralloc_no_move(tsdn, extent, usize, usize, zero))
+	if (!large_ralloc_no_move(tsdn, extent, usize, usize, zero))
 		return (extent_addr_get(extent));
 
 	/*
@@ -258,7 +259,7 @@ huge_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize,
 	 * different size class. In that case, fall back to allocating new
 	 * space and copying.
 	 */
-	ret = huge_ralloc_move_helper(tsdn, arena, usize, alignment, zero);
+	ret = large_ralloc_move_helper(tsdn, arena, usize, alignment, zero);
 	if (ret == NULL)
 		return (NULL);
 
@@ -271,82 +272,82 @@ huge_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize,
 }
 
 static void
-huge_dalloc_impl(tsdn_t *tsdn, extent_t *extent, bool junked_locked)
+large_dalloc_impl(tsdn_t *tsdn, extent_t *extent, bool junked_locked)
 {
 	arena_t *arena;
 
 	arena = extent_arena_get(extent);
 	if (!junked_locked)
-		malloc_mutex_lock(tsdn, &arena->huge_mtx);
-	ql_remove(&arena->huge, extent, ql_link);
+		malloc_mutex_lock(tsdn, &arena->large_mtx);
+	ql_remove(&arena->large, extent, ql_link);
 	if (!junked_locked) {
-		malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+		malloc_mutex_unlock(tsdn, &arena->large_mtx);
 
-		huge_dalloc_maybe_junk(tsdn, extent_addr_get(extent),
+		large_dalloc_maybe_junk(tsdn, extent_addr_get(extent),
 		    extent_usize_get(extent));
 	}
-	arena_chunk_dalloc_huge(tsdn, arena, extent, junked_locked);
+	arena_chunk_dalloc_large(tsdn, arena, extent, junked_locked);
 
 	if (!junked_locked)
 		arena_decay_tick(tsdn, arena);
 }
 
 void
-huge_dalloc_junked_locked(tsdn_t *tsdn, extent_t *extent)
+large_dalloc_junked_locked(tsdn_t *tsdn, extent_t *extent)
 {
 
-	huge_dalloc_impl(tsdn, extent, true);
+	large_dalloc_impl(tsdn, extent, true);
 }
 
 void
-huge_dalloc(tsdn_t *tsdn, extent_t *extent)
+large_dalloc(tsdn_t *tsdn, extent_t *extent)
 {
 
-	huge_dalloc_impl(tsdn, extent, false);
+	large_dalloc_impl(tsdn, extent, false);
 }
 
 size_t
-huge_salloc(tsdn_t *tsdn, const extent_t *extent)
+large_salloc(tsdn_t *tsdn, const extent_t *extent)
 {
 	size_t usize;
 	arena_t *arena;
 
 	arena = extent_arena_get(extent);
-	malloc_mutex_lock(tsdn, &arena->huge_mtx);
+	malloc_mutex_lock(tsdn, &arena->large_mtx);
 	usize = extent_usize_get(extent);
-	malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+	malloc_mutex_unlock(tsdn, &arena->large_mtx);
 
 	return (usize);
 }
 
 prof_tctx_t *
-huge_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent)
+large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent)
 {
 	prof_tctx_t *tctx;
 	arena_t *arena;
 
 	arena = extent_arena_get(extent);
-	malloc_mutex_lock(tsdn, &arena->huge_mtx);
+	malloc_mutex_lock(tsdn, &arena->large_mtx);
 	tctx = extent_prof_tctx_get(extent);
-	malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+	malloc_mutex_unlock(tsdn, &arena->large_mtx);
 
 	return (tctx);
 }
 
 void
-huge_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx)
+large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx)
 {
 	arena_t *arena;
 
 	arena = extent_arena_get(extent);
-	malloc_mutex_lock(tsdn, &arena->huge_mtx);
+	malloc_mutex_lock(tsdn, &arena->large_mtx);
 	extent_prof_tctx_set(extent, tctx);
-	malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+	malloc_mutex_unlock(tsdn, &arena->large_mtx);
 }
 
 void
-huge_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent)
+large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent)
 {
 
-	huge_prof_tctx_set(tsdn, extent, (prof_tctx_t *)(uintptr_t)1U);
+	large_prof_tctx_set(tsdn, extent, (prof_tctx_t *)(uintptr_t)1U);
 }
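The JEMALLOC_JET block above keeps the testable junk-fill hook working under its new name: the real function is compiled under an internal name and re-exported through a function pointer, so tests can interpose on large_dalloc_junk. A standalone sketch of that interposition pattern, with illustrative names rather than jemalloc internals:

#include <stdio.h>
#include <string.h>

static void
real_dalloc_junk(void *ptr, size_t usize)
{
	memset(ptr, 0x5a, usize);	/* stand-in for JEMALLOC_FREE_JUNK */
}

/* Tests overwrite this pointer to observe junk-fill calls. */
static void (*dalloc_junk)(void *, size_t) = real_dalloc_junk;

static void
test_spy(void *ptr, size_t usize)
{
	printf("junk-filling %zu bytes\n", usize);
	real_dalloc_junk(ptr, usize);
}

int
main(void)
{
	char buf[16];

	dalloc_junk = test_spy;		/* interpose, as a JET test would */
	dalloc_junk(buf, sizeof(buf));
	return 0;
}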
src/stats.c | 71

@@ -37,10 +37,10 @@ size_t stats_cactive = 0;
 
 static void stats_arena_bins_print(void (*write_cb)(void *, const char *),
     void *cbopaque, unsigned i);
-static void stats_arena_hchunks_print(
+static void stats_arena_lextents_print(
     void (*write_cb)(void *, const char *), void *cbopaque, unsigned i);
 static void stats_arena_print(void (*write_cb)(void *, const char *),
-    void *cbopaque, unsigned i, bool bins, bool huge);
+    void *cbopaque, unsigned i, bool bins, bool large);
 
 /******************************************************************************/
 
@@ -157,34 +157,34 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
 }
 
 static void
-stats_arena_hchunks_print(void (*write_cb)(void *, const char *),
+stats_arena_lextents_print(void (*write_cb)(void *, const char *),
     void *cbopaque, unsigned i)
 {
-	unsigned nbins, nhchunks, j;
+	unsigned nbins, nlextents, j;
 	bool in_gap;
 
 	malloc_cprintf(write_cb, cbopaque,
-	    "huge: size ind allocated nmalloc ndalloc"
-	    " nrequests curhchunks\n");
+	    "large: size ind allocated nmalloc ndalloc"
+	    " nrequests curlextents\n");
 	CTL_GET("arenas.nbins", &nbins, unsigned);
-	CTL_GET("arenas.nhchunks", &nhchunks, unsigned);
-	for (j = 0, in_gap = false; j < nhchunks; j++) {
+	CTL_GET("arenas.nlextents", &nlextents, unsigned);
+	for (j = 0, in_gap = false; j < nlextents; j++) {
 		uint64_t nmalloc, ndalloc, nrequests;
-		size_t hchunk_size, curhchunks;
+		size_t lextent_size, curlextents;
 
-		CTL_M2_M4_GET("stats.arenas.0.hchunks.0.nmalloc", i, j,
+		CTL_M2_M4_GET("stats.arenas.0.lextents.0.nmalloc", i, j,
 		    &nmalloc, uint64_t);
-		CTL_M2_M4_GET("stats.arenas.0.hchunks.0.ndalloc", i, j,
+		CTL_M2_M4_GET("stats.arenas.0.lextents.0.ndalloc", i, j,
 		    &ndalloc, uint64_t);
-		CTL_M2_M4_GET("stats.arenas.0.hchunks.0.nrequests", i, j,
+		CTL_M2_M4_GET("stats.arenas.0.lextents.0.nrequests", i, j,
 		    &nrequests, uint64_t);
 		if (nrequests == 0)
 			in_gap = true;
 		else {
-			CTL_M2_GET("arenas.hchunk.0.size", j, &hchunk_size,
+			CTL_M2_GET("arenas.lextent.0.size", j, &lextent_size,
 			    size_t);
-			CTL_M2_M4_GET("stats.arenas.0.hchunks.0.curhchunks", i,
-			    j, &curhchunks, size_t);
+			CTL_M2_M4_GET("stats.arenas.0.lextents.0.curlextents",
+			    i, j, &curlextents, size_t);
 			if (in_gap) {
 				malloc_cprintf(write_cb, cbopaque,
 				    " ---\n");
@@ -193,9 +193,9 @@ stats_arena_hchunks_print(void (*write_cb)(void *, const char *),
 			malloc_cprintf(write_cb, cbopaque,
 			    "%20zu %3u %12zu %12"FMTu64" %12"FMTu64
 			    " %12"FMTu64" %12zu\n",
-			    hchunk_size, nbins + j,
-			    curhchunks * hchunk_size, nmalloc, ndalloc,
-			    nrequests, curhchunks);
+			    lextent_size, nbins + j,
+			    curlextents * lextent_size, nmalloc, ndalloc,
+			    nrequests, curlextents);
 		}
 	}
 	if (in_gap) {
@@ -206,7 +206,7 @@ stats_arena_hchunks_print(void (*write_cb)(void *, const char *),
 
 static void
 stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
-    unsigned i, bool bins, bool huge)
+    unsigned i, bool bins, bool large)
 {
 	unsigned nthreads;
 	const char *dss;
@@ -216,8 +216,8 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
 	uint64_t npurge, nmadvise, purged;
 	size_t small_allocated;
 	uint64_t small_nmalloc, small_ndalloc, small_nrequests;
-	size_t huge_allocated;
-	uint64_t huge_nmalloc, huge_ndalloc, huge_nrequests;
+	size_t large_allocated;
+	uint64_t large_nmalloc, large_ndalloc, large_nrequests;
 
 	CTL_GET("arenas.page", &page, size_t);
 
@@ -268,20 +268,21 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
 	    "small: %12zu %12"FMTu64" %12"FMTu64
 	    " %12"FMTu64"\n",
 	    small_allocated, small_nmalloc, small_ndalloc, small_nrequests);
-	CTL_M2_GET("stats.arenas.0.huge.allocated", i, &huge_allocated, size_t);
-	CTL_M2_GET("stats.arenas.0.huge.nmalloc", i, &huge_nmalloc, uint64_t);
-	CTL_M2_GET("stats.arenas.0.huge.ndalloc", i, &huge_ndalloc, uint64_t);
-	CTL_M2_GET("stats.arenas.0.huge.nrequests", i, &huge_nrequests,
+	CTL_M2_GET("stats.arenas.0.large.allocated", i, &large_allocated,
+	    size_t);
+	CTL_M2_GET("stats.arenas.0.large.nmalloc", i, &large_nmalloc, uint64_t);
+	CTL_M2_GET("stats.arenas.0.large.ndalloc", i, &large_ndalloc, uint64_t);
+	CTL_M2_GET("stats.arenas.0.large.nrequests", i, &large_nrequests,
 	    uint64_t);
 	malloc_cprintf(write_cb, cbopaque,
-	    "huge: %12zu %12"FMTu64" %12"FMTu64
+	    "large: %12zu %12"FMTu64" %12"FMTu64
 	    " %12"FMTu64"\n",
-	    huge_allocated, huge_nmalloc, huge_ndalloc, huge_nrequests);
+	    large_allocated, large_nmalloc, large_ndalloc, large_nrequests);
 	malloc_cprintf(write_cb, cbopaque,
 	    "total: %12zu %12"FMTu64" %12"FMTu64
 	    " %12"FMTu64"\n",
-	    small_allocated + huge_allocated, small_nmalloc + huge_nmalloc,
-	    small_ndalloc + huge_ndalloc, small_nrequests + huge_nrequests);
+	    small_allocated + large_allocated, small_nmalloc + large_nmalloc,
+	    small_ndalloc + large_ndalloc, small_nrequests + large_nrequests);
 	malloc_cprintf(write_cb, cbopaque,
 	    "active: %12zu\n", pactive * page);
 	CTL_M2_GET("stats.arenas.0.mapped", i, &mapped, size_t);
@@ -300,8 +301,8 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
 
 	if (bins)
 		stats_arena_bins_print(write_cb, cbopaque, i);
-	if (huge)
-		stats_arena_hchunks_print(write_cb, cbopaque, i);
+	if (large)
+		stats_arena_lextents_print(write_cb, cbopaque, i);
 }
 
 void
@@ -315,7 +316,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 	bool merged = true;
 	bool unmerged = true;
 	bool bins = true;
-	bool huge = true;
+	bool large = true;
 
 	/*
	 * Refresh stats, in case mallctl() was called by the application.
@@ -356,7 +357,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 				bins = false;
 				break;
 			case 'l':
-				huge = false;
+				large = false;
 				break;
 			default:;
 			}
@@ -568,7 +569,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 				malloc_cprintf(write_cb, cbopaque,
 				    "\nMerged arenas stats:\n");
 				stats_arena_print(write_cb, cbopaque,
-				    narenas, bins, huge);
+				    narenas, bins, large);
 			}
 		}
 	}
@@ -594,7 +595,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 					    cbopaque,
 					    "\narenas[%u]:\n", i);
 					stats_arena_print(write_cb,
-					    cbopaque, i, bins, huge);
+					    cbopaque, i, bins, large);
 				}
 			}
 		}
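The case 'l' handler above means the 'l' character in the malloc_stats_print() opts string now suppresses the "large:" tables (it previously suppressed the "huge:" tables); 'b' similarly suppresses per-bin stats, as the adjacent case shows. A minimal usage sketch, assuming the default non-prefixed jemalloc API:

#include <stdlib.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	void *p = malloc(2 * 1024 * 1024);	/* likely a large allocation */

	/* "l" omits the per-size-class large-extent tables; "bl" would
	 * omit bin stats as well. */
	malloc_stats_print(NULL, NULL, "l");
	free(p);
	return 0;
}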
src/tcache.c | 28

@@ -46,7 +46,7 @@ tcache_event_hard(tsd_t *tsd, tcache_t *tcache)
 		    tbin->ncached - tbin->low_water + (tbin->low_water
 		    >> 2));
 	} else {
-		tcache_bin_flush_huge(tsd, tbin, binind, tbin->ncached
+		tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached
 		    - tbin->low_water + (tbin->low_water >> 2), tcache);
 	}
 	/*
@@ -164,7 +164,7 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
 }
 
 void
-tcache_bin_flush_huge(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
+tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
     unsigned rem, tcache_t *tcache)
 {
 	arena_t *arena;
@@ -194,9 +194,9 @@ tcache_bin_flush_huge(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
 			}
 			if (config_stats) {
 				merged_stats = true;
-				arena->stats.nrequests_huge +=
+				arena->stats.nrequests_large +=
 				    tbin->tstats.nrequests;
-				arena->stats.hstats[binind - NBINS].nrequests +=
+				arena->stats.lstats[binind - NBINS].nrequests +=
 				    tbin->tstats.nrequests;
 				tbin->tstats.nrequests = 0;
 			}
@@ -207,7 +207,7 @@ tcache_bin_flush_huge(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
 			assert(ptr != NULL);
 			extent = iealloc(tsd_tsdn(tsd), ptr);
 			if (extent_arena_get(extent) == locked_arena) {
-				huge_dalloc_junked_locked(tsd_tsdn(tsd),
+				large_dalloc_junked_locked(tsd_tsdn(tsd),
 				    extent);
 			} else {
 				/*
@@ -232,8 +232,8 @@ tcache_bin_flush_huge(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
 		 * arena, so the stats didn't get merged. Manually do so now.
 		 */
 		malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
-		arena->stats.nrequests_huge += tbin->tstats.nrequests;
-		arena->stats.hstats[binind - NBINS].nrequests +=
+		arena->stats.nrequests_large += tbin->tstats.nrequests;
+		arena->stats.lstats[binind - NBINS].nrequests +=
 		    tbin->tstats.nrequests;
 		tbin->tstats.nrequests = 0;
 		malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
@@ -371,12 +371,12 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
 
 	for (; i < nhbins; i++) {
 		tcache_bin_t *tbin = &tcache->tbins[i];
-		tcache_bin_flush_huge(tsd, tbin, i, 0, tcache);
+		tcache_bin_flush_large(tsd, tbin, i, 0, tcache);
 
 		if (config_stats && tbin->tstats.nrequests != 0) {
 			malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
-			arena->stats.nrequests_huge += tbin->tstats.nrequests;
-			arena->stats.hstats[i - NBINS].nrequests +=
+			arena->stats.nrequests_large += tbin->tstats.nrequests;
+			arena->stats.lstats[i - NBINS].nrequests +=
 			    tbin->tstats.nrequests;
 			malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
 		}
@@ -431,10 +431,10 @@ tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
 	}
 
 	for (; i < nhbins; i++) {
-		malloc_huge_stats_t *hstats = &arena->stats.hstats[i - NBINS];
+		malloc_large_stats_t *lstats = &arena->stats.lstats[i - NBINS];
 		tcache_bin_t *tbin = &tcache->tbins[i];
-		arena->stats.nrequests_huge += tbin->tstats.nrequests;
-		hstats->nrequests += tbin->tstats.nrequests;
+		arena->stats.nrequests_large += tbin->tstats.nrequests;
+		lstats->nrequests += tbin->tstats.nrequests;
 		tbin->tstats.nrequests = 0;
 	}
 }
@@ -537,7 +537,7 @@ tcache_boot(tsdn_t *tsdn)
 		stack_nelms += tcache_bin_info[i].ncached_max;
 	}
 	for (; i < nhbins; i++) {
-		tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_HUGE;
+		tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE;
 		stack_nelms += tcache_bin_info[i].ncached_max;
 	}
 
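Illustrative arithmetic for the flush threshold in tcache_event_hard() above: tbin->ncached - tbin->low_water + (tbin->low_water >> 2) appears to be the number of cached objects to keep, so roughly three quarters of the objects that sat below the low-water mark since the last event get flushed:

#include <stdio.h>

int
main(void)
{
	unsigned ncached = 100, low_water = 40;

	/* Keep everything above the low-water mark plus a quarter of
	 * what sat below it: 100 - 40 + 10 = 70 kept, 30 flushed. */
	unsigned rem = ncached - low_water + (low_water >> 2);
	printf("keep %u of %u cached objects\n", rem, ncached);
	return 0;
}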