Use huge size class infrastructure for large size classes.

Jason Evans
2016-05-28 00:17:28 -07:00
parent b46261d58b
commit ed2c2427a7
34 changed files with 463 additions and 1979 deletions

File diff suppressed because it is too large.

View File

@@ -74,7 +74,8 @@ base_chunk_alloc(tsdn_t *tsdn, size_t minsize)
base_resident += PAGE_CEILING(nsize);
}
}
extent_init(extent, NULL, addr, csize, true, false, true, true, false);
extent_init(extent, NULL, addr, csize, 0, true, false, true, true,
false);
return (extent);
}
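
The hunk above threads a new usable-size argument through extent_init(). A minimal before/after sketch; the boolean parameter names are inferred from the getter calls in the chunk_split_wrapper hunk later in this commit, not spelled out here:

	/* Before this commit: extents carried only a total size. */
	extent_init(extent, NULL, addr, csize,
	    true,   /* active (inferred name) */
	    false,  /* dirty */
	    true,   /* zeroed */
	    true,   /* committed */
	    false); /* slab */

	/* After: a usize slot follows the size.  Base allocations pass 0,
	 * presumably because they are internal metadata with no
	 * user-visible usable size. */
	extent_init(extent, NULL, addr, csize, 0 /* usize */,
	    true, false, true, true, false);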

View File

@@ -369,7 +369,7 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
if (leadsize != 0) {
extent_t *lead = extent;
extent = chunk_split_wrapper(tsdn, arena, chunk_hooks, lead,
leadsize, size + trailsize);
leadsize, leadsize, size + trailsize, usize + trailsize);
if (extent == NULL) {
chunk_leak(tsdn, arena, chunk_hooks, cache, lead);
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
@@ -382,7 +382,7 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
/* Split the trail. */
if (trailsize != 0) {
extent_t *trail = chunk_split_wrapper(tsdn, arena, chunk_hooks,
extent, size, trailsize);
extent, size, usize, trailsize, trailsize);
if (trail == NULL) {
chunk_leak(tsdn, arena, chunk_hooks, cache, extent);
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
@@ -390,6 +390,12 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
}
extent_heaps_insert(extent_heaps, trail);
arena_chunk_cache_maybe_insert(arena, trail, cache);
} else if (leadsize == 0) {
/*
* Splitting causes usize to be set as a side effect, but no
* splitting occurred.
*/
extent_usize_set(extent, usize);
}
if (!extent_committed_get(extent) &&
@@ -552,7 +558,8 @@ chunk_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
extent_dalloc(tsdn, arena, extent);
return (NULL);
}
extent_init(extent, arena, addr, size, true, false, zero, commit, slab);
extent_init(extent, arena, addr, size, usize, true, false, zero, commit,
slab);
if (pad != 0)
extent_addr_randomize(tsdn, extent, alignment);
if (chunk_register(tsdn, extent)) {
@@ -635,6 +642,7 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
malloc_mutex_lock(tsdn, &arena->chunks_mtx);
chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
extent_usize_set(extent, 0);
extent_active_set(extent, false);
extent_zeroed_set(extent, !cache && extent_zeroed_get(extent));
if (extent_slab_get(extent)) {
@@ -801,7 +809,8 @@ chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b,
extent_t *
chunk_split_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
extent_t *extent, size_t size_a, size_t size_b)
extent_t *extent, size_t size_a, size_t usize_a, size_t size_b,
size_t usize_b)
{
extent_t *trail;
rtree_elm_t *lead_elm_a, *lead_elm_b, *trail_elm_a, *trail_elm_b;
@@ -818,9 +827,9 @@ chunk_split_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
extent_t lead;
extent_init(&lead, arena, extent_addr_get(extent), size_a,
extent_active_get(extent), extent_dirty_get(extent),
extent_zeroed_get(extent), extent_committed_get(extent),
extent_slab_get(extent));
usize_a, extent_active_get(extent),
extent_dirty_get(extent), extent_zeroed_get(extent),
extent_committed_get(extent), extent_slab_get(extent));
if (extent_rtree_acquire(tsdn, &lead, false, true, &lead_elm_a,
&lead_elm_b))
@@ -828,7 +837,7 @@ chunk_split_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
}
extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) +
size_a), size_b, extent_active_get(extent),
size_a), size_b, usize_b, extent_active_get(extent),
extent_dirty_get(extent), extent_zeroed_get(extent),
extent_committed_get(extent), extent_slab_get(extent));
if (extent_rtree_acquire(tsdn, trail, false, true, &trail_elm_a,
@@ -840,6 +849,7 @@ chunk_split_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
goto label_error_d;
extent_size_set(extent, size_a);
extent_usize_set(extent, usize_a);
extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent);
extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail);
@@ -905,6 +915,7 @@ chunk_merge_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
b_elm_b = b_elm_a;
extent_size_set(a, extent_size_get(a) + extent_size_get(b));
extent_usize_set(a, extent_usize_get(a) + extent_usize_get(b));
extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b));
extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a);
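
chunk_split_wrapper() now takes a usable size for each half alongside the physical sizes. A sketch of a call with the new (size_a, usize_a, size_b, usize_b) signature, lifted from the huge_ralloc_no_move_shrink() hunk later in this commit, with error handling trimmed:

	/* Split the excess tail off a shrinking huge allocation.  The lead
	 * keeps usize as its usable size; the trail's size and usize are
	 * both diff. */
	extent_t *trail = chunk_split_wrapper(tsdn, arena, &chunk_hooks,
	    extent,
	    usize + large_pad, usize,   /* size_a, usize_a */
	    diff, diff);                /* size_b, usize_b */
	if (trail == NULL)
		return (true);          /* split failed; extent unmodified */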

View File

@@ -121,7 +121,7 @@ chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
pad_size = (uintptr_t)ret - (uintptr_t)pad_addr;
if (pad_size != 0) {
extent_init(pad, arena, pad_addr, pad_size,
false, true, false, true, false);
pad_size, false, true, false, true, false);
}
dss_next = (void *)((uintptr_t)ret + size);
if ((uintptr_t)ret < (uintptr_t)dss_max ||

src/ctl.c (175 changed lines)
View File

@@ -49,7 +49,6 @@ static int n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
static const ctl_named_node_t *n##_index(tsdn_t *tsdn, \
const size_t *mib, size_t miblen, size_t i);
static bool ctl_arena_init(ctl_arena_stats_t *astats);
static void ctl_arena_clear(ctl_arena_stats_t *astats);
static void ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_stats_t *cstats,
arena_t *arena);
@@ -127,8 +126,6 @@ CTL_PROTO(arenas_bin_i_size)
CTL_PROTO(arenas_bin_i_nregs)
CTL_PROTO(arenas_bin_i_run_size)
INDEX_PROTO(arenas_bin_i)
CTL_PROTO(arenas_lrun_i_size)
INDEX_PROTO(arenas_lrun_i)
CTL_PROTO(arenas_hchunk_i_size)
INDEX_PROTO(arenas_hchunk_i)
CTL_PROTO(arenas_narenas)
@@ -140,7 +137,6 @@ CTL_PROTO(arenas_page)
CTL_PROTO(arenas_tcache_max)
CTL_PROTO(arenas_nbins)
CTL_PROTO(arenas_nhbins)
CTL_PROTO(arenas_nlruns)
CTL_PROTO(arenas_nhchunks)
CTL_PROTO(arenas_extend)
CTL_PROTO(prof_thread_active_init)
@@ -154,10 +150,6 @@ CTL_PROTO(stats_arenas_i_small_allocated)
CTL_PROTO(stats_arenas_i_small_nmalloc)
CTL_PROTO(stats_arenas_i_small_ndalloc)
CTL_PROTO(stats_arenas_i_small_nrequests)
CTL_PROTO(stats_arenas_i_large_allocated)
CTL_PROTO(stats_arenas_i_large_nmalloc)
CTL_PROTO(stats_arenas_i_large_ndalloc)
CTL_PROTO(stats_arenas_i_large_nrequests)
CTL_PROTO(stats_arenas_i_huge_allocated)
CTL_PROTO(stats_arenas_i_huge_nmalloc)
CTL_PROTO(stats_arenas_i_huge_ndalloc)
@@ -172,11 +164,6 @@ CTL_PROTO(stats_arenas_i_bins_j_nruns)
CTL_PROTO(stats_arenas_i_bins_j_nreruns)
CTL_PROTO(stats_arenas_i_bins_j_curruns)
INDEX_PROTO(stats_arenas_i_bins_j)
CTL_PROTO(stats_arenas_i_lruns_j_nmalloc)
CTL_PROTO(stats_arenas_i_lruns_j_ndalloc)
CTL_PROTO(stats_arenas_i_lruns_j_nrequests)
CTL_PROTO(stats_arenas_i_lruns_j_curruns)
INDEX_PROTO(stats_arenas_i_lruns_j)
CTL_PROTO(stats_arenas_i_hchunks_j_nmalloc)
CTL_PROTO(stats_arenas_i_hchunks_j_ndalloc)
CTL_PROTO(stats_arenas_i_hchunks_j_nrequests)
@@ -323,17 +310,6 @@ static const ctl_indexed_node_t arenas_bin_node[] = {
{INDEX(arenas_bin_i)}
};
static const ctl_named_node_t arenas_lrun_i_node[] = {
{NAME("size"), CTL(arenas_lrun_i_size)}
};
static const ctl_named_node_t super_arenas_lrun_i_node[] = {
{NAME(""), CHILD(named, arenas_lrun_i)}
};
static const ctl_indexed_node_t arenas_lrun_node[] = {
{INDEX(arenas_lrun_i)}
};
static const ctl_named_node_t arenas_hchunk_i_node[] = {
{NAME("size"), CTL(arenas_hchunk_i_size)}
};
@@ -356,8 +332,6 @@ static const ctl_named_node_t arenas_node[] = {
{NAME("nbins"), CTL(arenas_nbins)},
{NAME("nhbins"), CTL(arenas_nhbins)},
{NAME("bin"), CHILD(indexed, arenas_bin)},
{NAME("nlruns"), CTL(arenas_nlruns)},
{NAME("lrun"), CHILD(indexed, arenas_lrun)},
{NAME("nhchunks"), CTL(arenas_nhchunks)},
{NAME("hchunk"), CHILD(indexed, arenas_hchunk)},
{NAME("extend"), CTL(arenas_extend)}
@@ -385,13 +359,6 @@ static const ctl_named_node_t stats_arenas_i_small_node[] = {
{NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)}
};
static const ctl_named_node_t stats_arenas_i_large_node[] = {
{NAME("allocated"), CTL(stats_arenas_i_large_allocated)},
{NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)},
{NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)},
{NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)}
};
static const ctl_named_node_t stats_arenas_i_huge_node[] = {
{NAME("allocated"), CTL(stats_arenas_i_huge_allocated)},
{NAME("nmalloc"), CTL(stats_arenas_i_huge_nmalloc)},
@@ -418,20 +385,6 @@ static const ctl_indexed_node_t stats_arenas_i_bins_node[] = {
{INDEX(stats_arenas_i_bins_j)}
};
static const ctl_named_node_t stats_arenas_i_lruns_j_node[] = {
{NAME("nmalloc"), CTL(stats_arenas_i_lruns_j_nmalloc)},
{NAME("ndalloc"), CTL(stats_arenas_i_lruns_j_ndalloc)},
{NAME("nrequests"), CTL(stats_arenas_i_lruns_j_nrequests)},
{NAME("curruns"), CTL(stats_arenas_i_lruns_j_curruns)}
};
static const ctl_named_node_t super_stats_arenas_i_lruns_j_node[] = {
{NAME(""), CHILD(named, stats_arenas_i_lruns_j)}
};
static const ctl_indexed_node_t stats_arenas_i_lruns_node[] = {
{INDEX(stats_arenas_i_lruns_j)}
};
static const ctl_named_node_t stats_arenas_i_hchunks_j_node[] = {
{NAME("nmalloc"), CTL(stats_arenas_i_hchunks_j_nmalloc)},
{NAME("ndalloc"), CTL(stats_arenas_i_hchunks_j_ndalloc)},
@@ -460,10 +413,8 @@ static const ctl_named_node_t stats_arenas_i_node[] = {
{NAME("purged"), CTL(stats_arenas_i_purged)},
{NAME("metadata"), CHILD(named, stats_arenas_i_metadata)},
{NAME("small"), CHILD(named, stats_arenas_i_small)},
{NAME("large"), CHILD(named, stats_arenas_i_large)},
{NAME("huge"), CHILD(named, stats_arenas_i_huge)},
{NAME("bins"), CHILD(indexed, stats_arenas_i_bins)},
{NAME("lruns"), CHILD(indexed, stats_arenas_i_lruns)},
{NAME("hchunks"), CHILD(indexed, stats_arenas_i_hchunks)}
};
static const ctl_named_node_t super_stats_arenas_i_node[] = {
@@ -508,27 +459,6 @@ static const ctl_named_node_t super_root_node[] = {
/******************************************************************************/
static bool
ctl_arena_init(ctl_arena_stats_t *astats)
{
if (astats->lstats == NULL) {
astats->lstats = (malloc_large_stats_t *)a0malloc(nlclasses *
sizeof(malloc_large_stats_t));
if (astats->lstats == NULL)
return (true);
}
if (astats->hstats == NULL) {
astats->hstats = (malloc_huge_stats_t *)a0malloc(nhclasses *
sizeof(malloc_huge_stats_t));
if (astats->hstats == NULL)
return (true);
}
return (false);
}
static void
ctl_arena_clear(ctl_arena_stats_t *astats)
{
@@ -546,9 +476,7 @@ ctl_arena_clear(ctl_arena_stats_t *astats)
astats->ndalloc_small = 0;
astats->nrequests_small = 0;
memset(astats->bstats, 0, NBINS * sizeof(malloc_bin_stats_t));
memset(astats->lstats, 0, nlclasses *
sizeof(malloc_large_stats_t));
memset(astats->hstats, 0, nhclasses *
memset(astats->hstats, 0, (NSIZES - NBINS) *
sizeof(malloc_huge_stats_t));
}
}
@@ -562,7 +490,7 @@ ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_stats_t *cstats, arena_t *arena)
arena_stats_merge(tsdn, arena, &cstats->nthreads, &cstats->dss,
&cstats->lg_dirty_mult, &cstats->decay_time,
&cstats->pactive, &cstats->pdirty, &cstats->astats,
cstats->bstats, cstats->lstats, cstats->hstats);
cstats->bstats, cstats->hstats);
for (i = 0; i < NBINS; i++) {
cstats->allocated_small += cstats->bstats[i].curregs *
@@ -604,16 +532,10 @@ ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
sstats->ndalloc_small += astats->ndalloc_small;
sstats->nrequests_small += astats->nrequests_small;
sstats->astats.allocated_large +=
astats->astats.allocated_large;
sstats->astats.nmalloc_large += astats->astats.nmalloc_large;
sstats->astats.ndalloc_large += astats->astats.ndalloc_large;
sstats->astats.nrequests_large +=
astats->astats.nrequests_large;
sstats->astats.allocated_huge += astats->astats.allocated_huge;
sstats->astats.nmalloc_huge += astats->astats.nmalloc_huge;
sstats->astats.ndalloc_huge += astats->astats.ndalloc_huge;
sstats->astats.nrequests_huge += astats->astats.nrequests_huge;
for (i = 0; i < NBINS; i++) {
sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
@@ -632,17 +554,11 @@ ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
sstats->bstats[i].curruns += astats->bstats[i].curruns;
}
for (i = 0; i < nlclasses; i++) {
sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc;
sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc;
sstats->lstats[i].nrequests +=
astats->lstats[i].nrequests;
sstats->lstats[i].curruns += astats->lstats[i].curruns;
}
for (i = 0; i < nhclasses; i++) {
for (i = 0; i < NSIZES - NBINS; i++) {
sstats->hstats[i].nmalloc += astats->hstats[i].nmalloc;
sstats->hstats[i].ndalloc += astats->hstats[i].ndalloc;
sstats->hstats[i].nrequests +=
astats->hstats[i].nrequests;
sstats->hstats[i].curhchunks +=
astats->hstats[i].curhchunks;
}
@@ -680,10 +596,6 @@ ctl_grow(tsdn_t *tsdn)
memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) *
sizeof(ctl_arena_stats_t));
memset(&astats[ctl_stats.narenas + 1], 0, sizeof(ctl_arena_stats_t));
if (ctl_arena_init(&astats[ctl_stats.narenas + 1])) {
a0dalloc(astats);
return (true);
}
/* Swap merged stats to their new location. */
{
ctl_arena_stats_t tstats;
@@ -730,7 +642,6 @@ ctl_refresh(tsdn_t *tsdn)
&base_mapped);
ctl_stats.allocated =
ctl_stats.arenas[ctl_stats.narenas].allocated_small +
ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large +
ctl_stats.arenas[ctl_stats.narenas].astats.allocated_huge;
ctl_stats.active =
(ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE);
@@ -771,30 +682,6 @@ ctl_init(tsdn_t *tsdn)
}
memset(ctl_stats.arenas, 0, (ctl_stats.narenas + 1) *
sizeof(ctl_arena_stats_t));
/*
* Initialize all stats structures, regardless of whether they
* ever get used. Lazy initialization would allow errors to
* cause inconsistent state to be viewable by the application.
*/
if (config_stats) {
unsigned i;
for (i = 0; i <= ctl_stats.narenas; i++) {
if (ctl_arena_init(&ctl_stats.arenas[i])) {
unsigned j;
for (j = 0; j < i; j++) {
a0dalloc(
ctl_stats.arenas[j].lstats);
a0dalloc(
ctl_stats.arenas[j].hstats);
}
a0dalloc(ctl_stats.arenas);
ctl_stats.arenas = NULL;
ret = true;
goto label_return;
}
}
}
ctl_stats.arenas[ctl_stats.narenas].initialized = true;
ctl_epoch = 0;
@@ -1924,25 +1811,13 @@ arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
return (super_arenas_bin_i_node);
}
CTL_RO_NL_GEN(arenas_nlruns, nlclasses, unsigned)
CTL_RO_NL_GEN(arenas_lrun_i_size, index2size(NBINS+(szind_t)mib[2]), size_t)
static const ctl_named_node_t *
arenas_lrun_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
{
if (i > nlclasses)
return (NULL);
return (super_arenas_lrun_i_node);
}
CTL_RO_NL_GEN(arenas_nhchunks, nhclasses, unsigned)
CTL_RO_NL_GEN(arenas_hchunk_i_size, index2size(NBINS+nlclasses+(szind_t)mib[2]),
size_t)
CTL_RO_NL_GEN(arenas_nhchunks, NSIZES - NBINS, unsigned)
CTL_RO_NL_GEN(arenas_hchunk_i_size, index2size(NBINS+(szind_t)mib[2]), size_t)
static const ctl_named_node_t *
arenas_hchunk_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
{
if (i > nhclasses)
if (i > NSIZES - NBINS)
return (NULL);
return (super_arenas_hchunk_i_node);
}
@@ -2136,14 +2011,6 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc,
ctl_stats.arenas[mib[2]].ndalloc_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests,
ctl_stats.arenas[mib[2]].nrequests_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated,
ctl_stats.arenas[mib[2]].astats.allocated_large, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc,
ctl_stats.arenas[mib[2]].astats.nmalloc_large, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
ctl_stats.arenas[mib[2]].astats.ndalloc_large, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
ctl_stats.arenas[mib[2]].astats.nrequests_large, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_huge_allocated,
ctl_stats.arenas[mib[2]].astats.allocated_huge, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nmalloc,
@@ -2182,32 +2049,12 @@ stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
return (super_stats_arenas_i_bins_j_node);
}
CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nmalloc,
ctl_stats.arenas[mib[2]].lstats[mib[4]].nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_ndalloc,
ctl_stats.arenas[mib[2]].lstats[mib[4]].ndalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nrequests,
ctl_stats.arenas[mib[2]].lstats[mib[4]].nrequests, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns,
ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t)
static const ctl_named_node_t *
stats_arenas_i_lruns_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
size_t j)
{
if (j > nlclasses)
return (NULL);
return (super_stats_arenas_i_lruns_j_node);
}
CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_nmalloc,
ctl_stats.arenas[mib[2]].hstats[mib[4]].nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_ndalloc,
ctl_stats.arenas[mib[2]].hstats[mib[4]].ndalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_nrequests,
ctl_stats.arenas[mib[2]].hstats[mib[4]].nmalloc, /* Intentional. */
uint64_t)
ctl_stats.arenas[mib[2]].hstats[mib[4]].nrequests, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_curhchunks,
ctl_stats.arenas[mib[2]].hstats[mib[4]].curhchunks, size_t)
@@ -2216,7 +2063,7 @@ stats_arenas_i_hchunks_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
size_t j)
{
if (j > nhclasses)
if (j > NSIZES - NBINS)
return (NULL);
return (super_stats_arenas_i_hchunks_j_node);
}
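
With the arenas.lrun and stats.arenas.<i>.lruns namespaces removed, every class at or above NBINS is reported through the hchunk/hchunks namespaces. A sketch of walking the remaining class sizes; the control names come from this diff, and mallctl() is jemalloc's standard control interface (illustrative code, not part of this commit):

	#include <stdio.h>
	#include <jemalloc/jemalloc.h>

	static void
	print_nonsmall_class_sizes(void)
	{
		unsigned nhchunks, j;
		size_t sz = sizeof(nhchunks);

		/* "arenas.nhchunks" now reports NSIZES - NBINS classes. */
		if (mallctl("arenas.nhchunks", &nhchunks, &sz, NULL, 0) != 0)
			return;
		for (j = 0; j < nhchunks; j++) {
			char name[64];
			size_t class_size, len = sizeof(class_size);

			snprintf(name, sizeof(name),
			    "arenas.hchunk.%u.size", j);
			if (mallctl(name, &class_size, &len, NULL, 0) == 0) {
				/* Per the diff, this is
				 * index2size(NBINS + j). */
				printf("class %u: %zu bytes\n", j, class_size);
			}
		}
	}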

View File

@@ -40,7 +40,7 @@ extent_size_quantize_floor(size_t size)
pszind_t pind;
assert(size > 0);
assert(size <= HUGE_MAXCLASS);
assert(size - large_pad <= HUGE_MAXCLASS);
assert((size & PAGE_MASK) == 0);
assert(size != 0);
@@ -77,7 +77,7 @@ extent_size_quantize_ceil(size_t size)
size_t ret;
assert(size > 0);
assert(size <= HUGE_MAXCLASS);
assert(size - large_pad <= HUGE_MAXCLASS);
assert((size & PAGE_MASK) == 0);
ret = extent_size_quantize_floor(size);

View File

@@ -19,6 +19,7 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
size_t ausize;
extent_t *extent;
bool is_zeroed;
UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);
assert(!tsdn_null(tsdn) || arena != NULL);
@@ -42,6 +43,8 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
ql_elm_new(extent, ql_link);
ql_tail_insert(&arena->huge, extent, ql_link);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
if (config_prof && arena_prof_accum(tsdn, arena, usize))
prof_idump(tsdn);
if (zero || (config_fill && unlikely(opt_zero))) {
if (!is_zeroed) {
@@ -61,8 +64,20 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
#undef huge_dalloc_junk
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
#endif
void
huge_dalloc_junk(void *ptr, size_t usize)
{
memset(ptr, JEMALLOC_FREE_JUNK, usize);
}
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
#endif
static void
huge_dalloc_junk(tsdn_t *tsdn, void *ptr, size_t usize)
huge_dalloc_maybe_junk(tsdn_t *tsdn, void *ptr, size_t usize)
{
if (config_fill && have_dss && unlikely(opt_junk_free)) {
@@ -71,14 +86,10 @@ huge_dalloc_junk(tsdn_t *tsdn, void *ptr, size_t usize)
* unmapped.
*/
if (!config_munmap || (have_dss && chunk_in_dss(tsdn, ptr)))
huge_dalloc_junk(ptr, usize);
memset(ptr, JEMALLOC_FREE_JUNK, usize);
}
}
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
#endif
static bool
huge_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize)
@@ -93,12 +104,12 @@ huge_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize)
/* Split excess pages. */
if (diff != 0) {
extent_t *trail = chunk_split_wrapper(tsdn, arena, &chunk_hooks,
extent, usize + large_pad, diff);
extent, usize + large_pad, usize, diff, diff);
if (trail == NULL)
return (true);
if (config_fill && unlikely(opt_junk_free)) {
huge_dalloc_junk(tsdn, extent_addr_get(trail),
huge_dalloc_maybe_junk(tsdn, extent_addr_get(trail),
extent_usize_get(trail));
}
@@ -176,7 +187,8 @@ huge_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
/* The following should have been caught by callers. */
assert(usize_min > 0 && usize_max <= HUGE_MAXCLASS);
/* Both allocation sizes must be huge to avoid a move. */
assert(extent_usize_get(extent) >= chunksize && usize_max >= chunksize);
assert(extent_usize_get(extent) >= LARGE_MINCLASS && usize_max >=
LARGE_MINCLASS);
if (usize_max > extent_usize_get(extent)) {
/* Attempt to expand the allocation in-place. */
@@ -234,7 +246,8 @@ huge_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize,
/* The following should have been caught by callers. */
assert(usize > 0 && usize <= HUGE_MAXCLASS);
/* Both allocation sizes must be huge to avoid a move. */
assert(extent_usize_get(extent) >= chunksize && usize >= chunksize);
assert(extent_usize_get(extent) >= LARGE_MINCLASS && usize >=
LARGE_MINCLASS);
/* Try to avoid moving the allocation. */
if (!huge_ralloc_no_move(tsdn, extent, usize, usize, zero))
@@ -257,21 +270,39 @@ huge_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize,
return (ret);
}
void
huge_dalloc(tsdn_t *tsdn, extent_t *extent)
static void
huge_dalloc_impl(tsdn_t *tsdn, extent_t *extent, bool junked_locked)
{
arena_t *arena;
arena = extent_arena_get(extent);
malloc_mutex_lock(tsdn, &arena->huge_mtx);
if (!junked_locked)
malloc_mutex_lock(tsdn, &arena->huge_mtx);
ql_remove(&arena->huge, extent, ql_link);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
if (!junked_locked) {
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
huge_dalloc_junk(tsdn, extent_addr_get(extent),
extent_usize_get(extent));
arena_chunk_dalloc_huge(tsdn, extent_arena_get(extent), extent);
huge_dalloc_maybe_junk(tsdn, extent_addr_get(extent),
extent_usize_get(extent));
}
arena_chunk_dalloc_huge(tsdn, arena, extent, junked_locked);
arena_decay_tick(tsdn, arena);
if (!junked_locked)
arena_decay_tick(tsdn, arena);
}
void
huge_dalloc_junked_locked(tsdn_t *tsdn, extent_t *extent)
{
huge_dalloc_impl(tsdn, extent, true);
}
void
huge_dalloc(tsdn_t *tsdn, extent_t *extent)
{
huge_dalloc_impl(tsdn, extent, false);
}
size_t
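
The split into huge_dalloc() and huge_dalloc_junked_locked() appears to exist so that the tcache flush path, which already holds the arena lock and has already junk-filled the object, can skip both steps. A sketch of the two entry points as callers see them:

	/* Normal free path: takes the huge mutex, junk-fills if opt.junk
	 * permits, then ticks the decay clock. */
	huge_dalloc(tsdn, extent);

	/* Tcache flush path (see the tcache.c hunk further down): the arena
	 * lock is already held and junk filling already happened, so the
	 * locked variant skips both. */
	huge_dalloc_junked_locked(tsdn, extent);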

View File

@@ -1401,7 +1401,7 @@ ialloc_prof_sample(tsd_t *tsd, size_t usize, szind_t ind, bool zero,
p = ialloc(tsd, LARGE_MINCLASS, ind_large, zero, slow_path);
if (p == NULL)
return (NULL);
arena_prof_promoted(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), p), p,
arena_prof_promote(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), p), p,
usize);
} else
p = ialloc(tsd, usize, ind, zero, slow_path);
@@ -1483,8 +1483,7 @@ ialloc_post_check(void *ret, tsdn_t *tsdn, size_t usize, const char *func,
set_errno(ENOMEM);
}
if (config_stats && likely(ret != NULL)) {
assert(usize == isalloc(tsdn, iealloc(tsdn, ret), ret,
config_prof));
assert(usize == isalloc(tsdn, iealloc(tsdn, ret), ret));
*tsd_thread_allocatedp_get(tsdn_tsd(tsdn)) += usize;
}
witness_assert_lockless(tsdn);
@@ -1527,7 +1526,7 @@ imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize,
p = ipalloc(tsd, LARGE_MINCLASS, alignment, false);
if (p == NULL)
return (NULL);
arena_prof_promoted(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), p), p,
arena_prof_promote(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), p), p,
usize);
} else
p = ipalloc(tsd, usize, alignment, false);
@@ -1608,7 +1607,7 @@ imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
label_return:
if (config_stats && likely(result != NULL)) {
assert(usize == isalloc(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd),
result), result, config_prof));
result), result));
*tsd_thread_allocatedp_get(tsd) += usize;
}
UTRACE(0, size, result);
@@ -1699,7 +1698,7 @@ irealloc_prof_sample(tsd_t *tsd, extent_t *extent, void *old_ptr,
false);
if (p == NULL)
return (NULL);
arena_prof_promoted(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), p), p,
arena_prof_promote(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), p), p,
usize);
} else
p = iralloc(tsd, extent, old_ptr, old_usize, usize, 0, false);
@@ -1748,10 +1747,10 @@ ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
extent = iealloc(tsd_tsdn(tsd), ptr);
if (config_prof && opt_prof) {
usize = isalloc(tsd_tsdn(tsd), extent, ptr, config_prof);
usize = isalloc(tsd_tsdn(tsd), extent, ptr);
prof_free(tsd, extent, ptr, usize);
} else if (config_stats)
usize = isalloc(tsd_tsdn(tsd), extent, ptr, config_prof);
usize = isalloc(tsd_tsdn(tsd), extent, ptr);
if (config_stats)
*tsd_thread_deallocatedp_get(tsd) += usize;
@@ -1815,7 +1814,7 @@ je_realloc(void *ptr, size_t size)
witness_assert_lockless(tsd_tsdn(tsd));
extent = iealloc(tsd_tsdn(tsd), ptr);
old_usize = isalloc(tsd_tsdn(tsd), extent, ptr, config_prof);
old_usize = isalloc(tsd_tsdn(tsd), extent, ptr);
if (config_prof && opt_prof) {
usize = s2u(size);
ret = unlikely(usize == 0 || usize > HUGE_MAXCLASS) ?
@@ -1848,8 +1847,7 @@ je_realloc(void *ptr, size_t size)
if (config_stats && likely(ret != NULL)) {
tsd_t *tsd;
assert(usize == isalloc(tsdn, iealloc(tsdn, ret), ret,
config_prof));
assert(usize == isalloc(tsdn, iealloc(tsdn, ret), ret));
tsd = tsdn_tsd(tsdn);
*tsd_thread_allocatedp_get(tsd) += usize;
*tsd_thread_deallocatedp_get(tsd) += old_usize;
@@ -2003,7 +2001,7 @@ imallocx_prof_sample(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
tcache, arena, slow_path);
if (p == NULL)
return (NULL);
arena_prof_promoted(tsdn, iealloc(tsdn, p), p, usize);
arena_prof_promote(tsdn, iealloc(tsdn, p), p, usize);
} else
p = imallocx_flags(tsdn, usize, alignment, zero, tcache, arena,
slow_path);
@@ -2138,7 +2136,7 @@ irallocx_prof_sample(tsdn_t *tsdn, extent_t *extent, void *old_ptr,
alignment, zero, tcache, arena);
if (p == NULL)
return (NULL);
arena_prof_promoted(tsdn, iealloc(tsdn, p), p, usize);
arena_prof_promote(tsdn, iealloc(tsdn, p), p, usize);
} else {
p = iralloct(tsdn, extent, old_ptr, old_usize, usize, alignment,
zero, tcache, arena);
@@ -2182,7 +2180,7 @@ irallocx_prof(tsd_t *tsd, extent_t *extent, void *old_ptr, size_t old_usize,
* reallocation. Therefore, query the actual value of usize.
*/
e = extent;
*usize = isalloc(tsd_tsdn(tsd), e, p, config_prof);
*usize = isalloc(tsd_tsdn(tsd), e, p);
} else
e = iealloc(tsd_tsdn(tsd), p);
prof_realloc(tsd, e, p, *usize, tctx, prof_active, true, old_ptr,
@@ -2229,7 +2227,7 @@ je_rallocx(void *ptr, size_t size, int flags)
} else
tcache = tcache_get(tsd, true);
old_usize = isalloc(tsd_tsdn(tsd), extent, ptr, config_prof);
old_usize = isalloc(tsd_tsdn(tsd), extent, ptr);
if (config_prof && opt_prof) {
usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
@@ -2246,7 +2244,7 @@ je_rallocx(void *ptr, size_t size, int flags)
goto label_oom;
if (config_stats) {
usize = isalloc(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd),
p), p, config_prof);
p), p);
}
}
assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
@@ -2276,7 +2274,7 @@ ixallocx_helper(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t old_usize,
if (ixalloc(tsdn, extent, ptr, old_usize, size, extra, alignment, zero))
return (old_usize);
usize = isalloc(tsdn, extent, ptr, config_prof);
usize = isalloc(tsdn, extent, ptr);
return (usize);
}
@@ -2363,7 +2361,7 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
witness_assert_lockless(tsd_tsdn(tsd));
extent = iealloc(tsd_tsdn(tsd), ptr);
old_usize = isalloc(tsd_tsdn(tsd), extent, ptr, config_prof);
old_usize = isalloc(tsd_tsdn(tsd), extent, ptr);
/*
* The API explicitly absolves itself of protecting against (size +
@@ -2414,9 +2412,9 @@ je_sallocx(const void *ptr, int flags)
witness_assert_lockless(tsdn);
if (config_ivsalloc)
usize = ivsalloc(tsdn, ptr, config_prof);
usize = ivsalloc(tsdn, ptr);
else
usize = isalloc(tsdn, iealloc(tsdn, ptr), ptr, config_prof);
usize = isalloc(tsdn, iealloc(tsdn, ptr), ptr);
witness_assert_lockless(tsdn);
return (usize);
@@ -2477,7 +2475,7 @@ je_sdallocx(void *ptr, size_t size, int flags)
tsd = tsd_fetch();
extent = iealloc(tsd_tsdn(tsd), ptr);
usize = inallocx(tsd_tsdn(tsd), size, flags);
assert(usize == isalloc(tsd_tsdn(tsd), extent, ptr, config_prof));
assert(usize == isalloc(tsd_tsdn(tsd), extent, ptr));
witness_assert_lockless(tsd_tsdn(tsd));
if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
@@ -2593,10 +2591,10 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
witness_assert_lockless(tsdn);
if (config_ivsalloc)
ret = ivsalloc(tsdn, ptr, config_prof);
ret = ivsalloc(tsdn, ptr);
else {
ret = (ptr == NULL) ? 0 : isalloc(tsdn, iealloc(tsdn, ptr), ptr,
config_prof);
ret = (ptr == NULL) ? 0 : isalloc(tsdn, iealloc(tsdn, ptr),
ptr);
}
witness_assert_lockless(tsdn);
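
isalloc() and ivsalloc() lose their demotion flag in this commit (call sites previously passed config_prof). A hypothetical helper, mirroring the je_malloc_usable_size() hunk above, showing the simplified query; the helper name is illustrative only:

	/* Illustrative only; not part of this commit. */
	static size_t
	usable_size(tsdn_t *tsdn, const void *ptr)
	{
		if (ptr == NULL)
			return (0);
		/* iealloc() resolves the owning extent; isalloc() no longer
		 * takes a demote/config_prof argument. */
		return (isalloc(tsdn, iealloc(tsdn, ptr), ptr));
	}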

View File

@@ -37,12 +37,10 @@ size_t stats_cactive = 0;
static void stats_arena_bins_print(void (*write_cb)(void *, const char *),
void *cbopaque, unsigned i);
static void stats_arena_lruns_print(void (*write_cb)(void *, const char *),
void *cbopaque, unsigned i);
static void stats_arena_hchunks_print(
void (*write_cb)(void *, const char *), void *cbopaque, unsigned i);
static void stats_arena_print(void (*write_cb)(void *, const char *),
void *cbopaque, unsigned i, bool bins, bool large, bool huge);
void *cbopaque, unsigned i, bool bins, bool huge);
/******************************************************************************/
@@ -157,64 +155,17 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
}
}
static void
stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque,
unsigned i)
{
unsigned nbins, nlruns, j;
bool in_gap;
malloc_cprintf(write_cb, cbopaque,
"large: size ind allocated nmalloc ndalloc"
" nrequests curruns\n");
CTL_GET("arenas.nbins", &nbins, unsigned);
CTL_GET("arenas.nlruns", &nlruns, unsigned);
for (j = 0, in_gap = false; j < nlruns; j++) {
uint64_t nmalloc, ndalloc, nrequests;
size_t run_size, curruns;
CTL_M2_M4_GET("stats.arenas.0.lruns.0.nmalloc", i, j, &nmalloc,
uint64_t);
CTL_M2_M4_GET("stats.arenas.0.lruns.0.ndalloc", i, j, &ndalloc,
uint64_t);
CTL_M2_M4_GET("stats.arenas.0.lruns.0.nrequests", i, j,
&nrequests, uint64_t);
if (nrequests == 0)
in_gap = true;
else {
CTL_M2_GET("arenas.lrun.0.size", j, &run_size, size_t);
CTL_M2_M4_GET("stats.arenas.0.lruns.0.curruns", i, j,
&curruns, size_t);
if (in_gap) {
malloc_cprintf(write_cb, cbopaque,
" ---\n");
in_gap = false;
}
malloc_cprintf(write_cb, cbopaque,
"%20zu %3u %12zu %12"FMTu64" %12"FMTu64
" %12"FMTu64" %12zu\n",
run_size, nbins + j, curruns * run_size, nmalloc,
ndalloc, nrequests, curruns);
}
}
if (in_gap) {
malloc_cprintf(write_cb, cbopaque,
" ---\n");
}
}
static void
stats_arena_hchunks_print(void (*write_cb)(void *, const char *),
void *cbopaque, unsigned i)
{
unsigned nbins, nlruns, nhchunks, j;
unsigned nbins, nhchunks, j;
bool in_gap;
malloc_cprintf(write_cb, cbopaque,
"huge: size ind allocated nmalloc ndalloc"
" nrequests curhchunks\n");
CTL_GET("arenas.nbins", &nbins, unsigned);
CTL_GET("arenas.nlruns", &nlruns, unsigned);
CTL_GET("arenas.nhchunks", &nhchunks, unsigned);
for (j = 0, in_gap = false; j < nhchunks; j++) {
uint64_t nmalloc, ndalloc, nrequests;
@@ -241,7 +192,7 @@ stats_arena_hchunks_print(void (*write_cb)(void *, const char *),
malloc_cprintf(write_cb, cbopaque,
"%20zu %3u %12zu %12"FMTu64" %12"FMTu64
" %12"FMTu64" %12zu\n",
hchunk_size, nbins + nlruns + j,
hchunk_size, nbins + j,
curhchunks * hchunk_size, nmalloc, ndalloc,
nrequests, curhchunks);
}
@@ -254,7 +205,7 @@ stats_arena_hchunks_print(void (*write_cb)(void *, const char *),
static void
stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
unsigned i, bool bins, bool large, bool huge)
unsigned i, bool bins, bool huge)
{
unsigned nthreads;
const char *dss;
@@ -264,8 +215,6 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
uint64_t npurge, nmadvise, purged;
size_t small_allocated;
uint64_t small_nmalloc, small_ndalloc, small_nrequests;
size_t large_allocated;
uint64_t large_nmalloc, large_ndalloc, large_nrequests;
size_t huge_allocated;
uint64_t huge_nmalloc, huge_ndalloc, huge_nrequests;
@@ -318,16 +267,6 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
"small: %12zu %12"FMTu64" %12"FMTu64
" %12"FMTu64"\n",
small_allocated, small_nmalloc, small_ndalloc, small_nrequests);
CTL_M2_GET("stats.arenas.0.large.allocated", i, &large_allocated,
size_t);
CTL_M2_GET("stats.arenas.0.large.nmalloc", i, &large_nmalloc, uint64_t);
CTL_M2_GET("stats.arenas.0.large.ndalloc", i, &large_ndalloc, uint64_t);
CTL_M2_GET("stats.arenas.0.large.nrequests", i, &large_nrequests,
uint64_t);
malloc_cprintf(write_cb, cbopaque,
"large: %12zu %12"FMTu64" %12"FMTu64
" %12"FMTu64"\n",
large_allocated, large_nmalloc, large_ndalloc, large_nrequests);
CTL_M2_GET("stats.arenas.0.huge.allocated", i, &huge_allocated, size_t);
CTL_M2_GET("stats.arenas.0.huge.nmalloc", i, &huge_nmalloc, uint64_t);
CTL_M2_GET("stats.arenas.0.huge.ndalloc", i, &huge_ndalloc, uint64_t);
@@ -340,10 +279,8 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
malloc_cprintf(write_cb, cbopaque,
"total: %12zu %12"FMTu64" %12"FMTu64
" %12"FMTu64"\n",
small_allocated + large_allocated + huge_allocated,
small_nmalloc + large_nmalloc + huge_nmalloc,
small_ndalloc + large_ndalloc + huge_ndalloc,
small_nrequests + large_nrequests + huge_nrequests);
small_allocated + huge_allocated, small_nmalloc + huge_nmalloc,
small_ndalloc + huge_ndalloc, small_nrequests + huge_nrequests);
malloc_cprintf(write_cb, cbopaque,
"active: %12zu\n", pactive * page);
CTL_M2_GET("stats.arenas.0.mapped", i, &mapped, size_t);
@@ -362,8 +299,6 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
if (bins)
stats_arena_bins_print(write_cb, cbopaque, i);
if (large)
stats_arena_lruns_print(write_cb, cbopaque, i);
if (huge)
stats_arena_hchunks_print(write_cb, cbopaque, i);
}
@@ -379,7 +314,6 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
bool merged = true;
bool unmerged = true;
bool bins = true;
bool large = true;
bool huge = true;
/*
@@ -421,9 +355,6 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
bins = false;
break;
case 'l':
large = false;
break;
case 'h':
huge = false;
break;
default:;
@@ -636,7 +567,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
malloc_cprintf(write_cb, cbopaque,
"\nMerged arenas stats:\n");
stats_arena_print(write_cb, cbopaque,
narenas, bins, large, huge);
narenas, bins, huge);
}
}
}
@@ -662,8 +593,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
cbopaque,
"\narenas[%u]:\n", i);
stats_arena_print(write_cb,
cbopaque, i, bins, large,
huge);
cbopaque, i, bins, huge);
}
}
}
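
With the separate large-class table gone, the opts string handled by stats_print() no longer has an 'l' case; per the switch in the hunk above, 'b' still suppresses the bins table and 'h' the now-unified huge table. For example, via the public entry point:

	/* Print general and per-arena stats but omit the per-class huge
	 * table.  malloc_stats_print() is the long-standing jemalloc API;
	 * the opt characters come from the switch shown above. */
	malloc_stats_print(NULL, NULL, "h");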

View File

@@ -27,7 +27,7 @@ size_t
tcache_salloc(tsdn_t *tsdn, const void *ptr)
{
return (arena_salloc(tsdn, iealloc(tsdn, ptr), ptr, false));
return (arena_salloc(tsdn, iealloc(tsdn, ptr), ptr));
}
void
@@ -46,7 +46,7 @@ tcache_event_hard(tsd_t *tsd, tcache_t *tcache)
tbin->ncached - tbin->low_water + (tbin->low_water
>> 2));
} else {
tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached
tcache_bin_flush_huge(tsd, tbin, binind, tbin->ncached
- tbin->low_water + (tbin->low_water >> 2), tcache);
}
/*
@@ -170,7 +170,7 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
}
void
tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
tcache_bin_flush_huge(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
unsigned rem, tcache_t *tcache)
{
arena_t *arena;
@@ -200,9 +200,9 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
}
if (config_stats) {
merged_stats = true;
arena->stats.nrequests_large +=
arena->stats.nrequests_huge +=
tbin->tstats.nrequests;
arena->stats.lstats[binind - NBINS].nrequests +=
arena->stats.hstats[binind - NBINS].nrequests +=
tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
}
@@ -213,10 +213,8 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
assert(ptr != NULL);
extent = iealloc(tsd_tsdn(tsd), ptr);
if (extent_arena_get(extent) == locked_arena) {
arena_chunk_t *chunk =
(arena_chunk_t *)extent_base_get(extent);
arena_dalloc_large_junked_locked(tsd_tsdn(tsd),
locked_arena, chunk, extent, ptr);
huge_dalloc_junked_locked(tsd_tsdn(tsd),
extent);
} else {
/*
* This object was allocated via a different
@@ -240,8 +238,8 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
* arena, so the stats didn't get merged. Manually do so now.
*/
malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
arena->stats.nrequests_large += tbin->tstats.nrequests;
arena->stats.lstats[binind - NBINS].nrequests +=
arena->stats.nrequests_huge += tbin->tstats.nrequests;
arena->stats.hstats[binind - NBINS].nrequests +=
tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
@@ -379,12 +377,12 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
for (; i < nhbins; i++) {
tcache_bin_t *tbin = &tcache->tbins[i];
tcache_bin_flush_large(tsd, tbin, i, 0, tcache);
tcache_bin_flush_huge(tsd, tbin, i, 0, tcache);
if (config_stats && tbin->tstats.nrequests != 0) {
malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
arena->stats.nrequests_large += tbin->tstats.nrequests;
arena->stats.lstats[i - NBINS].nrequests +=
arena->stats.nrequests_huge += tbin->tstats.nrequests;
arena->stats.hstats[i - NBINS].nrequests +=
tbin->tstats.nrequests;
malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
}
@@ -439,10 +437,10 @@ tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
}
for (; i < nhbins; i++) {
malloc_large_stats_t *lstats = &arena->stats.lstats[i - NBINS];
malloc_huge_stats_t *hstats = &arena->stats.hstats[i - NBINS];
tcache_bin_t *tbin = &tcache->tbins[i];
arena->stats.nrequests_large += tbin->tstats.nrequests;
lstats->nrequests += tbin->tstats.nrequests;
arena->stats.nrequests_huge += tbin->tstats.nrequests;
hstats->nrequests += tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
}
}
@@ -516,14 +514,9 @@ tcache_boot(tsdn_t *tsdn)
{
unsigned i;
/*
* If necessary, clamp opt_lg_tcache_max, now that large_maxclass is
* known.
*/
/* If necessary, clamp opt_lg_tcache_max. */
if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS)
tcache_maxclass = SMALL_MAXCLASS;
else if ((1U << opt_lg_tcache_max) > large_maxclass)
tcache_maxclass = large_maxclass;
else
tcache_maxclass = (1U << opt_lg_tcache_max);
@@ -550,7 +543,7 @@ tcache_boot(tsdn_t *tsdn)
stack_nelms += tcache_bin_info[i].ncached_max;
}
for (; i < nhbins; i++) {
tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE;
tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_HUGE;
stack_nelms += tcache_bin_info[i].ncached_max;
}
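
Per-class stats for every class at or above NBINS now live in arena->stats.hstats, indexed by binind - NBINS, matching the hchunks mallctl indexing. A hypothetical helper (not from this commit) spelling out that arithmetic:

	static malloc_huge_stats_t *
	huge_stats_for_bin(arena_t *arena, szind_t binind)
	{
		/* Classes below NBINS use bstats; everything else maps into
		 * hstats at binind - NBINS, per the flush and merge hunks
		 * above. */
		assert(binind >= NBINS);
		return (&arena->stats.hstats[binind - NBINS]);
	}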

View File

@@ -56,7 +56,7 @@ zone_size(malloc_zone_t *zone, void *ptr)
* not work in practice, we must check all pointers to assure that they
* reside within a mapped chunk before determining size.
*/
return (ivsalloc(tsdn_fetch(), ptr, config_prof));
return (ivsalloc(tsdn_fetch(), ptr));
}
static void *
@@ -87,7 +87,7 @@ static void
zone_free(malloc_zone_t *zone, void *ptr)
{
if (ivsalloc(tsdn_fetch(), ptr, config_prof) != 0) {
if (ivsalloc(tsdn_fetch(), ptr) != 0) {
je_free(ptr);
return;
}
@@ -99,7 +99,7 @@ static void *
zone_realloc(malloc_zone_t *zone, void *ptr, size_t size)
{
if (ivsalloc(tsdn_fetch(), ptr, config_prof) != 0)
if (ivsalloc(tsdn_fetch(), ptr) != 0)
return (je_realloc(ptr, size));
return (realloc(ptr, size));
@@ -123,7 +123,7 @@ zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size)
{
size_t alloc_size;
alloc_size = ivsalloc(tsdn_fetch(), ptr, config_prof);
alloc_size = ivsalloc(tsdn_fetch(), ptr);
if (alloc_size != 0) {
assert(alloc_size == size);
je_free(ptr);