Enable per-tcache tcache_max

1. Add tcache_max and nhbins to tcache_t so that they are per-tcache;
   with one auto tcache per thread, they are also per-thread.
2. Add a mallctl for each thread to set the tcache_max of its own auto tcache.
3. Store the maximum number of cached items in each bin instead of in global storage.
4. Add tests for the modifications above.
5. Rename `nhbins` and `tcache_maxclass` to `global_do_not_change_nhbins` and `global_do_not_change_tcache_maxclass`.
Author: guangli-dai, 2023-08-06 11:38:30 -07:00 (committed by Qi Wang)
Parent: fbca96c433
Commit: a442d9b895
15 changed files with 528 additions and 222 deletions
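
For orientation, a minimal usage sketch (not part of the diff) of the new `thread.tcache.max` mallctl introduced below; the 64 KiB value is an arbitrary example:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void) {
	size_t old_max;
	size_t new_max = (size_t)64 << 10; /* arbitrary example value */
	size_t sz = sizeof(old_max);
	/* Read the current per-thread tcache_max and install a new one. */
	if (mallctl("thread.tcache.max", &old_max, &sz,
	    &new_max, sizeof(new_max)) != 0) {
		return 1;
	}
	printf("tcache_max: %zu -> %zu\n", old_max, new_max);
	return 0;
}

Per the ctl handler below, writes that are not a valid size class are rounded up (via sz_s2u()), and anything above TCACHE_MAXCLASS_LIMIT is clamped to that limit.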


@ -198,11 +198,11 @@ arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
assert(sz_can_use_slab(size));
return tcache_alloc_small(tsdn_tsd(tsdn), arena,
tcache, size, ind, zero, slow_path);
} else if (likely(size <= tcache_maxclass)) {
} else if (likely(size <= tcache_max_get(tcache))) {
return tcache_alloc_large(tsdn_tsd(tsdn), arena,
tcache, size, ind, zero, slow_path);
}
/* (size > tcache_maxclass) case falls through. */
/* (size > tcache_max) case falls through. */
}
return arena_malloc_hard(tsdn, arena, size, ind, zero, slab);
@ -297,7 +297,8 @@ arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr) {
JEMALLOC_ALWAYS_INLINE void
arena_dalloc_large(tsdn_t *tsdn, void *ptr, tcache_t *tcache, szind_t szind,
bool slow_path) {
if (szind < nhbins) {
assert(!tsdn_null(tsdn) && tcache != NULL);
if (szind < tcache_nhbins_get(tcache)) {
if (config_prof && unlikely(szind < SC_NBINS)) {
arena_dalloc_promoted(tsdn, ptr, tcache, slow_path);
} else {


@ -125,6 +125,9 @@ struct cache_bin_s {
* array. Immutable after initialization.
*/
uint16_t low_bits_empty;
/* The maximum number of cached items in the bin. */
cache_bin_info_t bin_info;
};
/*


@ -23,6 +23,7 @@ percpu_arena_update(tsd_t *tsd, unsigned cpu) {
tcache_t *tcache = tcache_get(tsd);
if (tcache != NULL) {
tcache_slow_t *tcache_slow = tsd_tcache_slowp_get(tsd);
assert(tcache_slow->arena != NULL);
tcache_arena_reassociate(tsd_tsdn(tsd), tcache_slow,
tcache, newarena);
}


@ -530,7 +530,7 @@ bool free_fastpath(void *ptr, size_t size, bool size_hint) {
/*
* Currently the fastpath only handles small sizes. The branch on
* SC_LOOKUP_MAXCLASS makes sure of it. This lets us avoid checking
* tcache szind upper limit (i.e. tcache_maxclass) as well.
* tcache szind upper limit (i.e. tcache_max) as well.
*/
assert(alloc_ctx.slab);


@ -21,14 +21,19 @@ extern unsigned opt_lg_tcache_flush_large_div;
/*
* Number of tcache bins. There are SC_NBINS small-object bins, plus 0 or more
* large-object bins.
* large-object bins. This is only used during thread initialization, and
* changing it will not affect threads that are already initialized. Thus,
* it should not be changed on the fly. To change the number of tcache bins
* in use, refer to tcache_nhbins of each tcache.
*/
extern unsigned nhbins;
extern unsigned global_do_not_change_nhbins;
/* Maximum cached size class. */
extern size_t tcache_maxclass;
extern cache_bin_info_t *tcache_bin_info;
/*
* Maximum cached size class. As above, this is only used during thread
* initialization and should not be changed afterwards. To change the
* maximum cached size class, refer to tcache_max of each tcache.
*/
extern size_t global_do_not_change_tcache_maxclass;
/*
* Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and
@ -65,7 +70,7 @@ void tcache_prefork(tsdn_t *tsdn);
void tcache_postfork_parent(tsdn_t *tsdn);
void tcache_postfork_child(tsdn_t *tsdn);
void tcache_flush(tsd_t *tsd);
bool tsd_tcache_data_init(tsd_t *tsd);
bool tsd_tcache_data_init(tsd_t *tsd, arena_t *arena);
bool tsd_tcache_enabled_data_init(tsd_t *tsd);
void tcache_assert_initialized(tcache_t *tcache);
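
To make the global-versus-per-tcache split concrete, a hedged sketch (not part of the diff) contrasting the frozen globals with the per-thread value; the mallctl names are wired up in ctl.c later in this diff:

/* Global defaults, fixed at boot; they only seed new threads' tcaches. */
size_t global_max;
unsigned global_nhbins;
size_t sz = sizeof(global_max);
mallctl("arenas.tcache_max", &global_max, &sz, NULL, 0);
sz = sizeof(global_nhbins);
mallctl("arenas.nhbins", &global_nhbins, &sz, NULL, 0);

/* The calling thread's own limit, which may now diverge from the global. */
size_t thread_max;
sz = sizeof(thread_max);
mallctl("thread.tcache.max", &thread_max, &sz, NULL, 0);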


@ -23,7 +23,7 @@ tcache_enabled_set(tsd_t *tsd, bool enabled) {
bool was_enabled = tsd_tcache_enabled_get(tsd);
if (!was_enabled && enabled) {
tsd_tcache_data_init(tsd);
tsd_tcache_data_init(tsd, NULL);
} else if (was_enabled && !enabled) {
tcache_cleanup(tsd);
}
@ -32,13 +32,67 @@ tcache_enabled_set(tsd_t *tsd, bool enabled) {
tsd_slow_update(tsd);
}
static inline unsigned
tcache_nhbins_get(tcache_t *tcache) {
assert(tcache != NULL);
assert(tcache->tcache_nhbins <= TCACHE_NBINS_MAX);
return tcache->tcache_nhbins;
}
static inline size_t
tcache_max_get(tcache_t *tcache) {
assert(tcache != NULL);
assert(tcache->tcache_max <= TCACHE_MAXCLASS_LIMIT);
return tcache->tcache_max;
}
static inline void
tcache_max_and_nhbins_set(tcache_t *tcache, size_t tcache_max) {
assert(tcache != NULL);
assert(tcache_max <= TCACHE_MAXCLASS_LIMIT);
tcache->tcache_max = tcache_max;
tcache->tcache_nhbins = sz_size2index(tcache_max) + 1;
}
static inline void
thread_tcache_max_and_nhbins_set(tsd_t *tsd, size_t tcache_max) {
assert(tcache_max <= TCACHE_MAXCLASS_LIMIT);
assert(tcache_max == sz_s2u(tcache_max));
tcache_t *tcache = tsd_tcachep_get(tsd);
tcache_slow_t *tcache_slow;
assert(tcache != NULL);
bool enabled = tcache_available(tsd);
arena_t *assigned_arena;
if (enabled) {
tcache_slow = tcache_slow_get(tsd);
assert(tcache != NULL && tcache_slow != NULL);
assigned_arena = tcache_slow->arena;
/* Shutdown and reboot the tcache for a clean slate. */
tcache_cleanup(tsd);
}
/*
* Set tcache_max and tcache_nhbins of the tcache even if the
* tcache is not available yet; the values are stored in tsd_t
* and can always be updated.
*/
tcache_max_and_nhbins_set(tcache, tcache_max);
if (enabled) {
tsd_tcache_data_init(tsd, assigned_arena);
}
assert(tcache_nhbins_get(tcache) == sz_size2index(tcache_max) + 1);
}
JEMALLOC_ALWAYS_INLINE bool
tcache_small_bin_disabled(szind_t ind, cache_bin_t *bin) {
assert(ind < SC_NBINS);
bool ret = (cache_bin_info_ncached_max(&tcache_bin_info[ind]) == 0);
if (ret && bin != NULL) {
assert(bin != NULL);
bool ret = cache_bin_info_ncached_max(&bin->bin_info) == 0;
if (ret) {
/* small size class but cache bin disabled. */
assert(ind >= nhbins);
assert((uintptr_t)(*bin->stack_head) ==
cache_bin_preceding_junk);
}
@ -46,6 +100,14 @@ tcache_small_bin_disabled(szind_t ind, cache_bin_t *bin) {
return ret;
}
JEMALLOC_ALWAYS_INLINE bool
tcache_large_bin_disabled(szind_t ind, cache_bin_t *bin) {
assert(ind >= SC_NBINS);
assert(bin != NULL);
return (cache_bin_info_ncached_max(&bin->bin_info) == 0 ||
cache_bin_still_zero_initialized(bin));
}
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
size_t size, szind_t binind, bool zero, bool slow_path) {
@ -95,7 +157,7 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
void *ret;
bool tcache_success;
assert(binind >= SC_NBINS && binind < nhbins);
assert(binind >= SC_NBINS && binind < tcache_nhbins_get(tcache));
cache_bin_t *bin = &tcache->bins[binind];
ret = cache_bin_alloc(bin, &tcache_success);
assert(tcache_success == (ret != NULL));
@ -118,7 +180,7 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
} else {
if (unlikely(zero)) {
size_t usize = sz_index2size(binind);
assert(usize <= tcache_maxclass);
assert(usize <= tcache_max_get(tcache));
memset(ret, 0, usize);
}
@ -157,7 +219,7 @@ tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
return;
}
cache_bin_sz_t max = cache_bin_info_ncached_max(
&tcache_bin_info[binind]);
&bin->bin_info);
unsigned remain = max >> opt_lg_tcache_flush_small_div;
tcache_bin_flush_small(tsd, tcache, bin, binind, remain);
bool ret = cache_bin_dalloc_easy(bin, ptr);
@ -169,14 +231,13 @@ JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
bool slow_path) {
assert(tcache_salloc(tsd_tsdn(tsd), ptr)
> SC_SMALL_MAXCLASS);
assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass);
assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SC_SMALL_MAXCLASS);
assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_max_get(tcache));
cache_bin_t *bin = &tcache->bins[binind];
if (unlikely(!cache_bin_dalloc_easy(bin, ptr))) {
unsigned remain = cache_bin_info_ncached_max(
&tcache_bin_info[binind]) >> opt_lg_tcache_flush_large_div;
&bin->bin_info) >> opt_lg_tcache_flush_large_div;
tcache_bin_flush_large(tsd, tcache, bin, binind, remain);
bool ret = cache_bin_dalloc_easy(bin, ptr);
assert(ret);


@ -55,6 +55,8 @@ struct tcache_slow_s {
struct tcache_s {
tcache_slow_t *tcache_slow;
unsigned tcache_nhbins;
size_t tcache_max;
cache_bin_t bins[TCACHE_NBINS_MAX];
};


@ -19,7 +19,7 @@ typedef struct tcaches_s tcaches_t;
/* NOLINTNEXTLINE(performance-no-int-to-ptr) */
#define TCACHES_ELM_NEED_REINIT ((tcache_t *)(uintptr_t)1)
#define TCACHE_LG_MAXCLASS_LIMIT 23 /* tcache_maxclass = 8M */
#define TCACHE_LG_MAXCLASS_LIMIT 23 /* tcache_max = 8M */
#define TCACHE_MAXCLASS_LIMIT ((size_t)1 << TCACHE_LG_MAXCLASS_LIMIT)
#define TCACHE_NBINS_MAX (SC_NBINS + SC_NGROUP * \
(TCACHE_LG_MAXCLASS_LIMIT - SC_LG_LARGE_MINCLASS) + 1)
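
A worked instance of the limits above (a sketch only: SC_NGROUP and SC_LG_LARGE_MINCLASS are platform-dependent; 4 and 14 are typical for 4 KiB-page builds):

/*
 * TCACHE_MAXCLASS_LIMIT = 1 << 23 = 8 MiB
 * TCACHE_NBINS_MAX      = SC_NBINS + 4 * (23 - 14) + 1
 *                       = SC_NBINS + 37
 *
 * i.e. every small bin, plus one bin per large size class up to 8 MiB.
 */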


@ -157,11 +157,18 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
cache_bin_array_descriptor_t *descriptor;
ql_foreach(descriptor, &arena->cache_bin_array_descriptor_ql, link) {
for (szind_t i = 0; i < nhbins; i++) {
for (szind_t i = 0; i < TCACHE_NBINS_MAX; i++) {
cache_bin_t *cache_bin = &descriptor->bins[i];
cache_bin_sz_t ncached, nstashed;
cache_bin_nitems_get_remote(cache_bin,
&tcache_bin_info[i], &ncached, &nstashed);
&cache_bin->bin_info, &ncached, &nstashed);
if ((i < SC_NBINS &&
tcache_small_bin_disabled(i, cache_bin)) ||
(i >= SC_NBINS &&
tcache_large_bin_disabled(i, cache_bin))) {
assert(ncached == 0 && nstashed == 0);
}
astats->tcache_bytes += ncached * sz_index2size(i);
astats->tcache_stashed_bytes += nstashed *
@ -720,7 +727,8 @@ arena_dalloc_promoted_impl(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
safety_check_verify_redzone(ptr, usize, bumped_usize);
}
if (bumped_usize >= SC_LARGE_MINCLASS &&
bumped_usize <= tcache_maxclass && tcache != NULL) {
tcache != NULL &&
bumped_usize <= tcache_max_get(tcache)) {
tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
sz_size2index(bumped_usize), slow_path);
} else {


@ -82,6 +82,7 @@ cache_bin_init(cache_bin_t *bin, cache_bin_info_t *info, void *alloc,
bin->low_bits_low_water = (uint16_t)(uintptr_t)bin->stack_head;
bin->low_bits_full = (uint16_t)(uintptr_t)full_position;
bin->low_bits_empty = (uint16_t)(uintptr_t)empty_position;
cache_bin_info_init(&bin->bin_info, info->ncached_max);
cache_bin_sz_t free_spots = cache_bin_diff(bin,
bin->low_bits_full, (uint16_t)(uintptr_t)bin->stack_head);
assert(free_spots == bin_stack_size);


@ -66,6 +66,7 @@ CTL_PROTO(epoch)
CTL_PROTO(background_thread)
CTL_PROTO(max_background_threads)
CTL_PROTO(thread_tcache_enabled)
CTL_PROTO(thread_tcache_max)
CTL_PROTO(thread_tcache_flush)
CTL_PROTO(thread_peak_read)
CTL_PROTO(thread_peak_reset)
@ -371,6 +372,7 @@ CTL_PROTO(stats_mutexes_reset)
static const ctl_named_node_t thread_tcache_node[] = {
{NAME("enabled"), CTL(thread_tcache_enabled)},
{NAME("max"), CTL(thread_tcache_max)},
{NAME("flush"), CTL(thread_tcache_flush)}
};
@ -2289,6 +2291,40 @@ label_return:
return ret;
}
static int
thread_tcache_max_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp,
size_t newlen) {
int ret;
size_t oldval;
/* The tcache_t pointer always exists, even when the tcache is disabled. */
tcache_t *tcache = tsd_tcachep_get(tsd);
assert(tcache != NULL);
oldval = tcache_max_get(tcache);
READ(oldval, size_t);
if (newp != NULL) {
if (newlen != sizeof(size_t)) {
ret = EINVAL;
goto label_return;
}
size_t new_tcache_max = oldval;
WRITE(new_tcache_max, size_t);
if (new_tcache_max > TCACHE_MAXCLASS_LIMIT) {
new_tcache_max = TCACHE_MAXCLASS_LIMIT;
}
new_tcache_max = sz_s2u(new_tcache_max);
if (new_tcache_max != oldval) {
thread_tcache_max_and_nhbins_set(tsd, new_tcache_max);
}
}
ret = 0;
label_return:
return ret;
}
static int
thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp,
@ -3101,9 +3137,9 @@ arenas_muzzy_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
CTL_RO_NL_GEN(arenas_tcache_max, tcache_maxclass, size_t)
CTL_RO_NL_GEN(arenas_tcache_max, global_do_not_change_tcache_maxclass, size_t)
CTL_RO_NL_GEN(arenas_nbins, SC_NBINS, unsigned)
CTL_RO_NL_GEN(arenas_nhbins, nhbins, unsigned)
CTL_RO_NL_GEN(arenas_nhbins, global_do_not_change_nhbins, unsigned)
CTL_RO_NL_GEN(arenas_bin_i_size, bin_infos[mib[2]].reg_size, size_t)
CTL_RO_NL_GEN(arenas_bin_i_nregs, bin_infos[mib[2]].nregs, uint32_t)
CTL_RO_NL_GEN(arenas_bin_i_slab_size, bin_infos[mib[2]].slab_size, size_t)
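
A hedged illustration (not part of the diff) of the clamping in thread_tcache_max_ctl above:

size_t req = (size_t)1 << 30; /* 1 GiB, far above TCACHE_MAXCLASS_LIMIT */
mallctl("thread.tcache.max", NULL, NULL, &req, sizeof(req));

size_t cur;
size_t sz = sizeof(cur);
mallctl("thread.tcache.max", &cur, &sz, NULL, 0);
/* cur is now TCACHE_MAXCLASS_LIMIT (8 MiB), not 1 GiB. */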


@ -4136,16 +4136,14 @@ batch_alloc(void **ptrs, size_t num, size_t size, int flags) {
filled += n;
}
if (likely(ind < nhbins) && progress < batch) {
if (bin == NULL) {
unsigned tcache_ind = mallocx_tcache_get(flags);
tcache_t *tcache = tcache_get_from_ind(tsd,
tcache_ind, /* slow */ true,
/* is_alloc */ true);
if (tcache != NULL) {
tcache_t *tcache = tcache_get_from_ind(tsd, tcache_ind,
/* slow */ true, /* is_alloc */ true);
if (likely(tcache != NULL &&
ind < tcache_nhbins_get(tcache)) && progress < batch) {
if (bin == NULL) {
bin = &tcache->bins[ind];
}
}
/*
* If we don't have a tcache bin, we don't want to
* immediately give up, because there's the possibility


@ -12,7 +12,7 @@
bool opt_tcache = true;
/* tcache_maxclass is set to 32KB by default. */
/* global_do_not_change_tcache_maxclass is set to 32KB by default. */
size_t opt_tcache_max = ((size_t)1) << 15;
/* Reasonable defaults for min and max values. */
@ -57,16 +57,18 @@ size_t opt_tcache_gc_delay_bytes = 0;
unsigned opt_lg_tcache_flush_small_div = 1;
unsigned opt_lg_tcache_flush_large_div = 1;
cache_bin_info_t *tcache_bin_info;
/* Total stack size required (per tcache). Include the padding above. */
static size_t tcache_bin_alloc_size;
static size_t tcache_bin_alloc_alignment;
/* Number of cache bins enabled, including both large and small. */
unsigned nhbins;
/* Max size class to be cached (can be small or large). */
size_t tcache_maxclass;
/*
* Number of cache bins enabled, including both large and small. This value
* is only used to initialize tcache_nhbins in the per-thread tcache.
* Directly modifying it will not affect threads already launched.
*/
unsigned global_do_not_change_nhbins;
/*
* Max size class to be cached (can be small or large). This value is only used
* to initialize tcache_max in the per-thread tcache. Directly modifying it
* will not affect threads already launched.
*/
size_t global_do_not_change_tcache_maxclass;
tcaches_t *tcaches;
@ -127,9 +129,9 @@ tcache_gc_small(tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache,
cache_bin_t *cache_bin = &tcache->bins[szind];
cache_bin_sz_t ncached = cache_bin_ncached_get_local(cache_bin,
&tcache_bin_info[szind]);
&cache_bin->bin_info);
cache_bin_sz_t low_water = cache_bin_low_water_get(cache_bin,
&tcache_bin_info[szind]);
&cache_bin->bin_info);
assert(!tcache_slow->bin_refilled[szind]);
size_t nflush = low_water - (low_water >> 2);
@ -152,7 +154,7 @@ tcache_gc_small(tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache,
* Reduce fill count by 2X. Limit lg_fill_div such that
* the fill count is always at least 1.
*/
if ((cache_bin_info_ncached_max(&tcache_bin_info[szind])
if ((cache_bin_info_ncached_max(&cache_bin->bin_info)
>> (tcache_slow->lg_fill_div[szind] + 1)) >= 1) {
tcache_slow->lg_fill_div[szind]++;
}
@ -165,9 +167,9 @@ tcache_gc_large(tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache,
assert(szind >= SC_NBINS);
cache_bin_t *cache_bin = &tcache->bins[szind];
cache_bin_sz_t ncached = cache_bin_ncached_get_local(cache_bin,
&tcache_bin_info[szind]);
&cache_bin->bin_info);
cache_bin_sz_t low_water = cache_bin_low_water_get(cache_bin,
&tcache_bin_info[szind]);
&cache_bin->bin_info);
tcache_bin_flush_large(tsd, tcache, cache_bin, szind,
(unsigned)(ncached - low_water + (low_water >> 2)));
}
@ -187,7 +189,7 @@ tcache_event(tsd_t *tsd) {
tcache_bin_flush_stashed(tsd, tcache, cache_bin, szind, is_small);
cache_bin_sz_t low_water = cache_bin_low_water_get(cache_bin,
&tcache_bin_info[szind]);
&cache_bin->bin_info);
if (low_water > 0) {
if (is_small) {
tcache_gc_small(tsd, tcache_slow, tcache, szind);
@ -208,7 +210,7 @@ tcache_event(tsd_t *tsd) {
cache_bin_low_water_set(cache_bin);
tcache_slow->next_gc_bin++;
if (tcache_slow->next_gc_bin == nhbins) {
if (tcache_slow->next_gc_bin == tcache_nhbins_get(tcache)) {
tcache_slow->next_gc_bin = 0;
}
}
@ -233,10 +235,10 @@ tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena,
void *ret;
assert(tcache_slow->arena != NULL);
unsigned nfill = cache_bin_info_ncached_max(&tcache_bin_info[binind])
unsigned nfill = cache_bin_info_ncached_max(&cache_bin->bin_info)
>> tcache_slow->lg_fill_div[binind];
arena_cache_bin_fill_small(tsdn, arena, cache_bin,
&tcache_bin_info[binind], binind, nfill);
&cache_bin->bin_info, binind, nfill);
tcache_slow->bin_refilled[binind] = true;
ret = cache_bin_alloc(cache_bin, tcache_success);
@ -318,7 +320,7 @@ tcache_bin_flush_impl(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin,
if (small) {
assert(binind < SC_NBINS);
} else {
assert(binind < nhbins);
assert(binind < tcache_nhbins_get(tcache));
}
arena_t *tcache_arena = tcache_slow->arena;
assert(tcache_arena != NULL);
@ -508,18 +510,18 @@ tcache_bin_flush_bottom(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin,
tcache_bin_flush_stashed(tsd, tcache, cache_bin, binind, small);
cache_bin_sz_t ncached = cache_bin_ncached_get_local(cache_bin,
&tcache_bin_info[binind]);
&cache_bin->bin_info);
assert((cache_bin_sz_t)rem <= ncached);
unsigned nflush = ncached - rem;
CACHE_BIN_PTR_ARRAY_DECLARE(ptrs, nflush);
cache_bin_init_ptr_array_for_flush(cache_bin, &tcache_bin_info[binind],
cache_bin_init_ptr_array_for_flush(cache_bin, &cache_bin->bin_info,
&ptrs, nflush);
tcache_bin_flush_impl(tsd, tcache, cache_bin, binind, &ptrs, nflush,
small);
cache_bin_finish_flush(cache_bin, &tcache_bin_info[binind], &ptrs,
cache_bin_finish_flush(cache_bin, &cache_bin->bin_info, &ptrs,
ncached - rem);
}
@ -548,7 +550,7 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin,
void
tcache_bin_flush_stashed(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin,
szind_t binind, bool is_small) {
cache_bin_info_t *info = &tcache_bin_info[binind];
cache_bin_info_t *info = &cache_bin->bin_info;
/*
* The two below are for assertion only. The contents of the original
* cached items remain unchanged -- the stashed items reside on the other end
@ -633,15 +635,31 @@ tcache_arena_reassociate(tsdn_t *tsdn, tcache_slow_t *tcache_slow,
tcache_arena_associate(tsdn, tcache_slow, tcache, arena);
}
static void
tcache_max_and_nhbins_init(tcache_t *tcache) {
assert(tcache != NULL);
assert(global_do_not_change_tcache_maxclass != 0);
assert(global_do_not_change_nhbins != 0);
tcache->tcache_max = global_do_not_change_tcache_maxclass;
tcache->tcache_nhbins = global_do_not_change_nhbins;
assert(tcache->tcache_nhbins == sz_size2index(tcache->tcache_max) + 1);
}
bool
tsd_tcache_enabled_data_init(tsd_t *tsd) {
/* Called upon tsd initialization. */
tsd_tcache_enabled_set(tsd, opt_tcache);
/*
* tcache is not available yet, but we need to set up its tcache_max
* and tcache_nhbins in advance.
*/
tcache_t *tcache = tsd_tcachep_get(tsd);
tcache_max_and_nhbins_init(tcache);
tsd_slow_update(tsd);
if (opt_tcache) {
/* Trigger tcache init. */
tsd_tcache_data_init(tsd);
tsd_tcache_data_init(tsd, NULL);
}
return false;
@ -649,7 +667,7 @@ tsd_tcache_enabled_data_init(tsd_t *tsd) {
static void
tcache_init(tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache,
void *mem) {
void *mem, cache_bin_info_t *tcache_bin_info) {
tcache->tcache_slow = tcache_slow;
tcache_slow->tcache = tcache;
@ -660,17 +678,19 @@ tcache_init(tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache,
/*
* We reserve cache bins for all small size classes, even if some may
* not get used (i.e. bins higher than nhbins). This allows the fast
* and common paths to access cache bin metadata safely w/o worrying
* about which ones are disabled.
* not get used (i.e. bins higher than tcache_nhbins). This allows
* the fast and common paths to access cache bin metadata safely w/o
* worrying about which ones are disabled.
*/
unsigned n_reserved_bins = nhbins < SC_NBINS ? SC_NBINS : nhbins;
unsigned tcache_nhbins = tcache_nhbins_get(tcache);
unsigned n_reserved_bins = tcache_nhbins < SC_NBINS ? SC_NBINS
: tcache_nhbins;
memset(tcache->bins, 0, sizeof(cache_bin_t) * n_reserved_bins);
size_t cur_offset = 0;
cache_bin_preincrement(tcache_bin_info, nhbins, mem,
cache_bin_preincrement(tcache_bin_info, tcache_nhbins, mem,
&cur_offset);
for (unsigned i = 0; i < nhbins; i++) {
for (unsigned i = 0; i < tcache_nhbins; i++) {
if (i < SC_NBINS) {
tcache_slow->lg_fill_div[i] = 1;
tcache_slow->bin_refilled[i] = false;
@ -682,12 +702,12 @@ tcache_init(tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache,
&cur_offset);
}
/*
* For small size classes beyond tcache_maxclass (i.e. nhbins < NBINS),
* their cache bins are initialized to a state to safely and efficiently
* fail all fastpath alloc / free, so that no additional check around
* nhbins is needed on fastpath.
* For small size classes beyond tcache_max (i.e. when
* tcache_nhbins < SC_NBINS), the cache bins are initialized to a state
* that safely and efficiently fails all fastpath alloc / free, so that
* no additional check around tcache_nhbins is needed on the fastpath.
*/
for (unsigned i = nhbins; i < SC_NBINS; i++) {
for (unsigned i = tcache_nhbins; i < SC_NBINS; i++) {
/* Disabled small bins. */
cache_bin_t *cache_bin = &tcache->bins[i];
void *fake_stack = mem;
@ -699,19 +719,102 @@ tcache_init(tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache,
}
cache_bin_postincrement(mem, &cur_offset);
if (config_debug) {
/* Sanity check that the whole stack is used. */
assert(cur_offset == tcache_bin_alloc_size);
size_t size, alignment;
cache_bin_info_compute_alloc(tcache_bin_info, tcache_nhbins,
&size, &alignment);
assert(cur_offset == size);
}
}
static inline unsigned
tcache_ncached_max_compute(szind_t szind, unsigned current_nhbins) {
if (szind >= SC_NBINS) {
assert(szind < current_nhbins);
return opt_tcache_nslots_large;
}
unsigned slab_nregs = bin_infos[szind].nregs;
/* We may modify these values; start with the opt versions. */
unsigned nslots_small_min = opt_tcache_nslots_small_min;
unsigned nslots_small_max = opt_tcache_nslots_small_max;
/*
* Clamp values to meet our constraints -- even, nonzero, min < max, and
* suitable for a cache bin size.
*/
if (opt_tcache_nslots_small_max > CACHE_BIN_NCACHED_MAX) {
nslots_small_max = CACHE_BIN_NCACHED_MAX;
}
if (nslots_small_min % 2 != 0) {
nslots_small_min++;
}
if (nslots_small_max % 2 != 0) {
nslots_small_max--;
}
if (nslots_small_min < 2) {
nslots_small_min = 2;
}
if (nslots_small_max < 2) {
nslots_small_max = 2;
}
if (nslots_small_min > nslots_small_max) {
nslots_small_min = nslots_small_max;
}
unsigned candidate;
if (opt_lg_tcache_nslots_mul < 0) {
candidate = slab_nregs >> (-opt_lg_tcache_nslots_mul);
} else {
candidate = slab_nregs << opt_lg_tcache_nslots_mul;
}
if (candidate % 2 != 0) {
/*
* We need the candidate size to be even -- we assume that we
* can divide by two and get a positive number (e.g. when
* flushing).
*/
++candidate;
}
if (candidate <= nslots_small_min) {
return nslots_small_min;
} else if (candidate <= nslots_small_max) {
return candidate;
} else {
return nslots_small_max;
}
}
static void
tcache_bin_info_compute(cache_bin_info_t *tcache_bin_info,
unsigned tcache_nhbins) {
for (szind_t i = 0; i < tcache_nhbins; i++) {
unsigned ncached_max = tcache_ncached_max_compute(i,
tcache_nhbins);
cache_bin_info_init(&tcache_bin_info[i], ncached_max);
}
for (szind_t i = tcache_nhbins; i < SC_NBINS; i++) {
/* Disabled small bins. */
cache_bin_info_init(&tcache_bin_info[i], 0);
}
}
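
For intuition, a worked example of tcache_ncached_max_compute above, assuming the documented option defaults (tcache_nslots_small_min = 20, tcache_nslots_small_max = 200, lg_tcache_nslots_mul = 1, tcache_nslots_large = 20); actual values are build- and option-dependent:

/*
 * Small bin whose slab holds 64 regions:
 *   candidate = 64 << 1 = 128   (even, within [20, 200])
 *   => ncached_max = 128
 * Small bin whose slab holds 512 regions:
 *   candidate = 512 << 1 = 1024 (> 200)
 *   => ncached_max = 200 (clamped to nslots_small_max)
 * Large bin (szind >= SC_NBINS):
 *   => ncached_max = opt_tcache_nslots_large = 20
 */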
/* Initialize auto tcache (embedded in TSD). */
bool
tsd_tcache_data_init(tsd_t *tsd) {
tsd_tcache_data_init(tsd_t *tsd, arena_t *arena) {
tcache_slow_t *tcache_slow = tsd_tcache_slowp_get_unsafe(tsd);
tcache_t *tcache = tsd_tcachep_get_unsafe(tsd);
assert(cache_bin_still_zero_initialized(&tcache->bins[0]));
size_t alignment = tcache_bin_alloc_alignment;
size_t size = sz_sa2u(tcache_bin_alloc_size, alignment);
unsigned tcache_nhbins = tcache_nhbins_get(tcache);
size_t size, alignment;
/* Takes 146B stack space. */
cache_bin_info_t tcache_bin_info[TCACHE_NBINS_MAX] = {0};
tcache_bin_info_compute(tcache_bin_info, tcache_nhbins);
cache_bin_info_compute_alloc(tcache_bin_info, tcache_nhbins,
&size, &alignment);
size = sz_sa2u(size, alignment);
void *mem = ipallocztm(tsd_tsdn(tsd), size, alignment, true, NULL,
true, arena_get(TSDN_NULL, 0, true));
@ -719,7 +822,7 @@ tsd_tcache_data_init(tsd_t *tsd) {
return true;
}
tcache_init(tsd, tcache_slow, tcache, mem);
tcache_init(tsd, tcache_slow, tcache, mem, tcache_bin_info);
/*
* Initialization is a bit tricky here. After malloc init is done, all
* threads can rely on arena_choose and associate tcache accordingly.
@ -729,14 +832,15 @@ tsd_tcache_data_init(tsd_t *tsd) {
* arena_choose_hard() will re-associate properly.
*/
tcache_slow->arena = NULL;
arena_t *arena;
if (!malloc_initialized()) {
/* If in initialization, assign to a0. */
arena = arena_get(tsd_tsdn(tsd), 0, false);
tcache_arena_associate(tsd_tsdn(tsd), tcache_slow, tcache,
arena);
} else {
if (arena == NULL) {
arena = arena_choose(tsd, NULL);
}
/* This may happen if thread.tcache.enabled is used. */
if (tcache_slow->arena == NULL) {
tcache_arena_associate(tsd_tsdn(tsd), tcache_slow,
@ -756,21 +860,29 @@ tcache_create_explicit(tsd_t *tsd) {
* the beginning of the whole allocation (for freeing). This makes sure
* the cache bins have the requested alignment.
*/
size_t size = tcache_bin_alloc_size + sizeof(tcache_t)
unsigned tcache_nhbins = global_do_not_change_nhbins;
size_t tcache_size, alignment;
cache_bin_info_t tcache_bin_info[TCACHE_NBINS_MAX] = {0};
tcache_bin_info_compute(tcache_bin_info, tcache_nhbins);
cache_bin_info_compute_alloc(tcache_bin_info, tcache_nhbins,
&tcache_size, &alignment);
size_t size = tcache_size + sizeof(tcache_t)
+ sizeof(tcache_slow_t);
/* Naturally align the pointer stacks. */
size = PTR_CEILING(size);
size = sz_sa2u(size, tcache_bin_alloc_alignment);
size = sz_sa2u(size, alignment);
void *mem = ipallocztm(tsd_tsdn(tsd), size, tcache_bin_alloc_alignment,
void *mem = ipallocztm(tsd_tsdn(tsd), size, alignment,
true, NULL, true, arena_get(TSDN_NULL, 0, true));
if (mem == NULL) {
return NULL;
}
tcache_t *tcache = (void *)((byte_t *)mem + tcache_bin_alloc_size);
tcache_t *tcache = (void *)((byte_t *)mem + tcache_size);
tcache_slow_t *tcache_slow =
(void *)((byte_t *)mem + tcache_bin_alloc_size + sizeof(tcache_t));
tcache_init(tsd, tcache_slow, tcache, mem);
(void *)((byte_t *)mem + tcache_size + sizeof(tcache_t));
tcache_max_and_nhbins_init(tcache);
tcache_init(tsd, tcache_slow, tcache, mem, tcache_bin_info);
tcache_arena_associate(tsd_tsdn(tsd), tcache_slow, tcache,
arena_ichoose(tsd, NULL));
@ -783,7 +895,7 @@ tcache_flush_cache(tsd_t *tsd, tcache_t *tcache) {
tcache_slow_t *tcache_slow = tcache->tcache_slow;
assert(tcache_slow->arena != NULL);
for (unsigned i = 0; i < nhbins; i++) {
for (unsigned i = 0; i < tcache_nhbins_get(tcache); i++) {
cache_bin_t *cache_bin = &tcache->bins[i];
if (i < SC_NBINS) {
tcache_bin_flush_small(tsd, tcache, cache_bin, i, 0);
@ -811,7 +923,7 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache, bool tsd_tcache) {
if (tsd_tcache) {
cache_bin_t *cache_bin = &tcache->bins[0];
cache_bin_assert_empty(cache_bin, &tcache_bin_info[0]);
cache_bin_assert_empty(cache_bin, &cache_bin->bin_info);
}
idalloctm(tsd_tsdn(tsd), tcache_slow->dyn_alloc, NULL, NULL, true,
true);
@ -849,13 +961,9 @@ tcache_cleanup(tsd_t *tsd) {
assert(!cache_bin_still_zero_initialized(&tcache->bins[0]));
tcache_destroy(tsd, tcache, true);
if (config_debug) {
/*
* For debug testing only, we want to pretend we're still in the
* zero-initialized state.
*/
memset(tcache->bins, 0, sizeof(cache_bin_t) * nhbins);
}
/* Make sure all bins used are reinitialized to the clean state. */
memset(tcache->bins, 0, sizeof(cache_bin_t) *
tcache_nhbins_get(tcache));
}
void
@ -863,7 +971,7 @@ tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
cassert(config_stats);
/* Merge and reset tcache stats. */
for (unsigned i = 0; i < nhbins; i++) {
for (unsigned i = 0; i < tcache_nhbins_get(tcache); i++) {
cache_bin_t *cache_bin = &tcache->bins[i];
if (i < SC_NBINS) {
bin_t *bin = arena_bin_choose(tsdn, arena, i, NULL);
@ -986,97 +1094,18 @@ tcaches_destroy(tsd_t *tsd, unsigned ind) {
}
}
static unsigned
tcache_ncached_max_compute(szind_t szind) {
if (szind >= SC_NBINS) {
assert(szind < nhbins);
return opt_tcache_nslots_large;
}
unsigned slab_nregs = bin_infos[szind].nregs;
/* We may modify these values; start with the opt versions. */
unsigned nslots_small_min = opt_tcache_nslots_small_min;
unsigned nslots_small_max = opt_tcache_nslots_small_max;
/*
* Clamp values to meet our constraints -- even, nonzero, min < max, and
* suitable for a cache bin size.
*/
if (opt_tcache_nslots_small_max > CACHE_BIN_NCACHED_MAX) {
nslots_small_max = CACHE_BIN_NCACHED_MAX;
}
if (nslots_small_min % 2 != 0) {
nslots_small_min++;
}
if (nslots_small_max % 2 != 0) {
nslots_small_max--;
}
if (nslots_small_min < 2) {
nslots_small_min = 2;
}
if (nslots_small_max < 2) {
nslots_small_max = 2;
}
if (nslots_small_min > nslots_small_max) {
nslots_small_min = nslots_small_max;
}
unsigned candidate;
if (opt_lg_tcache_nslots_mul < 0) {
candidate = slab_nregs >> (-opt_lg_tcache_nslots_mul);
} else {
candidate = slab_nregs << opt_lg_tcache_nslots_mul;
}
if (candidate % 2 != 0) {
/*
* We need the candidate size to be even -- we assume that we
* can divide by two and get a positive number (e.g. when
* flushing).
*/
++candidate;
}
if (candidate <= nslots_small_min) {
return nslots_small_min;
} else if (candidate <= nslots_small_max) {
return candidate;
} else {
return nslots_small_max;
}
}
bool
tcache_boot(tsdn_t *tsdn, base_t *base) {
tcache_maxclass = sz_s2u(opt_tcache_max);
assert(tcache_maxclass <= TCACHE_MAXCLASS_LIMIT);
nhbins = sz_size2index(tcache_maxclass) + 1;
global_do_not_change_tcache_maxclass = sz_s2u(opt_tcache_max);
assert(global_do_not_change_tcache_maxclass <= TCACHE_MAXCLASS_LIMIT);
global_do_not_change_nhbins =
sz_size2index(global_do_not_change_tcache_maxclass) + 1;
if (malloc_mutex_init(&tcaches_mtx, "tcaches", WITNESS_RANK_TCACHES,
malloc_mutex_rank_exclusive)) {
return true;
}
/* Initialize tcache_bin_info. See comments in tcache_init(). */
unsigned n_reserved_bins = nhbins < SC_NBINS ? SC_NBINS : nhbins;
size_t size = n_reserved_bins * sizeof(cache_bin_info_t);
tcache_bin_info = (cache_bin_info_t *)base_alloc(tsdn, base, size,
CACHELINE);
if (tcache_bin_info == NULL) {
return true;
}
for (szind_t i = 0; i < nhbins; i++) {
unsigned ncached_max = tcache_ncached_max_compute(i);
cache_bin_info_init(&tcache_bin_info[i], ncached_max);
}
for (szind_t i = nhbins; i < SC_NBINS; i++) {
/* Disabled small bins. */
cache_bin_info_init(&tcache_bin_info[i], 0);
assert(tcache_small_bin_disabled(i, NULL));
}
cache_bin_info_compute_alloc(tcache_bin_info, nhbins,
&tcache_bin_alloc_size, &tcache_bin_alloc_alignment);
return false;
}


@ -168,7 +168,7 @@ TEST_BEGIN(test_batch_alloc_large) {
assert_zu_eq(filled, batch, "");
release_batch(global_ptrs, batch, size);
}
size = tcache_maxclass + 1;
size = global_do_not_change_tcache_maxclass + 1;
for (size_t batch = 0; batch < 4; ++batch) {
assert(batch < BATCH_MAX);
size_t filled = batch_alloc(global_ptrs, batch, size, 0);


@ -18,11 +18,10 @@ enum {
dalloc_option_end
};
static unsigned alloc_option, dalloc_option;
static size_t tcache_max;
static bool global_test;
static void *
alloc_func(size_t sz) {
alloc_func(size_t sz, unsigned alloc_option) {
void *ret;
switch (alloc_option) {
@ -41,7 +40,7 @@ alloc_func(size_t sz) {
}
static void
dalloc_func(void *ptr, size_t sz) {
dalloc_func(void *ptr, size_t sz, unsigned dalloc_option) {
switch (dalloc_option) {
case use_free:
free(ptr);
@ -58,10 +57,10 @@ dalloc_func(void *ptr, size_t sz) {
}
static size_t
tcache_bytes_read(void) {
tcache_bytes_read_global(void) {
uint64_t epoch;
assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
0, "Unexpected mallctl() failure");
assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
sizeof(epoch)), 0, "Unexpected mallctl() failure");
size_t tcache_bytes;
size_t sz = sizeof(tcache_bytes);
@ -72,16 +71,30 @@ tcache_bytes_read(void) {
return tcache_bytes;
}
static size_t
tcache_bytes_read_local(void) {
size_t tcache_bytes = 0;
tsd_t *tsd = tsd_fetch();
tcache_t *tcache = tcache_get(tsd);
for (szind_t i = 0; i < tcache_nhbins_get(tcache); i++) {
cache_bin_t *cache_bin = &tcache->bins[i];
cache_bin_sz_t ncached = cache_bin_ncached_get_local(cache_bin,
&cache_bin->bin_info);
tcache_bytes += ncached * sz_index2size(i);
}
return tcache_bytes;
}
static void
tcache_bytes_check_update(size_t *prev, ssize_t diff) {
size_t tcache_bytes = tcache_bytes_read();
size_t tcache_bytes = global_test ? tcache_bytes_read_global() :
tcache_bytes_read_local();
expect_zu_eq(tcache_bytes, *prev + diff, "tcache bytes not expected");
*prev += diff;
}
static void
test_tcache_bytes_alloc(size_t alloc_size) {
test_tcache_bytes_alloc(size_t alloc_size, size_t tcache_max,
unsigned alloc_option, unsigned dalloc_option) {
expect_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0), 0,
"Unexpected tcache flush failure");
@ -90,65 +103,82 @@ test_tcache_bytes_alloc(size_t alloc_size) {
bool cached = (usize <= tcache_max);
ssize_t diff = cached ? usize : 0;
void *ptr1 = alloc_func(alloc_size);
void *ptr2 = alloc_func(alloc_size);
void *ptr1 = alloc_func(alloc_size, alloc_option);
void *ptr2 = alloc_func(alloc_size, alloc_option);
size_t bytes = tcache_bytes_read();
dalloc_func(ptr2, alloc_size);
size_t bytes = global_test ? tcache_bytes_read_global() :
tcache_bytes_read_local();
dalloc_func(ptr2, alloc_size, dalloc_option);
/* Expect tcache_bytes increase after dalloc */
tcache_bytes_check_update(&bytes, diff);
dalloc_func(ptr1, alloc_size, dalloc_option);
dalloc_func(ptr1, alloc_size, alloc_option);
/* Expect tcache_bytes increase again */
tcache_bytes_check_update(&bytes, diff);
void *ptr3 = alloc_func(alloc_size);
void *ptr3 = alloc_func(alloc_size, alloc_option);
if (cached) {
expect_ptr_eq(ptr1, ptr3, "Unexpected cached ptr");
}
/* Expect tcache_bytes decrease after alloc */
tcache_bytes_check_update(&bytes, -diff);
void *ptr4 = alloc_func(alloc_size);
void *ptr4 = alloc_func(alloc_size, alloc_option);
if (cached) {
expect_ptr_eq(ptr2, ptr4, "Unexpected cached ptr");
}
/* Expect tcache_bytes decrease again */
tcache_bytes_check_update(&bytes, -diff);
dalloc_func(ptr3, alloc_size);
dalloc_func(ptr3, alloc_size, dalloc_option);
tcache_bytes_check_update(&bytes, diff);
dalloc_func(ptr4, alloc_size);
dalloc_func(ptr4, alloc_size, dalloc_option);
tcache_bytes_check_update(&bytes, diff);
}
static void
test_tcache_max_impl(void) {
size_t sz;
test_tcache_max_impl(size_t target_tcache_max, unsigned alloc_option,
unsigned dalloc_option) {
size_t tcache_max, sz;
sz = sizeof(tcache_max);
if (global_test) {
assert_d_eq(mallctl("arenas.tcache_max", (void *)&tcache_max,
&sz, NULL, 0), 0, "Unexpected mallctl() failure");
expect_zu_eq(tcache_max, target_tcache_max,
"Global tcache_max not expected");
} else {
assert_d_eq(mallctl("thread.tcache.max",
(void *)&tcache_max, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
expect_zu_eq(tcache_max, target_tcache_max,
"Current thread's tcache_max not expected");
}
test_tcache_bytes_alloc(1, tcache_max, alloc_option, dalloc_option);
test_tcache_bytes_alloc(tcache_max - 1, tcache_max, alloc_option,
dalloc_option);
test_tcache_bytes_alloc(tcache_max, tcache_max, alloc_option,
dalloc_option);
test_tcache_bytes_alloc(tcache_max + 1, tcache_max, alloc_option,
dalloc_option);
/* opt.tcache_max set to 1024 in tcache_max.sh */
expect_zu_eq(tcache_max, 1024, "tcache_max not expected");
test_tcache_bytes_alloc(1);
test_tcache_bytes_alloc(tcache_max - 1);
test_tcache_bytes_alloc(tcache_max);
test_tcache_bytes_alloc(tcache_max + 1);
test_tcache_bytes_alloc(PAGE - 1);
test_tcache_bytes_alloc(PAGE);
test_tcache_bytes_alloc(PAGE + 1);
test_tcache_bytes_alloc(PAGE - 1, tcache_max, alloc_option,
dalloc_option);
test_tcache_bytes_alloc(PAGE, tcache_max, alloc_option,
dalloc_option);
test_tcache_bytes_alloc(PAGE + 1, tcache_max, alloc_option,
dalloc_option);
size_t large;
sz = sizeof(large);
assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&large, &sz, NULL,
0), 0, "Unexpected mallctl() failure");
test_tcache_bytes_alloc(large - 1);
test_tcache_bytes_alloc(large);
test_tcache_bytes_alloc(large + 1);
test_tcache_bytes_alloc(large - 1, tcache_max, alloc_option,
dalloc_option);
test_tcache_bytes_alloc(large, tcache_max, alloc_option,
dalloc_option);
test_tcache_bytes_alloc(large + 1, tcache_max, alloc_option,
dalloc_option);
}
TEST_BEGIN(test_tcache_max) {
@ -157,26 +187,157 @@ TEST_BEGIN(test_tcache_max) {
test_skip_if(opt_prof);
test_skip_if(san_uaf_detection_enabled());
unsigned arena_ind;
unsigned arena_ind, alloc_option, dalloc_option;
size_t sz = sizeof(arena_ind);
expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
0, "Unexpected mallctl() failure");
expect_d_eq(mallctl("thread.arena", NULL, NULL, &arena_ind,
sizeof(arena_ind)), 0, "Unexpected mallctl() failure");
global_test = true;
for (alloc_option = alloc_option_start;
alloc_option < alloc_option_end;
alloc_option++) {
for (dalloc_option = dalloc_option_start;
dalloc_option < dalloc_option_end;
dalloc_option++) {
test_tcache_max_impl();
/* opt.tcache_max set to 1024 in tcache_max.sh. */
test_tcache_max_impl(1024, alloc_option,
dalloc_option);
}
}
global_test = false;
}
TEST_END
static size_t
tcache_max2nhbins(size_t tcache_max) {
return sz_size2index(tcache_max) + 1;
}
static void *
tcache_check(void *arg) {
size_t old_tcache_max, new_tcache_max, min_tcache_max, sz;
unsigned tcache_nhbins;
tsd_t *tsd = tsd_fetch();
tcache_t *tcache = tsd_tcachep_get(tsd);
sz = sizeof(size_t);
new_tcache_max = *(size_t *)arg;
min_tcache_max = 1;
/*
* Check the default tcache_max and tcache_nhbins of each thread's
* auto tcache.
*/
old_tcache_max = tcache_max_get(tcache);
expect_zu_eq(old_tcache_max, opt_tcache_max,
"Unexpected default value for tcache_max");
tcache_nhbins = tcache_nhbins_get(tcache);
expect_zu_eq(tcache_nhbins, (size_t)global_do_not_change_nhbins,
"Unexpected default value for tcache_nhbins");
/*
* Disable the tcache and test the setter while it is disabled.
* Test an input that is not a valid size class; it should be
* rounded up to a valid size class.
*/
bool e0 = false, e1;
size_t bool_sz = sizeof(bool);
expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e1, &bool_sz,
(void *)&e0, bool_sz), 0, "Unexpected mallctl() error");
expect_true(e1, "Unexpected previous tcache state");
size_t temp_tcache_max = TCACHE_MAXCLASS_LIMIT - 1;
assert_d_eq(mallctl("thread.tcache.max",
NULL, NULL, (void *)&temp_tcache_max, sz), 0,
"Unexpected mallctl() failure");
old_tcache_max = tcache_max_get(tcache);
expect_zu_eq(old_tcache_max, TCACHE_MAXCLASS_LIMIT,
"Unexpected value for tcache_max");
tcache_nhbins = tcache_nhbins_get(tcache);
expect_zu_eq(tcache_nhbins, TCACHE_NBINS_MAX,
"Unexpected value for tcache_nhbins");
assert_d_eq(mallctl("thread.tcache.max",
(void *)&old_tcache_max, &sz,
(void *)&min_tcache_max, sz), 0,
"Unexpected mallctl() failure");
expect_zu_eq(old_tcache_max, TCACHE_MAXCLASS_LIMIT,
"Unexpected value for tcache_max");
/* Enable the tcache; the earlier set should still hold. */
e0 = true;
expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e1, &bool_sz,
(void *)&e0, bool_sz), 0, "Unexpected mallctl() error");
expect_false(e1, "Unexpected previous tcache state");
min_tcache_max = sz_s2u(min_tcache_max);
expect_zu_eq(tcache_max_get(tcache), min_tcache_max,
"Unexpected value for tcache_max");
expect_zu_eq(tcache_nhbins_get(tcache),
tcache_max2nhbins(min_tcache_max), "Unexpected value for nhbins");
assert_d_eq(mallctl("thread.tcache.max",
(void *)&old_tcache_max, &sz,
(void *)&new_tcache_max, sz), 0,
"Unexpected mallctl() failure");
expect_zu_eq(old_tcache_max, min_tcache_max,
"Unexpected value for tcache_max");
/*
* Check the thread's tcache_max and nhbins both through mallctl
* and alloc tests.
*/
if (new_tcache_max > TCACHE_MAXCLASS_LIMIT) {
new_tcache_max = TCACHE_MAXCLASS_LIMIT;
}
old_tcache_max = tcache_max_get(tcache);
expect_zu_eq(old_tcache_max, new_tcache_max,
"Unexpected value for tcache_max");
tcache_nhbins = tcache_nhbins_get(tcache);
expect_zu_eq(tcache_nhbins, tcache_max2nhbins(new_tcache_max),
"Unexpected value for tcache_nhbins");
for (unsigned alloc_option = alloc_option_start;
alloc_option < alloc_option_end;
alloc_option++) {
for (unsigned dalloc_option = dalloc_option_start;
dalloc_option < dalloc_option_end;
dalloc_option++) {
test_tcache_max_impl(new_tcache_max,
alloc_option, dalloc_option);
}
}
return NULL;
}
TEST_BEGIN(test_thread_tcache_max) {
test_skip_if(!config_stats);
test_skip_if(!opt_tcache);
test_skip_if(opt_prof);
test_skip_if(san_uaf_detection_enabled());
unsigned nthreads = 8;
global_test = false;
VARIABLE_ARRAY(thd_t, threads, nthreads);
VARIABLE_ARRAY(size_t, all_threads_tcache_max, nthreads);
for (unsigned i = 0; i < nthreads; i++) {
all_threads_tcache_max[i] = 1024 * (1 << ((i + 10) % 20));
if (i == nthreads - 1) {
all_threads_tcache_max[i] = UINT_MAX;
}
}
for (unsigned i = 0; i < nthreads; i++) {
thd_create(&threads[i], tcache_check,
&(all_threads_tcache_max[i]));
}
for (unsigned i = 0; i < nthreads; i++) {
thd_join(threads[i], NULL);
}
}
TEST_END
int
main(void) {
return test(test_tcache_max);
return test(
test_tcache_max,
test_thread_tcache_max);
}