Compare commits

..

10 Commits

Author SHA1 Message Date
Shirui Cheng
e4817c8d89 Cleanup cache_bin_info_t* info input args 2023-10-25 10:27:31 -07:00
Qi Wang
3025b021b9 Optimize mutex and bin alignment / locality. 2023-10-23 20:28:26 -07:00
guangli-dai
e2cd27132a Change stack_size assertion back to the more compatible one. 2023-10-23 20:28:26 -07:00
guangli-dai
756d4df2fd Add util.c into vs project file. 2023-10-18 22:11:13 -07:00
Qi Wang
04d1a87b78 Fix a zero-initializer warning on macOS. 2023-10-18 14:12:43 -07:00
guangli-dai
d88fa71bbd Fix nfill = 0 bug when ncached_max is 1 2023-10-18 14:11:46 -07:00
guangli-dai
6fb3b6a8e4 Refactor the tcache initialization
1. Pre-generate all default tcache ncached_max in tcache_boot;
2. Add getters returning default ncached_max and ncached_max_set;
3. Refactor tcache init so that it is always init with a given setting.
2023-10-18 14:11:46 -07:00
guangli-dai
8a22d10b83 Allow setting default ncached_max for each bin through malloc_conf 2023-10-18 14:11:46 -07:00
guangli-dai
867eedfc58 Fix the bug in dalloc promoted allocations.
An allocation small enough will be promoted so that it does not
share an extent with others.  However, when dalloc, such allocations
may not be dalloced as a promoted one if nbins < SC_NBINS.  This
commit fixes the bug.
2023-10-17 14:53:23 -07:00
guangli-dai
630f7de952 Add mallctl to set and get ncached_max of each cache_bin.
1. `thread_tcache_ncached_max_read_sizeclass` allows users to get the
    ncached_max of the bin with the input sizeclass, passed in through
    oldp (will be rounded up if the given size is not an exact bin size).
2. `thread_tcache_ncached_max_write` takes in a char array
    representing the settings for bins in the tcache.
2023-10-17 14:53:23 -07:00
29 changed files with 828 additions and 329 deletions

View File

@ -155,6 +155,7 @@ C_SRCS := $(srcroot)src/jemalloc.c \
$(srcroot)src/thread_event.c \
$(srcroot)src/ticker.c \
$(srcroot)src/tsd.c \
$(srcroot)src/util.c \
$(srcroot)src/witness.c
ifeq ($(enable_zone_allocator), 1)
C_SRCS += $(srcroot)src/zone.c
@ -241,6 +242,7 @@ TESTS_UNIT := \
$(srcroot)test/unit/mq.c \
$(srcroot)test/unit/mtx.c \
$(srcroot)test/unit/nstime.c \
$(srcroot)test/unit/ncached_max.c \
$(srcroot)test/unit/oversize_threshold.c \
$(srcroot)test/unit/pa.c \
$(srcroot)test/unit/pack.c \

View File

@ -63,8 +63,7 @@ void arena_do_deferred_work(tsdn_t *tsdn, arena_t *arena);
void arena_reset(tsd_t *tsd, arena_t *arena);
void arena_destroy(tsd_t *tsd, arena_t *arena);
void arena_cache_bin_fill_small(tsdn_t *tsdn, arena_t *arena,
cache_bin_t *cache_bin, cache_bin_info_t *cache_bin_info, szind_t binind,
const unsigned nfill);
cache_bin_t *cache_bin, szind_t binind, const unsigned nfill);
void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size,
szind_t ind, bool zero, bool slab);

View File

@ -198,7 +198,8 @@ arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
assert(sz_can_use_slab(size));
return tcache_alloc_small(tsdn_tsd(tsdn), arena,
tcache, size, ind, zero, slow_path);
} else if (likely(ind < TCACHE_NBINS_MAX &&
} else if (likely(
ind < tcache_nbins_get(tcache->tcache_slow) &&
!tcache_bin_disabled(ind, &tcache->bins[ind],
tcache->tcache_slow))) {
return tcache_alloc_large(tsdn_tsd(tsdn), arena,
@ -300,18 +301,18 @@ JEMALLOC_ALWAYS_INLINE void
arena_dalloc_large(tsdn_t *tsdn, void *ptr, tcache_t *tcache, szind_t szind,
bool slow_path) {
assert (!tsdn_null(tsdn) && tcache != NULL);
if (szind < TCACHE_NBINS_MAX &&
!tcache_bin_disabled(szind, &tcache->bins[szind],
tcache->tcache_slow)) {
if (config_prof && unlikely(szind < SC_NBINS)) {
bool is_sample_promoted = config_prof && szind < SC_NBINS;
if (unlikely(is_sample_promoted)) {
arena_dalloc_promoted(tsdn, ptr, tcache, slow_path);
} else {
if (szind < tcache_nbins_get(tcache->tcache_slow) &&
!tcache_bin_disabled(szind, &tcache->bins[szind],
tcache->tcache_slow)) {
tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, szind,
slow_path);
}
} else {
edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
ptr);
edata_t *edata = emap_edata_lookup(tsdn,
&arena_emap_global, ptr);
if (large_dalloc_safety_checks(edata, ptr, szind)) {
/* See the comment in isfree. */
return;
@ -319,6 +320,7 @@ arena_dalloc_large(tsdn_t *tsdn, void *ptr, tcache_t *tcache, szind_t szind,
large_dalloc(tsdn, edata);
}
}
}
/* Find the region index of a pointer. */
JEMALLOC_ALWAYS_INLINE size_t

View File

@ -98,9 +98,12 @@ struct arena_s {
/*
* The arena is allocated alongside its bins; really this is a
* dynamically sized array determined by the binshard settings.
* Enforcing cacheline-alignment to minimize the number of cachelines
* touched on the hot paths.
*/
JEMALLOC_WARN_ON_USAGE("Do not use this field directly. "
"Use `arena_get_bin` instead.")
JEMALLOC_ALIGNED(CACHELINE)
bin_t all_bins[0];
};

View File

@ -202,12 +202,17 @@ cache_bin_disabled(cache_bin_t *bin) {
return disabled;
}
/* Gets ncached_max without asserting that the bin is enabled. */
static inline cache_bin_sz_t
cache_bin_ncached_max_get_unsafe(cache_bin_t *bin) {
return bin->bin_info.ncached_max;
}
/* Returns ncached_max: Upper limit on ncached. */
static inline cache_bin_sz_t
cache_bin_info_ncached_max_get(cache_bin_t *bin, cache_bin_info_t *info) {
cache_bin_ncached_max_get(cache_bin_t *bin) {
assert(!cache_bin_disabled(bin));
assert(info == &bin->bin_info);
return info->ncached_max;
return cache_bin_ncached_max_get_unsafe(bin);
}
/*
@ -229,7 +234,7 @@ cache_bin_assert_earlier(cache_bin_t *bin, uint16_t earlier, uint16_t later) {
* Does difference calculations that handle wraparound correctly. Earlier must
* be associated with the position earlier in memory.
*/
static inline uint16_t
static inline cache_bin_sz_t
cache_bin_diff(cache_bin_t *bin, uint16_t earlier, uint16_t later) {
cache_bin_assert_earlier(bin, earlier, later);
return later - earlier;
@ -262,9 +267,9 @@ cache_bin_ncached_get_internal(cache_bin_t *bin) {
* possible.
*/
static inline cache_bin_sz_t
cache_bin_ncached_get_local(cache_bin_t *bin, cache_bin_info_t *info) {
cache_bin_ncached_get_local(cache_bin_t *bin) {
cache_bin_sz_t n = cache_bin_ncached_get_internal(bin);
assert(n <= cache_bin_info_ncached_max_get(bin, info));
assert(n <= cache_bin_ncached_max_get(bin));
return n;
}
@ -299,9 +304,9 @@ cache_bin_empty_position_get(cache_bin_t *bin) {
* arena statistics collection.
*/
static inline uint16_t
cache_bin_low_bits_low_bound_get(cache_bin_t *bin, cache_bin_info_t *info) {
cache_bin_low_bits_low_bound_get(cache_bin_t *bin) {
return (uint16_t)bin->low_bits_empty -
cache_bin_info_ncached_max_get(bin, info) * sizeof(void *);
cache_bin_ncached_max_get(bin) * sizeof(void *);
}
/*
@ -310,8 +315,8 @@ cache_bin_low_bits_low_bound_get(cache_bin_t *bin, cache_bin_info_t *info) {
* A pointer to the position with the lowest address of the backing array.
*/
static inline void **
cache_bin_low_bound_get(cache_bin_t *bin, cache_bin_info_t *info) {
cache_bin_sz_t ncached_max = cache_bin_info_ncached_max_get(bin, info);
cache_bin_low_bound_get(cache_bin_t *bin) {
cache_bin_sz_t ncached_max = cache_bin_ncached_max_get(bin);
void **ret = cache_bin_empty_position_get(bin) - ncached_max;
assert(ret <= bin->stack_head);
@ -323,8 +328,8 @@ cache_bin_low_bound_get(cache_bin_t *bin, cache_bin_info_t *info) {
* batch fill a nonempty cache bin.
*/
static inline void
cache_bin_assert_empty(cache_bin_t *bin, cache_bin_info_t *info) {
assert(cache_bin_ncached_get_local(bin, info) == 0);
cache_bin_assert_empty(cache_bin_t *bin) {
assert(cache_bin_ncached_get_local(bin) == 0);
assert(cache_bin_empty_position_get(bin) == bin->stack_head);
}
@ -341,10 +346,10 @@ cache_bin_low_water_get_internal(cache_bin_t *bin) {
/* Returns the numeric value of low water in [0, ncached]. */
static inline cache_bin_sz_t
cache_bin_low_water_get(cache_bin_t *bin, cache_bin_info_t *info) {
cache_bin_low_water_get(cache_bin_t *bin) {
cache_bin_sz_t low_water = cache_bin_low_water_get_internal(bin);
assert(low_water <= cache_bin_info_ncached_max_get(bin, info));
assert(low_water <= cache_bin_ncached_get_local(bin, info));
assert(low_water <= cache_bin_ncached_max_get(bin));
assert(low_water <= cache_bin_ncached_get_local(bin));
cache_bin_assert_earlier(bin, (uint16_t)(uintptr_t)bin->stack_head,
bin->low_bits_low_water);
@ -525,17 +530,16 @@ cache_bin_stash(cache_bin_t *bin, void *ptr) {
/* Get the number of stashed pointers. */
JEMALLOC_ALWAYS_INLINE cache_bin_sz_t
cache_bin_nstashed_get_internal(cache_bin_t *bin, cache_bin_info_t *info) {
cache_bin_sz_t ncached_max = cache_bin_info_ncached_max_get(bin, info);
uint16_t low_bits_low_bound = cache_bin_low_bits_low_bound_get(bin,
info);
cache_bin_nstashed_get_internal(cache_bin_t *bin) {
cache_bin_sz_t ncached_max = cache_bin_ncached_max_get(bin);
uint16_t low_bits_low_bound = cache_bin_low_bits_low_bound_get(bin);
cache_bin_sz_t n = cache_bin_diff(bin, low_bits_low_bound,
bin->low_bits_full) / sizeof(void *);
assert(n <= ncached_max);
if (config_debug && n != 0) {
/* Below are for assertions only. */
void **low_bound = cache_bin_low_bound_get(bin, info);
void **low_bound = cache_bin_low_bound_get(bin);
assert((uint16_t)(uintptr_t)low_bound == low_bits_low_bound);
void *stashed = *(low_bound + n - 1);
@ -551,9 +555,9 @@ cache_bin_nstashed_get_internal(cache_bin_t *bin, cache_bin_info_t *info) {
}
JEMALLOC_ALWAYS_INLINE cache_bin_sz_t
cache_bin_nstashed_get_local(cache_bin_t *bin, cache_bin_info_t *info) {
cache_bin_sz_t n = cache_bin_nstashed_get_internal(bin, info);
assert(n <= cache_bin_info_ncached_max_get(bin, info));
cache_bin_nstashed_get_local(cache_bin_t *bin) {
cache_bin_sz_t n = cache_bin_nstashed_get_internal(bin);
assert(n <= cache_bin_ncached_max_get(bin));
return n;
}
@ -574,29 +578,26 @@ cache_bin_nstashed_get_local(cache_bin_t *bin, cache_bin_info_t *info) {
* This function should not call other utility functions because the racy
* condition may cause unexpected / undefined behaviors in unverified utility
* functions. Currently, this function calls two utility functions
* cache_bin_info_ncached_max_get and cache_bin_low_bits_low_bound_get because
* cache_bin_ncached_max_get and cache_bin_low_bits_low_bound_get because
* they help access values that will not be concurrently modified.
*/
static inline void
cache_bin_nitems_get_remote(cache_bin_t *bin, cache_bin_info_t *info,
cache_bin_sz_t *ncached, cache_bin_sz_t *nstashed) {
cache_bin_nitems_get_remote(cache_bin_t *bin, cache_bin_sz_t *ncached,
cache_bin_sz_t *nstashed) {
/* Racy version of cache_bin_ncached_get_internal. */
cache_bin_sz_t diff = bin->low_bits_empty -
(uint16_t)(uintptr_t)bin->stack_head;
cache_bin_sz_t n = diff / sizeof(void *);
cache_bin_sz_t ncached_max = cache_bin_info_ncached_max_get(bin, info);
assert(n <= ncached_max);
*ncached = n;
/* Racy version of cache_bin_nstashed_get_internal. */
uint16_t low_bits_low_bound = cache_bin_low_bits_low_bound_get(bin,
info);
uint16_t low_bits_low_bound = cache_bin_low_bits_low_bound_get(bin);
n = (bin->low_bits_full - low_bits_low_bound) / sizeof(void *);
assert(n <= ncached_max);
*nstashed = n;
/* Note that cannot assert ncached + nstashed <= ncached_max (racy). */
/*
* Note that cannot assert anything regarding ncached_max because
* it can be configured on the fly and is thus racy.
*/
}
/*
@ -640,9 +641,9 @@ struct cache_bin_ptr_array_s {
* finish_fill call before doing any alloc/dalloc operations on the bin.
*/
static inline void
cache_bin_init_ptr_array_for_fill(cache_bin_t *bin, cache_bin_info_t *info,
cache_bin_ptr_array_t *arr, cache_bin_sz_t nfill) {
cache_bin_assert_empty(bin, info);
cache_bin_init_ptr_array_for_fill(cache_bin_t *bin, cache_bin_ptr_array_t *arr,
cache_bin_sz_t nfill) {
cache_bin_assert_empty(bin);
arr->ptr = cache_bin_empty_position_get(bin) - nfill;
}
@ -652,9 +653,9 @@ cache_bin_init_ptr_array_for_fill(cache_bin_t *bin, cache_bin_info_t *info,
* case of OOM.
*/
static inline void
cache_bin_finish_fill(cache_bin_t *bin, cache_bin_info_t *info,
cache_bin_ptr_array_t *arr, cache_bin_sz_t nfilled) {
cache_bin_assert_empty(bin, info);
cache_bin_finish_fill(cache_bin_t *bin, cache_bin_ptr_array_t *arr,
cache_bin_sz_t nfilled) {
cache_bin_assert_empty(bin);
void **empty_position = cache_bin_empty_position_get(bin);
if (nfilled < arr->n) {
memmove(empty_position - nfilled, empty_position - arr->n,
@ -668,17 +669,17 @@ cache_bin_finish_fill(cache_bin_t *bin, cache_bin_info_t *info,
* everything we give them.
*/
static inline void
cache_bin_init_ptr_array_for_flush(cache_bin_t *bin, cache_bin_info_t *info,
cache_bin_init_ptr_array_for_flush(cache_bin_t *bin,
cache_bin_ptr_array_t *arr, cache_bin_sz_t nflush) {
arr->ptr = cache_bin_empty_position_get(bin) - nflush;
assert(cache_bin_ncached_get_local(bin, info) == 0
assert(cache_bin_ncached_get_local(bin) == 0
|| *arr->ptr != NULL);
}
static inline void
cache_bin_finish_flush(cache_bin_t *bin, cache_bin_info_t *info,
cache_bin_ptr_array_t *arr, cache_bin_sz_t nflushed) {
unsigned rem = cache_bin_ncached_get_local(bin, info) - nflushed;
cache_bin_finish_flush(cache_bin_t *bin, cache_bin_ptr_array_t *arr,
cache_bin_sz_t nflushed) {
unsigned rem = cache_bin_ncached_get_local(bin) - nflushed;
memmove(bin->stack_head + nflushed, bin->stack_head,
rem * sizeof(void *));
bin->stack_head += nflushed;
@ -687,23 +688,22 @@ cache_bin_finish_flush(cache_bin_t *bin, cache_bin_info_t *info,
static inline void
cache_bin_init_ptr_array_for_stashed(cache_bin_t *bin, szind_t binind,
cache_bin_info_t *info, cache_bin_ptr_array_t *arr,
cache_bin_sz_t nstashed) {
cache_bin_ptr_array_t *arr, cache_bin_sz_t nstashed) {
assert(nstashed > 0);
assert(cache_bin_nstashed_get_local(bin, info) == nstashed);
assert(cache_bin_nstashed_get_local(bin) == nstashed);
void **low_bound = cache_bin_low_bound_get(bin, info);
void **low_bound = cache_bin_low_bound_get(bin);
arr->ptr = low_bound;
assert(*arr->ptr != NULL);
}
static inline void
cache_bin_finish_flush_stashed(cache_bin_t *bin, cache_bin_info_t *info) {
void **low_bound = cache_bin_low_bound_get(bin, info);
cache_bin_finish_flush_stashed(cache_bin_t *bin) {
void **low_bound = cache_bin_low_bound_get(bin);
/* Reset the bin local full position. */
bin->low_bits_full = (uint16_t)(uintptr_t)low_bound;
assert(cache_bin_nstashed_get_local(bin, info) == 0);
assert(cache_bin_nstashed_get_local(bin) == 0);
}
/*
@ -716,8 +716,8 @@ void cache_bin_info_init(cache_bin_info_t *bin_info,
* Given an array of initialized cache_bin_info_ts, determine how big an
* allocation is required to initialize a full set of cache_bin_ts.
*/
void cache_bin_info_compute_alloc(cache_bin_info_t *infos, szind_t ninfos,
size_t *size, size_t *alignment);
void cache_bin_info_compute_alloc(const cache_bin_info_t *infos,
szind_t ninfos, size_t *size, size_t *alignment);
/*
* Actually initialize some cache bins. Callers should allocate the backing
@ -726,11 +726,11 @@ void cache_bin_info_compute_alloc(cache_bin_info_t *infos, szind_t ninfos,
* cache_bin_postincrement. *alloc_cur will then point immediately past the end
* of the allocation.
*/
void cache_bin_preincrement(cache_bin_info_t *infos, szind_t ninfos,
void cache_bin_preincrement(const cache_bin_info_t *infos, szind_t ninfos,
void *alloc, size_t *cur_offset);
void cache_bin_postincrement(void *alloc, size_t *cur_offset);
void cache_bin_init(cache_bin_t *bin, cache_bin_info_t *info, void *alloc,
size_t *cur_offset);
void cache_bin_init(cache_bin_t *bin, const cache_bin_info_t *info,
void *alloc, size_t *cur_offset);
void cache_bin_init_disabled(cache_bin_t *bin, cache_bin_sz_t ncached_max);
bool cache_bin_stack_use_thp(void);

View File

@ -14,6 +14,7 @@
/* Maximum ctl tree depth. */
#define CTL_MAX_DEPTH 7
#define CTL_MULTI_SETTING_MAX_LEN 1000
typedef struct ctl_node_s {
bool named;

View File

@ -37,8 +37,10 @@
/* Various function pointers are static and immutable except during testing. */
#ifdef JEMALLOC_JET
# define JET_MUTABLE
# define JET_EXTERN extern
#else
# define JET_MUTABLE const
# define JET_EXTERN static
#endif
#define JEMALLOC_VA_ARGS_HEAD(head, ...) head

View File

@ -32,6 +32,12 @@ struct malloc_mutex_s {
* unlocking thread).
*/
mutex_prof_data_t prof_data;
/*
* Hint flag to avoid exclusive cache line contention
* during spin waiting. Placed along with prof_data
* since it's always modified even with no contention.
*/
atomic_b_t locked;
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
SRWLOCK lock;
@ -46,11 +52,6 @@ struct malloc_mutex_s {
#else
pthread_mutex_t lock;
#endif
/*
* Hint flag to avoid exclusive cache line contention
* during spin waiting
*/
atomic_b_t locked;
};
/*
* We only touch witness when configured w/ debug. However we
@ -99,21 +100,21 @@ struct malloc_mutex_s {
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
# if defined(JEMALLOC_DEBUG)
# define MALLOC_MUTEX_INITIALIZER \
{{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT, ATOMIC_INIT(false)}}, \
{{{LOCK_PROF_DATA_INITIALIZER, ATOMIC_INIT(false), OS_UNFAIR_LOCK_INIT}}, \
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0}
# else
# define MALLOC_MUTEX_INITIALIZER \
{{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT, ATOMIC_INIT(false)}}, \
{{{LOCK_PROF_DATA_INITIALIZER, ATOMIC_INIT(false), OS_UNFAIR_LOCK_INIT}}, \
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
# endif
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
# if (defined(JEMALLOC_DEBUG))
# define MALLOC_MUTEX_INITIALIZER \
{{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL, ATOMIC_INIT(false)}}, \
{{{LOCK_PROF_DATA_INITIALIZER, ATOMIC_INIT(false), PTHREAD_MUTEX_INITIALIZER, NULL}}, \
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0}
# else
# define MALLOC_MUTEX_INITIALIZER \
{{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL, ATOMIC_INIT(false)}}, \
{{{LOCK_PROF_DATA_INITIALIZER, ATOMIC_INIT(false), PTHREAD_MUTEX_INITIALIZER, NULL}}, \
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
# endif
@ -121,11 +122,11 @@ struct malloc_mutex_s {
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
# if defined(JEMALLOC_DEBUG)
# define MALLOC_MUTEX_INITIALIZER \
{{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, ATOMIC_INIT(false)}}, \
{{{LOCK_PROF_DATA_INITIALIZER, ATOMIC_INIT(false), PTHREAD_MUTEX_INITIALIZER}}, \
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0}
# else
# define MALLOC_MUTEX_INITIALIZER \
{{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, ATOMIC_INIT(false)}}, \
{{{LOCK_PROF_DATA_INITIALIZER, ATOMIC_INIT(false), PTHREAD_MUTEX_INITIALIZER}}, \
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
# endif
#endif

View File

@ -26,7 +26,7 @@ extern unsigned opt_lg_tcache_flush_large_div;
* it should not be changed on the fly. To change the number of tcache bins
* in use, refer to tcache_nbins of each tcache.
*/
extern unsigned global_do_not_change_nbins;
extern unsigned global_do_not_change_tcache_nbins;
/*
* Maximum cached size class. Same as above, this is only used during threads
@ -55,6 +55,11 @@ void tcache_bin_flush_large(tsd_t *tsd, tcache_t *tcache,
cache_bin_t *cache_bin, szind_t binind, unsigned rem);
void tcache_bin_flush_stashed(tsd_t *tsd, tcache_t *tcache,
cache_bin_t *cache_bin, szind_t binind, bool is_small);
bool tcache_bin_info_default_init(const char *bin_settings_segment_cur,
size_t len_left);
bool tcache_bins_ncached_max_write(tsd_t *tsd, char *settings, size_t len);
bool tcache_bin_ncached_max_read(tsd_t *tsd, size_t bin_size,
cache_bin_sz_t *ncached_max);
void tcache_arena_reassociate(tsdn_t *tsdn, tcache_slow_t *tcache_slow,
tcache_t *tcache, arena_t *arena);
tcache_t *tcache_create_explicit(tsd_t *tsd);

View File

@ -46,7 +46,7 @@ tcache_bin_settings_backup(tcache_t *tcache,
cache_bin_info_t tcache_bin_info[TCACHE_NBINS_MAX]) {
for (unsigned i = 0; i < TCACHE_NBINS_MAX; i++) {
cache_bin_info_init(&tcache_bin_info[i],
tcache->bins[i].bin_info.ncached_max);
cache_bin_ncached_max_get_unsafe(&tcache->bins[i]));
}
}
@ -54,6 +54,7 @@ JEMALLOC_ALWAYS_INLINE bool
tcache_bin_disabled(szind_t ind, cache_bin_t *bin,
tcache_slow_t *tcache_slow) {
assert(bin != NULL);
assert(ind < TCACHE_NBINS_MAX);
bool disabled = cache_bin_disabled(bin);
/*
@ -66,7 +67,7 @@ tcache_bin_disabled(szind_t ind, cache_bin_t *bin,
* ind < nbins and ncached_max > 0.
*/
unsigned nbins = tcache_nbins_get(tcache_slow);
cache_bin_sz_t ncached_max = bin->bin_info.ncached_max;
cache_bin_sz_t ncached_max = cache_bin_ncached_max_get_unsafe(bin);
if (ind >= nbins) {
assert(disabled);
} else {
@ -199,8 +200,7 @@ tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
arena_dalloc_small(tsd_tsdn(tsd), ptr);
return;
}
cache_bin_sz_t max = cache_bin_info_ncached_max_get(
bin, &bin->bin_info);
cache_bin_sz_t max = cache_bin_ncached_max_get(bin);
unsigned remain = max >> opt_lg_tcache_flush_small_div;
tcache_bin_flush_small(tsd, tcache, bin, binind, remain);
bool ret = cache_bin_dalloc_easy(bin, ptr);
@ -215,11 +215,13 @@ tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SC_SMALL_MAXCLASS);
assert(tcache_salloc(tsd_tsdn(tsd), ptr) <=
tcache_max_get(tcache->tcache_slow));
assert(!tcache_bin_disabled(binind, &tcache->bins[binind],
tcache->tcache_slow));
cache_bin_t *bin = &tcache->bins[binind];
if (unlikely(!cache_bin_dalloc_easy(bin, ptr))) {
unsigned remain = cache_bin_info_ncached_max_get(
bin, &bin->bin_info) >> opt_lg_tcache_flush_large_div;
unsigned remain = cache_bin_ncached_max_get(bin) >>
opt_lg_tcache_flush_large_div;
tcache_bin_flush_large(tsd, tcache, bin, binind, remain);
bool ret = cache_bin_dalloc_easy(bin, ptr);
assert(ret);

View File

@ -10,7 +10,7 @@ typedef struct tcaches_s tcaches_t;
/* Used in TSD static initializer only. Real init in tsd_tcache_data_init(). */
#define TCACHE_ZERO_INITIALIZER {0}
#define TCACHE_SLOW_ZERO_INITIALIZER {0}
#define TCACHE_SLOW_ZERO_INITIALIZER {{0}}
/* Used in TSD static initializer only. Will be initialized to opt_tcache. */
#define TCACHE_ENABLED_ZERO_INITIALIZER false

View File

@ -130,4 +130,12 @@ util_prefetch_write_range(void *ptr, size_t sz) {
#undef UTIL_INLINE
/*
* Reads the settings in the following format:
* key1-key2:value|key3-key4:value|...
* Note it does not handle the ending '\0'.
*/
bool
multi_setting_parse_next(const char **setting_segment_cur, size_t *len_left,
size_t *key_start, size_t *key_end, size_t *value);
#endif /* JEMALLOC_INTERNAL_UTIL_H */

View File

@ -96,6 +96,7 @@
<ClCompile Include="..\..\..\..\src\thread_event.c" />
<ClCompile Include="..\..\..\..\src\ticker.c" />
<ClCompile Include="..\..\..\..\src\tsd.c" />
<ClCompile Include="..\..\..\..\src\util.c" />
<ClCompile Include="..\..\..\..\src\witness.c" />
</ItemGroup>
<PropertyGroup Label="Globals">

View File

@ -166,6 +166,9 @@
<ClCompile Include="..\..\..\..\src\tsd.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\util.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\witness.c">
<Filter>Source Files</Filter>
</ClCompile>

View File

@ -96,6 +96,7 @@
<ClCompile Include="..\..\..\..\src\thread_event.c" />
<ClCompile Include="..\..\..\..\src\ticker.c" />
<ClCompile Include="..\..\..\..\src\tsd.c" />
<ClCompile Include="..\..\..\..\src\util.c" />
<ClCompile Include="..\..\..\..\src\witness.c" />
</ItemGroup>
<PropertyGroup Label="Globals">

View File

@ -166,6 +166,9 @@
<ClCompile Include="..\..\..\..\src\tsd.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\util.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\witness.c">
<Filter>Source Files</Filter>
</ClCompile>

View File

@ -96,6 +96,7 @@
<ClCompile Include="..\..\..\..\src\thread_event.c" />
<ClCompile Include="..\..\..\..\src\ticker.c" />
<ClCompile Include="..\..\..\..\src\tsd.c" />
<ClCompile Include="..\..\..\..\src\util.c" />
<ClCompile Include="..\..\..\..\src\witness.c" />
</ItemGroup>
<PropertyGroup Label="Globals">

View File

@ -166,6 +166,9 @@
<ClCompile Include="..\..\..\..\src\tsd.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\util.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\witness.c">
<Filter>Source Files</Filter>
</ClCompile>

View File

@ -96,6 +96,7 @@
<ClCompile Include="..\..\..\..\src\thread_event.c" />
<ClCompile Include="..\..\..\..\src\ticker.c" />
<ClCompile Include="..\..\..\..\src\tsd.c" />
<ClCompile Include="..\..\..\..\src\util.c" />
<ClCompile Include="..\..\..\..\src\witness.c" />
</ItemGroup>
<PropertyGroup Label="Globals">

View File

@ -166,6 +166,9 @@
<ClCompile Include="..\..\..\..\src\tsd.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\util.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\witness.c">
<Filter>Source Files</Filter>
</ClCompile>

View File

@ -168,8 +168,7 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
}
cache_bin_sz_t ncached, nstashed;
cache_bin_nitems_get_remote(cache_bin,
&cache_bin->bin_info, &ncached, &nstashed);
cache_bin_nitems_get_remote(cache_bin, &ncached, &nstashed);
astats->tcache_bytes += ncached * sz_index2size(i);
astats->tcache_stashed_bytes += nstashed *
sz_index2size(i);
@ -1020,15 +1019,14 @@ arena_bin_choose(tsdn_t *tsdn, arena_t *arena, szind_t binind,
void
arena_cache_bin_fill_small(tsdn_t *tsdn, arena_t *arena,
cache_bin_t *cache_bin, cache_bin_info_t *cache_bin_info, szind_t binind,
const unsigned nfill) {
assert(cache_bin_ncached_get_local(cache_bin, cache_bin_info) == 0);
cache_bin_t *cache_bin, szind_t binind, const unsigned nfill) {
assert(cache_bin_ncached_get_local(cache_bin) == 0);
assert(nfill != 0);
const bin_info_t *bin_info = &bin_infos[binind];
CACHE_BIN_PTR_ARRAY_DECLARE(ptrs, nfill);
cache_bin_init_ptr_array_for_fill(cache_bin, cache_bin_info, &ptrs,
nfill);
cache_bin_init_ptr_array_for_fill(cache_bin, &ptrs, nfill);
/*
* Bin-local resources are used first: 1) bin->slabcur, and 2) nonfull
* slabs. After both are exhausted, new slabs will be allocated through
@ -1142,7 +1140,7 @@ label_refill:
fresh_slab = NULL;
}
cache_bin_finish_fill(cache_bin, cache_bin_info, &ptrs, filled);
cache_bin_finish_fill(cache_bin, &ptrs, filled);
arena_decay_tick(tsdn, arena);
}
@ -1665,11 +1663,16 @@ arena_new(tsdn_t *tsdn, unsigned ind, const arena_config_t *config) {
}
}
size_t arena_size = sizeof(arena_t) + sizeof(bin_t) * nbins_total;
size_t arena_size = ALIGNMENT_CEILING(sizeof(arena_t), CACHELINE) +
sizeof(bin_t) * nbins_total;
arena = (arena_t *)base_alloc(tsdn, base, arena_size, CACHELINE);
if (arena == NULL) {
goto label_error;
}
JEMALLOC_SUPPRESS_WARN_ON_USAGE(
assert((uintptr_t)&arena->all_bins[nbins_total -1] + sizeof(bin_t) <=
(uintptr_t)arena + arena_size);
)
atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);

View File

@ -10,6 +10,7 @@ const uintptr_t disabled_bin = JUNK_ADDR;
void
cache_bin_info_init(cache_bin_info_t *info,
cache_bin_sz_t ncached_max) {
assert(ncached_max <= CACHE_BIN_NCACHED_MAX);
size_t stack_size = (size_t)ncached_max * sizeof(void *);
assert(stack_size < ((size_t)1 << (sizeof(cache_bin_sz_t) * 8)));
info->ncached_max = (cache_bin_sz_t)ncached_max;
@ -27,7 +28,7 @@ cache_bin_stack_use_thp(void) {
}
void
cache_bin_info_compute_alloc(cache_bin_info_t *infos, szind_t ninfos,
cache_bin_info_compute_alloc(const cache_bin_info_t *infos, szind_t ninfos,
size_t *size, size_t *alignment) {
/* For the total bin stack region (per tcache), reserve 2 more slots so
* that
@ -50,7 +51,7 @@ cache_bin_info_compute_alloc(cache_bin_info_t *infos, szind_t ninfos,
}
void
cache_bin_preincrement(cache_bin_info_t *infos, szind_t ninfos, void *alloc,
cache_bin_preincrement(const cache_bin_info_t *infos, szind_t ninfos, void *alloc,
size_t *cur_offset) {
if (config_debug) {
size_t computed_size;
@ -75,7 +76,7 @@ cache_bin_postincrement(void *alloc, size_t *cur_offset) {
}
void
cache_bin_init(cache_bin_t *bin, cache_bin_info_t *info, void *alloc,
cache_bin_init(cache_bin_t *bin, const cache_bin_info_t *info, void *alloc,
size_t *cur_offset) {
/*
* The full_position points to the lowest available space. Allocations
@ -99,7 +100,7 @@ cache_bin_init(cache_bin_t *bin, cache_bin_info_t *info, void *alloc,
bin->low_bits_full, (uint16_t)(uintptr_t)bin->stack_head);
assert(free_spots == bin_stack_size);
if (!cache_bin_disabled(bin)) {
assert(cache_bin_ncached_get_local(bin, &bin->bin_info) == 0);
assert(cache_bin_ncached_get_local(bin) == 0);
}
assert(cache_bin_empty_position_get(bin) == empty_position);

View File

@ -68,6 +68,8 @@ CTL_PROTO(max_background_threads)
CTL_PROTO(thread_tcache_enabled)
CTL_PROTO(thread_tcache_max)
CTL_PROTO(thread_tcache_flush)
CTL_PROTO(thread_tcache_ncached_max_write)
CTL_PROTO(thread_tcache_ncached_max_read_sizeclass)
CTL_PROTO(thread_peak_read)
CTL_PROTO(thread_peak_reset)
CTL_PROTO(thread_prof_name)
@ -374,10 +376,17 @@ CTL_PROTO(stats_mutexes_reset)
*/
#define INDEX(i) {false}, i##_index
static const ctl_named_node_t thread_tcache_ncached_max_node[] = {
{NAME("read_sizeclass"),
CTL(thread_tcache_ncached_max_read_sizeclass)},
{NAME("write"), CTL(thread_tcache_ncached_max_write)}
};
static const ctl_named_node_t thread_tcache_node[] = {
{NAME("enabled"), CTL(thread_tcache_enabled)},
{NAME("max"), CTL(thread_tcache_max)},
{NAME("flush"), CTL(thread_tcache_flush)}
{NAME("flush"), CTL(thread_tcache_flush)},
{NAME("ncached_max"), CHILD(named, thread_tcache_ncached_max)}
};
static const ctl_named_node_t thread_peak_node[] = {
@ -2282,6 +2291,78 @@ label_return:
CTL_RO_NL_GEN(thread_allocated, tsd_thread_allocated_get(tsd), uint64_t)
CTL_RO_NL_GEN(thread_allocatedp, tsd_thread_allocatedp_get(tsd), uint64_t *)
static int
thread_tcache_ncached_max_read_sizeclass_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp,
size_t newlen) {
int ret;
size_t bin_size = 0;
/* Read the bin size from newp. */
if (newp == NULL) {
ret = EINVAL;
goto label_return;
}
WRITE(bin_size, size_t);
cache_bin_sz_t ncached_max = 0;
if (tcache_bin_ncached_max_read(tsd, bin_size, &ncached_max)) {
ret = EINVAL;
goto label_return;
}
size_t result = (size_t)ncached_max;
READ(result, size_t);
ret = 0;
label_return:
return ret;
}
static int
thread_tcache_ncached_max_write_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp,
size_t newlen) {
int ret;
WRITEONLY();
if (newp != NULL) {
if (!tcache_available(tsd)) {
ret = ENOENT;
goto label_return;
}
char *settings = NULL;
WRITE(settings, char *);
if (settings == NULL) {
ret = EINVAL;
goto label_return;
}
/* Get the length of the setting string safely. */
char *end = (char *)memchr(settings, '\0',
CTL_MULTI_SETTING_MAX_LEN);
if (end == NULL) {
ret = EINVAL;
goto label_return;
}
/*
* Exclude the last '\0' for len since it is not handled by
* multi_setting_parse_next.
*/
size_t len = (uintptr_t)end - (uintptr_t)settings;
if (len == 0) {
ret = 0;
goto label_return;
}
if (tcache_bins_ncached_max_write(tsd, settings, len)) {
ret = EINVAL;
goto label_return;
}
}
ret = 0;
label_return:
return ret;
}
CTL_RO_NL_GEN(thread_deallocated, tsd_thread_deallocated_get(tsd), uint64_t)
CTL_RO_NL_GEN(thread_deallocatedp, tsd_thread_deallocatedp_get(tsd), uint64_t *)
@ -3155,7 +3236,7 @@ CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
CTL_RO_NL_GEN(arenas_tcache_max, global_do_not_change_tcache_maxclass, size_t)
CTL_RO_NL_GEN(arenas_nbins, SC_NBINS, unsigned)
CTL_RO_NL_GEN(arenas_nhbins, global_do_not_change_nbins, unsigned)
CTL_RO_NL_GEN(arenas_nhbins, global_do_not_change_tcache_nbins, unsigned)
CTL_RO_NL_GEN(arenas_bin_i_size, bin_infos[mib[2]].reg_size, size_t)
CTL_RO_NL_GEN(arenas_bin_i_nregs, bin_infos[mib[2]].nregs, uint32_t)
CTL_RO_NL_GEN(arenas_bin_i_slab_size, bin_infos[mib[2]].slab_size, size_t)

View File

@ -821,50 +821,6 @@ init_opt_stats_opts(const char *v, size_t vlen, char *dest) {
assert(opts_len == strlen(dest));
}
/*
 * Reads the next size pair in a multi-sized option.  The expected segment
 * format is "<start>-<end>:<value>", optionally followed by a '|' separator
 * before the next segment.  On success the cursor and remaining length are
 * advanced past the parsed segment (and separator) and false is returned;
 * true is returned on any parse error, leaving the outputs unspecified.
 */
static bool
malloc_conf_multi_sizes_next(const char **slab_size_segment_cur,
    size_t *vlen_left, size_t *slab_start, size_t *slab_end, size_t *new_size) {
	const char *cur = *slab_size_segment_cur;
	char *end;
	uintmax_t um;

	/* Clear errno so a stale value cannot mask/forge a parse failure. */
	set_errno(0);

	/* First number, then '-' */
	um = malloc_strtoumax(cur, &end, 0);
	if (get_errno() != 0 || *end != '-') {
		return true;
	}
	*slab_start = (size_t)um;
	cur = end + 1;

	/* Second number, then ':' */
	um = malloc_strtoumax(cur, &end, 0);
	if (get_errno() != 0 || *end != ':') {
		return true;
	}
	*slab_end = (size_t)um;
	cur = end + 1;

	/* Last number */
	um = malloc_strtoumax(cur, &end, 0);
	if (get_errno() != 0) {
		return true;
	}
	*new_size = (size_t)um;

	/* Consume the separator if there is one. */
	if (*end == '|') {
		end++;
	}

	*vlen_left -= end - *slab_size_segment_cur;
	*slab_size_segment_cur = end;

	return false;
}
static void
malloc_conf_format_error(const char *msg, const char *begin, const char *end) {
size_t len = end - begin + 1;
@ -1351,7 +1307,7 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
size_t size_start;
size_t size_end;
size_t nshards;
bool err = malloc_conf_multi_sizes_next(
bool err = multi_setting_parse_next(
&bin_shards_segment_cur, &vlen_left,
&size_start, &size_end, &nshards);
if (err || bin_update_shard_size(
@ -1366,6 +1322,16 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
} while (vlen_left > 0);
CONF_CONTINUE;
}
if (CONF_MATCH("tcache_ncached_max")) {
bool err = tcache_bin_info_default_init(
v, vlen);
if (err) {
CONF_ERROR("Invalid settings for "
"tcache_ncached_max", k, klen, v,
vlen);
}
CONF_CONTINUE;
}
CONF_HANDLE_INT64_T(opt_mutex_max_spin,
"mutex_max_spin", -1, INT64_MAX, CONF_CHECK_MIN,
CONF_DONT_CHECK_MAX, false);
@ -1613,7 +1579,7 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
size_t slab_start;
size_t slab_end;
size_t pgs;
err = malloc_conf_multi_sizes_next(
err = multi_setting_parse_next(
&slab_size_segment_cur,
&vlen_left, &slab_start, &slab_end,
&pgs);
@ -4140,6 +4106,7 @@ batch_alloc(void **ptrs, size_t num, size_t size, int flags) {
tcache_t *tcache = tcache_get_from_ind(tsd, tcache_ind,
/* slow */ true, /* is_alloc */ true);
if (likely(tcache != NULL &&
ind < tcache_nbins_get(tcache->tcache_slow) &&
!tcache_bin_disabled(ind, &tcache->bins[ind],
tcache->tcache_slow)) && progress < batch) {
if (bin == NULL) {

View File

@ -63,7 +63,7 @@ unsigned opt_lg_tcache_flush_large_div = 1;
* is only used to initialize tcache_nbins in the per-thread tcache.
* Directly modifying it will not affect threads already launched.
*/
unsigned global_do_not_change_nbins;
unsigned global_do_not_change_tcache_nbins;
/*
* Max size class to be cached (can be small or large). This value is only used
* to initialize tcache_max in the per-thread tcache. Directly modifying it
@ -71,6 +71,18 @@ unsigned global_do_not_change_nbins;
*/
size_t global_do_not_change_tcache_maxclass;
/*
* Default bin info for each bin. Will be initialized in malloc_conf_init
* and tcache_boot and should not be modified after that.
*/
static cache_bin_info_t opt_tcache_ncached_max[TCACHE_NBINS_MAX] = {{0}};
/*
* Marks whether a bin's info is set already. This is used in
* tcache_bin_info_compute to avoid overwriting ncached_max specified by
* malloc_conf. It should be set only when parsing malloc_conf.
*/
static bool opt_tcache_ncached_max_set[TCACHE_NBINS_MAX] = {0};
tcaches_t *tcaches;
/* Index of first element within tcaches that has never been used. */
@ -130,10 +142,8 @@ tcache_gc_small(tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache,
cache_bin_t *cache_bin = &tcache->bins[szind];
assert(!tcache_bin_disabled(szind, cache_bin, tcache->tcache_slow));
cache_bin_sz_t ncached = cache_bin_ncached_get_local(cache_bin,
&cache_bin->bin_info);
cache_bin_sz_t low_water = cache_bin_low_water_get(cache_bin,
&cache_bin->bin_info);
cache_bin_sz_t ncached = cache_bin_ncached_get_local(cache_bin);
cache_bin_sz_t low_water = cache_bin_low_water_get(cache_bin);
assert(!tcache_slow->bin_refilled[szind]);
size_t nflush = low_water - (low_water >> 2);
@ -156,8 +166,8 @@ tcache_gc_small(tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache,
* Reduce fill count by 2X. Limit lg_fill_div such that
* the fill count is always at least 1.
*/
if ((cache_bin_info_ncached_max_get(cache_bin, &cache_bin->bin_info)
>> (tcache_slow->lg_fill_div[szind] + 1)) >= 1) {
if ((cache_bin_ncached_max_get(cache_bin) >>
(tcache_slow->lg_fill_div[szind] + 1)) >= 1) {
tcache_slow->lg_fill_div[szind]++;
}
}
@ -169,10 +179,8 @@ tcache_gc_large(tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache,
assert(szind >= SC_NBINS);
cache_bin_t *cache_bin = &tcache->bins[szind];
assert(!tcache_bin_disabled(szind, cache_bin, tcache->tcache_slow));
cache_bin_sz_t ncached = cache_bin_ncached_get_local(cache_bin,
&cache_bin->bin_info);
cache_bin_sz_t low_water = cache_bin_low_water_get(cache_bin,
&cache_bin->bin_info);
cache_bin_sz_t ncached = cache_bin_ncached_get_local(cache_bin);
cache_bin_sz_t low_water = cache_bin_low_water_get(cache_bin);
tcache_bin_flush_large(tsd, tcache, cache_bin, szind,
(unsigned)(ncached - low_water + (low_water >> 2)));
}
@ -193,10 +201,8 @@ tcache_event(tsd_t *tsd) {
goto label_done;
}
tcache_bin_flush_stashed(tsd, tcache, cache_bin, szind,
is_small);
cache_bin_sz_t low_water = cache_bin_low_water_get(cache_bin,
&cache_bin->bin_info);
tcache_bin_flush_stashed(tsd, tcache, cache_bin, szind, is_small);
cache_bin_sz_t low_water = cache_bin_low_water_get(cache_bin);
if (low_water > 0) {
if (is_small) {
tcache_gc_small(tsd, tcache_slow, tcache, szind);
@ -244,10 +250,12 @@ tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena,
assert(tcache_slow->arena != NULL);
assert(!tcache_bin_disabled(binind, cache_bin, tcache_slow));
unsigned nfill = cache_bin_info_ncached_max_get(cache_bin,
&cache_bin->bin_info) >> tcache_slow->lg_fill_div[binind];
arena_cache_bin_fill_small(tsdn, arena, cache_bin,
&cache_bin->bin_info, binind, nfill);
unsigned nfill = cache_bin_ncached_max_get(cache_bin)
>> tcache_slow->lg_fill_div[binind];
if (nfill == 0) {
nfill = 1;
}
arena_cache_bin_fill_small(tsdn, arena, cache_bin, binind, nfill);
tcache_slow->bin_refilled[binind] = true;
ret = cache_bin_alloc(cache_bin, tcache_success);
@ -519,20 +527,17 @@ tcache_bin_flush_bottom(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin,
assert(!tcache_bin_disabled(binind, cache_bin, tcache->tcache_slow));
tcache_bin_flush_stashed(tsd, tcache, cache_bin, binind, small);
cache_bin_sz_t ncached = cache_bin_ncached_get_local(cache_bin,
&cache_bin->bin_info);
cache_bin_sz_t ncached = cache_bin_ncached_get_local(cache_bin);
assert((cache_bin_sz_t)rem <= ncached);
unsigned nflush = ncached - rem;
CACHE_BIN_PTR_ARRAY_DECLARE(ptrs, nflush);
cache_bin_init_ptr_array_for_flush(cache_bin, &cache_bin->bin_info,
&ptrs, nflush);
cache_bin_init_ptr_array_for_flush(cache_bin, &ptrs, nflush);
tcache_bin_flush_impl(tsd, tcache, cache_bin, binind, &ptrs, nflush,
small);
cache_bin_finish_flush(cache_bin, &cache_bin->bin_info, &ptrs,
ncached - rem);
cache_bin_finish_flush(cache_bin, &ptrs, ncached - rem);
}
void
@ -561,36 +566,65 @@ void
tcache_bin_flush_stashed(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin,
szind_t binind, bool is_small) {
assert(!tcache_bin_disabled(binind, cache_bin, tcache->tcache_slow));
cache_bin_info_t *info = &cache_bin->bin_info;
/*
* The two below are for assertion only. The content of original cached
* items remain unchanged -- the stashed items reside on the other end
* of the stack. Checking the stack head and ncached to verify.
*/
void *head_content = *cache_bin->stack_head;
cache_bin_sz_t orig_cached = cache_bin_ncached_get_local(cache_bin,
info);
cache_bin_sz_t orig_cached = cache_bin_ncached_get_local(cache_bin);
cache_bin_sz_t nstashed = cache_bin_nstashed_get_local(cache_bin, info);
assert(orig_cached + nstashed <=
cache_bin_info_ncached_max_get(cache_bin, info));
cache_bin_sz_t nstashed = cache_bin_nstashed_get_local(cache_bin);
assert(orig_cached + nstashed <= cache_bin_ncached_max_get(cache_bin));
if (nstashed == 0) {
return;
}
CACHE_BIN_PTR_ARRAY_DECLARE(ptrs, nstashed);
cache_bin_init_ptr_array_for_stashed(cache_bin, binind, info, &ptrs,
cache_bin_init_ptr_array_for_stashed(cache_bin, binind, &ptrs,
nstashed);
san_check_stashed_ptrs(ptrs.ptr, nstashed, sz_index2size(binind));
tcache_bin_flush_impl(tsd, tcache, cache_bin, binind, &ptrs, nstashed,
is_small);
cache_bin_finish_flush_stashed(cache_bin, info);
cache_bin_finish_flush_stashed(cache_bin);
assert(cache_bin_nstashed_get_local(cache_bin, info) == 0);
assert(cache_bin_ncached_get_local(cache_bin, info) == orig_cached);
assert(cache_bin_nstashed_get_local(cache_bin) == 0);
assert(cache_bin_ncached_get_local(cache_bin) == orig_cached);
assert(head_content == *cache_bin->stack_head);
}
/*
 * Returns whether the default ncached_max for size-class index ind was set
 * explicitly (via the tcache_ncached_max malloc_conf option) rather than
 * computed; consulted by tcache_bin_info_compute to avoid overwriting
 * user-specified values.
 */
JET_EXTERN bool
tcache_get_default_ncached_max_set(szind_t ind) {
	return opt_tcache_ncached_max_set[ind];
}
/*
 * Read-only accessor for the pre-computed default per-bin cache_bin_info
 * table (filled in during malloc_conf parsing and tcache_boot; must not be
 * modified afterwards).
 */
JET_EXTERN const cache_bin_info_t *
tcache_get_default_ncached_max(void) {
	return opt_tcache_ncached_max;
}
/*
 * Reports, through *ncached_max, the ncached_max of the calling thread's
 * cache bin serving allocations of bin_size.  Yields 0 when the thread has
 * no tcache or the matching bin is disabled.  Returns true (error) only
 * when bin_size exceeds the tcache size-class limit.
 */
bool
tcache_bin_ncached_max_read(tsd_t *tsd, size_t bin_size,
    cache_bin_sz_t *ncached_max) {
	if (bin_size > TCACHE_MAXCLASS_LIMIT) {
		return true;
	}
	if (!tcache_available(tsd)) {
		/* No tcache on this thread: nothing is ever cached. */
		*ncached_max = 0;
		return false;
	}
	tcache_t *cur_tcache = tsd_tcachep_get(tsd);
	assert(cur_tcache != NULL);
	szind_t ind = sz_size2index(bin_size);
	cache_bin_t *cache_bin = &cur_tcache->bins[ind];
	if (tcache_bin_disabled(ind, cache_bin, cur_tcache->tcache_slow)) {
		*ncached_max = 0;
	} else {
		*ncached_max = cache_bin_ncached_max_get(cache_bin);
	}
	return false;
}
void
tcache_arena_associate(tsdn_t *tsdn, tcache_slow_t *tcache_slow,
tcache_t *tcache, arena_t *arena) {
@ -651,13 +685,13 @@ static void
tcache_default_settings_init(tcache_slow_t *tcache_slow) {
assert(tcache_slow != NULL);
assert(global_do_not_change_tcache_maxclass != 0);
assert(global_do_not_change_nbins != 0);
tcache_slow->tcache_nbins = global_do_not_change_nbins;
assert(global_do_not_change_tcache_nbins != 0);
tcache_slow->tcache_nbins = global_do_not_change_tcache_nbins;
}
static void
tcache_init(tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache,
void *mem, cache_bin_info_t *tcache_bin_info) {
void *mem, const cache_bin_info_t *tcache_bin_info) {
tcache->tcache_slow = tcache_slow;
tcache_slow->tcache = tcache;
@ -772,14 +806,16 @@ tcache_ncached_max_compute(szind_t szind) {
}
}
static void
JET_EXTERN void
tcache_bin_info_compute(cache_bin_info_t tcache_bin_info[TCACHE_NBINS_MAX]) {
/*
* Compute the values for each bin, but for bins with indices larger
* than tcache_nbins, no items will be cached.
*/
for (szind_t i = 0; i < TCACHE_NBINS_MAX; i++) {
unsigned ncached_max = tcache_ncached_max_compute(i);
unsigned ncached_max = tcache_get_default_ncached_max_set(i) ?
(unsigned)tcache_get_default_ncached_max()[i].ncached_max:
tcache_ncached_max_compute(i);
assert(ncached_max <= CACHE_BIN_NCACHED_MAX);
cache_bin_info_init(&tcache_bin_info[i], ncached_max);
}
@ -787,7 +823,7 @@ tcache_bin_info_compute(cache_bin_info_t tcache_bin_info[TCACHE_NBINS_MAX]) {
static bool
tsd_tcache_data_init_impl(tsd_t *tsd, arena_t *arena,
cache_bin_info_t *tcache_bin_info) {
const cache_bin_info_t *tcache_bin_info) {
tcache_slow_t *tcache_slow = tsd_tcache_slowp_get_unsafe(tsd);
tcache_t *tcache = tsd_tcachep_get_unsafe(tsd);
@ -841,20 +877,11 @@ tsd_tcache_data_init_impl(tsd_t *tsd, arena_t *arena,
return false;
}
static bool
tsd_tcache_data_init_with_bin_settings(tsd_t *tsd, arena_t *arena,
cache_bin_info_t tcache_bin_info[TCACHE_NBINS_MAX]) {
assert(tcache_bin_info != NULL);
return tsd_tcache_data_init_impl(tsd, arena, tcache_bin_info);
}
/* Initialize auto tcache (embedded in TSD). */
static bool
tsd_tcache_data_init(tsd_t *tsd, arena_t *arena) {
/* Takes 146B stack space. */
cache_bin_info_t tcache_bin_info[TCACHE_NBINS_MAX] = {{0}};
tcache_bin_info_compute(tcache_bin_info);
tsd_tcache_data_init(tsd_t *tsd, arena_t *arena,
const cache_bin_info_t tcache_bin_info[TCACHE_NBINS_MAX]) {
assert(tcache_bin_info != NULL);
return tsd_tcache_data_init_impl(tsd, arena, tcache_bin_info);
}
@ -866,12 +893,10 @@ tcache_create_explicit(tsd_t *tsd) {
* the beginning of the whole allocation (for freeing). The makes sure
* the cache bins have the requested alignment.
*/
unsigned tcache_nbins = global_do_not_change_nbins;
unsigned tcache_nbins = global_do_not_change_tcache_nbins;
size_t tcache_size, alignment;
cache_bin_info_t tcache_bin_info[TCACHE_NBINS_MAX] = {{0}};
tcache_bin_info_compute(tcache_bin_info);
cache_bin_info_compute_alloc(tcache_bin_info, tcache_nbins,
&tcache_size, &alignment);
cache_bin_info_compute_alloc(tcache_get_default_ncached_max(),
tcache_nbins, &tcache_size, &alignment);
size_t size = tcache_size + sizeof(tcache_t)
+ sizeof(tcache_slow_t);
@ -888,7 +913,8 @@ tcache_create_explicit(tsd_t *tsd) {
tcache_slow_t *tcache_slow =
(void *)((byte_t *)mem + tcache_size + sizeof(tcache_t));
tcache_default_settings_init(tcache_slow);
tcache_init(tsd, tcache_slow, tcache, mem, tcache_bin_info);
tcache_init(tsd, tcache_slow, tcache, mem,
tcache_get_default_ncached_max());
tcache_arena_associate(tsd_tsdn(tsd), tcache_slow, tcache,
arena_ichoose(tsd, NULL));
@ -909,7 +935,8 @@ tsd_tcache_enabled_data_init(tsd_t *tsd) {
if (opt_tcache) {
/* Trigger tcache init. */
tsd_tcache_data_init(tsd, NULL);
tsd_tcache_data_init(tsd, NULL,
tcache_get_default_ncached_max());
}
return false;
@ -920,7 +947,8 @@ tcache_enabled_set(tsd_t *tsd, bool enabled) {
bool was_enabled = tsd_tcache_enabled_get(tsd);
if (!was_enabled && enabled) {
tsd_tcache_data_init(tsd, NULL);
tsd_tcache_data_init(tsd, NULL,
tcache_get_default_ncached_max());
} else if (was_enabled && !enabled) {
tcache_cleanup(tsd);
}
@ -956,13 +984,79 @@ thread_tcache_max_set(tsd_t *tsd, size_t tcache_max) {
tcache_max_set(tcache_slow, tcache_max);
if (enabled) {
tsd_tcache_data_init_with_bin_settings(tsd, assigned_arena,
tcache_bin_info);
tsd_tcache_data_init(tsd, assigned_arena, tcache_bin_info);
}
assert(tcache_nbins_get(tcache_slow) == sz_size2index(tcache_max) + 1);
}
/*
 * Parses a "start-end:ncached_max[|...]" multi-setting string and applies
 * each parsed ncached_max to every size-class bin whose index falls within
 * [sz_size2index_compute(start), sz_size2index_compute(end)].  Sizes are
 * clamped to TCACHE_MAXCLASS_LIMIT and ncached_max to CACHE_BIN_NCACHED_MAX;
 * segments with an out-of-range or inverted size range are skipped.  When
 * bin_info_is_set is non-NULL, each updated bin is also flagged there.
 * Returns true on a malformed segment (earlier segments may already have
 * been applied), false otherwise.
 */
static bool
tcache_bin_info_settings_parse(const char *bin_settings_segment_cur,
    size_t len_left, cache_bin_info_t tcache_bin_info[TCACHE_NBINS_MAX],
    bool bin_info_is_set[TCACHE_NBINS_MAX]) {
	do {
		size_t size_start, size_end;
		size_t ncached_max;
		bool err = multi_setting_parse_next(&bin_settings_segment_cur,
		    &len_left, &size_start, &size_end, &ncached_max);
		if (err) {
			return true;
		}

		/* Clamp the range end; drop segments entirely out of range. */
		if (size_end > TCACHE_MAXCLASS_LIMIT) {
			size_end = TCACHE_MAXCLASS_LIMIT;
		}
		if (size_start > TCACHE_MAXCLASS_LIMIT ||
		    size_start > size_end) {
			continue;
		}

		/* May get called before sz_init (during malloc_conf_init). */
		szind_t bin_start = sz_size2index_compute(size_start);
		szind_t bin_end = sz_size2index_compute(size_end);
		if (ncached_max > CACHE_BIN_NCACHED_MAX) {
			ncached_max = (size_t)CACHE_BIN_NCACHED_MAX;
		}
		for (szind_t i = bin_start; i <= bin_end; i++) {
			cache_bin_info_init(&tcache_bin_info[i],
			    (cache_bin_sz_t)ncached_max);
			if (bin_info_is_set != NULL) {
				bin_info_is_set[i] = true;
			}
		}
	} while (len_left > 0);

	return false;
}
/*
 * Applies a malloc_conf "tcache_ncached_max" setting string to the global
 * default bin-info table, recording which bins were explicitly configured
 * so tcache_bin_info_compute will not overwrite them.  Returns true on
 * parse error.
 */
bool
tcache_bin_info_default_init(const char *bin_settings_segment_cur,
    size_t len_left) {
	return tcache_bin_info_settings_parse(bin_settings_segment_cur,
	    len_left, opt_tcache_ncached_max, opt_tcache_ncached_max_set);
}
/*
 * Re-configures the calling thread's tcache bins from a
 * "start-end:ncached_max[|...]" settings string of length len (no trailing
 * '\0' accounted for).  The current settings are captured first and the new
 * string is parsed on top of that copy, so a malformed string leaves the
 * tcache untouched.  On a valid string the tcache is torn down and
 * re-initialized against its previously assigned arena with the merged
 * settings.  Returns true on failure (parse error, or re-init failure),
 * false on success.
 */
bool
tcache_bins_ncached_max_write(tsd_t *tsd, char *settings, size_t len) {
	assert(tcache_available(tsd));
	assert(len != 0);
	tcache_t *tcache = tsd_tcachep_get(tsd);
	assert(tcache != NULL);

	/* Parse onto a copy of the live settings; bad input is a no-op. */
	cache_bin_info_t tcache_bin_info[TCACHE_NBINS_MAX];
	tcache_bin_settings_backup(tcache, tcache_bin_info);
	if (tcache_bin_info_settings_parse(settings, len, tcache_bin_info,
	    NULL)) {
		return true;
	}

	arena_t *assigned_arena = tcache->tcache_slow->arena;
	tcache_cleanup(tsd);
	/*
	 * Propagate re-init failure to the caller instead of silently
	 * reporting success with a torn-down tcache.
	 */
	return tsd_tcache_data_init(tsd, assigned_arena, tcache_bin_info);
}
static void
tcache_flush_cache(tsd_t *tsd, tcache_t *tcache) {
tcache_slow_t *tcache_slow = tcache->tcache_slow;
@ -999,7 +1093,7 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache, bool tsd_tcache) {
if (tsd_tcache) {
cache_bin_t *cache_bin = &tcache->bins[0];
cache_bin_assert_empty(cache_bin, &cache_bin->bin_info);
cache_bin_assert_empty(cache_bin);
}
if (tsd_tcache && cache_bin_stack_use_thp()) {
b0_dalloc_tcache_stack(tsd_tsdn(tsd), tcache_slow->dyn_alloc);
@ -1180,8 +1274,15 @@ bool
tcache_boot(tsdn_t *tsdn, base_t *base) {
global_do_not_change_tcache_maxclass = sz_s2u(opt_tcache_max);
assert(global_do_not_change_tcache_maxclass <= TCACHE_MAXCLASS_LIMIT);
global_do_not_change_nbins =
global_do_not_change_tcache_nbins =
sz_size2index(global_do_not_change_tcache_maxclass) + 1;
/*
* Pre-compute default bin info and store the results in
* opt_tcache_ncached_max. After the changes here,
* opt_tcache_ncached_max should not be modified and should always be
* accessed using tcache_get_default_ncached_max.
*/
tcache_bin_info_compute(opt_tcache_ncached_max);
if (malloc_mutex_init(&tcaches_mtx, "tcaches", WITNESS_RANK_TCACHES,
malloc_mutex_rank_exclusive)) {

49
src/util.c Normal file
View File

@ -0,0 +1,49 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/util.h"
/*
 * Parses the next "key_start-key_end:value" triple out of a multi-setting
 * string, advancing *setting_segment_cur past the segment (and past a
 * trailing '|' separator, if present) and shrinking *len_left by the number
 * of bytes consumed.  Returns true on malformed input (outputs then
 * unspecified), false on success.
 */
bool
multi_setting_parse_next(const char **setting_segment_cur, size_t *len_left,
    size_t *key_start, size_t *key_end, size_t *value) {
	const char *p = *setting_segment_cur;
	char *parse_end;
	uintmax_t parsed;

	/* Reset errno so stale values cannot be mistaken for failures. */
	set_errno(0);

	/* Range start, which must be terminated by '-'. */
	parsed = malloc_strtoumax(p, &parse_end, 0);
	if (get_errno() != 0 || *parse_end != '-') {
		return true;
	}
	*key_start = (size_t)parsed;
	p = parse_end + 1;

	/* Range end, which must be terminated by ':'. */
	parsed = malloc_strtoumax(p, &parse_end, 0);
	if (get_errno() != 0 || *parse_end != ':') {
		return true;
	}
	*key_end = (size_t)parsed;
	p = parse_end + 1;

	/* The value itself; whatever follows it is handled below. */
	parsed = malloc_strtoumax(p, &parse_end, 0);
	if (get_errno() != 0) {
		return true;
	}
	*value = (size_t)parsed;

	/* Skip a '|' separator between segments, if any. */
	if (*parse_end == '|') {
		parse_end++;
	}

	*len_left -= parse_end - *setting_segment_cur;
	*setting_segment_cur = parse_end;

	return false;
}

View File

@ -1,19 +1,18 @@
#include "test/jemalloc_test.h"
static void
do_fill_test(cache_bin_t *bin, cache_bin_info_t *info, void **ptrs,
cache_bin_sz_t ncached_max, cache_bin_sz_t nfill_attempt,
cache_bin_sz_t nfill_succeed) {
do_fill_test(cache_bin_t *bin, void **ptrs, cache_bin_sz_t ncached_max,
cache_bin_sz_t nfill_attempt, cache_bin_sz_t nfill_succeed) {
bool success;
void *ptr;
assert_true(cache_bin_ncached_get_local(bin, info) == 0, "");
assert_true(cache_bin_ncached_get_local(bin) == 0, "");
CACHE_BIN_PTR_ARRAY_DECLARE(arr, nfill_attempt);
cache_bin_init_ptr_array_for_fill(bin, info, &arr, nfill_attempt);
cache_bin_init_ptr_array_for_fill(bin, &arr, nfill_attempt);
for (cache_bin_sz_t i = 0; i < nfill_succeed; i++) {
arr.ptr[i] = &ptrs[i];
}
cache_bin_finish_fill(bin, info, &arr, nfill_succeed);
expect_true(cache_bin_ncached_get_local(bin, info) == nfill_succeed,
cache_bin_finish_fill(bin, &arr, nfill_succeed);
expect_true(cache_bin_ncached_get_local(bin) == nfill_succeed,
"");
cache_bin_low_water_set(bin);
@ -22,18 +21,18 @@ do_fill_test(cache_bin_t *bin, cache_bin_info_t *info, void **ptrs,
expect_true(success, "");
expect_ptr_eq(ptr, (void *)&ptrs[i],
"Should pop in order filled");
expect_true(cache_bin_low_water_get(bin, info)
expect_true(cache_bin_low_water_get(bin)
== nfill_succeed - i - 1, "");
}
expect_true(cache_bin_ncached_get_local(bin, info) == 0, "");
expect_true(cache_bin_low_water_get(bin, info) == 0, "");
expect_true(cache_bin_ncached_get_local(bin) == 0, "");
expect_true(cache_bin_low_water_get(bin) == 0, "");
}
static void
do_flush_test(cache_bin_t *bin, cache_bin_info_t *info, void **ptrs,
cache_bin_sz_t nfill, cache_bin_sz_t nflush) {
do_flush_test(cache_bin_t *bin, void **ptrs, cache_bin_sz_t nfill,
cache_bin_sz_t nflush) {
bool success;
assert_true(cache_bin_ncached_get_local(bin, info) == 0, "");
assert_true(cache_bin_ncached_get_local(bin) == 0, "");
for (cache_bin_sz_t i = 0; i < nfill; i++) {
success = cache_bin_dalloc_easy(bin, &ptrs[i]);
@ -41,30 +40,30 @@ do_flush_test(cache_bin_t *bin, cache_bin_info_t *info, void **ptrs,
}
CACHE_BIN_PTR_ARRAY_DECLARE(arr, nflush);
cache_bin_init_ptr_array_for_flush(bin, info, &arr, nflush);
cache_bin_init_ptr_array_for_flush(bin, &arr, nflush);
for (cache_bin_sz_t i = 0; i < nflush; i++) {
expect_ptr_eq(arr.ptr[i], &ptrs[nflush - i - 1], "");
}
cache_bin_finish_flush(bin, info, &arr, nflush);
cache_bin_finish_flush(bin, &arr, nflush);
expect_true(cache_bin_ncached_get_local(bin, info) == nfill - nflush,
expect_true(cache_bin_ncached_get_local(bin) == nfill - nflush,
"");
while (cache_bin_ncached_get_local(bin, info) > 0) {
while (cache_bin_ncached_get_local(bin) > 0) {
cache_bin_alloc(bin, &success);
}
}
static void
do_batch_alloc_test(cache_bin_t *bin, cache_bin_info_t *info, void **ptrs,
cache_bin_sz_t nfill, size_t batch) {
assert_true(cache_bin_ncached_get_local(bin, info) == 0, "");
do_batch_alloc_test(cache_bin_t *bin, void **ptrs, cache_bin_sz_t nfill,
size_t batch) {
assert_true(cache_bin_ncached_get_local(bin) == 0, "");
CACHE_BIN_PTR_ARRAY_DECLARE(arr, nfill);
cache_bin_init_ptr_array_for_fill(bin, info, &arr, nfill);
cache_bin_init_ptr_array_for_fill(bin, &arr, nfill);
for (cache_bin_sz_t i = 0; i < nfill; i++) {
arr.ptr[i] = &ptrs[i];
}
cache_bin_finish_fill(bin, info, &arr, nfill);
assert_true(cache_bin_ncached_get_local(bin, info) == nfill, "");
cache_bin_finish_fill(bin, &arr, nfill);
assert_true(cache_bin_ncached_get_local(bin) == nfill, "");
cache_bin_low_water_set(bin);
void **out = malloc((batch + 1) * sizeof(void *));
@ -73,9 +72,9 @@ do_batch_alloc_test(cache_bin_t *bin, cache_bin_info_t *info, void **ptrs,
for (cache_bin_sz_t i = 0; i < (cache_bin_sz_t)n; i++) {
expect_ptr_eq(out[i], &ptrs[i], "");
}
expect_true(cache_bin_low_water_get(bin, info) == nfill -
expect_true(cache_bin_low_water_get(bin) == nfill -
(cache_bin_sz_t)n, "");
while (cache_bin_ncached_get_local(bin, info) > 0) {
while (cache_bin_ncached_get_local(bin) > 0) {
bool success;
cache_bin_alloc(bin, &success);
}
@ -106,13 +105,11 @@ TEST_BEGIN(test_cache_bin) {
cache_bin_info_init(&info, ncached_max);
cache_bin_t bin;
test_bin_init(&bin, &info);
cache_bin_info_t *bin_info = &bin.bin_info;
/* Initialize to empty; should then have 0 elements. */
expect_d_eq(ncached_max, cache_bin_info_ncached_max_get(&bin,
&bin.bin_info), "");
expect_true(cache_bin_ncached_get_local(&bin, bin_info) == 0, "");
expect_true(cache_bin_low_water_get(&bin, bin_info) == 0, "");
expect_d_eq(ncached_max, cache_bin_ncached_max_get(&bin), "");
expect_true(cache_bin_ncached_get_local(&bin) == 0, "");
expect_true(cache_bin_low_water_get(&bin) == 0, "");
ptr = cache_bin_alloc_easy(&bin, &success);
expect_false(success, "Shouldn't successfully allocate when empty");
@ -129,14 +126,14 @@ TEST_BEGIN(test_cache_bin) {
void **ptrs = mallocx(sizeof(void *) * (ncached_max + 1), 0);
assert_ptr_not_null(ptrs, "Unexpected mallocx failure");
for (cache_bin_sz_t i = 0; i < ncached_max; i++) {
expect_true(cache_bin_ncached_get_local(&bin, bin_info) == i, "");
expect_true(cache_bin_ncached_get_local(&bin) == i, "");
success = cache_bin_dalloc_easy(&bin, &ptrs[i]);
expect_true(success,
"Should be able to dalloc into a non-full cache bin.");
expect_true(cache_bin_low_water_get(&bin, bin_info) == 0,
expect_true(cache_bin_low_water_get(&bin) == 0,
"Pushes and pops shouldn't change low water of zero.");
}
expect_true(cache_bin_ncached_get_local(&bin, bin_info) == ncached_max,
expect_true(cache_bin_ncached_get_local(&bin) == ncached_max,
"");
success = cache_bin_dalloc_easy(&bin, &ptrs[ncached_max]);
expect_false(success, "Shouldn't be able to dalloc into a full bin.");
@ -144,9 +141,9 @@ TEST_BEGIN(test_cache_bin) {
cache_bin_low_water_set(&bin);
for (cache_bin_sz_t i = 0; i < ncached_max; i++) {
expect_true(cache_bin_low_water_get(&bin, bin_info)
expect_true(cache_bin_low_water_get(&bin)
== ncached_max - i, "");
expect_true(cache_bin_ncached_get_local(&bin, bin_info)
expect_true(cache_bin_ncached_get_local(&bin)
== ncached_max - i, "");
/*
* This should fail -- the easy variant can't change the low
@ -155,9 +152,9 @@ TEST_BEGIN(test_cache_bin) {
ptr = cache_bin_alloc_easy(&bin, &success);
expect_ptr_null(ptr, "");
expect_false(success, "");
expect_true(cache_bin_low_water_get(&bin, bin_info)
expect_true(cache_bin_low_water_get(&bin)
== ncached_max - i, "");
expect_true(cache_bin_ncached_get_local(&bin, bin_info)
expect_true(cache_bin_ncached_get_local(&bin)
== ncached_max - i, "");
/* This should succeed, though. */
@ -165,13 +162,13 @@ TEST_BEGIN(test_cache_bin) {
expect_true(success, "");
expect_ptr_eq(ptr, &ptrs[ncached_max - i - 1],
"Alloc should pop in stack order");
expect_true(cache_bin_low_water_get(&bin, bin_info)
expect_true(cache_bin_low_water_get(&bin)
== ncached_max - i - 1, "");
expect_true(cache_bin_ncached_get_local(&bin, bin_info)
expect_true(cache_bin_ncached_get_local(&bin)
== ncached_max - i - 1, "");
}
/* Now we're empty -- all alloc attempts should fail. */
expect_true(cache_bin_ncached_get_local(&bin, bin_info) == 0, "");
expect_true(cache_bin_ncached_get_local(&bin) == 0, "");
ptr = cache_bin_alloc_easy(&bin, &success);
expect_ptr_null(ptr, "");
expect_false(success, "");
@ -187,7 +184,7 @@ TEST_BEGIN(test_cache_bin) {
for (cache_bin_sz_t i = ncached_max / 2; i < ncached_max; i++) {
cache_bin_dalloc_easy(&bin, &ptrs[i]);
}
expect_true(cache_bin_ncached_get_local(&bin, bin_info) == ncached_max,
expect_true(cache_bin_ncached_get_local(&bin) == ncached_max,
"");
for (cache_bin_sz_t i = ncached_max - 1; i >= ncached_max / 2; i--) {
/*
@ -204,77 +201,72 @@ TEST_BEGIN(test_cache_bin) {
expect_ptr_null(ptr, "");
/* We're going to test filling -- we must be empty to start. */
while (cache_bin_ncached_get_local(&bin, bin_info)) {
while (cache_bin_ncached_get_local(&bin)) {
cache_bin_alloc(&bin, &success);
expect_true(success, "");
}
/* Test fill. */
/* Try to fill all, succeed fully. */
do_fill_test(&bin, bin_info, ptrs, ncached_max, ncached_max,
do_fill_test(&bin, ptrs, ncached_max, ncached_max,
ncached_max);
/* Try to fill all, succeed partially. */
do_fill_test(&bin, bin_info, ptrs, ncached_max, ncached_max,
do_fill_test(&bin, ptrs, ncached_max, ncached_max,
ncached_max / 2);
/* Try to fill all, fail completely. */
do_fill_test(&bin, bin_info, ptrs, ncached_max, ncached_max, 0);
do_fill_test(&bin, ptrs, ncached_max, ncached_max, 0);
/* Try to fill some, succeed fully. */
do_fill_test(&bin, bin_info, ptrs, ncached_max, ncached_max / 2,
do_fill_test(&bin, ptrs, ncached_max, ncached_max / 2,
ncached_max / 2);
/* Try to fill some, succeed partially. */
do_fill_test(&bin, bin_info, ptrs, ncached_max, ncached_max / 2,
do_fill_test(&bin, ptrs, ncached_max, ncached_max / 2,
ncached_max / 4);
/* Try to fill some, fail completely. */
do_fill_test(&bin, bin_info, ptrs, ncached_max, ncached_max / 2, 0);
do_fill_test(&bin, ptrs, ncached_max, ncached_max / 2, 0);
do_flush_test(&bin, bin_info, ptrs, ncached_max, ncached_max);
do_flush_test(&bin, bin_info, ptrs, ncached_max, ncached_max / 2);
do_flush_test(&bin, bin_info, ptrs, ncached_max, 0);
do_flush_test(&bin, bin_info, ptrs, ncached_max / 2, ncached_max / 2);
do_flush_test(&bin, bin_info, ptrs, ncached_max / 2, ncached_max / 4);
do_flush_test(&bin, bin_info, ptrs, ncached_max / 2, 0);
do_flush_test(&bin, ptrs, ncached_max, ncached_max);
do_flush_test(&bin, ptrs, ncached_max, ncached_max / 2);
do_flush_test(&bin, ptrs, ncached_max, 0);
do_flush_test(&bin, ptrs, ncached_max / 2, ncached_max / 2);
do_flush_test(&bin, ptrs, ncached_max / 2, ncached_max / 4);
do_flush_test(&bin, ptrs, ncached_max / 2, 0);
do_batch_alloc_test(&bin, bin_info, ptrs, ncached_max, ncached_max);
do_batch_alloc_test(&bin, bin_info, ptrs, ncached_max,
ncached_max * 2);
do_batch_alloc_test(&bin, bin_info, ptrs, ncached_max,
ncached_max / 2);
do_batch_alloc_test(&bin, bin_info, ptrs, ncached_max, 2);
do_batch_alloc_test(&bin, bin_info, ptrs, ncached_max, 1);
do_batch_alloc_test(&bin, bin_info, ptrs, ncached_max, 0);
do_batch_alloc_test(&bin, bin_info, ptrs, ncached_max / 2,
ncached_max / 2);
do_batch_alloc_test(&bin, bin_info, ptrs, ncached_max / 2,
ncached_max);
do_batch_alloc_test(&bin, bin_info, ptrs, ncached_max / 2,
ncached_max / 4);
do_batch_alloc_test(&bin, bin_info, ptrs, ncached_max / 2, 2);
do_batch_alloc_test(&bin, bin_info, ptrs, ncached_max / 2, 1);
do_batch_alloc_test(&bin, bin_info, ptrs, ncached_max / 2, 0);
do_batch_alloc_test(&bin, bin_info, ptrs, 2, ncached_max);
do_batch_alloc_test(&bin, bin_info, ptrs, 2, 2);
do_batch_alloc_test(&bin, bin_info, ptrs, 2, 1);
do_batch_alloc_test(&bin, bin_info, ptrs, 2, 0);
do_batch_alloc_test(&bin, bin_info, ptrs, 1, 2);
do_batch_alloc_test(&bin, bin_info, ptrs, 1, 1);
do_batch_alloc_test(&bin, bin_info, ptrs, 1, 0);
do_batch_alloc_test(&bin, bin_info, ptrs, 0, 2);
do_batch_alloc_test(&bin, bin_info, ptrs, 0, 1);
do_batch_alloc_test(&bin, bin_info, ptrs, 0, 0);
do_batch_alloc_test(&bin, ptrs, ncached_max, ncached_max);
do_batch_alloc_test(&bin, ptrs, ncached_max, ncached_max * 2);
do_batch_alloc_test(&bin, ptrs, ncached_max, ncached_max / 2);
do_batch_alloc_test(&bin, ptrs, ncached_max, 2);
do_batch_alloc_test(&bin, ptrs, ncached_max, 1);
do_batch_alloc_test(&bin, ptrs, ncached_max, 0);
do_batch_alloc_test(&bin, ptrs, ncached_max / 2, ncached_max / 2);
do_batch_alloc_test(&bin, ptrs, ncached_max / 2, ncached_max);
do_batch_alloc_test(&bin, ptrs, ncached_max / 2, ncached_max / 4);
do_batch_alloc_test(&bin, ptrs, ncached_max / 2, 2);
do_batch_alloc_test(&bin, ptrs, ncached_max / 2, 1);
do_batch_alloc_test(&bin, ptrs, ncached_max / 2, 0);
do_batch_alloc_test(&bin, ptrs, 2, ncached_max);
do_batch_alloc_test(&bin, ptrs, 2, 2);
do_batch_alloc_test(&bin, ptrs, 2, 1);
do_batch_alloc_test(&bin, ptrs, 2, 0);
do_batch_alloc_test(&bin, ptrs, 1, 2);
do_batch_alloc_test(&bin, ptrs, 1, 1);
do_batch_alloc_test(&bin, ptrs, 1, 0);
do_batch_alloc_test(&bin, ptrs, 0, 2);
do_batch_alloc_test(&bin, ptrs, 0, 1);
do_batch_alloc_test(&bin, ptrs, 0, 0);
free(ptrs);
}
TEST_END
static void
do_flush_stashed_test(cache_bin_t *bin, cache_bin_info_t *info, void **ptrs,
cache_bin_sz_t nfill, cache_bin_sz_t nstash) {
expect_true(cache_bin_ncached_get_local(bin, info) == 0,
do_flush_stashed_test(cache_bin_t *bin, void **ptrs, cache_bin_sz_t nfill,
cache_bin_sz_t nstash) {
expect_true(cache_bin_ncached_get_local(bin) == 0,
"Bin not empty");
expect_true(cache_bin_nstashed_get_local(bin, info) == 0,
expect_true(cache_bin_nstashed_get_local(bin) == 0,
"Bin not empty");
expect_true(nfill + nstash <= info->ncached_max, "Exceeded max");
expect_true(nfill + nstash <= bin->bin_info.ncached_max, "Exceeded max");
bool ret;
/* Fill */
@ -282,7 +274,7 @@ do_flush_stashed_test(cache_bin_t *bin, cache_bin_info_t *info, void **ptrs,
ret = cache_bin_dalloc_easy(bin, &ptrs[i]);
expect_true(ret, "Unexpected fill failure");
}
expect_true(cache_bin_ncached_get_local(bin, info) == nfill,
expect_true(cache_bin_ncached_get_local(bin) == nfill,
"Wrong cached count");
/* Stash */
@ -290,10 +282,10 @@ do_flush_stashed_test(cache_bin_t *bin, cache_bin_info_t *info, void **ptrs,
ret = cache_bin_stash(bin, &ptrs[i + nfill]);
expect_true(ret, "Unexpected stash failure");
}
expect_true(cache_bin_nstashed_get_local(bin, info) == nstash,
expect_true(cache_bin_nstashed_get_local(bin) == nstash,
"Wrong stashed count");
if (nfill + nstash == info->ncached_max) {
if (nfill + nstash == bin->bin_info.ncached_max) {
ret = cache_bin_dalloc_easy(bin, &ptrs[0]);
expect_false(ret, "Should not dalloc into a full bin");
ret = cache_bin_stash(bin, &ptrs[0]);
@ -308,19 +300,19 @@ do_flush_stashed_test(cache_bin_t *bin, cache_bin_info_t *info, void **ptrs,
expect_true((uintptr_t)ptr < (uintptr_t)&ptrs[nfill],
"Should not alloc stashed ptrs");
}
expect_true(cache_bin_ncached_get_local(bin, info) == 0,
expect_true(cache_bin_ncached_get_local(bin) == 0,
"Wrong cached count");
expect_true(cache_bin_nstashed_get_local(bin, info) == nstash,
expect_true(cache_bin_nstashed_get_local(bin) == nstash,
"Wrong stashed count");
cache_bin_alloc(bin, &ret);
expect_false(ret, "Should not alloc stashed");
/* Clear stashed ones */
cache_bin_finish_flush_stashed(bin, info);
expect_true(cache_bin_ncached_get_local(bin, info) == 0,
cache_bin_finish_flush_stashed(bin);
expect_true(cache_bin_ncached_get_local(bin) == 0,
"Wrong cached count");
expect_true(cache_bin_nstashed_get_local(bin, info) == 0,
expect_true(cache_bin_nstashed_get_local(bin) == 0,
"Wrong stashed count");
cache_bin_alloc(bin, &ret);
@ -334,7 +326,6 @@ TEST_BEGIN(test_cache_bin_stash) {
cache_bin_info_t info;
cache_bin_info_init(&info, ncached_max);
test_bin_init(&bin, &info);
cache_bin_info_t *bin_info = &bin.bin_info;
/*
* The content of this array is not accessed; instead the interior
@ -344,9 +335,9 @@ TEST_BEGIN(test_cache_bin_stash) {
assert_ptr_not_null(ptrs, "Unexpected mallocx failure");
bool ret;
for (cache_bin_sz_t i = 0; i < ncached_max; i++) {
expect_true(cache_bin_ncached_get_local(&bin, bin_info) ==
expect_true(cache_bin_ncached_get_local(&bin) ==
(i / 2 + i % 2), "Wrong ncached value");
expect_true(cache_bin_nstashed_get_local(&bin, bin_info) ==
expect_true(cache_bin_nstashed_get_local(&bin) ==
i / 2, "Wrong nstashed value");
if (i % 2 == 0) {
cache_bin_dalloc_easy(&bin, &ptrs[i]);
@ -369,22 +360,21 @@ TEST_BEGIN(test_cache_bin_stash) {
expect_true(diff % 2 == 0, "Should be able to alloc");
} else {
expect_false(ret, "Should not alloc stashed");
expect_true(cache_bin_nstashed_get_local(&bin,
bin_info) == ncached_max / 2,
expect_true(cache_bin_nstashed_get_local(&bin) == ncached_max / 2,
"Wrong nstashed value");
}
}
test_bin_init(&bin, &info);
do_flush_stashed_test(&bin, bin_info, ptrs, ncached_max, 0);
do_flush_stashed_test(&bin, bin_info, ptrs, 0, ncached_max);
do_flush_stashed_test(&bin, bin_info, ptrs, ncached_max / 2,
do_flush_stashed_test(&bin, ptrs, ncached_max, 0);
do_flush_stashed_test(&bin, ptrs, 0, ncached_max);
do_flush_stashed_test(&bin, ptrs, ncached_max / 2,
ncached_max / 2);
do_flush_stashed_test(&bin, bin_info, ptrs, ncached_max / 4,
do_flush_stashed_test(&bin, ptrs, ncached_max / 4,
ncached_max / 2);
do_flush_stashed_test(&bin, bin_info, ptrs, ncached_max / 2,
do_flush_stashed_test(&bin, ptrs, ncached_max / 2,
ncached_max / 4);
do_flush_stashed_test(&bin, bin_info, ptrs, ncached_max / 4,
do_flush_stashed_test(&bin, ptrs, ncached_max / 4,
ncached_max / 4);
}
TEST_END

268
test/unit/ncached_max.c Normal file
View File

@ -0,0 +1,268 @@
#include "test/jemalloc_test.h"
#include "test/san.h"

/*
 * Per-bin ncached_max defaults under test, set via malloc_conf:
 * bins covering 256-1024 bytes get 1001, the 2048-byte bin gets 0, and the
 * 8192-byte bin gets 1; tcache_max:4096 limits which bins are enabled.
 * ncached_max_check() below asserts these exact values.
 */
const char *malloc_conf =
"tcache_ncached_max:256-1024:1001|2048-2048:0|8192-8192:1,tcache_max:4096";

/*
 * Internal tcache hooks (not exposed in public headers) used to inspect the
 * computed defaults and the "explicitly set by conf" flags.
 */
extern void tcache_bin_info_compute(
cache_bin_info_t tcache_bin_info[TCACHE_NBINS_MAX]);
extern bool tcache_get_default_ncached_max_set(szind_t ind);
extern const cache_bin_info_t *tcache_get_default_ncached_max(void);
/*
 * Assert that thread.tcache.ncached_max.read_sizeclass reports, for every
 * possible bin, the ncached_max recorded in the expected array.  Each bin is
 * queried twice: once with its exact size class, and once with that size
 * minus one (a non-bin size, which should be rounded up to the same bin).
 */
static void
check_bins_info(cache_bin_info_t tcache_bin_info[TCACHE_NBINS_MAX]) {
	const char *get_name = "thread.tcache.ncached_max.read_sizeclass";
	size_t mib[4];
	size_t miblen = sizeof(mib) / sizeof(size_t);
	expect_d_eq(mallctlnametomib(get_name, mib, &miblen), 0,
	    "Unexpected mallctlnametomib() failure");

	for (szind_t i = 0; i < TCACHE_NBINS_MAX; i++) {
		size_t query_size = sz_index2size(i);
		size_t got = 0;
		size_t out_sz = sizeof(size_t);
		expect_d_eq(mallctlbymib(mib, miblen, (void *)&got, &out_sz,
		    (void *)&query_size, sizeof(size_t)), 0,
		    "Unexpected mallctlbymib() failure");
		expect_zu_eq(got, tcache_bin_info[i].ncached_max,
		    "Unexpected ncached_max for bin %d", i);

		/* Check ncached_max returned under a non-bin size. */
		query_size--;
		size_t got_inexact = 0;
		expect_d_eq(mallctlbymib(mib, miblen, (void *)&got_inexact,
		    &out_sz, (void *)&query_size, sizeof(size_t)), 0,
		    "Unexpected mallctlbymib() failure");
		expect_zu_eq(got_inexact, got,
		    "Unexpected ncached_max for inaccurate bin size.");
	}
}
static void *
ncached_max_check(void* args) {
cache_bin_info_t tcache_bin_info[TCACHE_NBINS_MAX];
cache_bin_info_t tcache_bin_info_backup[TCACHE_NBINS_MAX];
tsd_t *tsd = tsd_fetch();
tcache_t *tcache = tsd_tcachep_get(tsd);
assert(tcache != NULL);
tcache_slow_t *tcache_slow = tcache->tcache_slow;
tcache_bin_info_compute(tcache_bin_info);
memcpy(tcache_bin_info_backup, tcache_bin_info,
sizeof(tcache_bin_info));
/* Check ncached_max set by malloc_conf. */
for (szind_t i = 0; i < TCACHE_NBINS_MAX; i++) {
bool first_range = (i >= sz_size2index(256) &&
i <= sz_size2index(1024));
bool second_range = (i == sz_size2index(2048));
bool third_range = (i == sz_size2index(8192));
cache_bin_sz_t target_ncached_max = 0;
if (first_range || second_range || third_range) {
target_ncached_max = first_range ? 1001:
(second_range ? 0: 1);
expect_true(tcache_get_default_ncached_max_set(i),
"Unexpected state for bin %u", i);
expect_zu_eq(target_ncached_max,
tcache_bin_info[i].ncached_max,
"Unexpected generated ncached_max for bin %u", i);
expect_zu_eq(target_ncached_max,
tcache_get_default_ncached_max()[i].ncached_max,
"Unexpected pre-set ncached_max for bin %u", i);
} else {
expect_false(tcache_get_default_ncached_max_set(i),
"Unexpected state for bin %u", i);
}
}
unsigned nbins = tcache_nbins_get(tcache_slow);
for (szind_t i = nbins; i < TCACHE_NBINS_MAX; i++) {
cache_bin_info_init(&tcache_bin_info[i], 0);
}
/* Check the initial bin settings. */
check_bins_info(tcache_bin_info);
size_t mib_set[4], mib_set_len;
mib_set_len = sizeof(mib_set) / sizeof(size_t);
const char *set_name = "thread.tcache.ncached_max.write";
expect_d_eq(mallctlnametomib(set_name, mib_set, &mib_set_len), 0,
"Unexpected mallctlnametomib() failure");
/* Test the ncached_max set with tcache on. */
char inputs[100] = "8-128:1|160-160:11|170-320:22|224-8388609:0";
char *inputp = inputs;
expect_d_eq(mallctlbymib(mib_set, mib_set_len, NULL, NULL,
(void *)&inputp, sizeof(char *)), 0,
"Unexpected mallctlbymib() failure");
for (szind_t i = 0; i < TCACHE_NBINS_MAX; i++) {
if (i >= sz_size2index(8) &&i <= sz_size2index(128)) {
cache_bin_info_init(&tcache_bin_info[i], 1);
}
if (i == sz_size2index(160)) {
cache_bin_info_init(&tcache_bin_info[i], 11);
}
if (i >= sz_size2index(170) && i <= sz_size2index(320)) {
cache_bin_info_init(&tcache_bin_info[i], 22);
}
if (i >= sz_size2index(224)) {
cache_bin_info_init(&tcache_bin_info[i], 0);
}
if (i >= nbins) {
cache_bin_info_init(&tcache_bin_info[i], 0);
}
}
check_bins_info(tcache_bin_info);
/*
* Close the tcache and set ncached_max of some bins. It will be
* set properly but thread.tcache.ncached_max.read still returns 0
* since the bin is not available yet. After enabling the tcache,
* the new setting will not be carried on. Instead, the default
* settings will be applied.
*/
bool e0 = false, e1;
size_t bool_sz = sizeof(bool);
expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e1, &bool_sz,
(void *)&e0, bool_sz), 0, "Unexpected mallctl() error");
expect_true(e1, "Unexpected previous tcache state");
strcpy(inputs, "0-112:8");
/* Setting returns ENOENT when the tcache is disabled. */
expect_d_eq(mallctlbymib(mib_set, mib_set_len, NULL, NULL,
(void *)&inputp, sizeof(char *)), ENOENT,
"Unexpected mallctlbymib() failure");
/* All ncached_max should return 0 once tcache is disabled. */
for (szind_t i = 0; i < TCACHE_NBINS_MAX; i++) {
cache_bin_info_init(&tcache_bin_info[i], 0);
}
check_bins_info(tcache_bin_info);
e0 = true;
expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e1, &bool_sz,
(void *)&e0, bool_sz), 0, "Unexpected mallctl() error");
expect_false(e1, "Unexpected previous tcache state");
memcpy(tcache_bin_info, tcache_bin_info_backup,
sizeof(tcache_bin_info_backup));
for (szind_t i = tcache_nbins_get(tcache_slow); i < TCACHE_NBINS_MAX;
i++) {
cache_bin_info_init(&tcache_bin_info[i], 0);
}
check_bins_info(tcache_bin_info);
/*
* Set ncached_max of bins not enabled yet. Then, enable them by
* resetting tcache_max. The ncached_max changes should stay.
*/
size_t tcache_max = 1024;
assert_d_eq(mallctl("thread.tcache.max",
NULL, NULL, (void *)&tcache_max, sizeof(size_t)),.0,
"Unexpected.mallctl().failure");
for (szind_t i = sz_size2index(1024) + 1; i < TCACHE_NBINS_MAX; i++) {
cache_bin_info_init(&tcache_bin_info[i], 0);
}
strcpy(inputs, "2048-6144:123");
expect_d_eq(mallctlbymib(mib_set, mib_set_len, NULL, NULL,
(void *)&inputp, sizeof(char *)), 0,
"Unexpected mallctlbymib() failure");
check_bins_info(tcache_bin_info);
tcache_max = 6144;
assert_d_eq(mallctl("thread.tcache.max",
NULL, NULL, (void *)&tcache_max, sizeof(size_t)),.0,
"Unexpected.mallctl().failure");
memcpy(tcache_bin_info, tcache_bin_info_backup,
sizeof(tcache_bin_info_backup));
for (szind_t i = sz_size2index(2048); i < TCACHE_NBINS_MAX; i++) {
if (i <= sz_size2index(6144)) {
cache_bin_info_init(&tcache_bin_info[i], 123);
} else if (i > sz_size2index(6144)) {
cache_bin_info_init(&tcache_bin_info[i], 0);
}
}
check_bins_info(tcache_bin_info);
/* Test an empty input, it should do nothing. */
strcpy(inputs, "");
expect_d_eq(mallctlbymib(mib_set, mib_set_len, NULL, NULL,
(void *)&inputp, sizeof(char *)), 0,
"Unexpected mallctlbymib() failure");
check_bins_info(tcache_bin_info);
/* Test a half-done string, it should return EINVAL and do nothing. */
strcpy(inputs, "4-1024:7|256-1024");
expect_d_eq(mallctlbymib(mib_set, mib_set_len, NULL, NULL,
(void *)&inputp, sizeof(char *)), EINVAL,
"Unexpected mallctlbymib() failure");
check_bins_info(tcache_bin_info);
/*
* Test an invalid string with start size larger than end size. It
* should return success but do nothing.
*/
strcpy(inputs, "1024-256:7");
expect_d_eq(mallctlbymib(mib_set, mib_set_len, NULL, NULL,
(void *)&inputp, sizeof(char *)), 0,
"Unexpected mallctlbymib() failure");
check_bins_info(tcache_bin_info);
/*
* Test a string exceeding the length limit, it should return EINVAL
* and do nothing.
*/
char *long_inputs = (char *)malloc(10000 * sizeof(char));
expect_true(long_inputs != NULL, "Unexpected allocation failure.");
for (int i = 0; i < 200; i++) {
memcpy(long_inputs + i * 9, "4-1024:3|", 9);
}
memcpy(long_inputs + 200 * 9, "4-1024:3", 8);
long_inputs[200 * 9 + 8] = '\0';
inputp = long_inputs;
expect_d_eq(mallctlbymib(mib_set, mib_set_len, NULL, NULL,
(void *)&inputp, sizeof(char *)), EINVAL,
"Unexpected mallctlbymib() failure");
check_bins_info(tcache_bin_info);
free(long_inputs);
/*
* Test a string with invalid characters, it should return EINVAL
* and do nothing.
*/
strcpy(inputs, "k8-1024:77p");
inputp = inputs;
expect_d_eq(mallctlbymib(mib_set, mib_set_len, NULL, NULL,
(void *)&inputp, sizeof(char *)), EINVAL,
"Unexpected mallctlbymib() failure");
check_bins_info(tcache_bin_info);
/* Test large ncached_max, it should return success but capped. */
strcpy(inputs, "1024-1024:65540");
expect_d_eq(mallctlbymib(mib_set, mib_set_len, NULL, NULL,
(void *)&inputp, sizeof(char *)), 0,
"Unexpected mallctlbymib() failure");
cache_bin_info_init(&tcache_bin_info[sz_size2index(1024)],
CACHE_BIN_NCACHED_MAX);
check_bins_info(tcache_bin_info);
return NULL;
}
TEST_BEGIN(test_ncached_max) {
	test_skip_if(!config_stats);
	test_skip_if(!opt_tcache);
	test_skip_if(san_uaf_detection_enabled());

	/* TODO: change nthreads to 8 to reduce CI loads. */
	unsigned nthreads = 108;
	VARIABLE_ARRAY(thd_t, threads, nthreads);

	/* Spawn all workers first, then wait for every one of them. */
	for (unsigned t = 0; t < nthreads; t++) {
		thd_create(&threads[t], ncached_max_check, NULL);
	}
	for (unsigned t = 0; t < nthreads; t++) {
		thd_join(threads[t], NULL);
	}
}
TEST_END
int
main(void) {
	/* Single test case; thread-level concurrency runs inside it. */
	return test(
	    test_ncached_max);
}

View File

@ -81,8 +81,7 @@ tcache_bytes_read_local(void) {
if (tcache_bin_disabled(i, cache_bin, tcache->tcache_slow)) {
continue;
}
cache_bin_sz_t ncached = cache_bin_ncached_get_local(cache_bin,
&cache_bin->bin_info);
cache_bin_sz_t ncached = cache_bin_ncached_get_local(cache_bin);
tcache_bytes += ncached * sz_index2size(i);
}
return tcache_bytes;
@ -260,7 +259,7 @@ tcache_check(void *arg) {
expect_zu_eq(old_tcache_max, opt_tcache_max,
"Unexpected default value for tcache_max");
tcache_nbins = tcache_nbins_get(tcache_slow);
expect_zu_eq(tcache_nbins, (size_t)global_do_not_change_nbins,
expect_zu_eq(tcache_nbins, (size_t)global_do_not_change_tcache_nbins,
"Unexpected default value for tcache_nbins");
validate_tcache_stack(tcache);
@ -370,4 +369,3 @@ main(void) {
test_tcache_max,
test_thread_tcache_max);
}