Add mallctl to set and get ncached_max of each cache_bin.

1. `thread_tcache_ncached_max_read_sizeclass` lets users read the
    ncached_max of the bin matching a given size class.  The size class is
    passed in through newp and the resulting ncached_max is returned
    through oldp; a size that is not an exact bin size is rounded up to
    the containing size class.
2. `thread_tcache_ncached_max_write` takes in a string describing the
    settings for bins in the tcache, in the form
    `size_start-size_end:ncached_max`, with multiple segments separated
    by '|'.
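
As an illustration only (not part of this commit), a caller might exercise the
two new mallctls roughly as follows, assuming the standard installed
`<jemalloc/jemalloc.h>` header; the size class goes in through newp for the
read, and a pointer to the settings string goes in through newp for the write:

    #include <jemalloc/jemalloc.h>
    #include <stdio.h>

    int
    main(void) {
        /* Query ncached_max of the bin that caches 128-byte allocations. */
        size_t bin_size = 128;
        size_t ncached_max;
        size_t sz = sizeof(ncached_max);
        if (mallctl("thread.tcache.ncached_max.read_sizeclass",
            (void *)&ncached_max, &sz, (void *)&bin_size,
            sizeof(bin_size)) == 0) {
            printf("ncached_max for the 128-byte bin: %zu\n", ncached_max);
        }

        /* Cap bins covering sizes 8..128 at 50 cached items each. */
        char settings[] = "8-128:50";
        char *settingsp = settings;
        if (mallctl("thread.tcache.ncached_max.write", NULL, NULL,
            (void *)&settingsp, sizeof(char *)) != 0) {
            printf("ncached_max write failed\n");
        }
        return 0;
    }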
Authored by guangli-dai on 2023-09-19 14:37:09 -07:00; committed by Qi Wang.
parent 6b197fdd46
commit 630f7de952
14 changed files with 477 additions and 70 deletions


@@ -155,6 +155,7 @@ C_SRCS := $(srcroot)src/jemalloc.c \
 	$(srcroot)src/thread_event.c \
 	$(srcroot)src/ticker.c \
 	$(srcroot)src/tsd.c \
+	$(srcroot)src/util.c \
 	$(srcroot)src/witness.c
 ifeq ($(enable_zone_allocator), 1)
 C_SRCS += $(srcroot)src/zone.c


@@ -198,7 +198,8 @@ arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
 		assert(sz_can_use_slab(size));
 		return tcache_alloc_small(tsdn_tsd(tsdn), arena,
 		    tcache, size, ind, zero, slow_path);
-	} else if (likely(ind < TCACHE_NBINS_MAX &&
+	} else if (likely(
+	    ind < tcache_nbins_get(tcache->tcache_slow) &&
 	    !tcache_bin_disabled(ind, &tcache->bins[ind],
 	    tcache->tcache_slow))) {
 		return tcache_alloc_large(tsdn_tsd(tsdn), arena,


@@ -210,6 +210,11 @@ cache_bin_info_ncached_max_get(cache_bin_t *bin, cache_bin_info_t *info) {
 	return info->ncached_max;
 }

+/* Gets ncached_max without asserting that the bin is enabled. */
+static inline cache_bin_sz_t
+cache_bin_ncached_max_get_unsafe(cache_bin_t *bin) {
+	return bin->bin_info.ncached_max;
+}
+
 /*
  * Internal.
  *
@@ -229,7 +234,7 @@ cache_bin_assert_earlier(cache_bin_t *bin, uint16_t earlier, uint16_t later) {
  * Does difference calculations that handle wraparound correctly. Earlier must
  * be associated with the position earlier in memory.
  */
-static inline uint16_t
+static inline cache_bin_sz_t
 cache_bin_diff(cache_bin_t *bin, uint16_t earlier, uint16_t later) {
 	cache_bin_assert_earlier(bin, earlier, later);
 	return later - earlier;
@@ -584,19 +589,17 @@ cache_bin_nitems_get_remote(cache_bin_t *bin, cache_bin_info_t *info,
 	cache_bin_sz_t diff = bin->low_bits_empty -
 	    (uint16_t)(uintptr_t)bin->stack_head;
 	cache_bin_sz_t n = diff / sizeof(void *);
-	cache_bin_sz_t ncached_max = cache_bin_info_ncached_max_get(bin, info);
-	assert(n <= ncached_max);
 	*ncached = n;

 	/* Racy version of cache_bin_nstashed_get_internal. */
 	uint16_t low_bits_low_bound = cache_bin_low_bits_low_bound_get(bin,
 	    info);
 	n = (bin->low_bits_full - low_bits_low_bound) / sizeof(void *);
-	assert(n <= ncached_max);
 	*nstashed = n;
-	/* Note that cannot assert ncached + nstashed <= ncached_max (racy). */
+	/*
+	 * Note that cannot assert anything regarding ncached_max because
+	 * it can be configured on the fly and is thus racy.
+	 */
 }

 /*


@@ -14,6 +14,7 @@
 /* Maximum ctl tree depth. */
 #define CTL_MAX_DEPTH	7
+#define CTL_MULTI_SETTING_MAX_LEN 1000

 typedef struct ctl_node_s {
 	bool named;


@@ -37,8 +37,10 @@
 /* Various function pointers are static and immutable except during testing. */
 #ifdef JEMALLOC_JET
 #  define JET_MUTABLE
+#  define JET_EXTERN extern
 #else
 #  define JET_MUTABLE const
+#  define JET_EXTERN static
 #endif

 #define JEMALLOC_VA_ARGS_HEAD(head, ...) head


@@ -26,7 +26,7 @@ extern unsigned opt_lg_tcache_flush_large_div;
  * it should not be changed on the fly. To change the number of tcache bins
  * in use, refer to tcache_nbins of each tcache.
  */
-extern unsigned global_do_not_change_nbins;
+extern unsigned global_do_not_change_tcache_nbins;

 /*
  * Maximum cached size class. Same as above, this is only used during threads
@@ -55,6 +55,9 @@ void tcache_bin_flush_large(tsd_t *tsd, tcache_t *tcache,
     cache_bin_t *cache_bin, szind_t binind, unsigned rem);
 void tcache_bin_flush_stashed(tsd_t *tsd, tcache_t *tcache,
     cache_bin_t *cache_bin, szind_t binind, bool is_small);
+bool tcache_bins_ncached_max_write(tsd_t *tsd, char *settings, size_t len);
+bool tcache_bin_ncached_max_read(tsd_t *tsd, size_t bin_size,
+    cache_bin_sz_t *ncached_max);
 void tcache_arena_reassociate(tsdn_t *tsdn, tcache_slow_t *tcache_slow,
     tcache_t *tcache, arena_t *arena);
 tcache_t *tcache_create_explicit(tsd_t *tsd);


@@ -46,7 +46,7 @@ tcache_bin_settings_backup(tcache_t *tcache,
     cache_bin_info_t tcache_bin_info[TCACHE_NBINS_MAX]) {
 	for (unsigned i = 0; i < TCACHE_NBINS_MAX; i++) {
 		cache_bin_info_init(&tcache_bin_info[i],
-		    tcache->bins[i].bin_info.ncached_max);
+		    cache_bin_ncached_max_get_unsafe(&tcache->bins[i]));
 	}
 }

@@ -54,6 +54,7 @@ JEMALLOC_ALWAYS_INLINE bool
 tcache_bin_disabled(szind_t ind, cache_bin_t *bin,
     tcache_slow_t *tcache_slow) {
 	assert(bin != NULL);
+	assert(ind < TCACHE_NBINS_MAX);
 	bool disabled = cache_bin_disabled(bin);

 	/*
@@ -66,7 +67,7 @@ tcache_bin_disabled(szind_t ind, cache_bin_t *bin,
 	 * ind < nbins and ncached_max > 0.
 	 */
 	unsigned nbins = tcache_nbins_get(tcache_slow);
-	cache_bin_sz_t ncached_max = bin->bin_info.ncached_max;
+	cache_bin_sz_t ncached_max = cache_bin_ncached_max_get_unsafe(bin);
 	if (ind >= nbins) {
 		assert(disabled);
 	} else {
@@ -215,6 +216,8 @@ tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
 	assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SC_SMALL_MAXCLASS);
 	assert(tcache_salloc(tsd_tsdn(tsd), ptr) <=
 	    tcache_max_get(tcache->tcache_slow));
+	assert(!tcache_bin_disabled(binind, &tcache->bins[binind],
+	    tcache->tcache_slow));

 	cache_bin_t *bin = &tcache->bins[binind];
 	if (unlikely(!cache_bin_dalloc_easy(bin, ptr))) {


@@ -130,4 +130,12 @@ util_prefetch_write_range(void *ptr, size_t sz) {

 #undef UTIL_INLINE

+/*
+ * Reads the settings in the following format:
+ * key1-key2:value|key3-key4:value|...
+ * Note it does not handle the ending '\0'.
+ */
+bool
+multi_setting_parse_next(const char **setting_segment_cur, size_t *len_left,
+    size_t *key_start, size_t *key_end, size_t *value);
+
 #endif	/* JEMALLOC_INTERNAL_UTIL_H */
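
For illustration only (not part of the patch), the loop below sketches how a
string in this format could be walked with the helper; `parse_example` is a
hypothetical function, and the snippet assumes jemalloc-internal context where
this header and `strlen` are available:

    /* Sketch: "8-128:100|160-320:50" yields (8, 128, 100), then (160, 320, 50). */
    static void
    parse_example(void) {
        const char *cur = "8-128:100|160-320:50";
        size_t len_left = strlen(cur);  /* Excludes the trailing '\0'. */
        while (len_left > 0) {
            size_t key_start, key_end, value;
            if (multi_setting_parse_next(&cur, &len_left, &key_start,
                &key_end, &value)) {
                break;  /* Malformed segment; caller reports an error. */
            }
            /* Apply (key_start, key_end, value) here. */
        }
    }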


@@ -10,8 +10,9 @@ const uintptr_t disabled_bin = JUNK_ADDR;
 void
 cache_bin_info_init(cache_bin_info_t *info,
     cache_bin_sz_t ncached_max) {
+	assert(ncached_max <= CACHE_BIN_NCACHED_MAX);
 	size_t stack_size = (size_t)ncached_max * sizeof(void *);
-	assert(stack_size < ((size_t)1 << (sizeof(cache_bin_sz_t) * 8)));
+	assert(stack_size <= UINT16_MAX);
 	info->ncached_max = (cache_bin_sz_t)ncached_max;
 }


@@ -68,6 +68,8 @@ CTL_PROTO(max_background_threads)
 CTL_PROTO(thread_tcache_enabled)
 CTL_PROTO(thread_tcache_max)
 CTL_PROTO(thread_tcache_flush)
+CTL_PROTO(thread_tcache_ncached_max_write)
+CTL_PROTO(thread_tcache_ncached_max_read_sizeclass)
 CTL_PROTO(thread_peak_read)
 CTL_PROTO(thread_peak_reset)
 CTL_PROTO(thread_prof_name)
@@ -374,10 +376,17 @@ CTL_PROTO(stats_mutexes_reset)
  */
 #define INDEX(i)	{false},	i##_index

+static const ctl_named_node_t	thread_tcache_ncached_max_node[] = {
+	{NAME("read_sizeclass"),
+	    CTL(thread_tcache_ncached_max_read_sizeclass)},
+	{NAME("write"),		CTL(thread_tcache_ncached_max_write)}
+};
+
 static const ctl_named_node_t	thread_tcache_node[] = {
 	{NAME("enabled"),	CTL(thread_tcache_enabled)},
 	{NAME("max"),		CTL(thread_tcache_max)},
-	{NAME("flush"),		CTL(thread_tcache_flush)}
+	{NAME("flush"),		CTL(thread_tcache_flush)},
+	{NAME("ncached_max"),	CHILD(named, thread_tcache_ncached_max)}
 };

 static const ctl_named_node_t thread_peak_node[] = {
@@ -2282,6 +2291,78 @@ label_return:
 CTL_RO_NL_GEN(thread_allocated, tsd_thread_allocated_get(tsd), uint64_t)
 CTL_RO_NL_GEN(thread_allocatedp, tsd_thread_allocatedp_get(tsd), uint64_t *)

+static int
+thread_tcache_ncached_max_read_sizeclass_ctl(tsd_t *tsd, const size_t *mib,
+    size_t miblen, void *oldp, size_t *oldlenp, void *newp,
+    size_t newlen) {
+	int ret;
+	size_t bin_size = 0;
+
+	/* Read the bin size from newp. */
+	if (newp == NULL) {
+		ret = EINVAL;
+		goto label_return;
+	}
+	WRITE(bin_size, size_t);
+
+	cache_bin_sz_t ncached_max = 0;
+	if (tcache_bin_ncached_max_read(tsd, bin_size, &ncached_max)) {
+		ret = EINVAL;
+		goto label_return;
+	}
+	size_t result = (size_t)ncached_max;
+	READ(result, size_t);
+
+	ret = 0;
+label_return:
+	return ret;
+}
+
+static int
+thread_tcache_ncached_max_write_ctl(tsd_t *tsd, const size_t *mib,
+    size_t miblen, void *oldp, size_t *oldlenp, void *newp,
+    size_t newlen) {
+	int ret;
+	WRITEONLY();
+	if (newp != NULL) {
+		if (!tcache_available(tsd)) {
+			ret = ENOENT;
+			goto label_return;
+		}
+		char *settings = NULL;
+		WRITE(settings, char *);
+		if (settings == NULL) {
+			ret = EINVAL;
+			goto label_return;
+		}
+		/* Get the length of the setting string safely. */
+		char *end = (char *)memchr(settings, '\0',
+		    CTL_MULTI_SETTING_MAX_LEN);
+		if (end == NULL) {
+			ret = EINVAL;
+			goto label_return;
+		}
+		/*
+		 * Exclude the last '\0' for len since it is not handled by
+		 * multi_setting_parse_next.
+		 */
+		size_t len = (uintptr_t)end - (uintptr_t)settings;
+		if (len == 0) {
+			ret = 0;
+			goto label_return;
+		}
+		if (tcache_bins_ncached_max_write(tsd, settings, len)) {
+			ret = EINVAL;
+			goto label_return;
+		}
+	}
+	ret = 0;
+label_return:
+	return ret;
+}
+
 CTL_RO_NL_GEN(thread_deallocated, tsd_thread_deallocated_get(tsd), uint64_t)
 CTL_RO_NL_GEN(thread_deallocatedp, tsd_thread_deallocatedp_get(tsd), uint64_t *)

@@ -3155,7 +3236,7 @@ CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
 CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
 CTL_RO_NL_GEN(arenas_tcache_max, global_do_not_change_tcache_maxclass, size_t)
 CTL_RO_NL_GEN(arenas_nbins, SC_NBINS, unsigned)
-CTL_RO_NL_GEN(arenas_nhbins, global_do_not_change_nbins, unsigned)
+CTL_RO_NL_GEN(arenas_nhbins, global_do_not_change_tcache_nbins, unsigned)
 CTL_RO_NL_GEN(arenas_bin_i_size, bin_infos[mib[2]].reg_size, size_t)
 CTL_RO_NL_GEN(arenas_bin_i_nregs, bin_infos[mib[2]].nregs, uint32_t)
 CTL_RO_NL_GEN(arenas_bin_i_slab_size, bin_infos[mib[2]].slab_size, size_t)


@@ -821,50 +821,6 @@ init_opt_stats_opts(const char *v, size_t vlen, char *dest) {
 	assert(opts_len == strlen(dest));
 }

-/* Reads the next size pair in a multi-sized option. */
-static bool
-malloc_conf_multi_sizes_next(const char **slab_size_segment_cur,
-    size_t *vlen_left, size_t *slab_start, size_t *slab_end, size_t *new_size) {
-	const char *cur = *slab_size_segment_cur;
-	char *end;
-	uintmax_t um;
-
-	set_errno(0);
-
-	/* First number, then '-' */
-	um = malloc_strtoumax(cur, &end, 0);
-	if (get_errno() != 0 || *end != '-') {
-		return true;
-	}
-	*slab_start = (size_t)um;
-	cur = end + 1;
-
-	/* Second number, then ':' */
-	um = malloc_strtoumax(cur, &end, 0);
-	if (get_errno() != 0 || *end != ':') {
-		return true;
-	}
-	*slab_end = (size_t)um;
-	cur = end + 1;
-
-	/* Last number */
-	um = malloc_strtoumax(cur, &end, 0);
-	if (get_errno() != 0) {
-		return true;
-	}
-	*new_size = (size_t)um;
-
-	/* Consume the separator if there is one. */
-	if (*end == '|') {
-		end++;
-	}
-
-	*vlen_left -= end - *slab_size_segment_cur;
-	*slab_size_segment_cur = end;
-	return false;
-}
-
 static void
 malloc_conf_format_error(const char *msg, const char *begin, const char *end) {
 	size_t len = end - begin + 1;
@@ -1351,7 +1307,7 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
 			size_t size_start;
 			size_t size_end;
 			size_t nshards;
-			bool err = malloc_conf_multi_sizes_next(
+			bool err = multi_setting_parse_next(
 			    &bin_shards_segment_cur, &vlen_left,
 			    &size_start, &size_end, &nshards);
 			if (err || bin_update_shard_size(
@@ -1613,7 +1569,7 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
 			size_t slab_start;
 			size_t slab_end;
 			size_t pgs;
-			err = malloc_conf_multi_sizes_next(
+			err = multi_setting_parse_next(
 			    &slab_size_segment_cur,
 			    &vlen_left, &slab_start, &slab_end,
 			    &pgs);
@@ -4140,6 +4096,7 @@ batch_alloc(void **ptrs, size_t num, size_t size, int flags) {
 		tcache_t *tcache = tcache_get_from_ind(tsd, tcache_ind,
 		    /* slow */ true, /* is_alloc */ true);
 		if (likely(tcache != NULL &&
+		    ind < tcache_nbins_get(tcache->tcache_slow) &&
 		    !tcache_bin_disabled(ind, &tcache->bins[ind],
 		    tcache->tcache_slow)) && progress < batch) {
 			if (bin == NULL) {


@@ -63,7 +63,7 @@ unsigned opt_lg_tcache_flush_large_div = 1;
  * is only used to initialize tcache_nbins in the per-thread tcache.
  * Directly modifying it will not affect threads already launched.
  */
-unsigned global_do_not_change_nbins;
+unsigned global_do_not_change_tcache_nbins;

 /*
  * Max size class to be cached (can be small or large). This value is only used
  * to initialize tcache_max in the per-thread tcache. Directly modifying it
@@ -193,8 +193,7 @@ tcache_event(tsd_t *tsd) {
 		goto label_done;
 	}

-	tcache_bin_flush_stashed(tsd, tcache, cache_bin, szind,
-	    is_small);
+	tcache_bin_flush_stashed(tsd, tcache, cache_bin, szind, is_small);

 	cache_bin_sz_t low_water = cache_bin_low_water_get(cache_bin,
 	    &cache_bin->bin_info);
 	if (low_water > 0) {
@@ -591,6 +590,28 @@ tcache_bin_flush_stashed(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin,
 	assert(head_content == *cache_bin->stack_head);
 }

+bool
+tcache_bin_ncached_max_read(tsd_t *tsd, size_t bin_size,
+    cache_bin_sz_t *ncached_max) {
+	if (bin_size > TCACHE_MAXCLASS_LIMIT) {
+		return true;
+	}
+
+	if (!tcache_available(tsd)) {
+		*ncached_max = 0;
+		return false;
+	}
+
+	tcache_t *tcache = tsd_tcachep_get(tsd);
+	assert(tcache != NULL);
+	szind_t bin_ind = sz_size2index(bin_size);
+
+	cache_bin_t *bin = &tcache->bins[bin_ind];
+	*ncached_max = tcache_bin_disabled(bin_ind, bin, tcache->tcache_slow) ?
+	    0: cache_bin_info_ncached_max_get(bin, &bin->bin_info);
+	return false;
+}
+
 void
 tcache_arena_associate(tsdn_t *tsdn, tcache_slow_t *tcache_slow,
     tcache_t *tcache, arena_t *arena) {
@@ -651,8 +672,8 @@ static void
 tcache_default_settings_init(tcache_slow_t *tcache_slow) {
 	assert(tcache_slow != NULL);
 	assert(global_do_not_change_tcache_maxclass != 0);
-	assert(global_do_not_change_nbins != 0);
-	tcache_slow->tcache_nbins = global_do_not_change_nbins;
+	assert(global_do_not_change_tcache_nbins != 0);
+	tcache_slow->tcache_nbins = global_do_not_change_tcache_nbins;
 }

 static void
@@ -772,7 +793,7 @@ tcache_ncached_max_compute(szind_t szind) {
 	}
 }

-static void
+JET_EXTERN void
 tcache_bin_info_compute(cache_bin_info_t tcache_bin_info[TCACHE_NBINS_MAX]) {
 	/*
 	 * Compute the values for each bin, but for bins with indices larger
@@ -866,7 +887,7 @@ tcache_create_explicit(tsd_t *tsd) {
 	 * the beginning of the whole allocation (for freeing). The makes sure
 	 * the cache bins have the requested alignment.
 	 */
-	unsigned tcache_nbins = global_do_not_change_nbins;
+	unsigned tcache_nbins = global_do_not_change_tcache_nbins;
 	size_t tcache_size, alignment;
 	cache_bin_info_t tcache_bin_info[TCACHE_NBINS_MAX] = {{0}};
 	tcache_bin_info_compute(tcache_bin_info);
@@ -963,6 +984,52 @@ thread_tcache_max_set(tsd_t *tsd, size_t tcache_max) {
 	assert(tcache_nbins_get(tcache_slow) == sz_size2index(tcache_max) + 1);
 }

+bool
+tcache_bins_ncached_max_write(tsd_t *tsd, char *settings, size_t len) {
+	assert(tcache_available(tsd));
+	tcache_t *tcache = tsd_tcachep_get(tsd);
+	assert(tcache != NULL);
+	cache_bin_info_t tcache_bin_info[TCACHE_NBINS_MAX];
+	tcache_bin_settings_backup(tcache, tcache_bin_info);
+
+	const char *bin_settings_segment_cur = settings;
+	size_t len_left = len;
+	assert(len_left != 0);
+
+	do {
+		size_t size_start, size_end;
+		size_t ncached_max;
+		bool err = multi_setting_parse_next(&bin_settings_segment_cur,
+		    &len_left, &size_start, &size_end, &ncached_max);
+		if (err) {
+			return true;
+		}
+
+		if (size_end > TCACHE_MAXCLASS_LIMIT) {
+			size_end = TCACHE_MAXCLASS_LIMIT;
+		}
+		if (size_start > TCACHE_MAXCLASS_LIMIT ||
+		    size_start > size_end) {
+			continue;
+		}
+
+		/* May get called before sz_init (during malloc_conf_init). */
+		szind_t bin_start = sz_size2index_compute(size_start);
+		szind_t bin_end = sz_size2index_compute(size_end);
+		if (ncached_max > CACHE_BIN_NCACHED_MAX) {
+			ncached_max = (size_t)CACHE_BIN_NCACHED_MAX;
+		}
+		for (szind_t i = bin_start; i <= bin_end; i++) {
+			cache_bin_info_init(&tcache_bin_info[i],
+			    (cache_bin_sz_t)ncached_max);
+		}
+	} while (len_left > 0);
+
+	arena_t *assigned_arena = tcache->tcache_slow->arena;
+	tcache_cleanup(tsd);
+	tsd_tcache_data_init_with_bin_settings(tsd, assigned_arena,
+	    tcache_bin_info);
+	return false;
+}
+
 static void
 tcache_flush_cache(tsd_t *tsd, tcache_t *tcache) {
 	tcache_slow_t *tcache_slow = tcache->tcache_slow;
@@ -1180,7 +1247,7 @@ bool
 tcache_boot(tsdn_t *tsdn, base_t *base) {
 	global_do_not_change_tcache_maxclass = sz_s2u(opt_tcache_max);
 	assert(global_do_not_change_tcache_maxclass <= TCACHE_MAXCLASS_LIMIT);
-	global_do_not_change_nbins =
+	global_do_not_change_tcache_nbins =
 	    sz_size2index(global_do_not_change_tcache_maxclass) + 1;

 	if (malloc_mutex_init(&tcaches_mtx, "tcaches", WITNESS_RANK_TCACHES,

src/util.c (new file, 49 lines)

@@ -0,0 +1,49 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+#include "jemalloc/internal/util.h"
+
+/* Reads the next size pair in a multi-sized option. */
+bool
+multi_setting_parse_next(const char **setting_segment_cur, size_t *len_left,
+    size_t *key_start, size_t *key_end, size_t *value) {
+	const char *cur = *setting_segment_cur;
+	char *end;
+	uintmax_t um;
+
+	set_errno(0);
+
+	/* First number, then '-' */
+	um = malloc_strtoumax(cur, &end, 0);
+	if (get_errno() != 0 || *end != '-') {
+		return true;
+	}
+	*key_start = (size_t)um;
+	cur = end + 1;
+
+	/* Second number, then ':' */
+	um = malloc_strtoumax(cur, &end, 0);
+	if (get_errno() != 0 || *end != ':') {
+		return true;
+	}
+	*key_end = (size_t)um;
+	cur = end + 1;
+
+	/* Last number */
+	um = malloc_strtoumax(cur, &end, 0);
+	if (get_errno() != 0) {
+		return true;
+	}
+	*value = (size_t)um;
+
+	/* Consume the separator if there is one. */
+	if (*end == '|') {
+		end++;
+	}
+
+	*len_left -= end - *setting_segment_cur;
+	*setting_segment_cur = end;
+	return false;
+}


@@ -2,6 +2,8 @@
 #include "test/san.h"

 const char *malloc_conf = TEST_SAN_UAF_ALIGN_DISABLE;
+extern void tcache_bin_info_compute(
+    cache_bin_info_t tcache_bin_info[TCACHE_NBINS_MAX]);

 enum {
 	alloc_option_start = 0,
@@ -260,7 +262,7 @@ tcache_check(void *arg) {
 	expect_zu_eq(old_tcache_max, opt_tcache_max,
 	    "Unexpected default value for tcache_max");
 	tcache_nbins = tcache_nbins_get(tcache_slow);
-	expect_zu_eq(tcache_nbins, (size_t)global_do_not_change_nbins,
+	expect_zu_eq(tcache_nbins, (size_t)global_do_not_change_tcache_nbins,
 	    "Unexpected default value for tcache_nbins");
 	validate_tcache_stack(tcache);

@@ -364,10 +366,238 @@ TEST_BEGIN(test_thread_tcache_max) {
 }
 TEST_END

+static void
+check_bins_info(cache_bin_info_t tcache_bin_info[TCACHE_NBINS_MAX]) {
+	size_t mib_get[4], mib_get_len;
+	mib_get_len = sizeof(mib_get) / sizeof(size_t);
+	const char *get_name = "thread.tcache.ncached_max.read_sizeclass";
+	size_t ncached_max;
+	size_t sz = sizeof(size_t);
+	expect_d_eq(mallctlnametomib(get_name, mib_get, &mib_get_len), 0,
+	    "Unexpected mallctlnametomib() failure");
+
+	for (szind_t i = 0; i < TCACHE_NBINS_MAX; i++) {
+		size_t bin_size = sz_index2size(i);
+		expect_d_eq(mallctlbymib(mib_get, mib_get_len,
+		    (void *)&ncached_max, &sz,
+		    (void *)&bin_size, sizeof(size_t)), 0,
+		    "Unexpected mallctlbymib() failure");
+		expect_zu_eq(ncached_max, tcache_bin_info[i].ncached_max,
+		    "Unexpected ncached_max for bin %d", i);
+		/* Check ncached_max returned under a non-bin size. */
+		bin_size--;
+		size_t temp_ncached_max = 0;
+		expect_d_eq(mallctlbymib(mib_get, mib_get_len,
+		    (void *)&temp_ncached_max, &sz,
+		    (void *)&bin_size, sizeof(size_t)), 0,
+		    "Unexpected mallctlbymib() failure");
+		expect_zu_eq(temp_ncached_max, ncached_max,
+		    "Unexpected ncached_max for inaccurate bin size.");
+	}
+}
+
+static void *
+ncached_max_check(void *args) {
+	cache_bin_info_t tcache_bin_info[TCACHE_NBINS_MAX];
+	cache_bin_info_t tcache_bin_info_backup[TCACHE_NBINS_MAX];
+	tsd_t *tsd = tsd_fetch();
+	tcache_t *tcache = tsd_tcachep_get(tsd);
+	assert(tcache != NULL);
+	tcache_slow_t *tcache_slow = tcache->tcache_slow;
+
+	/* Check the initial bin settings. */
+	tcache_bin_info_compute(tcache_bin_info);
+	memcpy(tcache_bin_info_backup, tcache_bin_info,
+	    sizeof(tcache_bin_info));
+	unsigned nbins = tcache_nbins_get(tcache_slow);
+	for (szind_t i = nbins; i < TCACHE_NBINS_MAX; i++) {
+		cache_bin_info_init(&tcache_bin_info[i], 0);
+	}
+	check_bins_info(tcache_bin_info);
+
+	size_t mib_set[4], mib_set_len;
+	mib_set_len = sizeof(mib_set) / sizeof(size_t);
+	const char *set_name = "thread.tcache.ncached_max.write";
+	expect_d_eq(mallctlnametomib(set_name, mib_set, &mib_set_len), 0,
+	    "Unexpected mallctlnametomib() failure");
+
+	/* Test the ncached_max set with tcache on. */
+	char inputs[100] = "8-128:1|160-160:11|170-320:22|224-8388609:0";
+	char *inputp = inputs;
+	expect_d_eq(mallctlbymib(mib_set, mib_set_len, NULL, NULL,
+	    (void *)&inputp, sizeof(char *)), 0,
+	    "Unexpected mallctlbymib() failure");
+	for (szind_t i = 0; i < TCACHE_NBINS_MAX; i++) {
+		if (i >= sz_size2index(8) && i <= sz_size2index(128)) {
+			cache_bin_info_init(&tcache_bin_info[i], 1);
+		}
+		if (i == sz_size2index(160)) {
+			cache_bin_info_init(&tcache_bin_info[i], 11);
+		}
+		if (i >= sz_size2index(170) && i <= sz_size2index(320)) {
+			cache_bin_info_init(&tcache_bin_info[i], 22);
+		}
+		if (i >= sz_size2index(224)) {
+			cache_bin_info_init(&tcache_bin_info[i], 0);
+		}
+		if (i >= nbins) {
+			cache_bin_info_init(&tcache_bin_info[i], 0);
+		}
+	}
+	check_bins_info(tcache_bin_info);
+
+	/*
+	 * Close the tcache and set ncached_max of some bins. It will be
+	 * set properly but thread.tcache.ncached_max.read still returns 0
+	 * since the bin is not available yet. After enabling the tcache,
+	 * the new setting will not be carried on. Instead, the default
+	 * settings will be applied.
+	 */
+	bool e0 = false, e1;
+	size_t bool_sz = sizeof(bool);
+	expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e1, &bool_sz,
+	    (void *)&e0, bool_sz), 0, "Unexpected mallctl() error");
+	expect_true(e1, "Unexpected previous tcache state");
+	strcpy(inputs, "0-112:8");
+	/* Setting returns ENOENT when the tcache is disabled. */
+	expect_d_eq(mallctlbymib(mib_set, mib_set_len, NULL, NULL,
+	    (void *)&inputp, sizeof(char *)), ENOENT,
+	    "Unexpected mallctlbymib() failure");
+	/* All ncached_max should return 0 once tcache is disabled. */
+	for (szind_t i = 0; i < TCACHE_NBINS_MAX; i++) {
+		cache_bin_info_init(&tcache_bin_info[i], 0);
+	}
+	check_bins_info(tcache_bin_info);
+
+	e0 = true;
+	expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e1, &bool_sz,
+	    (void *)&e0, bool_sz), 0, "Unexpected mallctl() error");
+	expect_false(e1, "Unexpected previous tcache state");
+	memcpy(tcache_bin_info, tcache_bin_info_backup,
+	    sizeof(tcache_bin_info_backup));
+	for (szind_t i = tcache_nbins_get(tcache_slow); i < TCACHE_NBINS_MAX;
+	    i++) {
+		cache_bin_info_init(&tcache_bin_info[i], 0);
+	}
+	check_bins_info(tcache_bin_info);
+
+	/*
+	 * Set ncached_max of bins not enabled yet. Then, enable them by
+	 * resetting tcache_max. The ncached_max changes should stay.
+	 */
+	size_t tcache_max = 1024;
+	assert_d_eq(mallctl("thread.tcache.max",
+	    NULL, NULL, (void *)&tcache_max, sizeof(size_t)), 0,
+	    "Unexpected mallctl() failure");
+	for (szind_t i = sz_size2index(1024) + 1; i < TCACHE_NBINS_MAX; i++) {
+		cache_bin_info_init(&tcache_bin_info[i], 0);
+	}
+	strcpy(inputs, "2048-6144:123");
+	expect_d_eq(mallctlbymib(mib_set, mib_set_len, NULL, NULL,
+	    (void *)&inputp, sizeof(char *)), 0,
+	    "Unexpected mallctlbymib() failure");
+	check_bins_info(tcache_bin_info);
+	tcache_max = 6144;
+	assert_d_eq(mallctl("thread.tcache.max",
+	    NULL, NULL, (void *)&tcache_max, sizeof(size_t)), 0,
+	    "Unexpected mallctl() failure");
+	memcpy(tcache_bin_info, tcache_bin_info_backup,
+	    sizeof(tcache_bin_info_backup));
+	for (szind_t i = sz_size2index(2048); i < TCACHE_NBINS_MAX; i++) {
+		if (i <= sz_size2index(6144)) {
+			cache_bin_info_init(&tcache_bin_info[i], 123);
+		} else if (i > sz_size2index(6144)) {
+			cache_bin_info_init(&tcache_bin_info[i], 0);
+		}
+	}
+	check_bins_info(tcache_bin_info);
+
+	/* Test an empty input, it should do nothing. */
+	strcpy(inputs, "");
+	expect_d_eq(mallctlbymib(mib_set, mib_set_len, NULL, NULL,
+	    (void *)&inputp, sizeof(char *)), 0,
+	    "Unexpected mallctlbymib() failure");
+	check_bins_info(tcache_bin_info);
+
+	/* Test a half-done string, it should return EINVAL and do nothing. */
+	strcpy(inputs, "4-1024:7|256-1024");
+	expect_d_eq(mallctlbymib(mib_set, mib_set_len, NULL, NULL,
+	    (void *)&inputp, sizeof(char *)), EINVAL,
+	    "Unexpected mallctlbymib() failure");
+	check_bins_info(tcache_bin_info);
+
+	/*
+	 * Test an invalid string with start size larger than end size. It
+	 * should return success but do nothing.
+	 */
+	strcpy(inputs, "1024-256:7");
+	expect_d_eq(mallctlbymib(mib_set, mib_set_len, NULL, NULL,
+	    (void *)&inputp, sizeof(char *)), 0,
+	    "Unexpected mallctlbymib() failure");
+	check_bins_info(tcache_bin_info);
+
+	/*
+	 * Test a string exceeding the length limit, it should return EINVAL
+	 * and do nothing.
+	 */
+	char *long_inputs = (char *)malloc(10000 * sizeof(char));
+	expect_true(long_inputs != NULL, "Unexpected allocation failure.");
+	for (int i = 0; i < 200; i++) {
+		memcpy(long_inputs + i * 9, "4-1024:3|", 9);
+	}
+	memcpy(long_inputs + 200 * 9, "4-1024:3", 8);
+	long_inputs[200 * 9 + 8] = '\0';
+	inputp = long_inputs;
+	expect_d_eq(mallctlbymib(mib_set, mib_set_len, NULL, NULL,
+	    (void *)&inputp, sizeof(char *)), EINVAL,
+	    "Unexpected mallctlbymib() failure");
+	check_bins_info(tcache_bin_info);
+	free(long_inputs);
+
+	/*
+	 * Test a string with invalid characters, it should return EINVAL
+	 * and do nothing.
+	 */
+	strcpy(inputs, "k8-1024:77p");
+	inputp = inputs;
+	expect_d_eq(mallctlbymib(mib_set, mib_set_len, NULL, NULL,
+	    (void *)&inputp, sizeof(char *)), EINVAL,
+	    "Unexpected mallctlbymib() failure");
+	check_bins_info(tcache_bin_info);
+
+	/* Test large ncached_max, it should return success but capped. */
+	strcpy(inputs, "1024-1024:65540");
+	expect_d_eq(mallctlbymib(mib_set, mib_set_len, NULL, NULL,
+	    (void *)&inputp, sizeof(char *)), 0,
+	    "Unexpected mallctlbymib() failure");
+	cache_bin_info_init(&tcache_bin_info[sz_size2index(1024)],
+	    CACHE_BIN_NCACHED_MAX);
+	check_bins_info(tcache_bin_info);
+
+	return NULL;
+}
+
+TEST_BEGIN(test_ncached_max) {
+	test_skip_if(!config_stats);
+	test_skip_if(!opt_tcache);
+	test_skip_if(san_uaf_detection_enabled());
+	unsigned nthreads = 8;
+	VARIABLE_ARRAY(thd_t, threads, nthreads);
+	for (unsigned i = 0; i < nthreads; i++) {
+		thd_create(&threads[i], ncached_max_check, NULL);
+	}
+	for (unsigned i = 0; i < nthreads; i++) {
+		thd_join(threads[i], NULL);
+	}
+}
+TEST_END
+
 int
 main(void) {
 	return test(
 	    test_tcache_max,
-	    test_thread_tcache_max);
+	    test_thread_tcache_max,
+	    test_ncached_max);
 }