Add mallctl to set and get ncached_max of each cache_bin.

1. `thread_tcache_ncached_max_read_sizeclass` lets users read the
    ncached_max of the bin matching the input size class; the value is
    reported through oldp (a size that is not an exact bin size is
    rounded up to the next size class).
2. `thread_tcache_ncached_max_write` takes a char array encoding the
    per-bin settings for the tcache (see the usage sketch below).
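A minimal usage sketch of the two controls from a caller's perspective. It assumes the ctl nodes are exposed as the mallctl paths `thread.tcache.ncached_max.read_sizeclass` and `thread.tcache.ncached_max.write`, that the size class goes in through newp and the result comes back through oldp as a `size_t`, and that the settings string uses the `key1-key2:value|...` format (keys as byte-size bounds) described in util.h below; all of these details are assumptions for illustration, not guarantees of this diff.

```c
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
	/*
	 * Read ncached_max for the bin that serves 4096-byte requests.
	 * Assumed convention: size class in through newp, result out
	 * through oldp as a size_t.
	 */
	size_t bin_size = 4096;
	size_t ncached_max = 0;
	size_t sz = sizeof(ncached_max);
	if (mallctl("thread.tcache.ncached_max.read_sizeclass",
	    &ncached_max, &sz, &bin_size, sizeof(bin_size)) == 0) {
		printf("ncached_max for %zu-byte bins: %zu\n", bin_size,
		    ncached_max);
	}

	/*
	 * Write per-bin caps as "key1-key2:value|..." segments; the keys
	 * are assumed to be size bounds in bytes.  The string is passed
	 * by pointer through newp, as with other string-valued mallctls.
	 */
	const char *settings = "0-4096:100|4097-32768:50";
	if (mallctl("thread.tcache.ncached_max.write", NULL, NULL,
	    &settings, sizeof(settings)) != 0) {
		fprintf(stderr, "ncached_max write failed\n");
	}
	return 0;
}
```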
Author: guangli-dai
Date: 2023-09-19 14:37:09 -07:00
Committed by: Qi Wang
Parent: 6b197fdd46
Commit: 630f7de952
14 changed files with 477 additions and 70 deletions

View File

@@ -198,7 +198,8 @@ arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
assert(sz_can_use_slab(size));
return tcache_alloc_small(tsdn_tsd(tsdn), arena,
tcache, size, ind, zero, slow_path);
-} else if (likely(ind < TCACHE_NBINS_MAX &&
+} else if (likely(
+    ind < tcache_nbins_get(tcache->tcache_slow) &&
!tcache_bin_disabled(ind, &tcache->bins[ind],
tcache->tcache_slow))) {
return tcache_alloc_large(tsdn_tsd(tsdn), arena,

View File

@@ -210,6 +210,11 @@ cache_bin_info_ncached_max_get(cache_bin_t *bin, cache_bin_info_t *info) {
return info->ncached_max;
}
+/* Gets ncached_max without asserting that the bin is enabled. */
+static inline cache_bin_sz_t
+cache_bin_ncached_max_get_unsafe(cache_bin_t *bin) {
+	return bin->bin_info.ncached_max;
+}
/*
* Internal.
*
@@ -229,7 +234,7 @@ cache_bin_assert_earlier(cache_bin_t *bin, uint16_t earlier, uint16_t later) {
* Does difference calculations that handle wraparound correctly. Earlier must
* be associated with the position earlier in memory.
*/
-static inline uint16_t
+static inline cache_bin_sz_t
cache_bin_diff(cache_bin_t *bin, uint16_t earlier, uint16_t later) {
cache_bin_assert_earlier(bin, earlier, later);
return later - earlier;
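As an aside on the comment above: the wraparound guarantee comes from 16-bit modular arithmetic on the low bits of the stack pointers. A tiny standalone demonstration (not jemalloc code) of why the subtraction stays correct when the low 16 bits wrap:

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/*
 * As long as "earlier" really sits at or before "later" in memory and the
 * two are less than 64 KiB apart, subtracting their low 16 bits yields the
 * byte distance even when the 16-bit values wrapped past 0xffff.
 */
static uint16_t
low_bits_diff(uint16_t earlier, uint16_t later) {
	return later - earlier;
}

int
main(void) {
	/* No wraparound: low bits 0x1000 .. 0x1040. */
	assert(low_bits_diff(0x1000, 0x1040) == 0x40);
	/* Wraparound: low bits 0xfff0 .. 0x0030 still differ by 0x40. */
	assert(low_bits_diff(0xfff0, 0x0030) == 0x40);
	printf("ok\n");
	return 0;
}
```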
@@ -584,19 +589,17 @@ cache_bin_nitems_get_remote(cache_bin_t *bin, cache_bin_info_t *info,
cache_bin_sz_t diff = bin->low_bits_empty -
(uint16_t)(uintptr_t)bin->stack_head;
cache_bin_sz_t n = diff / sizeof(void *);
-cache_bin_sz_t ncached_max = cache_bin_info_ncached_max_get(bin, info);
-assert(n <= ncached_max);
*ncached = n;
/* Racy version of cache_bin_nstashed_get_internal. */
uint16_t low_bits_low_bound = cache_bin_low_bits_low_bound_get(bin,
info);
n = (bin->low_bits_full - low_bits_low_bound) / sizeof(void *);
-assert(n <= ncached_max);
*nstashed = n;
-/* Note that cannot assert ncached + nstashed <= ncached_max (racy). */
+/*
+ * Note that cannot assert anything regarding ncached_max because
+ * it can be configured on the fly and is thus racy.
+ */
}
/*

View File

@@ -14,6 +14,7 @@
/* Maximum ctl tree depth. */
#define CTL_MAX_DEPTH 7
+#define CTL_MULTI_SETTING_MAX_LEN 1000
typedef struct ctl_node_s {
bool named;

View File

@@ -37,8 +37,10 @@
/* Various function pointers are static and immutable except during testing. */
#ifdef JEMALLOC_JET
# define JET_MUTABLE
+# define JET_EXTERN extern
#else
# define JET_MUTABLE const
+# define JET_EXTERN static
#endif
#define JEMALLOC_VA_ARGS_HEAD(head, ...) head

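For context on the new `JET_EXTERN` macro: like `JET_MUTABLE`, it keeps a symbol locked down in production builds (static linkage) while a `JEMALLOC_JET` test build widens it (extern linkage) so unit tests can reach and override it. A self-contained sketch of the pattern with hypothetical names, not taken from the jemalloc sources:

```c
#include <stdio.h>

/* Same pattern as the macros above (simplified copy for the sketch). */
#ifdef JEMALLOC_JET
#  define JET_MUTABLE
#  define JET_EXTERN extern
#else
#  define JET_MUTABLE const
#  define JET_EXTERN static
#endif

/* A hook tests may swap out; it is a const pointer in production builds. */
typedef void (log_hook_t)(const char *);
static void default_log(const char *msg) { fputs(msg, stderr); }
log_hook_t *JET_MUTABLE log_hook = default_log;

/* A helper with internal linkage in production, external linkage for tests. */
JET_EXTERN size_t
count_segments(const char *s) {
	size_t n = (*s != '\0') ? 1 : 0;
	for (; *s != '\0'; s++) {
		if (*s == '|') {
			n++;
		}
	}
	return n;
}

int
main(void) {
	log_hook("demo\n");
	printf("%zu\n", count_segments("0-4096:100|4097-32768:50"));
	return 0;
}
```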
View File

@@ -26,7 +26,7 @@ extern unsigned opt_lg_tcache_flush_large_div;
* it should not be changed on the fly. To change the number of tcache bins
* in use, refer to tcache_nbins of each tcache.
*/
-extern unsigned global_do_not_change_nbins;
+extern unsigned global_do_not_change_tcache_nbins;
/*
* Maximum cached size class. Same as above, this is only used during threads
@@ -55,6 +55,9 @@ void tcache_bin_flush_large(tsd_t *tsd, tcache_t *tcache,
cache_bin_t *cache_bin, szind_t binind, unsigned rem);
void tcache_bin_flush_stashed(tsd_t *tsd, tcache_t *tcache,
cache_bin_t *cache_bin, szind_t binind, bool is_small);
+bool tcache_bins_ncached_max_write(tsd_t *tsd, char *settings, size_t len);
+bool tcache_bin_ncached_max_read(tsd_t *tsd, size_t bin_size,
+    cache_bin_sz_t *ncached_max);
void tcache_arena_reassociate(tsdn_t *tsdn, tcache_slow_t *tcache_slow,
tcache_t *tcache, arena_t *arena);
tcache_t *tcache_create_explicit(tsd_t *tsd);

View File

@@ -46,7 +46,7 @@ tcache_bin_settings_backup(tcache_t *tcache,
cache_bin_info_t tcache_bin_info[TCACHE_NBINS_MAX]) {
for (unsigned i = 0; i < TCACHE_NBINS_MAX; i++) {
cache_bin_info_init(&tcache_bin_info[i],
-tcache->bins[i].bin_info.ncached_max);
+cache_bin_ncached_max_get_unsafe(&tcache->bins[i]));
}
}
@@ -54,6 +54,7 @@ JEMALLOC_ALWAYS_INLINE bool
tcache_bin_disabled(szind_t ind, cache_bin_t *bin,
tcache_slow_t *tcache_slow) {
assert(bin != NULL);
+assert(ind < TCACHE_NBINS_MAX);
bool disabled = cache_bin_disabled(bin);
/*
@@ -66,7 +67,7 @@ tcache_bin_disabled(szind_t ind, cache_bin_t *bin,
* ind < nbins and ncached_max > 0.
*/
unsigned nbins = tcache_nbins_get(tcache_slow);
-cache_bin_sz_t ncached_max = bin->bin_info.ncached_max;
+cache_bin_sz_t ncached_max = cache_bin_ncached_max_get_unsafe(bin);
if (ind >= nbins) {
assert(disabled);
} else {
@@ -215,6 +216,8 @@ tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SC_SMALL_MAXCLASS);
assert(tcache_salloc(tsd_tsdn(tsd), ptr) <=
tcache_max_get(tcache->tcache_slow));
+assert(!tcache_bin_disabled(binind, &tcache->bins[binind],
+    tcache->tcache_slow));
cache_bin_t *bin = &tcache->bins[binind];
if (unlikely(!cache_bin_dalloc_easy(bin, ptr))) {

View File

@@ -130,4 +130,12 @@ util_prefetch_write_range(void *ptr, size_t sz) {
#undef UTIL_INLINE
+/*
+ * Reads the settings in the following format:
+ * key1-key2:value|key3-key4:value|...
+ * Note it does not handle the ending '\0'.
+ */
+bool
+multi_setting_parse_next(const char **setting_segment_cur, size_t *len_left,
+    size_t *key_start, size_t *key_end, size_t *value);
#endif /* JEMALLOC_INTERNAL_UTIL_H */
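To make the settings format concrete, here is a standalone sketch of a parser for `key1-key2:value|key3-key4:value|...` strings. It is illustrative only: it is not jemalloc's `multi_setting_parse_next`, the boolean-error return convention is assumed, and reading the keys as size bounds in bytes is an assumption based on the ncached_max use case.

```c
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Illustrative parser for "key1-key2:value|key3-key4:value|..." segments.
 * Returns true on a malformed segment (boolean-error convention assumed).
 */
static bool
demo_parse_next(const char **cur, size_t *len_left, size_t *key_start,
    size_t *key_end, size_t *value) {
	char *end;

	*key_start = strtoul(*cur, &end, 10);
	if (end == *cur || *end != '-') {
		return true;
	}
	*cur = end + 1;
	*key_end = strtoul(*cur, &end, 10);
	if (end == *cur || *end != ':') {
		return true;
	}
	*cur = end + 1;
	*value = strtoul(*cur, &end, 10);
	if (end == *cur) {
		return true;
	}
	/* Skip the '|' separator, if present. */
	*cur = (*end == '|') ? end + 1 : end;
	*len_left = strlen(*cur);
	return false;
}

int
main(void) {
	/* Hypothetical settings: keys assumed to be size bounds in bytes. */
	const char *settings = "0-4096:100|4097-32768:50";
	size_t len_left = strlen(settings);
	size_t key_start, key_end, value;

	while (len_left > 0) {
		if (demo_parse_next(&settings, &len_left, &key_start,
		    &key_end, &value)) {
			fprintf(stderr, "malformed settings string\n");
			return 1;
		}
		printf("sizes [%zu, %zu] -> ncached_max %zu\n", key_start,
		    key_end, value);
	}
	return 0;
}
```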