Use ticker_geom_t for arena tcache decay.

Author: David Goldblatt, 2021-01-31 12:50:55 -08:00 (committed by David Goldblatt)
parent 8edfc5b170
commit c259323ab3
9 changed files with 52 additions and 198 deletions

File 1 of 9

@@ -116,18 +116,22 @@ arena_prof_info_set(tsd_t *tsd, edata_t *edata, prof_tctx_t *tctx,
 JEMALLOC_ALWAYS_INLINE void
 arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks) {
-    tsd_t *tsd;
-    ticker_t *decay_ticker;
     if (unlikely(tsdn_null(tsdn))) {
         return;
     }
-    tsd = tsdn_tsd(tsdn);
-    decay_ticker = decay_ticker_get(tsd, arena_ind_get(arena));
-    if (unlikely(decay_ticker == NULL)) {
-        return;
-    }
-    if (unlikely(ticker_ticks(decay_ticker, nticks))) {
+    tsd_t *tsd = tsdn_tsd(tsdn);
+    /*
+     * We use the ticker_geom_t to avoid having per-arena state in the tsd.
+     * Instead of having a countdown-until-decay timer running for every
+     * arena in every thread, we flip a coin once per tick, whose
+     * probability of coming up heads is 1/nticks; this is effectively the
+     * operation of the ticker_geom_t.  Each arena has the same chance of a
+     * coinflip coming up heads (1/ARENA_DECAY_NTICKS_PER_UPDATE), so we can
+     * use a single ticker for all of them.
+     */
+    ticker_geom_t *decay_ticker = tsd_arena_decay_tickerp_get(tsd);
+    uint64_t *prng_state = tsd_prng_statep_get(tsd);
+    if (unlikely(ticker_geom_ticks(decay_ticker, prng_state, nticks))) {
        arena_decay(tsdn, arena, false, false);
    }
 }
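The comment in the new code is the heart of the change: N per-arena countdown timers are replaced by one shared probabilistic ticker. The following standalone sketch illustrates the idea only; it is not jemalloc's actual ticker_geom_t implementation, and the names geom_ticker_t and prng_next are invented for the example.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-in for jemalloc's internal PRNG step. */
static inline uint64_t
prng_next(uint64_t *state) {
    uint64_t x = *state;    /* xorshift64; seed must be nonzero */
    x ^= x << 13;
    x ^= x >> 7;
    x ^= x << 17;
    return *state = x;
}

typedef struct {
    uint32_t nticks;    /* expected ticks between firings */
} geom_ticker_t;

/*
 * Advance the ticker by nticks events; return true if it "fires".
 * Each event is a coinflip with heads probability 1/t->nticks, so the
 * gap between firings is geometrically distributed with mean t->nticks,
 * matching the average rate of a deterministic countdown ticker while
 * keeping no per-arena counter.
 */
static inline bool
geom_ticker_ticks(geom_ticker_t *t, uint64_t *prng_state, unsigned nticks) {
    for (unsigned i = 0; i < nticks; i++) {
        if (prng_next(prng_state) % t->nticks == 0) {
            return true;
        }
    }
    return false;
}

A production implementation can avoid one PRNG draw per event by sampling the entire geometrically distributed gap up front and counting it down; the distribution of firings is identical.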

File 2 of 9

@@ -98,9 +98,4 @@ struct arena_s {
     bin_t bins[0];
 };

-/* Used in conjunction with tsd for fast arena-related context lookup. */
-struct arena_tdata_s {
-    ticker_t decay_ticker;
-};
-
 #endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H */

File 3 of 9

@@ -7,11 +7,10 @@
 #define DIRTY_DECAY_MS_DEFAULT ZD(10 * 1000)
 #define MUZZY_DECAY_MS_DEFAULT (0)
 /* Number of event ticks between time checks. */
-#define DECAY_NTICKS_PER_UPDATE 1000
+#define ARENA_DECAY_NTICKS_PER_UPDATE 1000

 typedef struct arena_decay_s arena_decay_t;
 typedef struct arena_s arena_t;
-typedef struct arena_tdata_s arena_tdata_t;

 typedef enum {
     percpu_arena_mode_names_base = 0, /* Used for options processing. */

File 4 of 9

@@ -56,12 +56,10 @@ void bootstrap_free(void *ptr);
 void arena_set(unsigned ind, arena_t *arena);
 unsigned narenas_total_get(void);
 arena_t *arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
-arena_tdata_t *arena_tdata_get_hard(tsd_t *tsd, unsigned ind);
 arena_t *arena_choose_hard(tsd_t *tsd, bool internal);
 void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind);
 void iarena_cleanup(tsd_t *tsd);
 void arena_cleanup(tsd_t *tsd);
-void arenas_tdata_cleanup(tsd_t *tsd);
 size_t batch_alloc(void **ptrs, size_t num, size_t size, int flags);
 void jemalloc_prefork(void);
 void jemalloc_postfork_parent(void);

File 5 of 9

@@ -56,31 +56,6 @@ percpu_arena_ind_limit(percpu_arena_mode_t mode) {
     }
 }

-static inline arena_tdata_t *
-arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing) {
-    arena_tdata_t *tdata;
-    arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
-
-    if (unlikely(arenas_tdata == NULL)) {
-        /* arenas_tdata hasn't been initialized yet. */
-        return arena_tdata_get_hard(tsd, ind);
-    }
-    if (unlikely(ind >= tsd_narenas_tdata_get(tsd))) {
-        /*
-         * ind is invalid, cache is old (too small), or tdata to be
-         * initialized.
-         */
-        return (refresh_if_missing ? arena_tdata_get_hard(tsd, ind) :
-            NULL);
-    }
-
-    tdata = &arenas_tdata[ind];
-    if (likely(tdata != NULL) || !refresh_if_missing) {
-        return tdata;
-    }
-    return arena_tdata_get_hard(tsd, ind);
-}
-
 static inline arena_t *
 arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing) {
     arena_t *ret;
@@ -97,17 +72,6 @@ arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing) {
     return ret;
 }

-static inline ticker_t *
-decay_ticker_get(tsd_t *tsd, unsigned ind) {
-    arena_tdata_t *tdata;
-
-    tdata = arena_tdata_get(tsd, ind, true);
-    if (unlikely(tdata == NULL)) {
-        return NULL;
-    }
-    return &tdata->decay_ticker;
-}
-
 JEMALLOC_ALWAYS_INLINE bool
 tcache_available(tsd_t *tsd) {
     /*

File 6 of 9

@@ -58,9 +58,7 @@ typedef ql_elm(tsd_t) tsd_link_t;
 /* O(name, type, nullable type) */
 #define TSD_DATA_SLOW \
     O(tcache_enabled, bool, bool) \
-    O(arenas_tdata_bypass, bool, bool) \
     O(reentrancy_level, int8_t, int8_t) \
-    O(narenas_tdata, uint32_t, uint32_t) \
     O(thread_allocated_last_event, uint64_t, uint64_t) \
     O(thread_allocated_next_event, uint64_t, uint64_t) \
     O(thread_deallocated_last_event, uint64_t, uint64_t) \
@@ -77,7 +75,7 @@ typedef ql_elm(tsd_t) tsd_link_t;
     O(prng_state, uint64_t, uint64_t) \
     O(iarena, arena_t *, arena_t *) \
     O(arena, arena_t *, arena_t *) \
-    O(arenas_tdata, arena_tdata_t *, arena_tdata_t *) \
+    O(arena_decay_ticker, ticker_geom_t, ticker_geom_t) \
     O(sec_shard, uint8_t, uint8_t) \
     O(binshards, tsd_binshards_t, tsd_binshards_t) \
     O(tsd_link, tsd_link_t, tsd_link_t) \
@@ -90,9 +88,7 @@ typedef ql_elm(tsd_t) tsd_link_t;
 #define TSD_DATA_SLOW_INITIALIZER \
     /* tcache_enabled */ TCACHE_ENABLED_ZERO_INITIALIZER, \
-    /* arenas_tdata_bypass */ false, \
     /* reentrancy_level */ 0, \
-    /* narenas_tdata */ 0, \
     /* thread_allocated_last_event */ 0, \
     /* thread_allocated_next_event */ 0, \
     /* thread_deallocated_last_event */ 0, \
@@ -109,7 +105,8 @@ typedef ql_elm(tsd_t) tsd_link_t;
     /* prng_state */ 0, \
     /* iarena */ NULL, \
     /* arena */ NULL, \
-    /* arenas_tdata */ NULL, \
+    /* arena_decay_ticker */ \
+        TICKER_GEOM_INIT(ARENA_DECAY_NTICKS_PER_UPDATE), \
     /* sec_shard */ (uint8_t)-1, \
     /* binshards */ TSD_BINSHARDS_ZERO_INITIALIZER, \
     /* tsd_link */ {NULL}, \
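For readers unfamiliar with this file: TSD_DATA_SLOW is an X-macro table, where each O(name, type, nullable type) row expands into a tsd_t field, an initializer slot, and typed accessors. That is why adding the arena_decay_ticker row above is all it takes to get the tsd_arena_decay_tickerp_get() call used in arena_decay_ticks(). A simplified two-argument sketch of the pattern (hypothetical standalone code, not jemalloc's real macros, which also generate setters and nullable variants):

#include <stdint.h>

typedef struct { uint32_t nticks; } ticker_geom_t;    /* placeholder */
#define TICKER_GEOM_INIT(n) {(n)}
#define ARENA_DECAY_NTICKS_PER_UPDATE 1000

/* Each O(name, type) entry becomes one tsd field... */
#define TSD_FIELDS \
    O(prng_state, uint64_t) \
    O(arena_decay_ticker, ticker_geom_t)

typedef struct tsd_s {
#define O(n, t) t n;
    TSD_FIELDS
#undef O
} tsd_t;

/* ...and one pointer-getter, e.g. tsd_arena_decay_tickerp_get(). */
#define O(n, t) \
static inline t * \
tsd_##n##p_get(tsd_t *tsd) { \
    return &tsd->n; \
}
TSD_FIELDS
#undef O

/* The initializer table mirrors the field table, row for row. */
static const tsd_t tsd_initializer = {
    /* prng_state */ 0,
    /* arena_decay_ticker */ TICKER_GEOM_INIT(ARENA_DECAY_NTICKS_PER_UPDATE),
};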

File 7 of 9

@@ -493,82 +493,6 @@ arena_unbind(tsd_t *tsd, unsigned ind, bool internal) {
     }
 }

-arena_tdata_t *
-arena_tdata_get_hard(tsd_t *tsd, unsigned ind) {
-    arena_tdata_t *tdata, *arenas_tdata_old;
-    arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
-    unsigned narenas_tdata_old, i;
-    unsigned narenas_tdata = tsd_narenas_tdata_get(tsd);
-    unsigned narenas_actual = narenas_total_get();
-
-    /*
-     * Dissociate old tdata array (and set up for deallocation upon return)
-     * if it's too small.
-     */
-    if (arenas_tdata != NULL && narenas_tdata < narenas_actual) {
-        arenas_tdata_old = arenas_tdata;
-        narenas_tdata_old = narenas_tdata;
-        arenas_tdata = NULL;
-        narenas_tdata = 0;
-        tsd_arenas_tdata_set(tsd, arenas_tdata);
-        tsd_narenas_tdata_set(tsd, narenas_tdata);
-    } else {
-        arenas_tdata_old = NULL;
-        narenas_tdata_old = 0;
-    }
-
-    /* Allocate tdata array if it's missing. */
-    if (arenas_tdata == NULL) {
-        bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd);
-        narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1;
-
-        if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) {
-            *arenas_tdata_bypassp = true;
-            arenas_tdata = (arena_tdata_t *)a0malloc(
-                sizeof(arena_tdata_t) * narenas_tdata);
-            *arenas_tdata_bypassp = false;
-        }
-        if (arenas_tdata == NULL) {
-            tdata = NULL;
-            goto label_return;
-        }
-        assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp);
-        tsd_arenas_tdata_set(tsd, arenas_tdata);
-        tsd_narenas_tdata_set(tsd, narenas_tdata);
-    }
-
-    /*
-     * Copy to tdata array.  It's possible that the actual number of arenas
-     * has increased since narenas_total_get() was called above, but that
-     * causes no correctness issues unless two threads concurrently execute
-     * the arenas.create mallctl, which we trust mallctl synchronization to
-     * prevent.
-     */
-    /* Copy/initialize tickers. */
-    for (i = 0; i < narenas_actual; i++) {
-        if (i < narenas_tdata_old) {
-            ticker_copy(&arenas_tdata[i].decay_ticker,
-                &arenas_tdata_old[i].decay_ticker);
-        } else {
-            ticker_init(&arenas_tdata[i].decay_ticker,
-                DECAY_NTICKS_PER_UPDATE);
-        }
-    }
-    if (narenas_tdata > narenas_actual) {
-        memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t)
-            * (narenas_tdata - narenas_actual));
-    }
-
-    /* Read the refreshed tdata array. */
-    tdata = &arenas_tdata[ind];
-
-label_return:
-    if (arenas_tdata_old != NULL) {
-        a0dalloc(arenas_tdata_old);
-    }
-    return tdata;
-}
-
 /* Slow path, called only by arena_choose(). */
 arena_t *
 arena_choose_hard(tsd_t *tsd, bool internal) {
@@ -705,20 +629,6 @@ arena_cleanup(tsd_t *tsd) {
     }
 }

-void
-arenas_tdata_cleanup(tsd_t *tsd) {
-    arena_tdata_t *arenas_tdata;
-
-    /* Prevent tsd->arenas_tdata from being (re)created. */
-    *tsd_arenas_tdata_bypassp_get(tsd) = true;
-
-    arenas_tdata = tsd_arenas_tdata_get(tsd);
-    if (arenas_tdata != NULL) {
-        tsd_arenas_tdata_set(tsd, NULL);
-        a0dalloc(arenas_tdata);
-    }
-}
-
 static void
 stats_print_atexit(void) {
     if (config_stats) {

File 8 of 9

@@ -251,8 +251,6 @@ assert_tsd_data_cleanup_done(tsd_t *tsd) {
     assert(!tsd_in_nominal_list(tsd));
     assert(*tsd_arenap_get_unsafe(tsd) == NULL);
     assert(*tsd_iarenap_get_unsafe(tsd) == NULL);
-    assert(*tsd_arenas_tdata_bypassp_get_unsafe(tsd) == true);
-    assert(*tsd_arenas_tdatap_get_unsafe(tsd) == NULL);
     assert(*tsd_tcache_enabledp_get_unsafe(tsd) == false);
     assert(*tsd_prof_tdatap_get_unsafe(tsd) == NULL);
 }
@@ -267,7 +265,6 @@ tsd_data_init_nocleanup(tsd_t *tsd) {
     * We set up tsd in a way that no cleanup is needed.
     */
    rtree_ctx_data_init(tsd_rtree_ctxp_get_unsafe(tsd));
-    *tsd_arenas_tdata_bypassp_get(tsd) = true;
    *tsd_tcache_enabledp_get_unsafe(tsd) = false;
    *tsd_reentrancy_levelp_get(tsd) = 1;
    tsd_prng_state_init(tsd);
@@ -375,7 +372,6 @@ tsd_do_data_cleanup(tsd_t *tsd) {
     prof_tdata_cleanup(tsd);
     iarena_cleanup(tsd);
     arena_cleanup(tsd);
-    arenas_tdata_cleanup(tsd);
     tcache_cleanup(tsd);
     witnesses_cleanup(tsd_witness_tsdp_get_unsafe(tsd));
     *tsd_reentrancy_levelp_get(tsd) = 1;
@@ -439,7 +435,6 @@ malloc_tsd_boot0(void) {
         return NULL;
     }
     tsd = tsd_fetch();
-    *tsd_arenas_tdata_bypassp_get(tsd) = true;
     return tsd;
 }
@@ -449,7 +444,6 @@ malloc_tsd_boot1(void) {
     tsd_t *tsd = tsd_fetch();
     /* malloc_slow has been set properly.  Update tsd_slow. */
     tsd_slow_update(tsd);
-    *tsd_arenas_tdata_bypassp_get(tsd) = false;
 }

 #ifdef _WIN32

File 9 of 9

@@ -187,7 +187,7 @@ TEST_BEGIN(test_decay_ticks) {
     test_skip_if(check_background_thread_enabled());
     test_skip_if(opt_hpa);

-    ticker_t *decay_ticker;
+    ticker_geom_t *decay_ticker;
     unsigned tick0, tick1, arena_ind;
     size_t sz, large0;
     void *p;
@@ -205,7 +205,7 @@ TEST_BEGIN(test_decay_ticks) {
     expect_d_eq(mallctl("thread.arena", (void *)&old_arena_ind,
         &sz_arena_ind, (void *)&arena_ind, sizeof(arena_ind)), 0,
         "Unexpected mallctl() failure");
-    decay_ticker = decay_ticker_get(tsd_fetch(), arena_ind);
+    decay_ticker = tsd_arena_decay_tickerp_get(tsd_fetch());
     expect_ptr_not_null(decay_ticker,
         "Unexpected failure getting decay ticker");
@@ -216,60 +216,60 @@ TEST_BEGIN(test_decay_ticks) {
     */

    /* malloc(). */
-    tick0 = ticker_read(decay_ticker);
+    tick0 = ticker_geom_read(decay_ticker);
    p = malloc(large0);
    expect_ptr_not_null(p, "Unexpected malloc() failure");
-    tick1 = ticker_read(decay_ticker);
+    tick1 = ticker_geom_read(decay_ticker);
    expect_u32_ne(tick1, tick0, "Expected ticker to tick during malloc()");

    /* free(). */
-    tick0 = ticker_read(decay_ticker);
+    tick0 = ticker_geom_read(decay_ticker);
    free(p);
-    tick1 = ticker_read(decay_ticker);
+    tick1 = ticker_geom_read(decay_ticker);
    expect_u32_ne(tick1, tick0, "Expected ticker to tick during free()");

    /* calloc(). */
-    tick0 = ticker_read(decay_ticker);
+    tick0 = ticker_geom_read(decay_ticker);
    p = calloc(1, large0);
    expect_ptr_not_null(p, "Unexpected calloc() failure");
-    tick1 = ticker_read(decay_ticker);
+    tick1 = ticker_geom_read(decay_ticker);
    expect_u32_ne(tick1, tick0, "Expected ticker to tick during calloc()");
    free(p);

    /* posix_memalign(). */
-    tick0 = ticker_read(decay_ticker);
+    tick0 = ticker_geom_read(decay_ticker);
    expect_d_eq(posix_memalign(&p, sizeof(size_t), large0), 0,
        "Unexpected posix_memalign() failure");
-    tick1 = ticker_read(decay_ticker);
+    tick1 = ticker_geom_read(decay_ticker);
    expect_u32_ne(tick1, tick0,
        "Expected ticker to tick during posix_memalign()");
    free(p);

    /* aligned_alloc(). */
-    tick0 = ticker_read(decay_ticker);
+    tick0 = ticker_geom_read(decay_ticker);
    p = aligned_alloc(sizeof(size_t), large0);
    expect_ptr_not_null(p, "Unexpected aligned_alloc() failure");
-    tick1 = ticker_read(decay_ticker);
+    tick1 = ticker_geom_read(decay_ticker);
    expect_u32_ne(tick1, tick0,
        "Expected ticker to tick during aligned_alloc()");
    free(p);

    /* realloc(). */
    /* Allocate. */
-    tick0 = ticker_read(decay_ticker);
+    tick0 = ticker_geom_read(decay_ticker);
    p = realloc(NULL, large0);
    expect_ptr_not_null(p, "Unexpected realloc() failure");
-    tick1 = ticker_read(decay_ticker);
+    tick1 = ticker_geom_read(decay_ticker);
    expect_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
    /* Reallocate. */
-    tick0 = ticker_read(decay_ticker);
+    tick0 = ticker_geom_read(decay_ticker);
    p = realloc(p, large0);
    expect_ptr_not_null(p, "Unexpected realloc() failure");
-    tick1 = ticker_read(decay_ticker);
+    tick1 = ticker_geom_read(decay_ticker);
    expect_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
    /* Deallocate. */
-    tick0 = ticker_read(decay_ticker);
+    tick0 = ticker_geom_read(decay_ticker);
    realloc(p, 0);
-    tick1 = ticker_read(decay_ticker);
+    tick1 = ticker_geom_read(decay_ticker);
    expect_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");

    /*
@@ -286,41 +286,41 @@ TEST_BEGIN(test_decay_ticks) {
        sz = allocx_sizes[i];

        /* mallocx(). */
-        tick0 = ticker_read(decay_ticker);
+        tick0 = ticker_geom_read(decay_ticker);
        p = mallocx(sz, MALLOCX_TCACHE_NONE);
        expect_ptr_not_null(p, "Unexpected mallocx() failure");
-        tick1 = ticker_read(decay_ticker);
+        tick1 = ticker_geom_read(decay_ticker);
        expect_u32_ne(tick1, tick0,
            "Expected ticker to tick during mallocx() (sz=%zu)",
            sz);
        /* rallocx(). */
-        tick0 = ticker_read(decay_ticker);
+        tick0 = ticker_geom_read(decay_ticker);
        p = rallocx(p, sz, MALLOCX_TCACHE_NONE);
        expect_ptr_not_null(p, "Unexpected rallocx() failure");
-        tick1 = ticker_read(decay_ticker);
+        tick1 = ticker_geom_read(decay_ticker);
        expect_u32_ne(tick1, tick0,
            "Expected ticker to tick during rallocx() (sz=%zu)",
            sz);
        /* xallocx(). */
-        tick0 = ticker_read(decay_ticker);
+        tick0 = ticker_geom_read(decay_ticker);
        xallocx(p, sz, 0, MALLOCX_TCACHE_NONE);
-        tick1 = ticker_read(decay_ticker);
+        tick1 = ticker_geom_read(decay_ticker);
        expect_u32_ne(tick1, tick0,
            "Expected ticker to tick during xallocx() (sz=%zu)",
            sz);
        /* dallocx(). */
-        tick0 = ticker_read(decay_ticker);
+        tick0 = ticker_geom_read(decay_ticker);
        dallocx(p, MALLOCX_TCACHE_NONE);
-        tick1 = ticker_read(decay_ticker);
+        tick1 = ticker_geom_read(decay_ticker);
        expect_u32_ne(tick1, tick0,
            "Expected ticker to tick during dallocx() (sz=%zu)",
            sz);
        /* sdallocx(). */
        p = mallocx(sz, MALLOCX_TCACHE_NONE);
        expect_ptr_not_null(p, "Unexpected mallocx() failure");
-        tick0 = ticker_read(decay_ticker);
+        tick0 = ticker_geom_read(decay_ticker);
        sdallocx(p, sz, MALLOCX_TCACHE_NONE);
-        tick1 = ticker_read(decay_ticker);
+        tick1 = ticker_geom_read(decay_ticker);
        expect_u32_ne(tick1, tick0,
            "Expected ticker to tick during sdallocx() "
            "(sz=%zu)", sz);
@@ -349,31 +349,24 @@ TEST_BEGIN(test_decay_ticks) {
            sz = tcache_sizes[i];

            /* tcache fill. */
-            tick0 = ticker_read(decay_ticker);
+            tick0 = ticker_geom_read(decay_ticker);
            p = mallocx(sz, MALLOCX_TCACHE(tcache_ind));
            expect_ptr_not_null(p, "Unexpected mallocx() failure");
-            tick1 = ticker_read(decay_ticker);
+            tick1 = ticker_geom_read(decay_ticker);
            expect_u32_ne(tick1, tick0,
                "Expected ticker to tick during tcache fill "
                "(sz=%zu)", sz);
            /* tcache flush. */
            dallocx(p, MALLOCX_TCACHE(tcache_ind));
-            tick0 = ticker_read(decay_ticker);
+            tick0 = ticker_geom_read(decay_ticker);
            expect_d_eq(mallctl("tcache.flush", NULL, NULL,
                (void *)&tcache_ind, sizeof(unsigned)), 0,
                "Unexpected mallctl failure");
-            tick1 = ticker_read(decay_ticker);
+            tick1 = ticker_geom_read(decay_ticker);
            /* Will only tick if it's in tcache. */
-            if (sz <= tcache_max) {
-                expect_u32_ne(tick1, tick0,
-                    "Expected ticker to tick during tcache "
-                    "flush (sz=%zu)", sz);
-            } else {
-                expect_u32_eq(tick1, tick0,
-                    "Unexpected ticker tick during tcache "
-                    "flush (sz=%zu)", sz);
-            }
+            expect_u32_ne(tick1, tick0,
+                "Expected ticker to tick during tcache flush (sz=%zu)", sz);
        }
    }
 }
 TEST_END
@@ -401,7 +394,7 @@ decay_ticker_helper(unsigned arena_ind, int flags, bool dirty, ssize_t dt,
     void *p = do_mallocx(1, flags);
     uint64_t dirty_npurge1, muzzy_npurge1;
     do {
-        for (unsigned i = 0; i < DECAY_NTICKS_PER_UPDATE / 2;
+        for (unsigned i = 0; i < ARENA_DECAY_NTICKS_PER_UPDATE / 2;
            i++) {
            void *q = do_mallocx(1, flags);
            dallocx(q, flags);
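A quick sanity check on the loop bound above: assuming each do_mallocx()/dallocx() pair advances the decay ticker by two ticks (one per operation; an assumption for this estimate), one pass of the inner loop produces about (ARENA_DECAY_NTICKS_PER_UPDATE / 2) * 2 = 1000 ticks. Since the geometric ticker fires with probability 1/ARENA_DECAY_NTICKS_PER_UPDATE per tick, the expected number of decay updates per pass is about 1000 * (1/1000) = 1, the same average rate the old deterministic countdown provided. Only the variance changes: the probability of at least one update in a pass is roughly 1 - (1 - 1/1000)^1000 ≈ 1 - 1/e ≈ 63%, so the surrounding do/while, which iterates until it observes purging, still terminates quickly in expectation.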