Tcache: split up fast and slow path data.

David Goldblatt 2020-04-07 17:48:35 -07:00 committed by David Goldblatt
parent 7099c66205
commit a13fbad374
12 changed files with 156 additions and 103 deletions


@@ -53,7 +53,7 @@ struct arena_s {
*
* Synchronization: tcache_ql_mtx.
*/
ql_head(tcache_t) tcache_ql;
ql_head(tcache_slow_t) tcache_ql;
ql_head(cache_bin_array_descriptor_t) cache_bin_array_descriptor_ql;
malloc_mutex_t tcache_ql_mtx;

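With this change the arena's list of extant tcaches links tcache_slow_t
elements rather than tcache_t, so list walkers reach the cache bins through the
back-pointer. A minimal sketch of the resulting traversal, mirroring the
stats_print_atexit() change later in this commit (the lock discipline shown is
taken from that call site):

	tcache_slow_t *tcache_slow;
	malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
	ql_foreach(tcache_slow, &arena->tcache_ql, link) {
		/* The hot half hangs off the slow half's back-pointer. */
		tcache_stats_merge(tsdn, tcache_slow->tcache, arena);
	}
	malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
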

@@ -129,7 +129,7 @@ tcache_available(tsd_t *tsd) {
*/
if (likely(tsd_tcache_enabled_get(tsd))) {
/* Associated arena == NULL implies tcache init in progress. */
assert(tsd_tcachep_get(tsd)->arena == NULL ||
assert(tsd_tcache_slowp_get(tsd)->arena == NULL ||
!cache_bin_still_zero_initialized(
tcache_small_bin_get(tsd_tcachep_get(tsd), 0)));
return true;
@@ -147,6 +147,15 @@ tcache_get(tsd_t *tsd) {
return tsd_tcachep_get(tsd);
}
JEMALLOC_ALWAYS_INLINE tcache_slow_t *
tcache_slow_get(tsd_t *tsd) {
if (!tcache_available(tsd)) {
return NULL;
}
return tsd_tcache_slowp_get(tsd);
}
static inline void
pre_reentrancy(tsd_t *tsd, arena_t *arena) {
/* arena is the current context. Reentry from a0 is not allowed. */

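The new tcache_slow_get() mirrors tcache_get(): both return NULL when the
tcache is unavailable, so a caller can guard on either half and reach the other
through the pair's mutual pointers. A sketch of the intended pattern
(hypothetical surrounding code):

	tcache_slow_t *tcache_slow = tcache_slow_get(tsd);
	if (tcache_slow != NULL) {
		/* For the automatic tcache the halves point at each other. */
		assert(tcache_slow->tcache == tcache_get(tsd));
	}
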

@@ -19,8 +19,10 @@ percpu_arena_update(tsd_t *tsd, unsigned cpu) {
arena_migrate(tsd, oldind, newind);
tcache_t *tcache = tcache_get(tsd);
if (tcache != NULL) {
tcache_arena_reassociate(tsd_tsdn(tsd), tcache,
newarena);
tcache_slow_t *tcache_slow = tsd_tcache_slowp_get(tsd);
tcache_t *tcache = tsd_tcachep_get(tsd);
tcache_arena_reassociate(tsd_tsdn(tsd), tcache_slow,
tcache, newarena);
}
}
}
@@ -45,18 +47,19 @@ arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) {
ret = arena_choose_hard(tsd, internal);
assert(ret);
if (tcache_available(tsd)) {
tcache_t *tcache = tcache_get(tsd);
if (tcache->arena != NULL) {
tcache_slow_t *tcache_slow = tsd_tcache_slowp_get(tsd);
tcache_t *tcache = tsd_tcachep_get(tsd);
if (tcache_slow->arena != NULL) {
/* See comments in tsd_tcache_data_init().*/
assert(tcache->arena ==
assert(tcache_slow->arena ==
arena_get(tsd_tsdn(tsd), 0, false));
if (tcache->arena != ret) {
if (tcache_slow->arena != ret) {
tcache_arena_reassociate(tsd_tsdn(tsd),
tcache, ret);
tcache_slow, tcache, ret);
}
} else {
tcache_arena_associate(tsd_tsdn(tsd), tcache,
ret);
tcache_arena_associate(tsd_tsdn(tsd),
tcache_slow, tcache, ret);
}
}
}


@@ -26,15 +26,17 @@ extern cache_bin_info_t *tcache_bin_info;
extern tcaches_t *tcaches;
size_t tcache_salloc(tsdn_t *tsdn, const void *ptr);
void tcache_event_hard(tsd_t *tsd, tcache_t *tcache);
void tcache_event_hard(tsd_t *tsd, tcache_slow_t *tcache_slow,
tcache_t *tcache);
void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
cache_bin_t *tbin, szind_t binind, bool *tcache_success);
void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
szind_t binind, unsigned rem);
void tcache_bin_flush_large(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
szind_t binind, unsigned rem);
void tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache,
arena_t *arena);
void tcache_arena_reassociate(tsdn_t *tsdn, tcache_slow_t *tcache_slow,
tcache_t *tcache, arena_t *arena);
tcache_t *tcache_create_explicit(tsd_t *tsd);
void tcache_cleanup(tsd_t *tsd);
void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
@@ -42,7 +44,8 @@ bool tcaches_create(tsd_t *tsd, base_t *base, unsigned *r_ind);
void tcaches_flush(tsd_t *tsd, unsigned ind);
void tcaches_destroy(tsd_t *tsd, unsigned ind);
bool tcache_boot(tsdn_t *tsdn, base_t *base);
void tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
void tcache_arena_associate(tsdn_t *tsdn, tcache_slow_t *tcache_slow,
tcache_t *tcache, arena_t *arena);
void tcache_prefork(tsdn_t *tsdn);
void tcache_postfork_parent(tsdn_t *tsdn);
void tcache_postfork_child(tsdn_t *tsdn);


@@ -7,25 +7,19 @@
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/tsd_types.h"
struct tcache_s {
/*
* To minimize our cache-footprint, we put the frequently accessed data
* together at the start of this struct.
/*
* The tcache state is split into the slow and hot path data. Each has a
* pointer to the other, and the data always comes in pairs. The layout of each
* of them varies in practice; tcache_slow lives in the TSD for the automatic
* tcache, and as part of a dynamic allocation for manual allocations. Keeping
* a pointer to tcache_slow lets us treat these cases uniformly, rather than
* splitting up the tcache [de]allocation code into those paths called with the
* TSD tcache and those called with a manual tcache.
*/
/*
* The pointer stacks associated with bins follow as a contiguous array.
* During tcache initialization, the avail pointer in each element of
* tbins is initialized to point to the proper offset within this array.
*/
cache_bin_t bins_small[SC_NBINS];
/*
* This data is less hot; we can be a little less careful with our
* footprint here.
*/
struct tcache_slow_s {
/* Lets us track all the tcaches in an arena. */
ql_elm(tcache_t) link;
ql_elm(tcache_slow_t) link;
/*
* The descriptor lets the arena find our cache bins without seeing the
@@ -45,9 +39,23 @@ struct tcache_s {
/*
* The start of the allocation containing the dynamic allocation for
* either the cache bins alone, or the cache bin memory as well as this
* tcache_t.
* tcache_slow_t and its associated tcache_t.
*/
void *dyn_alloc;
/* The associated bins. */
tcache_t *tcache;
};
struct tcache_s {
tcache_slow_t *tcache_slow;
/*
* The pointer stacks associated with bins follow as a contiguous array.
* During tcache initialization, the avail pointer in each element of
* tbins is initialized to point to the proper offset within this array.
*/
cache_bin_t bins_small[SC_NBINS];
/*
* We put the cache bins for large size classes at the end of the
* struct, since some of them might not get used. This might end up


@@ -3,6 +3,7 @@
#include "jemalloc/internal/sc.h"
typedef struct tcache_slow_s tcache_slow_t;
typedef struct tcache_s tcache_t;
typedef struct tcaches_s tcaches_t;
@@ -52,6 +53,7 @@ typedef struct tcaches_s tcaches_t;
/* Used in TSD static initializer only. Real init in tsd_tcache_data_init(). */
#define TCACHE_ZERO_INITIALIZER {0}
#define TCACHE_SLOW_ZERO_INITIALIZER {0}
/* Used in TSD static initializer only. Will be initialized to opt_tcache. */
#define TCACHE_ENABLED_ZERO_INITIALIZER false


@@ -76,7 +76,8 @@ typedef ql_elm(tsd_t) tsd_link_t;
O(arenas_tdata, arena_tdata_t *, arena_tdata_t *)\
O(binshards, tsd_binshards_t, tsd_binshards_t)\
O(tsd_link, tsd_link_t, tsd_link_t) \
O(in_hook, bool, bool)
O(in_hook, bool, bool) \
O(tcache_slow, tcache_slow_t, tcache_slow_t)
#define TSD_DATA_SLOW_INITIALIZER \
/* tcache_enabled */ TCACHE_ENABLED_ZERO_INITIALIZER, \
@@ -100,7 +101,8 @@ typedef ql_elm(tsd_t) tsd_link_t;
/* arenas_tdata */ NULL, \
/* binshards */ TSD_BINSHARDS_ZERO_INITIALIZER, \
/* tsd_link */ {NULL}, \
/* in_hook */ false,
/* in_hook */ false, \
/* tcache_slow */ TCACHE_SLOW_ZERO_INITIALIZER,
/* O(name, type, nullable type) */
#define TSD_DATA_FAST \

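With this, the automatic tcache's cold half joins the slow-path TSD data, while
the tcache itself stays in the fast-path section that begins below. Once
tsd_tcache_data_init() has run, the two TSD fields satisfy a simple pairing
invariant (a sketch, using the TSD-generated getters):

	tcache_t *tcache = tsd_tcachep_get(tsd);
	tcache_slow_t *tcache_slow = tsd_tcache_slowp_get(tsd);
	assert(tcache->tcache_slow == tcache_slow);
	assert(tcache_slow->tcache == tcache);
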

@@ -1690,15 +1690,16 @@ arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
if (config_stats) {
ql_new(&arena->tcache_ql);
ql_new(&arena->cache_bin_array_descriptor_ql);
tcache_t *tcache = tcache_get(tsdn_tsd(tsdn));
if (tcache != NULL && tcache->arena == arena) {
ql_elm_new(tcache, link);
ql_tail_insert(&arena->tcache_ql, tcache, link);
tcache_slow_t *tcache_slow = tcache_slow_get(tsdn_tsd(tsdn));
if (tcache_slow != NULL && tcache_slow->arena == arena) {
tcache_t *tcache = tcache_slow->tcache;
ql_elm_new(tcache_slow, link);
ql_tail_insert(&arena->tcache_ql, tcache_slow, link);
cache_bin_array_descriptor_init(
&tcache->cache_bin_array_descriptor,
&tcache_slow->cache_bin_array_descriptor,
tcache->bins_small, tcache->bins_large);
ql_tail_insert(&arena->cache_bin_array_descriptor_ql,
&tcache->cache_bin_array_descriptor, link);
&tcache_slow->cache_bin_array_descriptor, link);
}
}


@@ -1864,7 +1864,8 @@ thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
arena_migrate(tsd, oldind, newind);
if (tcache_available(tsd)) {
tcache_arena_reassociate(tsd_tsdn(tsd),
tsd_tcachep_get(tsd), newarena);
tsd_tcache_slowp_get(tsd), tsd_tcachep_get(tsd),
newarena);
}
}


@@ -717,11 +717,13 @@ stats_print_atexit(void) {
for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
arena_t *arena = arena_get(tsdn, i, false);
if (arena != NULL) {
tcache_t *tcache;
tcache_slow_t *tcache_slow;
malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
ql_foreach(tcache, &arena->tcache_ql, link) {
tcache_stats_merge(tsdn, tcache, arena);
ql_foreach(tcache_slow, &arena->tcache_ql,
link) {
tcache_stats_merge(tsdn,
tcache_slow->tcache, arena);
}
malloc_mutex_unlock(tsdn,
&arena->tcache_ql_mtx);


@@ -41,8 +41,8 @@ tcache_salloc(tsdn_t *tsdn, const void *ptr) {
}
void
tcache_event_hard(tsd_t *tsd, tcache_t *tcache) {
szind_t binind = tcache->next_gc_bin;
tcache_event_hard(tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache) {
szind_t binind = tcache_slow->next_gc_bin;
cache_bin_t *tbin;
bool is_small;
if (binind < SC_NBINS) {
@@ -62,7 +62,7 @@ tcache_event_hard(tsd_t *tsd, tcache_t *tcache) {
* Flush (ceiling) 3/4 of the objects below the low water mark.
*/
if (is_small) {
assert(!tcache->bin_refilled[binind]);
assert(!tcache_slow->bin_refilled[binind]);
tcache_bin_flush_small(tsd, tcache, tbin, binind,
ncached - low_water + (low_water >> 2));
/*
@@ -71,43 +71,45 @@ tcache_event_hard(tsd_t *tsd, tcache_t *tcache) {
*/
if ((cache_bin_info_ncached_max(
&tcache_bin_info[binind]) >>
(tcache->lg_fill_div[binind] + 1)) >= 1) {
tcache->lg_fill_div[binind]++;
(tcache_slow->lg_fill_div[binind] + 1)) >= 1) {
tcache_slow->lg_fill_div[binind]++;
}
} else {
tcache_bin_flush_large(tsd, tcache, tbin, binind,
ncached - low_water + (low_water >> 2));
}
} else if (is_small && tcache->bin_refilled[binind]) {
} else if (is_small && tcache_slow->bin_refilled[binind]) {
assert(low_water == 0);
/*
* Increase fill count by 2X for small bins. Make sure
* lg_fill_div stays greater than 0.
*/
if (tcache->lg_fill_div[binind] > 1) {
tcache->lg_fill_div[binind]--;
if (tcache_slow->lg_fill_div[binind] > 1) {
tcache_slow->lg_fill_div[binind]--;
}
tcache->bin_refilled[binind] = false;
tcache_slow->bin_refilled[binind] = false;
}
cache_bin_low_water_set(tbin);
tcache->next_gc_bin++;
if (tcache->next_gc_bin == nhbins) {
tcache->next_gc_bin = 0;
tcache_slow->next_gc_bin++;
if (tcache_slow->next_gc_bin == nhbins) {
tcache_slow->next_gc_bin = 0;
}
}
void *
tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
cache_bin_t *tbin, szind_t binind, bool *tcache_success) {
tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena,
tcache_t *tcache, cache_bin_t *tbin, szind_t binind,
bool *tcache_success) {
tcache_slow_t *tcache_slow = tcache->tcache_slow;
void *ret;
assert(tcache->arena != NULL);
assert(tcache_slow->arena != NULL);
unsigned nfill = cache_bin_info_ncached_max(&tcache_bin_info[binind])
>> tcache->lg_fill_div[binind];
>> tcache_slow->lg_fill_div[binind];
arena_cache_bin_fill_small(tsdn, arena, tbin, &tcache_bin_info[binind],
binind, nfill);
tcache->bin_refilled[binind] = true;
tcache_slow->bin_refilled[binind] = true;
ret = cache_bin_alloc(tbin, tcache_success);
return ret;
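
The bookkeeping that moved into tcache_slow drives refill sizing: a small bin
is refilled with ncached_max >> lg_fill_div[binind] objects, so the increment
in tcache_event_hard() halves the next fill and the decrement in the
bin_refilled branch doubles it again. A worked example with illustrative
numbers:

	/* Suppose cache_bin_info_ncached_max() is 200 for this bin. */
	unsigned nfill = 200 >> 1;	/* lg_fill_div == 1: fill 100 objects */
	/* Low water stayed high, so lg_fill_div++. */
	nfill = 200 >> 2;		/* next fill drops to 50 */
	/* Bin was refilled and then fully drained, so lg_fill_div--. */
	nfill = 200 >> 1;		/* back to 100 */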
@@ -154,6 +156,7 @@ tcache_bin_flush_match(edata_t *edata, unsigned cur_arena_ind,
JEMALLOC_ALWAYS_INLINE void
tcache_bin_flush_impl(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
szind_t binind, unsigned rem, bool small) {
tcache_slow_t *tcache_slow = tcache->tcache_slow;
/*
* A couple lookup calls take tsdn; declare it once for convenience
* instead of calling tsd_tsdn(tsd) all the time.
@@ -168,7 +171,7 @@ tcache_bin_flush_impl(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
cache_bin_sz_t ncached = cache_bin_ncached_get(tbin,
&tcache_bin_info[binind]);
assert((cache_bin_sz_t)rem <= ncached);
arena_t *tcache_arena = tcache->arena;
arena_t *tcache_arena = tcache_slow->arena;
assert(tcache_arena != NULL);
unsigned nflush = ncached - rem;
@@ -361,57 +364,60 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
}
void
tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
assert(tcache->arena == NULL);
tcache->arena = arena;
tcache_arena_associate(tsdn_t *tsdn, tcache_slow_t *tcache_slow,
tcache_t *tcache, arena_t *arena) {
assert(tcache_slow->arena == NULL);
tcache_slow->arena = arena;
if (config_stats) {
/* Link into list of extant tcaches. */
malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
ql_elm_new(tcache, link);
ql_tail_insert(&arena->tcache_ql, tcache, link);
ql_elm_new(tcache_slow, link);
ql_tail_insert(&arena->tcache_ql, tcache_slow, link);
cache_bin_array_descriptor_init(
&tcache->cache_bin_array_descriptor, tcache->bins_small,
tcache->bins_large);
&tcache_slow->cache_bin_array_descriptor,
tcache->bins_small, tcache->bins_large);
ql_tail_insert(&arena->cache_bin_array_descriptor_ql,
&tcache->cache_bin_array_descriptor, link);
&tcache_slow->cache_bin_array_descriptor, link);
malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
}
}
static void
tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache) {
arena_t *arena = tcache->arena;
tcache_arena_dissociate(tsdn_t *tsdn, tcache_slow_t *tcache_slow,
tcache_t *tcache) {
arena_t *arena = tcache_slow->arena;
assert(arena != NULL);
if (config_stats) {
/* Unlink from list of extant tcaches. */
malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
if (config_debug) {
bool in_ql = false;
tcache_t *iter;
tcache_slow_t *iter;
ql_foreach(iter, &arena->tcache_ql, link) {
if (iter == tcache) {
if (iter == tcache_slow) {
in_ql = true;
break;
}
}
assert(in_ql);
}
ql_remove(&arena->tcache_ql, tcache, link);
ql_remove(&arena->tcache_ql, tcache_slow, link);
ql_remove(&arena->cache_bin_array_descriptor_ql,
&tcache->cache_bin_array_descriptor, link);
tcache_stats_merge(tsdn, tcache, arena);
&tcache_slow->cache_bin_array_descriptor, link);
tcache_stats_merge(tsdn, tcache_slow->tcache, arena);
malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
}
tcache->arena = NULL;
tcache_slow->arena = NULL;
}
void
tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
tcache_arena_dissociate(tsdn, tcache);
tcache_arena_associate(tsdn, tcache, arena);
tcache_arena_reassociate(tsdn_t *tsdn, tcache_slow_t *tcache_slow,
tcache_t *tcache, arena_t *arena) {
tcache_arena_dissociate(tsdn, tcache_slow, tcache);
tcache_arena_associate(tsdn, tcache_slow, tcache, arena);
}
bool
@@ -429,11 +435,15 @@ tsd_tcache_enabled_data_init(tsd_t *tsd) {
}
static void
tcache_init(tsd_t *tsd, tcache_t *tcache, void *mem) {
memset(&tcache->link, 0, sizeof(ql_elm(tcache_t)));
tcache->next_gc_bin = 0;
tcache->arena = NULL;
tcache->dyn_alloc = mem;
tcache_init(tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache,
void *mem) {
tcache->tcache_slow = tcache_slow;
tcache_slow->tcache = tcache;
memset(&tcache_slow->link, 0, sizeof(ql_elm(tcache_t)));
tcache_slow->next_gc_bin = 0;
tcache_slow->arena = NULL;
tcache_slow->dyn_alloc = mem;
assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
memset(tcache->bins_small, 0, sizeof(cache_bin_t) * SC_NBINS);
@@ -444,8 +454,8 @@ tcache_init(tsd_t *tsd, tcache_t *tcache, void *mem) {
cache_bin_preincrement(tcache_bin_info, nhbins, mem,
&cur_offset);
for (; i < SC_NBINS; i++) {
tcache->lg_fill_div[i] = 1;
tcache->bin_refilled[i] = false;
tcache_slow->lg_fill_div[i] = 1;
tcache_slow->bin_refilled[i] = false;
cache_bin_t *bin = tcache_small_bin_get(tcache, i);
cache_bin_init(bin, &tcache_bin_info[i], mem,
&cur_offset);
@@ -464,7 +474,9 @@ tcache_init(tsd_t *tsd, tcache_t *tcache, void *mem) {
/* Initialize auto tcache (embedded in TSD). */
bool
tsd_tcache_data_init(tsd_t *tsd) {
tcache_slow_t *tcache_slow = tsd_tcache_slowp_get_unsafe(tsd);
tcache_t *tcache = tsd_tcachep_get_unsafe(tsd);
assert(cache_bin_still_zero_initialized(
tcache_small_bin_get(tcache, 0)));
size_t alignment = tcache_bin_alloc_alignment;
@@ -476,7 +488,7 @@ tsd_tcache_data_init(tsd_t *tsd) {
return true;
}
tcache_init(tsd, tcache, mem);
tcache_init(tsd, tcache_slow, tcache, mem);
/*
* Initialization is a bit tricky here. After malloc init is done, all
* threads can rely on arena_choose and associate tcache accordingly.
@@ -485,20 +497,22 @@ tsd_tcache_data_init(tsd_t *tsd) {
* associate its tcache to a0 temporarily, and later on
* arena_choose_hard() will re-associate properly.
*/
tcache->arena = NULL;
tcache_slow->arena = NULL;
arena_t *arena;
if (!malloc_initialized()) {
/* If in initialization, assign to a0. */
arena = arena_get(tsd_tsdn(tsd), 0, false);
tcache_arena_associate(tsd_tsdn(tsd), tcache, arena);
tcache_arena_associate(tsd_tsdn(tsd), tcache_slow, tcache,
arena);
} else {
arena = arena_choose(tsd, NULL);
/* This may happen if thread.tcache.enabled is used. */
if (tcache->arena == NULL) {
tcache_arena_associate(tsd_tsdn(tsd), tcache, arena);
if (tcache_slow->arena == NULL) {
tcache_arena_associate(tsd_tsdn(tsd), tcache_slow,
tcache, arena);
}
}
assert(arena == tcache->arena);
assert(arena == tcache_slow->arena);
return false;
}
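
The bootstrap ordering above keeps tcache_slow->arena as the single source of
truth: before malloc init completes, the pair is associated with a0, and
arena_choose_hard() reassociates it once a real arena can be chosen. In outline
(a timeline sketch, not literal code):

	/* Early in init, while malloc_initialized() is false: */
	tcache_arena_associate(tsdn, tcache_slow, tcache, a0);
	/* Later, from arena_choose_hard(): */
	tcache_arena_reassociate(tsdn, tcache_slow, tcache, chosen_arena);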
@@ -511,7 +525,8 @@ tcache_create_explicit(tsd_t *tsd) {
* the beginning of the whole allocation (for freeing). This makes sure
* the cache bins have the requested alignment.
*/
size_t size = tcache_bin_alloc_size + sizeof(tcache_t);
size_t size = tcache_bin_alloc_size + sizeof(tcache_t)
+ sizeof(tcache_slow_t);
/* Naturally align the pointer stacks. */
size = PTR_CEILING(size);
size = sz_sa2u(size, tcache_bin_alloc_alignment);
@@ -522,16 +537,20 @@ tcache_create_explicit(tsd_t *tsd) {
return NULL;
}
tcache_t *tcache = (void *)((uintptr_t)mem + tcache_bin_alloc_size);
tcache_init(tsd, tcache, mem);
tcache_slow_t *tcache_slow =
(void *)((uintptr_t)mem + tcache_bin_alloc_size + sizeof(tcache_t));
tcache_init(tsd, tcache_slow, tcache, mem);
tcache_arena_associate(tsd_tsdn(tsd), tcache, arena_ichoose(tsd, NULL));
tcache_arena_associate(tsd_tsdn(tsd), tcache_slow, tcache,
arena_ichoose(tsd, NULL));
return tcache;
}
static void
tcache_flush_cache(tsd_t *tsd, tcache_t *tcache) {
assert(tcache->arena != NULL);
tcache_slow_t *tcache_slow = tcache->tcache_slow;
assert(tcache_slow->arena != NULL);
for (unsigned i = 0; i < SC_NBINS; i++) {
cache_bin_t *tbin = tcache_small_bin_get(tcache, i);
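
For explicit (manual) tcaches, the allocation computed in
tcache_create_explicit() now carries three regions, with the tcache_slow_t
placed directly after the tcache_t; dyn_alloc records the base so the whole
block is freed with a single idalloctm() call. Layout sketch (offsets as
computed above):

	mem                        mem + tcache_bin_alloc_size
	|                          |
	v                          v
	[ cache bin stacks + pad ][ tcache_t ][ tcache_slow_t ]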
@@ -559,15 +578,17 @@ tcache_flush(tsd_t *tsd) {
static void
tcache_destroy(tsd_t *tsd, tcache_t *tcache, bool tsd_tcache) {
tcache_slow_t *tcache_slow = tcache->tcache_slow;
tcache_flush_cache(tsd, tcache);
arena_t *arena = tcache->arena;
tcache_arena_dissociate(tsd_tsdn(tsd), tcache);
arena_t *arena = tcache_slow->arena;
tcache_arena_dissociate(tsd_tsdn(tsd), tcache_slow, tcache);
if (tsd_tcache) {
cache_bin_t *bin = tcache_small_bin_get(tcache, 0);
cache_bin_assert_empty(bin, &tcache_bin_info[0]);
}
idalloctm(tsd_tsdn(tsd), tcache->dyn_alloc, NULL, NULL, true, true);
idalloctm(tsd_tsdn(tsd), tcache_slow->dyn_alloc, NULL, NULL, true,
true);
/*
* The deallocation and tcache flush above may not trigger decay since


@@ -50,7 +50,8 @@ tcache_gc_event(tsd_t *tsd) {
assert(TCACHE_GC_INCR_BYTES > 0);
tcache_t *tcache = tcache_get(tsd);
if (tcache != NULL) {
tcache_event_hard(tsd, tcache);
tcache_slow_t *tcache_slow = tsd_tcache_slowp_get(tsd);
tcache_event_hard(tsd, tcache_slow, tcache);
}
}