Tcache: treat small and large cache bins uniformly

David Goldblatt authored 2020-04-07 20:04:46 -07:00
committed by David Goldblatt
parent a13fbad374, commit cd29ebefd0
8 changed files with 87 additions and 145 deletions
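
The commit has no description, but every hunk below makes the same substitution: the separate bins_small and bins_large arrays collapse into a single bins array indexed directly by szind_t. Below is a minimal sketch (not part of the commit; the helper name is illustrative) of the access pattern this buys, using the identifiers that appear in the diffs:

/* Sketch only, not from the commit.  Identifiers (tcache_t, cache_bin_t,
 * SC_NBINS, nhbins) are the ones used in the hunks below. */
static void
example_walk_all_bins(tcache_t *tcache) {
	/*
	 * One loop now covers both small bins (binind < SC_NBINS) and
	 * large bins (SC_NBINS <= binind < nhbins); previously this took
	 * one loop over bins_small plus another over bins_large.
	 */
	for (unsigned i = 0; i < nhbins; i++) {
		cache_bin_t *bin = &tcache->bins[i];
		(void)bin;	/* e.g. flush it, or merge its stats */
	}
}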

View File

@@ -106,16 +106,14 @@ struct cache_bin_array_descriptor_s {
 	 */
 	ql_elm(cache_bin_array_descriptor_t) link;
 	/* Pointers to the tcache bins. */
-	cache_bin_t *bins_small;
-	cache_bin_t *bins_large;
+	cache_bin_t *bins;
 };
 
 static inline void
 cache_bin_array_descriptor_init(cache_bin_array_descriptor_t *descriptor,
-    cache_bin_t *bins_small, cache_bin_t *bins_large) {
+    cache_bin_t *bins) {
 	ql_elm_new(descriptor, link);
-	descriptor->bins_small = bins_small;
-	descriptor->bins_large = bins_large;
+	descriptor->bins = bins;
 }
 
 /* Returns ncached_max: Upper limit on ncached. */
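
Call sites of the descriptor shrink to match. A hedged sketch of an init call (the real caller is in tcache.c, one of the changed files this view does not show; the helper and variable names here are illustrative):

/* Sketch only: registering a tcache's bins with a descriptor. */
static void
example_register_bins(tcache_t *tcache) {
	cache_bin_array_descriptor_t descriptor;
	/*
	 * Before: cache_bin_array_descriptor_init(&descriptor,
	 *     tcache->bins_small, tcache->bins_large);
	 * After: a single pointer covers every bin.
	 */
	cache_bin_array_descriptor_init(&descriptor, tcache->bins);
	(void)descriptor;	/* the real caller links it into a list */
}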

View File

@@ -108,18 +108,6 @@ decay_ticker_get(tsd_t *tsd, unsigned ind) {
 	return &tdata->decay_ticker;
 }
 
-JEMALLOC_ALWAYS_INLINE cache_bin_t *
-tcache_small_bin_get(tcache_t *tcache, szind_t binind) {
-	assert(binind < SC_NBINS);
-	return &tcache->bins_small[binind];
-}
-
-JEMALLOC_ALWAYS_INLINE cache_bin_t *
-tcache_large_bin_get(tcache_t *tcache, szind_t binind) {
-	assert(binind >= SC_NBINS &&binind < nhbins);
-	return &tcache->bins_large[binind - SC_NBINS];
-}
-
 JEMALLOC_ALWAYS_INLINE bool
 tcache_available(tsd_t *tsd) {
 	/*
@@ -129,9 +117,9 @@ tcache_available(tsd_t *tsd) {
 	 */
 	if (likely(tsd_tcache_enabled_get(tsd))) {
 		/* Associated arena == NULL implies tcache init in progress. */
-		assert(tsd_tcache_slowp_get(tsd)->arena == NULL ||
-		    !cache_bin_still_zero_initialized(
-		    tcache_small_bin_get(tsd_tcachep_get(tsd), 0)));
+		if (config_debug && tsd_tcache_slowp_get(tsd)->arena != NULL) {
+			tcache_assert_initialized(tsd_tcachep_get(tsd));
+		}
 		return true;
 	}
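
The open-coded assertion gives way to tcache_assert_initialized(), declared in the next file. Its definition lands in tcache.c, which this view omits; a plausible sketch, consistent with the check it replaces:

void
tcache_assert_initialized(tcache_t *tcache) {
	/*
	 * Bin 0 always exists, so it works as an initialization probe,
	 * just as tcache_small_bin_get(tcache, 0) did before.
	 */
	assert(!cache_bin_still_zero_initialized(&tcache->bins[0]));
}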

View File

@@ -53,4 +53,6 @@ void tcache_flush(tsd_t *tsd);
 bool tsd_tcache_data_init(tsd_t *tsd);
 bool tsd_tcache_enabled_data_init(tsd_t *tsd);
+
+void tcache_assert_initialized(tcache_t *tcache);
 
 #endif /* JEMALLOC_INTERNAL_TCACHE_EXTERNS_H */

View File

@@ -30,12 +30,11 @@ JEMALLOC_ALWAYS_INLINE void *
 tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
     size_t size, szind_t binind, bool zero, bool slow_path) {
 	void *ret;
-	cache_bin_t *bin;
 	bool tcache_success;
 	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
 
 	assert(binind < SC_NBINS);
-	bin = tcache_small_bin_get(tcache, binind);
+	cache_bin_t *bin = &tcache->bins[binind];
 	ret = cache_bin_alloc(bin, &tcache_success);
 	assert(tcache_success == (ret != NULL));
 	if (unlikely(!tcache_success)) {
@@ -74,11 +73,10 @@ JEMALLOC_ALWAYS_INLINE void *
 tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
     szind_t binind, bool zero, bool slow_path) {
 	void *ret;
-	cache_bin_t *bin;
 	bool tcache_success;
 
-	assert(binind >= SC_NBINS &&binind < nhbins);
-	bin = tcache_large_bin_get(tcache, binind);
+	assert(binind >= SC_NBINS && binind < nhbins);
+	cache_bin_t *bin = &tcache->bins[binind];
 	ret = cache_bin_alloc(bin, &tcache_success);
 	assert(tcache_success == (ret != NULL));
 	if (unlikely(!tcache_success)) {
@@ -120,12 +118,10 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
 JEMALLOC_ALWAYS_INLINE void
 tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
     bool slow_path) {
-	cache_bin_t *bin;
-
 	assert(tcache_salloc(tsd_tsdn(tsd), ptr)
 	    <= SC_SMALL_MAXCLASS);
 
-	bin = tcache_small_bin_get(tcache, binind);
+	cache_bin_t *bin = &tcache->bins[binind];
 	if (unlikely(!cache_bin_dalloc_easy(bin, ptr))) {
 		unsigned remain = cache_bin_info_ncached_max(
 		    &tcache_bin_info[binind]) >> 1;
@@ -138,13 +134,12 @@ tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
 JEMALLOC_ALWAYS_INLINE void
 tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
     bool slow_path) {
-	cache_bin_t *bin;
-
 	assert(tcache_salloc(tsd_tsdn(tsd), ptr)
 	    > SC_SMALL_MAXCLASS);
 	assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass);
 
-	bin = tcache_large_bin_get(tcache, binind);
+	cache_bin_t *bin = &tcache->bins[binind];
 	if (unlikely(!cache_bin_dalloc_easy(bin, ptr))) {
 		unsigned remain = cache_bin_info_ncached_max(
 		    &tcache_bin_info[binind]) >> 1;
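
All four fast paths now compute the bin with the same expression; only the surrounding asserts still distinguish size classes. The commit inlines the expression at each call site rather than keeping a getter, but a single hypothetical one would now suffice where two were needed before:

/* Hypothetical unified getter, shown for contrast with the two removed
 * ones; not part of the commit. */
JEMALLOC_ALWAYS_INLINE cache_bin_t *
tcache_bin_get(tcache_t *tcache, szind_t binind) {
	assert(binind < nhbins);	/* small and large alike */
	return &tcache->bins[binind];
}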

View File

@@ -49,19 +49,7 @@ struct tcache_slow_s {
 struct tcache_s {
 	tcache_slow_t *tcache_slow;
-	/*
-	 * The pointer stacks associated with bins follow as a contiguous array.
-	 * During tcache initialization, the avail pointer in each element of
-	 * tbins is initialized to point to the proper offset within this array.
-	 */
-	cache_bin_t bins_small[SC_NBINS];
-	/*
-	 * We put the cache bins for large size classes at the end of the
-	 * struct, since some of them might not get used.  This might end up
-	 * letting us avoid touching an extra page if we don't have to.
-	 */
-	cache_bin_t bins_large[SC_NSIZES - SC_NBINS];
+	cache_bin_t bins[SC_NSIZES];
 };
 
 /* Linkage for list of available (previously used) explicit tcache IDs. */
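
The rationale in the removed comment, keeping rarely used large bins at the end of the struct so that an untouched page can stay untouched, survives the merge: small size-class indices precede large ones, so the unified array keeps the same ordering.

/*
 * Layout of the unified array (sketch, not from the commit):
 *
 *   bins[0 .. SC_NBINS-1]           small bins (hot, touched early)
 *   bins[SC_NBINS .. SC_NSIZES-1]   large bins (cold tail; entries at
 *                                   or beyond nhbins may go unused)
 */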