Cache bin: Only take the relevant cache_bin_info_t

Previously, we took an array of cache_bin_info_ts and an index, and did
the dereference ourselves.  But the infos for other cache_bins aren't
relevant to any particular cache bin, so that lookup should be the
caller's job.
commit e1dcc557d6
parent 1b00d808d7
Author:    David Goldblatt
Date:      2020-02-28 18:55:33 -08:00
Committer: David Goldblatt

5 changed files with 70 additions and 81 deletions
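
For illustration, this is the shape of the change at a typical call site, a
minimal sketch assembled from the hunks below (tbin, binind, and
tcache_bin_info are the names used in those hunks; the surrounding
declarations are assumed):

	/* Old convention: the helper takes the whole info array plus an
	 * index and does the lookup itself. */
	cache_bin_sz_t ncached = cache_bin_ncached_get(tbin, binind, tcache_bin_info);

	/* New convention: the caller does the lookup and passes only the
	 * one relevant cache_bin_info_t. */
	cache_bin_sz_t ncached = cache_bin_ncached_get(tbin, &tcache_bin_info[binind]);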

@@ -200,14 +200,14 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
 		for (szind_t i = 0; i < SC_NBINS; i++) {
 			cache_bin_t *tbin = &descriptor->bins_small[i];
 			arena_stats_accum_zu(&astats->tcache_bytes,
-			    cache_bin_ncached_get(tbin, i, tcache_bin_info)
+			    cache_bin_ncached_get(tbin, &tcache_bin_info[i])
 			    * sz_index2size(i));
 		}
 		for (szind_t i = 0; i < nhbins - SC_NBINS; i++) {
 			cache_bin_t *tbin = &descriptor->bins_large[i];
 			arena_stats_accum_zu(&astats->tcache_bytes,
-			    cache_bin_ncached_get(tbin, i + SC_NBINS,
-			    tcache_bin_info) * sz_index2size(i));
+			    cache_bin_ncached_get(tbin,
+			    &tcache_bin_info[i + SC_NBINS]) * sz_index2size(i));
 		}
 	}
 	malloc_mutex_prof_read(tsdn,
@@ -1321,7 +1321,7 @@ arena_bin_choose_lock(tsdn_t *tsdn, arena_t *arena, szind_t binind,
 void
 arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
     cache_bin_t *tbin, szind_t binind) {
-	assert(cache_bin_ncached_get(tbin, binind, tcache_bin_info) == 0);
+	assert(cache_bin_ncached_get(tbin, &tcache_bin_info[binind]) == 0);
 	tcache->bin_refilled[binind] = true;
 
 	const bin_info_t *bin_info = &bin_infos[binind];
@@ -1329,8 +1329,8 @@ arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
 	    &tcache_bin_info[binind]) >> tcache->lg_fill_div[binind];
 	CACHE_BIN_PTR_ARRAY_DECLARE(ptrs, nfill);
-	cache_bin_ptr_array_init_for_fill(&ptrs, tbin, nfill, binind,
-	    tcache_bin_info);
+	cache_bin_ptr_array_init_for_fill(&ptrs, tbin, nfill,
+	    &tcache_bin_info[binind]);
 
 	/*
 	 * Bin-local resources are used first: 1) bin->slabcur, and 2) nonfull
@@ -1443,8 +1443,8 @@ label_refill:
 		fresh_slab = NULL;
 	}
 
-	cache_bin_fill_from_ptr_array(tbin, &ptrs, binind, filled,
-	    tcache_bin_info);
+	cache_bin_fill_from_ptr_array(tbin, &ptrs, filled,
+	    &tcache_bin_info[binind]);
 	arena_decay_tick(tsdn, arena);
 }

@@ -59,10 +59,10 @@ tcache_event_hard(tsd_t *tsd, tcache_t *tcache) {
 		is_small = false;
 	}
 
-	cache_bin_sz_t low_water = cache_bin_low_water_get(tbin, binind,
-	    tcache_bin_info);
-	cache_bin_sz_t ncached = cache_bin_ncached_get(tbin, binind,
-	    tcache_bin_info);
+	cache_bin_sz_t low_water = cache_bin_low_water_get(tbin,
+	    &tcache_bin_info[binind]);
+	cache_bin_sz_t ncached = cache_bin_ncached_get(tbin,
+	    &tcache_bin_info[binind]);
 	if (low_water > 0) {
 		/*
 		 * Flush (ceiling) 3/4 of the objects below the low water mark.
@@ -110,8 +110,8 @@ tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
 	assert(tcache->arena != NULL);
 	arena_tcache_fill_small(tsdn, arena, tcache, tbin, binind);
-	ret = cache_bin_alloc_easy(tbin, tcache_success, binind,
-	    tcache_bin_info);
+	ret = cache_bin_alloc_easy(tbin, tcache_success,
+	    &tcache_bin_info[binind]);
 
 	return ret;
 }
@@ -168,8 +168,8 @@ tcache_bin_flush_impl(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
 	} else {
 		assert(binind < nhbins);
 	}
-	cache_bin_sz_t ncached = cache_bin_ncached_get(tbin, binind,
-	    tcache_bin_info);
+	cache_bin_sz_t ncached = cache_bin_ncached_get(tbin,
+	    &tcache_bin_info[binind]);
 	assert((cache_bin_sz_t)rem <= ncached);
 	arena_t *tcache_arena = tcache->arena;
 	assert(tcache_arena != NULL);
@@ -182,8 +182,8 @@ tcache_bin_flush_impl(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
 	VARIABLE_ARRAY(edata_t *, item_edata, nflush + 1);
 	CACHE_BIN_PTR_ARRAY_DECLARE(ptrs, nflush);
-	cache_bin_ptr_array_init_for_flush(&ptrs, tbin, nflush, binind,
-	    tcache_bin_info);
+	cache_bin_ptr_array_init_for_flush(&ptrs, tbin, nflush,
+	    &tcache_bin_info[binind]);
 
 	/* Look up edata once per item. */
 	if (config_opt_safety_checks) {
@@ -348,7 +348,7 @@ tcache_bin_flush_impl(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
 	memmove(tbin->cur_ptr.ptr + (ncached - rem), tbin->cur_ptr.ptr, rem *
 	    sizeof(void *));
-	cache_bin_ncached_set(tbin, binind, rem, tcache_bin_info);
+	cache_bin_ncached_set(tbin, rem, &tcache_bin_info[binind]);
 	if (tbin->cur_ptr.lowbits > tbin->low_water_position) {
 		tbin->low_water_position = tbin->cur_ptr.lowbits;
 	}
@@ -453,8 +453,8 @@ tcache_bin_init(cache_bin_t *bin, szind_t ind, uintptr_t *stack_cur) {
 	bin->low_water_position = bin->cur_ptr.lowbits;
 	bin->full_position = (uint32_t)(uintptr_t)full_position;
 	assert(bin->cur_ptr.lowbits - bin->full_position == bin_stack_size);
-	assert(cache_bin_ncached_get(bin, ind, tcache_bin_info) == 0);
-	assert(cache_bin_empty_position_get(bin, ind, tcache_bin_info)
+	assert(cache_bin_ncached_get(bin, &tcache_bin_info[ind]) == 0);
+	assert(cache_bin_empty_position_get(bin, &tcache_bin_info[ind])
 	    == empty_position);
 
 	return false;
@@ -614,8 +614,8 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache, bool tsd_tcache) {
 	if (tsd_tcache) {
 		/* Release the avail array for the TSD embedded auto tcache. */
 		cache_bin_t *bin = tcache_small_bin_get(tcache, 0);
-		assert(cache_bin_ncached_get(bin, 0, tcache_bin_info) == 0);
-		assert(cache_bin_empty_position_get(bin, 0, tcache_bin_info) ==
+		assert(cache_bin_ncached_get(bin, &tcache_bin_info[0]) == 0);
+		assert(cache_bin_empty_position_get(bin, &tcache_bin_info[0]) ==
 		    bin->cur_ptr.ptr);
 		void *avail_array = (void *)((uintptr_t)bin->cur_ptr.ptr -
 		    tcache_bin_info[0].stack_size);