Cache bin: simplify names and argument ordering.

We always start with the cache bin, then its info (if necessary).
David Goldblatt 2020-02-28 19:12:07 -08:00 committed by David Goldblatt
parent e1dcc557d6
commit ff6acc6ed5
5 changed files with 51 additions and 48 deletions
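
The convention the message describes: every cache_bin helper that consults layout information now takes the bin first, then its cache_bin_info_t, then any remaining arguments. A minimal before/after of one signature from the diff below (the JEMALLOC_ALWAYS_INLINE qualifier elided):

	/* Before: the success out-param sat between the bin and its info. */
	void *cache_bin_alloc_easy(cache_bin_t *bin, bool *success, cache_bin_info_t *info);

	/* After: bin, then info, then everything else. */
	void *cache_bin_alloc_easy(cache_bin_t *bin, cache_bin_info_t *info, bool *success);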

View File

@@ -158,8 +158,8 @@ cache_bin_low_water_get(cache_bin_t *bin, cache_bin_info_t *info) {
 }
 
 static inline void
-cache_bin_ncached_set(cache_bin_t *bin, cache_bin_sz_t n,
-    cache_bin_info_t *info) {
+cache_bin_ncached_set(cache_bin_t *bin, cache_bin_info_t *info,
+    cache_bin_sz_t n) {
 	bin->cur_ptr.lowbits = bin->full_position + info->stack_size
 	    - n * sizeof(void *);
 	assert(n <= cache_bin_info_ncached_max(info));
@@ -175,8 +175,8 @@ cache_bin_array_descriptor_init(cache_bin_array_descriptor_t *descriptor,
 }
 
 JEMALLOC_ALWAYS_INLINE void *
-cache_bin_alloc_easy_impl(cache_bin_t *bin, bool *success,
-    cache_bin_info_t *info, const bool adjust_low_water) {
+cache_bin_alloc_easy_impl(cache_bin_t *bin, cache_bin_info_t *info,
+    bool *success, const bool adjust_low_water) {
 	/*
 	 * This may read from the empty position; however the loaded value won't
 	 * be used.  It's safe because the stack has one more slot reserved.
@@ -185,7 +185,7 @@ cache_bin_alloc_easy_impl(cache_bin_t *bin, bool *success,
 	/*
 	 * Check for both bin->ncached == 0 and ncached < low_water in a single
 	 * branch.  When adjust_low_water is true, this also avoids accessing
-	 * the cache_bin_info_ts (which is on a separate cacheline / page) in
+	 * the cache_bin_info_t (which is on a separate cacheline / page) in
 	 * the common case.
 	 */
 	if (unlikely(bin->cur_ptr.lowbits > bin->low_water_position)) {
@@ -223,12 +223,12 @@ cache_bin_alloc_easy_impl(cache_bin_t *bin, bool *success,
 JEMALLOC_ALWAYS_INLINE void *
 cache_bin_alloc_easy_reduced(cache_bin_t *bin, bool *success) {
 	/* We don't look at info if we're not adjusting low-water. */
-	return cache_bin_alloc_easy_impl(bin, success, NULL, false);
+	return cache_bin_alloc_easy_impl(bin, NULL, success, false);
 }
 
 JEMALLOC_ALWAYS_INLINE void *
-cache_bin_alloc_easy(cache_bin_t *bin, bool *success, cache_bin_info_t *info) {
-	return cache_bin_alloc_easy_impl(bin, success, info, true);
+cache_bin_alloc_easy(cache_bin_t *bin, cache_bin_info_t *info, bool *success) {
+	return cache_bin_alloc_easy_impl(bin, info, success, true);
 }
 
 JEMALLOC_ALWAYS_INLINE bool
@@ -254,18 +254,35 @@ struct cache_bin_ptr_array_s {
 	name.n = (nval)
 
 static inline void
-cache_bin_ptr_array_init_for_flush(cache_bin_ptr_array_t *arr, cache_bin_t *bin,
-    cache_bin_sz_t nflush, cache_bin_info_t *info) {
-	arr->ptr = cache_bin_empty_position_get(bin, info) - 1;
-	assert(cache_bin_ncached_get(bin, info) == 0
-	    || *arr->ptr != NULL);
+cache_bin_init_ptr_array_for_fill(cache_bin_t *bin, cache_bin_info_t *info,
+    cache_bin_ptr_array_t *arr, cache_bin_sz_t nfill) {
+	arr->ptr = cache_bin_empty_position_get(bin, info) - nfill;
+	assert(cache_bin_ncached_get(bin, info) == 0);
 }
 
+/*
+ * While nfill in cache_bin_init_ptr_array_for_fill is the number we *intend* to
+ * fill, nfilled here is the number we actually filled (which may be less, in
+ * case of OOM).
+ */
+static inline void
+cache_bin_finish_fill(cache_bin_t *bin, cache_bin_info_t *info,
+    cache_bin_ptr_array_t *arr, szind_t nfilled) {
+	assert(cache_bin_ncached_get(bin, info) == 0);
+	if (nfilled < arr->n) {
+		void **empty_position = cache_bin_empty_position_get(bin, info);
+		memmove(empty_position - nfilled, empty_position - arr->n,
+		    nfilled * sizeof(void *));
+	}
+	cache_bin_ncached_set(bin, info, nfilled);
+}
+
 static inline void
-cache_bin_ptr_array_init_for_fill(cache_bin_ptr_array_t *arr, cache_bin_t *bin,
-    cache_bin_sz_t nfill, cache_bin_info_t *info) {
-	arr->ptr = cache_bin_empty_position_get(bin, info) - nfill;
-	assert(cache_bin_ncached_get(bin, info) == 0);
+cache_bin_init_ptr_array_for_flush(cache_bin_t *bin, cache_bin_info_t *info,
+    cache_bin_ptr_array_t *arr, cache_bin_sz_t nflush) {
+	arr->ptr = cache_bin_empty_position_get(bin, info) - 1;
+	assert(cache_bin_ncached_get(bin, info) == 0
+	    || *arr->ptr != NULL);
 }
 
 JEMALLOC_ALWAYS_INLINE void *
@@ -278,16 +295,4 @@ cache_bin_ptr_array_set(cache_bin_ptr_array_t *arr, cache_bin_sz_t n, void *p) {
 	*(arr->ptr - n) = p;
 }
 
-static inline void
-cache_bin_fill_from_ptr_array(cache_bin_t *bin, cache_bin_ptr_array_t *arr,
-    szind_t nfilled, cache_bin_info_t *info) {
-	assert(cache_bin_ncached_get(bin, info) == 0);
-	if (nfilled < arr->n) {
-		void **empty_position = cache_bin_empty_position_get(bin, info);
-		memmove(empty_position - nfilled, empty_position - arr->n,
-		    nfilled * sizeof(void *));
-	}
-	cache_bin_ncached_set(bin, nfilled, info);
-}
-
 #endif /* JEMALLOC_INTERNAL_CACHE_BIN_H */
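
Besides reordering arguments, this file renames the batch helpers so the fill protocol reads in call order: init the pointer array for an intended nfill, produce as many objects as possible, then finish with the actual nfilled. A sketch of a caller, assuming a bin/info pair in scope; alloc_one_object() is invented for the example, while the helpers and the ptrs.ptr indexing mirror how the arena fill path (third file below) consumes the array:

	cache_bin_sz_t nfill = 8;	/* how many we intend to cache */
	CACHE_BIN_PTR_ARRAY_DECLARE(ptrs, nfill);
	cache_bin_init_ptr_array_for_fill(bin, info, &ptrs, nfill);

	cache_bin_sz_t nfilled = 0;
	while (nfilled < nfill) {
		void *obj = alloc_one_object();	/* hypothetical; may OOM */
		if (obj == NULL) {
			break;
		}
		ptrs.ptr[nfilled++] = obj;	/* fill the batch bottom-up */
	}
	/* On shortfall, finish_fill slides the prefix up to the empty position. */
	cache_bin_finish_fill(bin, info, &ptrs, nfilled);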

View File

@@ -36,8 +36,8 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
 	assert(binind < SC_NBINS);
 
 	bin = tcache_small_bin_get(tcache, binind);
-	ret = cache_bin_alloc_easy(bin, &tcache_success,
-	    &tcache_bin_info[binind]);
+	ret = cache_bin_alloc_easy(bin, &tcache_bin_info[binind],
+	    &tcache_success);
 	assert(tcache_success == (ret != NULL));
 	if (unlikely(!tcache_success)) {
 		bool tcache_hard_success;
@@ -80,8 +80,8 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
 	assert(binind >= SC_NBINS && binind < nhbins);
 
 	bin = tcache_large_bin_get(tcache, binind);
-	ret = cache_bin_alloc_easy(bin, &tcache_success,
-	    &tcache_bin_info[binind]);
+	ret = cache_bin_alloc_easy(bin, &tcache_bin_info[binind],
+	    &tcache_success);
 	assert(tcache_success == (ret != NULL));
 	if (unlikely(!tcache_success)) {
 		/*

View File

@@ -1329,9 +1329,8 @@ arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
 	    &tcache_bin_info[binind]) >> tcache->lg_fill_div[binind];
 	CACHE_BIN_PTR_ARRAY_DECLARE(ptrs, nfill);
-	cache_bin_ptr_array_init_for_fill(&ptrs, tbin, nfill,
-	    &tcache_bin_info[binind]);
+	cache_bin_init_ptr_array_for_fill(tbin, &tcache_bin_info[binind], &ptrs,
+	    nfill);
 
 	/*
 	 * Bin-local resources are used first: 1) bin->slabcur, and 2) nonfull
 	 * slabs.  After both are exhausted, new slabs will be allocated through
@@ -1443,8 +1442,7 @@ label_refill:
 		fresh_slab = NULL;
 	}
 
-	cache_bin_fill_from_ptr_array(tbin, &ptrs, filled,
-	    &tcache_bin_info[binind]);
+	cache_bin_finish_fill(tbin, &tcache_bin_info[binind], &ptrs, filled);
 	arena_decay_tick(tsdn, arena);
 }
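
The shortfall case here is the subtle one: when the fill loop ends early, the filled pointers sit at the bottom of the intended batch, and cache_bin_finish_fill must slide them up against the empty position before setting ncached. A standalone sketch of just that memmove (not jemalloc code; a plain array stands in for the bin's stack, and all the numbers are invented):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	int main(void) {
		void *stack[8] = {0};
		void **empty_position = stack + 8;	/* stack grows down from here */
		int nfill = 8;		/* intended batch size */
		int nfilled = 5;	/* what actually survived OOM */
		for (int i = 0; i < nfilled; i++) {
			stack[i] = (void *)(uintptr_t)(i + 1);	/* slots 0..4 */
		}
		/* Slide the prefix so it ends at the empty position: slots 3..7. */
		memmove(empty_position - nfilled, empty_position - nfill,
		    (size_t)nfilled * sizeof(void *));
		for (int i = 0; i < 8; i++) {
			printf("slot %d: %p\n", i, stack[i]);
		}
		return 0;
	}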

View File

@@ -110,8 +110,8 @@ tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
 	assert(tcache->arena != NULL);
 	arena_tcache_fill_small(tsdn, arena, tcache, tbin, binind);
-	ret = cache_bin_alloc_easy(tbin, tcache_success,
-	    &tcache_bin_info[binind]);
+	ret = cache_bin_alloc_easy(tbin, &tcache_bin_info[binind],
+	    tcache_success);
 
 	return ret;
 }
@@ -182,8 +182,8 @@ tcache_bin_flush_impl(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
 	VARIABLE_ARRAY(edata_t *, item_edata, nflush + 1);
 
 	CACHE_BIN_PTR_ARRAY_DECLARE(ptrs, nflush);
-	cache_bin_ptr_array_init_for_flush(&ptrs, tbin, nflush,
-	    &tcache_bin_info[binind]);
+	cache_bin_init_ptr_array_for_flush(tbin, &tcache_bin_info[binind],
+	    &ptrs, nflush);
 
 	/* Look up edata once per item. */
 	if (config_opt_safety_checks) {
@@ -348,7 +348,7 @@ tcache_bin_flush_impl(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
 	memmove(tbin->cur_ptr.ptr + (ncached - rem), tbin->cur_ptr.ptr, rem *
 	    sizeof(void *));
-	cache_bin_ncached_set(tbin, rem, &tcache_bin_info[binind]);
+	cache_bin_ncached_set(tbin, &tcache_bin_info[binind], rem);
 	if (tbin->cur_ptr.lowbits > tbin->low_water_position) {
 		tbin->low_water_position = tbin->cur_ptr.lowbits;
 	}
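
The flush side anchors its array one slot below the empty position and indexes downward, so index 0 is the pointer that has been cached longest. A sketch of a flush caller, assuming a bin/info pair in scope; return_to_arena() is invented, and cache_bin_ptr_array_get is assumed to be the getter counterpart of the cache_bin_ptr_array_set shown in the header's diff:

	cache_bin_sz_t ncached = cache_bin_ncached_get(bin, info);
	cache_bin_sz_t nflush = ncached / 2;	/* flush the older half, say */
	CACHE_BIN_PTR_ARRAY_DECLARE(ptrs, nflush);
	cache_bin_init_ptr_array_for_flush(bin, info, &ptrs, nflush);

	for (cache_bin_sz_t i = 0; i < nflush; i++) {
		/* i == 0 reads *(empty_position - 1), the oldest cached item. */
		return_to_arena(cache_bin_ptr_array_get(&ptrs, i));
	}
	/*
	 * The rem survivors are then memmoved down and ncached set to rem,
	 * exactly as in the last hunk above.
	 */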

View File

@@ -23,12 +23,12 @@ TEST_BEGIN(test_cache_bin) {
 	    "Incorrect cache size");
 
 	bool success;
-	void *ret = cache_bin_alloc_easy(bin, &success, &tcache_bin_info[0]);
+	void *ret = cache_bin_alloc_easy(bin, &tcache_bin_info[0], &success);
 	expect_false(success, "Empty cache bin should not alloc");
 	expect_true(cache_bin_low_water_get(bin, &tcache_bin_info[0]) == 0,
 	    "Incorrect low water mark");
 
-	cache_bin_ncached_set(bin, 0, &tcache_bin_info[0]);
+	cache_bin_ncached_set(bin, &tcache_bin_info[0], 0);
 	expect_ptr_eq(bin->cur_ptr.ptr, empty_position, "Bin should be empty");
 	for (cache_bin_sz_t i = 1; i < ncached_max + 1; i++) {
 		success = cache_bin_dalloc_easy(bin, (void *)(uintptr_t)i);
@@ -39,12 +39,12 @@ TEST_BEGIN(test_cache_bin) {
 	expect_false(success, "Bin should be full");
 	expect_ptr_eq(bin->cur_ptr.ptr, stack, "Incorrect bin cur_ptr");
 
-	cache_bin_ncached_set(bin, ncached_max, &tcache_bin_info[0]);
+	cache_bin_ncached_set(bin, &tcache_bin_info[0], ncached_max);
 	expect_ptr_eq(bin->cur_ptr.ptr, stack, "cur_ptr should not change");
 
 	/* Emulate low water after refill. */
 	bin->low_water_position = bin->full_position;
 	for (cache_bin_sz_t i = ncached_max; i > 0; i--) {
-		ret = cache_bin_alloc_easy(bin, &success, &tcache_bin_info[0]);
+		ret = cache_bin_alloc_easy(bin, &tcache_bin_info[0], &success);
 		cache_bin_sz_t ncached = cache_bin_ncached_get(bin,
 		    &tcache_bin_info[0]);
 		expect_true(success && ncached == i - 1,
@@ -54,7 +54,7 @@ TEST_BEGIN(test_cache_bin) {
 	    == ncached, "Incorrect low water mark");
 	}
 
-	ret = cache_bin_alloc_easy(bin, &success, &tcache_bin_info[0]);
+	ret = cache_bin_alloc_easy(bin, &tcache_bin_info[0], &success);
 	expect_false(success, "Empty cache bin should not alloc.");
 	expect_ptr_eq(bin->cur_ptr.ptr, stack + ncached_max,
 	    "Bin should be empty");