Cache bin: Don't reverse flush order.

The items we pick to flush matter a lot, but the order in which they get flushed
doesn't; just use forward scans.  This simplifies the accessing code, both in
terms of the C and the generated assembly (i.e. this speeds up the flush
pathways).
Author: David Goldblatt, 2021-01-29 13:10:44 -08:00 (committed by David Goldblatt)
parent 4c46e11365
commit 2fcbd18115
3 changed files with 10 additions and 21 deletions
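
To see the shape of the change before reading the diffs: under the old scheme the flush array's base pointer sat at the top of the range and element n was reached by subtracting; now the base sits at the bottom and element n is a plain forward load. A minimal sketch for orientation (standalone C with made-up names, not jemalloc's actual API):

#include <stddef.h>

/*
 * Old scheme: `top` points at the highest (last) element; element n is
 * reached by walking downward, so every access negates the index.
 */
static inline void *
get_reversed(void **top, size_t n) {
	return *(top - n);
}

/*
 * New scheme: `base` points at the lowest (first) element; element n is
 * ordinary forward indexing, which compiles to a simple base+index load.
 */
static inline void *
get_forward(void **base, size_t n) {
	return base[n];
}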


@@ -441,29 +441,18 @@ cache_bin_finish_fill(cache_bin_t *bin, cache_bin_info_t *info,
 	bin->stack_head = empty_position - nfilled;
 }
 
-/* Same deal, but with flush. */
+/*
+ * Same deal, but with flush.  Unlike fill (which can fail), the user must flush
+ * everything we give them.
+ */
 static inline void
 cache_bin_init_ptr_array_for_flush(cache_bin_t *bin, cache_bin_info_t *info,
     cache_bin_ptr_array_t *arr, cache_bin_sz_t nflush) {
-	arr->ptr = cache_bin_empty_position_get(bin) - 1;
+	arr->ptr = cache_bin_empty_position_get(bin) - nflush;
 	assert(cache_bin_ncached_get_local(bin, info) == 0
 	    || *arr->ptr != NULL);
 }
 
-/*
- * These accessors are used by the flush pathways -- they reverse ordinary array
- * ordering.  See the note above.
- */
-JEMALLOC_ALWAYS_INLINE void *
-cache_bin_ptr_array_get(cache_bin_ptr_array_t *arr, cache_bin_sz_t n) {
-	return *(arr->ptr - n);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-cache_bin_ptr_array_set(cache_bin_ptr_array_t *arr, cache_bin_sz_t n, void *p) {
-	*(arr->ptr - n) = p;
-}
-
 static inline void
 cache_bin_finish_flush(cache_bin_t *bin, cache_bin_info_t *info,
     cache_bin_ptr_array_t *arr, cache_bin_sz_t nflushed) {
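
To make the new layout concrete: the bin's stack grows downward from its empty position, and the init function above now points the array at the low end of the nflush slots being flushed instead of at the topmost slot. A self-contained model of just that addressing (hypothetical and much simplified; this is not the real cache_bin_t):

#include <assert.h>
#include <stddef.h>

int
main(void) {
	/* Four cached slots; `empty_position` is one past the highest. */
	int a, b, c, d;
	void *slots[4] = { &a, &b, &c, &d };
	void **empty_position = slots + 4;
	size_t nflush = 3;

	void **old_ptr = empty_position - 1;      /* old base: topmost slot */
	void **new_ptr = empty_position - nflush; /* new base: lowest slot */

	/* Both bases cover the same nflush slots, just from opposite ends. */
	for (size_t n = 0; n < nflush; n++) {
		assert(*(old_ptr - n) == new_ptr[nflush - 1 - n]);
	}
	return 0;
}

Since the caller must flush everything it is given (per the new comment), nothing depends on visiting these slots newest-first, so the forward scan is safe.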


@@ -239,7 +239,7 @@ tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena,
 static const void *
 tcache_bin_flush_ptr_getter(void *arr_ctx, size_t ind) {
 	cache_bin_ptr_array_t *arr = (cache_bin_ptr_array_t *)arr_ctx;
-	return cache_bin_ptr_array_get(arr, (unsigned)ind);
+	return arr->ptr[ind];
 }
 
 static void
@@ -382,7 +382,7 @@ tcache_bin_flush_impl(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin,
 	 */
 	if (!small) {
 		for (unsigned i = 0; i < nflush; i++) {
-			void *ptr = cache_bin_ptr_array_get(&ptrs, i);
+			void *ptr = ptrs.ptr[i];
 			edata = item_edata[i].edata;
 
 			assert(ptr != NULL && edata != NULL);
@@ -404,7 +404,7 @@ tcache_bin_flush_impl(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin,
 		arena_dalloc_bin_locked_begin(&dalloc_bin_info, binind);
 	}
 	for (unsigned i = 0; i < nflush; i++) {
-		void *ptr = cache_bin_ptr_array_get(&ptrs, i);
+		void *ptr = ptrs.ptr[i];
 		edata = item_edata[i].edata;
 		assert(ptr != NULL && edata != NULL);
 		if (!tcache_bin_flush_match(edata, cur_arena_ind,
@@ -415,7 +415,7 @@ tcache_bin_flush_impl(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin,
 				 * arena.  Either way, stash the object so that
 				 * it can be handled in a future pass.
 				 */
-				cache_bin_ptr_array_set(&ptrs, ndeferred, ptr);
+				ptrs.ptr[ndeferred] = ptr;
 				item_edata[ndeferred].edata = edata;
 				ndeferred++;
 				continue;
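
The stash above is an in-place compaction: during each pass, pointers that belong to a different arena or bin are packed to the front of the same array, and the next pass re-runs over just those ndeferred entries, which a forward scan supports naturally. A standalone sketch of the pattern (matches_current_pass is a made-up stand-in for tcache_bin_flush_match):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for tcache_bin_flush_match(). */
static bool
matches_current_pass(int item, int pass) {
	return item == pass;
}

int
main(void) {
	int items[] = { 0, 1, 2, 0, 1, 0 };
	size_t nflush = sizeof(items) / sizeof(items[0]);

	for (int pass = 0; nflush > 0; pass++) {
		size_t ndeferred = 0;
		for (size_t i = 0; i < nflush; i++) {
			if (!matches_current_pass(items[i], pass)) {
				/*
				 * Stash for a future pass -- the same move as
				 * ptrs.ptr[ndeferred] = ptr in the diff.
				 */
				items[ndeferred] = items[i];
				ndeferred++;
				continue;
			}
			printf("pass %d flushes item %d\n", pass, items[i]);
		}
		/* Only the deferred prefix survives into the next pass. */
		nflush = ndeferred;
	}
	return 0;
}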


@@ -43,7 +43,7 @@ do_flush_test(cache_bin_t *bin, cache_bin_info_t *info, void **ptrs,
 
 	CACHE_BIN_PTR_ARRAY_DECLARE(arr, nflush);
 	cache_bin_init_ptr_array_for_flush(bin, info, &arr, nflush);
 	for (cache_bin_sz_t i = 0; i < nflush; i++) {
-		expect_ptr_eq(cache_bin_ptr_array_get(&arr, i), &ptrs[i], "");
+		expect_ptr_eq(arr.ptr[i], &ptrs[nflush - i - 1], "");
 	}
 	cache_bin_finish_flush(bin, info, &arr, nflush);
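
Note the flipped index in the updated expectation: the same pointers are flushed, but arr.ptr[i] now names the slot at empty_position - nflush + i, which under this test's fill order holds &ptrs[nflush - i - 1] rather than &ptrs[i].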