diff --git a/include/jemalloc/internal/emap.h b/include/jemalloc/internal/emap.h
index 8b2c6ba0..f0d7e768 100644
--- a/include/jemalloc/internal/emap.h
+++ b/include/jemalloc/internal/emap.h
@@ -213,4 +213,37 @@ emap_alloc_ctx_try_lookup_fast(tsd_t *tsd, emap_t *emap, const void *ptr,
 	return false;
 }
 
+/*
+ * We want to do batch lookups out of the cache bins, which use
+ * cache_bin_ptr_array_get to access the i'th element of the bin (since they
+ * invert usual ordering in deciding what to flush). This lets the emap avoid
+ * caring about its caller's ordering.
+ */
+typedef const void *(*emap_ptr_getter)(void *ctx, size_t ind);
+/*
+ * This allows size-checking assertions, which we can only do while we're in the
+ * process of edata lookups.
+ */
+typedef void (*emap_metadata_visitor)(void *ctx, emap_full_alloc_ctx_t *alloc_ctx);
+
+JEMALLOC_ALWAYS_INLINE void
+emap_edata_lookup_batch(tsd_t *tsd, emap_t *emap, size_t nptrs,
+    emap_ptr_getter ptr_getter, void *ptr_getter_ctx,
+    emap_metadata_visitor metadata_visitor, void *metadata_visitor_ctx,
+    edata_t **r_edatas) {
+
+	/* Avoids null-checking tsdn in the loop below. */
+	util_assume(tsd != NULL);
+
+	for (size_t i = 0; i < nptrs; i++) {
+		emap_full_alloc_ctx_t full_alloc_ctx;
+		const void *ptr = ptr_getter(ptr_getter_ctx, i);
+
+		emap_full_alloc_ctx_lookup(tsd_tsdn(tsd), emap, ptr,
+		    &full_alloc_ctx);
+		r_edatas[i] = full_alloc_ctx.edata;
+		metadata_visitor(metadata_visitor_ctx, &full_alloc_ctx);
+	}
+}
+
 #endif /* JEMALLOC_INTERNAL_EMAP_H */
diff --git a/src/tcache.c b/src/tcache.c
index 678fe524..602823d9 100644
--- a/src/tcache.c
+++ b/src/tcache.c
@@ -236,11 +236,22 @@ tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena,
 	return ret;
 }
 
+static const void *
+tcache_bin_flush_ptr_getter(void *arr_ctx, size_t ind) {
+	cache_bin_ptr_array_t *arr = (cache_bin_ptr_array_t *)arr_ctx;
+	return cache_bin_ptr_array_get(arr, (unsigned)ind);
+}
+
+static void
+tcache_bin_flush_metadata_visitor(void *szind_sum_ctx,
+    emap_full_alloc_ctx_t *alloc_ctx) {
+	size_t *szind_sum = (size_t *)szind_sum_ctx;
+	*szind_sum -= alloc_ctx->szind;
+}
+
 static void
 tcache_bin_flush_edatas_lookup(tsd_t *tsd, cache_bin_ptr_array_t *arr,
     szind_t binind, size_t nflush, edata_t **edatas) {
-	/* Avoids null-checking tsdn in the loop below. */
-	util_assume(tsd != NULL);
 
 	/*
 	 * This gets compiled away when config_opt_safety_checks is false.
@@ -248,18 +259,13 @@ tcache_bin_flush_edatas_lookup(tsd_t *tsd, cache_bin_ptr_array_t *arr,
 	 * corrupting metadata.
 	 */
 	size_t szind_sum = binind * nflush;
-	for (unsigned i = 0; i < nflush; i++) {
-		emap_full_alloc_ctx_t full_alloc_ctx;
-		emap_full_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global,
-		    cache_bin_ptr_array_get(arr, i), &full_alloc_ctx);
-		edatas[i] = full_alloc_ctx.edata;
-		szind_sum -= full_alloc_ctx.szind;
-	}
-
+	emap_edata_lookup_batch(tsd, &arena_emap_global, nflush,
+	    &tcache_bin_flush_ptr_getter, (void *)arr,
+	    &tcache_bin_flush_metadata_visitor, (void *)&szind_sum,
+	    edatas);
 	if (config_opt_safety_checks && szind_sum != 0) {
 		safety_check_fail_sized_dealloc(false);
 	}
-
 }
 
 JEMALLOC_ALWAYS_INLINE bool