cache_bin: Don't allow direct internals access.

Author:    David Goldblatt
Committer: David Goldblatt
Date:      2020-02-26 17:10:12 -08:00
Parent:    da68f73296
Commit:    b66c0973cc

2 changed files with 41 additions and 23 deletions

include/jemalloc/internal/cache_bin.h

@@ -144,16 +144,6 @@ cache_bin_empty_position_get(cache_bin_t *bin, szind_t ind,
 	return ret;
 }
 
-/* Returns the position of the bottom item on the stack; for convenience. */
-static inline void **
-cache_bin_bottom_item_get(cache_bin_t *bin, szind_t ind,
-    cache_bin_info_t *infos) {
-	void **bottom = cache_bin_empty_position_get(bin, ind, infos) - 1;
-	assert(cache_bin_ncached_get(bin, ind, infos) == 0 || *bottom != NULL);
-
-	return bottom;
-}
-
 /* Returns the numeric value of low water in [0, ncached]. */
 static inline cache_bin_sz_t
 cache_bin_low_water_get(cache_bin_t *bin, szind_t ind,
@@ -263,4 +253,32 @@ cache_bin_dalloc_easy(cache_bin_t *bin, void *ptr) {
 	return true;
 }
 
+typedef struct cache_bin_ptr_array_s cache_bin_ptr_array_t;
+struct cache_bin_ptr_array_s {
+	cache_bin_sz_t nflush;
+	void **ptr;
+};
+
+#define CACHE_BIN_PTR_ARRAY_DECLARE(name, nflush_val)			\
+	cache_bin_ptr_array_t name;					\
+	name.nflush = (nflush_val)
+
+static inline void
+cache_bin_ptr_array_init(cache_bin_ptr_array_t *arr, cache_bin_t *bin,
+    cache_bin_sz_t nflush, szind_t ind, cache_bin_info_t *infos) {
+	arr->ptr = cache_bin_empty_position_get(bin, ind, infos) - 1;
+	assert(cache_bin_ncached_get(bin, ind, infos) == 0
+	    || *arr->ptr != NULL);
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+cache_bin_ptr_array_get(cache_bin_ptr_array_t *arr, cache_bin_sz_t n) {
+	return *(arr->ptr - n);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+cache_bin_ptr_array_set(cache_bin_ptr_array_t *arr, cache_bin_sz_t n, void *p) {
+	*(arr->ptr - n) = p;
+}
+
 #endif /* JEMALLOC_INTERNAL_CACHE_BIN_H */
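To make the new API concrete, here is a minimal, self-contained sketch of how the accessors behave. Everything except the accessor bodies is a simplified stand-in invented for this example: cache_bin_sz_t is reduced to a plain unsigned, and a bare array of pointer slots replaces the real cache_bin_t/cache_bin_info_t plumbing. The layout assumption (cached items sit just below the empty position, with the bottom item at empty_position - 1) comes from cache_bin_ptr_array_init() and the deleted cache_bin_bottom_item_get() above.

#include <stdio.h>

/* Simplified stand-ins for jemalloc's types, for illustration only. */
typedef unsigned cache_bin_sz_t;

typedef struct {
	cache_bin_sz_t nflush;
	void **ptr;	/* bottom (first-to-flush) item of the range */
} cache_bin_ptr_array_t;

/* The accessor bodies mirror the ones added by this commit. */
static void *
cache_bin_ptr_array_get(cache_bin_ptr_array_t *arr, cache_bin_sz_t n) {
	return *(arr->ptr - n);
}

static void
cache_bin_ptr_array_set(cache_bin_ptr_array_t *arr, cache_bin_sz_t n, void *p) {
	*(arr->ptr - n) = p;
}

int
main(void) {
	int objs[4] = {10, 11, 12, 13};
	void *stack[8];
	/*
	 * Cached items sit just below the empty position; the bottom item
	 * lives at empty_position - 1, which is exactly what
	 * cache_bin_ptr_array_init() stores into arr->ptr.
	 */
	void **empty_position = &stack[4];

	cache_bin_ptr_array_t ptrs;
	ptrs.nflush = 4;
	ptrs.ptr = empty_position - 1;

	/* Fill the flush range, then walk it bottom-up. */
	for (cache_bin_sz_t i = 0; i < ptrs.nflush; i++) {
		cache_bin_ptr_array_set(&ptrs, i, &objs[i]);
	}
	for (cache_bin_sz_t i = 0; i < ptrs.nflush; i++) {
		printf("%d\n", *(int *)cache_bin_ptr_array_get(&ptrs, i));
	}
	return 0;
}

The point of the indirection is that callers now say "the i-th item of the flush range" instead of doing raw pointer arithmetic against the bin's internals, so the bin's layout can change without touching every caller in tcache.c.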

src/tcache.c

@@ -117,8 +117,8 @@ tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
 
 /* Enabled with --enable-extra-size-check. */
 static void
-tbin_edatas_lookup_size_check(tsd_t *tsd, cache_bin_t *tbin, szind_t binind,
-    size_t nflush, edata_t **edatas) {
+tbin_edatas_lookup_size_check(tsd_t *tsd, cache_bin_ptr_array_t *arr,
+    szind_t binind, size_t nflush, edata_t **edatas) {
 	/* Avoids null-checking tsdn in the loop below. */
 	util_assume(tsd != NULL);
@@ -129,15 +129,14 @@ tbin_edatas_lookup_size_check(tsd_t *tsd, cache_bin_t *tbin, szind_t binind,
 	 * builds, avoid the branch in the loop.
 	 */
 	size_t szind_sum = binind * nflush;
-	void **bottom_item = cache_bin_bottom_item_get(tbin, binind,
-	    tcache_bin_info);
 	for (unsigned i = 0; i < nflush; i++) {
 		emap_full_alloc_ctx_t full_alloc_ctx;
 		emap_full_alloc_ctx_lookup(tsd_tsdn(tsd), &emap_global,
-		    *(bottom_item - i), &full_alloc_ctx);
+		    cache_bin_ptr_array_get(arr, i), &full_alloc_ctx);
 		edatas[i] = full_alloc_ctx.edata;
 		szind_sum -= full_alloc_ctx.szind;
 	}
 	if (szind_sum != 0) {
 		safety_check_fail_sized_dealloc(false);
 	}
@@ -180,17 +179,18 @@ tcache_bin_flush_impl(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
 	 * touched (it's just included to satisfy the no-zero-length rule).
 	 */
 	VARIABLE_ARRAY(edata_t *, item_edata, nflush + 1);
-	void **bottom_item = cache_bin_bottom_item_get(tbin, binind,
-	    tcache_bin_info);
+	CACHE_BIN_PTR_ARRAY_DECLARE(ptrs, nflush);
+	cache_bin_ptr_array_init(&ptrs, tbin, nflush, binind, tcache_bin_info);
 
 	/* Look up edata once per item. */
 	if (config_opt_safety_checks) {
-		tbin_edatas_lookup_size_check(tsd, tbin, binind, nflush,
+		tbin_edatas_lookup_size_check(tsd, &ptrs, binind, nflush,
 		    item_edata);
 	} else {
 		for (unsigned i = 0 ; i < nflush; i++) {
 			item_edata[i] = emap_edata_lookup(tsd_tsdn(tsd),
-			    &emap_global, *(bottom_item - i));
+			    &emap_global, cache_bin_ptr_array_get(&ptrs, i));
 		}
 	}
@@ -262,7 +262,7 @@ tcache_bin_flush_impl(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
 	 */
 	if (!small) {
 		for (unsigned i = 0; i < nflush; i++) {
-			void *ptr = *(bottom_item - i);
+			void *ptr = cache_bin_ptr_array_get(&ptrs, i);
 			edata = item_edata[i];
 			assert(ptr != NULL && edata != NULL);
@@ -280,7 +280,7 @@ tcache_bin_flush_impl(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
 	/* Deallocate whatever we can. */
 	unsigned ndeferred = 0;
 	for (unsigned i = 0; i < nflush; i++) {
-		void *ptr = *(bottom_item - i);
+		void *ptr = cache_bin_ptr_array_get(&ptrs, i);
 		edata = item_edata[i];
 		assert(ptr != NULL && edata != NULL);
 		if (!tcache_bin_flush_match(edata, cur_arena_ind,
@@ -291,7 +291,7 @@ tcache_bin_flush_impl(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
 			 * arena. Either way, stash the object so that
 			 * it can be handled in a future pass.
 			 */
-			*(bottom_item - ndeferred) = ptr;
+			cache_bin_ptr_array_set(&ptrs, ndeferred, ptr);
 			item_edata[ndeferred] = edata;
 			ndeferred++;
 			continue;