Move cache index randomization out of extent.

This is logically at a higher level of the stack; extent should just allocate
things at the page level, and shouldn't care exactly why a caller wants a
given number of pages.
David Goldblatt
2020-03-08 10:11:02 -07:00
committed by David Goldblatt
parent 12be9f5727
commit 585f925055
5 changed files with 93 additions and 105 deletions

@@ -397,4 +397,30 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
 }
 }
 
+static inline void
+arena_cache_oblivious_randomize(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
+    size_t alignment) {
+	assert(edata_base_get(edata) == edata_addr_get(edata));
+
+	if (alignment < PAGE) {
+		unsigned lg_range = LG_PAGE -
+		    lg_floor(CACHELINE_CEILING(alignment));
+		size_t r;
+		if (!tsdn_null(tsdn)) {
+			tsd_t *tsd = tsdn_tsd(tsdn);
+			r = (size_t)prng_lg_range_u64(
+			    tsd_prng_statep_get(tsd), lg_range);
+		} else {
+			uint64_t stack_value = (uint64_t)(uintptr_t)&r;
+			r = (size_t)prng_lg_range_u64(&stack_value, lg_range);
+		}
+		uintptr_t random_offset = ((uintptr_t)r) << (LG_PAGE -
+		    lg_range);
+		edata->e_addr = (void *)((uintptr_t)edata->e_addr +
+		    random_offset);
+		assert(ALIGNMENT_ADDR2BASE(edata->e_addr, alignment) ==
+		    edata->e_addr);
+	}
+}
+
 #endif /* JEMALLOC_INTERNAL_ARENA_INLINES_B_H */
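
The new helper shifts an extent's base address forward by a random whole
number of cache lines within one page, so allocations of the same size class
don't all land on the same cache indices. The arithmetic is easy to check in
isolation. Below is a minimal standalone sketch of just that math, assuming
4 KiB pages (LG_PAGE == 12), 64-byte cache lines, and a power-of-two sub-page
alignment; the simple lg_floor() and rand() here are stand-ins for jemalloc's
lg_floor and prng_lg_range_u64, not the real internals:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define LG_PAGE 12			/* assumed: 4 KiB pages */
#define PAGE ((size_t)1 << LG_PAGE)
#define CACHELINE ((size_t)64)		/* assumed: 64-byte cache lines */
#define CACHELINE_CEILING(s) (((s) + CACHELINE - 1) & ~(CACHELINE - 1))

/* Index of the highest set bit; x must be nonzero. */
static unsigned
lg_floor(size_t x) {
	unsigned lg = 0;
	while (x >>= 1) {
		lg++;
	}
	return lg;
}

int
main(void) {
	size_t alignment = 256;	/* any power-of-two alignment < PAGE */
	/* One random choice per aligned slot within a page. */
	unsigned lg_range = LG_PAGE - lg_floor(CACHELINE_CEILING(alignment));
	size_t r = (size_t)rand() & (((size_t)1 << lg_range) - 1);
	uintptr_t random_offset = ((uintptr_t)r) << (LG_PAGE - lg_range);

	/* The offset stays inside one page and preserves the alignment. */
	assert(random_offset < PAGE);
	assert(random_offset % alignment == 0);
	printf("lg_range=%u offset=%#zx\n", lg_range, (size_t)random_offset);
	return 0;
}

Because the offset is a multiple of CACHELINE_CEILING(alignment), the
requested alignment survives the shift, which is exactly what the
ALIGNMENT_ADDR2BASE assert in the diff above verifies.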

@@ -20,19 +20,19 @@
 extern size_t opt_lg_extent_max_active_fit;
 
 edata_t *ecache_alloc(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
-    ecache_t *ecache, void *new_addr, size_t size, size_t pad, size_t alignment,
-    bool slab, szind_t szind, bool *zero);
+    ecache_t *ecache, void *new_addr, size_t size, size_t alignment, bool slab,
+    szind_t szind, bool *zero);
 edata_t *ecache_alloc_grow(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
-    ecache_t *ecache, void *new_addr, size_t size, size_t pad, size_t alignment,
-    bool slab, szind_t szind, bool *zero);
+    ecache_t *ecache, void *new_addr, size_t size, size_t alignment, bool slab,
+    szind_t szind, bool *zero);
 void ecache_dalloc(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
     ecache_t *ecache, edata_t *edata);
 edata_t *ecache_evict(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
     ecache_t *ecache, size_t npages_min);
 
 edata_t *extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
-    void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
-    szind_t szind, bool *zero, bool *commit);
+    void *new_addr, size_t size, size_t alignment, bool slab, szind_t szind,
+    bool *zero, bool *commit);
 void extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, edata_t *edata);
 void extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
     edata_t *edata);
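
With pad gone from these prototypes, extent hands back exactly the pages
requested; it becomes the caller's job both to fold any desired padding into
the size and to apply randomization afterward. A hypothetical call site might
look like the sketch below. The identifiers mirror jemalloc internals
(ecache_alloc, sz_large_pad, config_cache_oblivious), but the function itself
is an illustration of the new layering, not the literal arena.c code from
this commit:

/*
 * Sketch only: shows the post-commit division of labor between the
 * arena and extent layers. Hypothetical function; identifier names
 * are assumptions modeled on the jemalloc internals.
 */
static edata_t *
arena_alloc_large_sketch(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
    size_t usize, size_t alignment, szind_t szind, bool *zero) {
	/*
	 * The caller folds the padding it wants (e.g. a trailing page of
	 * slack for cache-index randomization) into the requested size;
	 * extent just sees a number of pages.
	 */
	size_t esize = usize + sz_large_pad;
	edata_t *edata = ecache_alloc(tsdn, arena, ehooks,
	    &arena->ecache_dirty, /* new_addr */ NULL, esize, alignment,
	    /* slab */ false, szind, zero);
	if (edata == NULL) {
		return NULL;
	}
	/* Randomization now happens at the arena layer, not in extent. */
	if (config_cache_oblivious) {
		arena_cache_oblivious_randomize(tsdn, arena, edata, alignment);
	}
	return edata;
}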