Implement cache-oblivious support for huge size classes.

commit b46261d58b
parent 4731cd47f7
Author: Jason Evans
Date:   2016-05-27 18:57:15 -07:00
12 changed files with 298 additions and 170 deletions

include/jemalloc/internal/arena.h

@@ -688,7 +688,7 @@ JEMALLOC_ALWAYS_INLINE size_t
arena_miscelm_to_pageind(const extent_t *extent,
const arena_chunk_map_misc_t *miscelm)
{
arena_chunk_t *chunk = (arena_chunk_t *)extent_addr_get(extent);
arena_chunk_t *chunk = (arena_chunk_t *)extent_base_get(extent);
size_t pageind = ((uintptr_t)miscelm - ((uintptr_t)chunk +
map_misc_offset)) / sizeof(arena_chunk_map_misc_t) + map_bias;
@@ -702,7 +702,7 @@ JEMALLOC_ALWAYS_INLINE void *
arena_miscelm_to_rpages(const extent_t *extent,
const arena_chunk_map_misc_t *miscelm)
{
arena_chunk_t *chunk = (arena_chunk_t *)extent_addr_get(extent);
arena_chunk_t *chunk = (arena_chunk_t *)extent_base_get(extent);
size_t pageind = arena_miscelm_to_pageind(extent, miscelm);
return ((void *)((uintptr_t)chunk + (pageind << LG_PAGE)));
@@ -1065,7 +1065,7 @@ arena_ptr_small_binind_get(tsdn_t *tsdn, const void *ptr, size_t mapbits)
assert(binind != BININD_INVALID);
assert(binind < NBINS);
extent = iealloc(tsdn, ptr);
chunk = (arena_chunk_t *)extent_addr_get(extent);
chunk = (arena_chunk_t *)extent_base_get(extent);
arena = extent_arena_get(extent);
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
actual_mapbits = arena_mapbits_get(chunk, pageind);
@@ -1106,7 +1106,7 @@ arena_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent, const void *ptr)
assert(ptr != NULL);
if (likely(extent_slab_get(extent))) {
arena_chunk_t *chunk = (arena_chunk_t *)extent_addr_get(extent);
arena_chunk_t *chunk = (arena_chunk_t *)extent_base_get(extent);
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
size_t mapbits = arena_mapbits_get(chunk, pageind);
assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
@@ -1132,7 +1132,7 @@ arena_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, const void *ptr,
assert(ptr != NULL);
if (likely(extent_slab_get(extent))) {
arena_chunk_t *chunk = (arena_chunk_t *)extent_addr_get(extent);
arena_chunk_t *chunk = (arena_chunk_t *)extent_base_get(extent);
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
@@ -1168,8 +1168,9 @@ arena_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent, const void *ptr,
if (unlikely(usize > SMALL_MAXCLASS || (ptr == old_ptr &&
(uintptr_t)old_tctx > (uintptr_t)1U))) {
arena_chunk_t *chunk = (arena_chunk_t *)extent_addr_get(extent);
if (likely(chunk != ptr)) {
if (likely(extent_slab_get(extent))) {
arena_chunk_t *chunk =
(arena_chunk_t *)extent_base_get(extent);
size_t pageind;
arena_chunk_map_misc_t *elm;
@@ -1253,7 +1254,7 @@ arena_salloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr, bool demote)
if (likely(extent_slab_get(extent))) {
const arena_chunk_t *chunk =
(const arena_chunk_t *)extent_addr_get(extent);
(const arena_chunk_t *)extent_base_get(extent);
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
@@ -1302,7 +1303,7 @@ arena_dalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, tcache_t *tcache,
assert(ptr != NULL);
if (likely(extent_slab_get(extent))) {
arena_chunk_t *chunk = (arena_chunk_t *)extent_addr_get(extent);
arena_chunk_t *chunk = (arena_chunk_t *)extent_base_get(extent);
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
mapbits = arena_mapbits_get(chunk, pageind);
@@ -1349,7 +1350,7 @@ arena_sdalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t size,
assert(!tsdn_null(tsdn) || tcache == NULL);
if (likely(extent_slab_get(extent))) {
arena_chunk_t *chunk = (arena_chunk_t *)extent_addr_get(extent);
arena_chunk_t *chunk = (arena_chunk_t *)extent_base_get(extent);
if (config_prof && opt_prof) {
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>

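These arena fast paths switch from extent_addr_get() to the new extent_base_get() because, once huge allocations are cache-oblivious, e_addr may sit a cacheline-granular distance past the start of the mapping; page and chunk arithmetic has to start from the page-aligned base. A minimal standalone sketch of the distinction (PAGE_ADDR2BASE and LG_PAGE mirror jemalloc's definitions; the addresses are hypothetical):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define LG_PAGE 12
#define PAGE ((uintptr_t)1 << LG_PAGE)
#define PAGE_ADDR2BASE(a) ((void *)((uintptr_t)(a) & ~(PAGE - 1)))

int
main(void)
{
    /* Hypothetical mapping base and a cacheline-randomized user address. */
    uintptr_t base = (uintptr_t)0x7f0000000000ULL;
    uintptr_t addr = base + 5 * 64;

    /* extent_base_get() recovers the mapping start from e_addr. */
    assert((uintptr_t)PAGE_ADDR2BASE(addr) == base);
    /* Page-index math from the base lands on page 0 of the extent, as
     * arena_miscelm_to_pageind() requires; from addr it would not. */
    printf("pageind = %zu\n", (size_t)((addr - base) >> LG_PAGE));
    return (0);
}
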
include/jemalloc/internal/chunk.h

@@ -45,11 +45,11 @@ chunk_hooks_t chunk_hooks_set(tsdn_t *tsdn, arena_t *arena,
const chunk_hooks_t *chunk_hooks);
extent_t *chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
bool *zero, bool slab);
chunk_hooks_t *chunk_hooks, void *new_addr, size_t usize, size_t pad,
size_t alignment, bool *zero, bool slab);
extent_t *chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
bool *zero, bool *commit, bool slab);
chunk_hooks_t *chunk_hooks, void *new_addr, size_t usize, size_t pad,
size_t alignment, bool *zero, bool *commit, bool slab);
void chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, extent_t *extent);
void chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,

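The prototypes above change from a single size argument to a (usize, pad) pair: the chunk hooks map usize + pad bytes, and pad (large_pad, one page, for huge allocations; 0 for slabs and internal chunks) supplies the headroom into which extent_addr_randomize() can push the user address. A small sketch of the relationship, with hypothetical values:

#include <stddef.h>
#include <stdio.h>

#define LG_PAGE 12
/* One page of pad when cache-oblivious support is enabled. */
static const size_t large_pad = (size_t)1 << LG_PAGE;

int
main(void)
{
    size_t usize = (size_t)4 << 20; /* hypothetical huge request: 4 MiB */
    size_t pad = large_pad;         /* slab/metadata callers pass 0 */
    size_t size = usize + pad;      /* total bytes the chunk hooks see */

    printf("map %zu bytes for a %zu-byte usable allocation\n", size, usize);
    return (0);
}
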
include/jemalloc/internal/extent.h

@@ -88,8 +88,10 @@ ph_proto(, extent_heap_, extent_heap_t, extent_t)
#ifndef JEMALLOC_ENABLE_INLINE
arena_t *extent_arena_get(const extent_t *extent);
void *extent_base_get(const extent_t *extent);
void *extent_addr_get(const extent_t *extent);
size_t extent_size_get(const extent_t *extent);
size_t extent_usize_get(const extent_t *extent);
void *extent_before_get(const extent_t *extent);
void *extent_last_get(const extent_t *extent);
void *extent_past_get(const extent_t *extent);
@@ -102,6 +104,7 @@ bool extent_slab_get(const extent_t *extent);
prof_tctx_t *extent_prof_tctx_get(const extent_t *extent);
void extent_arena_set(extent_t *extent, arena_t *arena);
void extent_addr_set(extent_t *extent, void *addr);
void extent_addr_randomize(tsdn_t *tsdn, extent_t *extent, size_t alignment);
void extent_size_set(extent_t *extent, size_t size);
void extent_active_set(extent_t *extent, bool active);
void extent_dirty_set(extent_t *extent, bool dirty);
@@ -125,10 +128,21 @@ extent_arena_get(const extent_t *extent)
return (extent->e_arena);
}
JEMALLOC_INLINE void *
extent_base_get(const extent_t *extent)
{
assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
!extent->e_slab);
return (PAGE_ADDR2BASE(extent->e_addr));
}
JEMALLOC_INLINE void *
extent_addr_get(const extent_t *extent)
{
assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
!extent->e_slab);
return (extent->e_addr);
}
@@ -139,6 +153,14 @@ extent_size_get(const extent_t *extent)
return (extent->e_size);
}
JEMALLOC_INLINE size_t
extent_usize_get(const extent_t *extent)
{
assert(!extent->e_slab);
return (extent->e_size - large_pad);
}
JEMALLOC_INLINE void *
extent_before_get(const extent_t *extent)
{
@@ -224,6 +246,24 @@ extent_addr_set(extent_t *extent, void *addr)
extent->e_addr = addr;
}
JEMALLOC_INLINE void
extent_addr_randomize(tsdn_t *tsdn, extent_t *extent, size_t alignment)
{
assert(extent_base_get(extent) == extent_addr_get(extent));
if (alignment < PAGE) {
unsigned lg_range = LG_PAGE -
lg_floor(CACHELINE_CEILING(alignment));
uint64_t r =
prng_lg_range(&extent_arena_get(extent)->offset_state,
lg_range, true);
uintptr_t random_offset = ((uintptr_t)r) << lg_range;
extent->e_addr = (void *)((uintptr_t)extent->e_addr +
random_offset);
}
}
JEMALLOC_INLINE void
extent_size_set(extent_t *extent, size_t size)
{
@@ -278,6 +318,8 @@ extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
bool active, bool dirty, bool zeroed, bool committed, bool slab)
{
assert(addr == PAGE_ADDR2BASE(addr) || !slab);
extent_arena_set(extent, arena);
extent_addr_set(extent, addr);
extent_size_set(extent, size);

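extent_addr_randomize() above is where huge allocations acquire their cache-oblivious offset. For alignments at or below the cacheline size, with 4 KiB pages and 64-byte cachelines, lg_range is LG_PAGE - LG_CACHELINE = 6, so r takes one of 64 values. A standalone walk-through of that case; PRNG_A/PRNG_C are assumed stand-ins for the LCG constants defined in prng.h:

#include <stdint.h>
#include <stdio.h>

#define LG_PAGE 12
#define LG_CACHELINE 6

/* Assumed stand-ins for the constants in prng.h. */
#define PRNG_A UINT64_C(6364136223846793005)
#define PRNG_C UINT64_C(1442695040888963407)

int
main(void)
{
    uint64_t state = 42; /* arena->offset_state in the real code */
    unsigned lg_range = LG_PAGE - LG_CACHELINE; /* 6 for this case */

    state = (state * PRNG_A) + PRNG_C;      /* prng_state_next() */
    uint64_t r = state >> (64 - lg_range);  /* r in [0..64) */
    uintptr_t random_offset = ((uintptr_t)r) << lg_range;

    /* Here lg_range == LG_CACHELINE, so the offset is a cacheline
     * multiple in [0..PAGE). */
    printf("random_offset = %zu\n", (size_t)random_offset);
    return (0);
}
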
include/jemalloc/internal/private_symbols.txt

@@ -206,10 +206,12 @@ dss_prec_names
extent_active_get
extent_active_set
extent_addr_get
extent_addr_randomize
extent_addr_set
extent_alloc
extent_arena_get
extent_arena_set
extent_base_get
extent_before_get
extent_committed_get
extent_committed_set
@@ -230,6 +232,7 @@ extent_size_quantize_ceil
extent_size_quantize_floor
extent_slab_get
extent_slab_set
extent_usize_get
extent_zeroed_get
extent_zeroed_set
ffs_llu
@@ -373,6 +376,7 @@ pow2_ceil_u64
pow2_ceil_zu
prng_lg_range
prng_range
prng_state_next
prof_active
prof_active_get
prof_active_get_unlocked

include/jemalloc/internal/prng.h

@@ -35,28 +35,45 @@
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
uint64_t prng_lg_range(uint64_t *state, unsigned lg_range);
uint64_t prng_range(uint64_t *state, uint64_t range);
uint64_t prng_state_next(uint64_t state);
uint64_t prng_lg_range(uint64_t *state, unsigned lg_range, bool atomic);
uint64_t prng_range(uint64_t *state, uint64_t range, bool atomic);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PRNG_C_))
JEMALLOC_ALWAYS_INLINE uint64_t
prng_lg_range(uint64_t *state, unsigned lg_range)
prng_state_next(uint64_t state)
{
uint64_t ret;
return ((state * PRNG_A) + PRNG_C);
}
JEMALLOC_ALWAYS_INLINE uint64_t
prng_lg_range(uint64_t *state, unsigned lg_range, bool atomic)
{
uint64_t ret, state1;
assert(lg_range > 0);
assert(lg_range <= 64);
ret = (*state * PRNG_A) + PRNG_C;
*state = ret;
ret >>= (64 - lg_range);
if (atomic) {
uint64_t state0;
do {
state0 = atomic_read_uint64(state);
state1 = prng_state_next(state0);
} while (atomic_cas_uint64(state, state0, state1));
} else {
state1 = prng_state_next(*state);
*state = state1;
}
ret = state1 >> (64 - lg_range);
return (ret);
}
JEMALLOC_ALWAYS_INLINE uint64_t
prng_range(uint64_t *state, uint64_t range)
prng_range(uint64_t *state, uint64_t range, bool atomic)
{
uint64_t ret;
unsigned lg_range;
@@ -68,7 +85,7 @@ prng_range(uint64_t *state, uint64_t range)
/* Generate a result in [0..range) via repeated trial. */
do {
ret = prng_lg_range(state, lg_range);
ret = prng_lg_range(state, lg_range, atomic);
} while (ret >= range);
return (ret);

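The atomic flag added here lets threads share one PRNG state (such as arena->offset_state) without a lock: prng_state_next() is retried with compare-and-swap until the update lands. Note that jemalloc's atomic_cas_uint64() returns true on failure, which is why the loop above spins on its return value. A minimal sketch of the same update using C11 atomics, whose compare-exchange instead returns true on success; the LCG constants are assumed stand-ins for prng.h's:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define PRNG_A UINT64_C(6364136223846793005) /* assumed stand-in */
#define PRNG_C UINT64_C(1442695040888963407) /* assumed stand-in */

static uint64_t
prng_state_next(uint64_t state)
{
    return ((state * PRNG_A) + PRNG_C);
}

static uint64_t
prng_lg_range_atomic(_Atomic uint64_t *state, unsigned lg_range)
{
    uint64_t state0, state1;

    do {
        state0 = atomic_load(state);
        state1 = prng_state_next(state0);
        /* Retry if another thread advanced *state first. */
    } while (!atomic_compare_exchange_weak(state, &state0, state1));
    return (state1 >> (64 - lg_range));
}

int
main(void)
{
    static _Atomic uint64_t offset_state = 42;

    printf("r = %llu\n", (unsigned long long)
        prng_lg_range_atomic(&offset_state, 6));
    return (0);
}
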
src/arena.c

@@ -57,7 +57,7 @@ static void arena_bin_lower_run(tsdn_t *tsdn, arena_t *arena,
JEMALLOC_INLINE_C size_t
arena_miscelm_size_get(extent_t *extent, const arena_chunk_map_misc_t *miscelm)
{
arena_chunk_t *chunk = (arena_chunk_t *)extent_addr_get(extent);
arena_chunk_t *chunk = (arena_chunk_t *)extent_base_get(extent);
size_t pageind = arena_miscelm_to_pageind(extent, miscelm);
size_t mapbits = arena_mapbits_get(chunk, pageind);
return (arena_mapbits_size_decode(mapbits));
@@ -154,7 +154,7 @@ static void
arena_avail_insert(arena_t *arena, extent_t *extent, size_t pageind,
size_t npages)
{
arena_chunk_t *chunk = (arena_chunk_t *)extent_addr_get(extent);
arena_chunk_t *chunk = (arena_chunk_t *)extent_base_get(extent);
pszind_t pind = psz2ind(run_quantize_floor(arena_miscelm_size_get(
extent, arena_miscelm_get_const(chunk, pageind))));
assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
@@ -167,7 +167,7 @@ static void
arena_avail_remove(arena_t *arena, extent_t *extent, size_t pageind,
size_t npages)
{
arena_chunk_t *chunk = (arena_chunk_t *)extent_addr_get(extent);
arena_chunk_t *chunk = (arena_chunk_t *)extent_base_get(extent);
pszind_t pind = psz2ind(run_quantize_floor(arena_miscelm_size_get(
extent, arena_miscelm_get_const(chunk, pageind))));
assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
@@ -221,14 +221,14 @@ arena_chunk_dirty_npages(const extent_t *extent)
static extent_t *
arena_chunk_cache_alloc_locked(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
bool *zero, bool slab)
chunk_hooks_t *chunk_hooks, void *new_addr, size_t usize, size_t pad,
size_t alignment, bool *zero, bool slab)
{
malloc_mutex_assert_owner(tsdn, &arena->lock);
return (chunk_alloc_cache(tsdn, arena, chunk_hooks, new_addr, size,
alignment, zero, slab));
return (chunk_alloc_cache(tsdn, arena, chunk_hooks, new_addr, usize,
pad, alignment, zero, slab));
}
extent_t *
@@ -240,7 +240,7 @@ arena_chunk_cache_alloc(tsdn_t *tsdn, arena_t *arena,
malloc_mutex_lock(tsdn, &arena->lock);
extent = arena_chunk_cache_alloc_locked(tsdn, arena, chunk_hooks,
new_addr, size, alignment, zero, false);
new_addr, size, 0, alignment, zero, false);
malloc_mutex_unlock(tsdn, &arena->lock);
return (extent);
@@ -388,7 +388,7 @@ JEMALLOC_INLINE_C void
arena_run_reg_dalloc(tsdn_t *tsdn, arena_run_t *run, extent_t *extent,
void *ptr)
{
arena_chunk_t *chunk = (arena_chunk_t *)extent_addr_get(extent);
arena_chunk_t *chunk = (arena_chunk_t *)extent_base_get(extent);
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
size_t mapbits = arena_mapbits_get(chunk, pageind);
szind_t binind = arena_ptr_small_binind_get(tsdn, ptr, mapbits);
@@ -460,7 +460,7 @@ static void
arena_run_split_remove(arena_t *arena, extent_t *extent, size_t run_ind,
size_t flag_dirty, size_t flag_decommitted, size_t need_pages)
{
arena_chunk_t *chunk = (arena_chunk_t *)extent_addr_get(extent);
arena_chunk_t *chunk = (arena_chunk_t *)extent_base_get(extent);
size_t total_pages, rem_pages;
assert(flag_dirty == 0 || flag_decommitted == 0);
@@ -509,7 +509,7 @@ arena_run_split_large_helper(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
size_t flag_dirty, flag_decommitted, run_ind, need_pages;
size_t flag_unzeroed_mask;
chunk = (arena_chunk_t *)extent_addr_get(extent);
chunk = (arena_chunk_t *)extent_base_get(extent);
miscelm = arena_run_to_miscelm(extent, run);
run_ind = arena_miscelm_to_pageind(extent, miscelm);
flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
@@ -592,7 +592,7 @@ arena_run_split_small(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
assert(binind != BININD_INVALID);
chunk = (arena_chunk_t *)extent_addr_get(extent);
chunk = (arena_chunk_t *)extent_base_get(extent);
miscelm = arena_run_to_miscelm(extent, run);
run_ind = arena_miscelm_to_pageind(extent, miscelm);
flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
@@ -629,16 +629,16 @@ arena_chunk_init_spare(arena_t *arena)
arena->spare = NULL;
assert(arena_mapbits_allocated_get((arena_chunk_t *)
extent_addr_get(extent), map_bias) == 0);
extent_base_get(extent), map_bias) == 0);
assert(arena_mapbits_allocated_get((arena_chunk_t *)
extent_addr_get(extent), chunk_npages-1) == 0);
extent_base_get(extent), chunk_npages-1) == 0);
assert(arena_mapbits_unallocated_size_get((arena_chunk_t *)
extent_addr_get(extent), map_bias) == arena_maxrun);
extent_base_get(extent), map_bias) == arena_maxrun);
assert(arena_mapbits_unallocated_size_get((arena_chunk_t *)
extent_addr_get(extent), chunk_npages-1) == arena_maxrun);
extent_base_get(extent), chunk_npages-1) == arena_maxrun);
assert(arena_mapbits_dirty_get((arena_chunk_t *)
extent_addr_get(extent), map_bias) ==
arena_mapbits_dirty_get((arena_chunk_t *)extent_addr_get(extent),
extent_base_get(extent), map_bias) ==
arena_mapbits_dirty_get((arena_chunk_t *)extent_base_get(extent),
chunk_npages-1));
return (extent);
@@ -653,7 +653,7 @@ arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena,
malloc_mutex_unlock(tsdn, &arena->lock);
extent = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, chunksize,
PAGE, zero, commit, true);
0, CACHELINE, zero, commit, true);
if (extent != NULL && !*commit) {
/* Commit header. */
if (chunk_commit_wrapper(tsdn, arena, chunk_hooks, extent, 0,
@@ -676,7 +676,7 @@ arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero,
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
extent = arena_chunk_cache_alloc_locked(tsdn, arena, &chunk_hooks, NULL,
chunksize, PAGE, zero, true);
chunksize, 0, CACHELINE, zero, true);
if (extent != NULL)
*commit = true;
if (extent == NULL) {
@@ -717,7 +717,7 @@ arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena)
*/
flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED;
flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED;
arena_mapbits_unallocated_set((arena_chunk_t *)extent_addr_get(extent),
arena_mapbits_unallocated_set((arena_chunk_t *)extent_base_get(extent),
map_bias, arena_maxrun, flag_unzeroed | flag_decommitted);
/*
* There is no need to initialize the internal page map entries unless
@@ -726,18 +726,18 @@ arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena)
if (!zero) {
for (i = map_bias+1; i < chunk_npages-1; i++) {
arena_mapbits_internal_set((arena_chunk_t *)
extent_addr_get(extent), i, flag_unzeroed);
extent_base_get(extent), i, flag_unzeroed);
}
} else {
if (config_debug) {
for (i = map_bias+1; i < chunk_npages-1; i++) {
assert(arena_mapbits_unzeroed_get(
(arena_chunk_t *)extent_addr_get(extent), i)
(arena_chunk_t *)extent_base_get(extent), i)
== flag_unzeroed);
}
}
}
arena_mapbits_unallocated_set((arena_chunk_t *)extent_addr_get(extent),
arena_mapbits_unallocated_set((arena_chunk_t *)extent_base_get(extent),
chunk_npages-1, arena_maxrun, flag_unzeroed);
return (extent);
@@ -770,7 +770,7 @@ arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, extent_t *extent)
extent_committed_set(extent,
(arena_mapbits_decommitted_get((arena_chunk_t *)
extent_addr_get(extent), map_bias) == 0));
extent_base_get(extent), map_bias) == 0));
if (!extent_committed_get(extent)) {
/*
* Decommit the header. Mark the chunk as decommitted even if
@@ -796,10 +796,10 @@ arena_spare_discard(tsdn_t *tsdn, arena_t *arena, extent_t *spare)
assert(arena->spare != spare);
if (arena_mapbits_dirty_get((arena_chunk_t *)extent_addr_get(spare),
if (arena_mapbits_dirty_get((arena_chunk_t *)extent_base_get(spare),
map_bias) != 0) {
arena_run_dirty_remove(arena, (arena_chunk_t *)
extent_addr_get(spare), map_bias, chunk_npages-map_bias);
extent_base_get(spare), map_bias, chunk_npages-map_bias);
}
arena_chunk_discard(tsdn, arena, spare);
@@ -808,7 +808,7 @@ arena_spare_discard(tsdn_t *tsdn, arena_t *arena, extent_t *spare)
static void
arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent)
{
arena_chunk_t *chunk = (arena_chunk_t *)extent_addr_get(extent);
arena_chunk_t *chunk = (arena_chunk_t *)extent_base_get(extent);
extent_t *spare;
assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
@@ -898,7 +898,7 @@ arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena,
bool commit = true;
extent = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, usize,
alignment, zero, &commit, false);
large_pad, alignment, zero, &commit, false);
if (extent == NULL) {
/* Revert optimistic stats updates. */
malloc_mutex_lock(tsdn, &arena->lock);
@@ -930,7 +930,7 @@ arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
arena_nactive_add(arena, usize >> LG_PAGE);
extent = arena_chunk_cache_alloc_locked(tsdn, arena, &chunk_hooks, NULL,
usize, alignment, zero, false);
usize, large_pad, alignment, zero, false);
malloc_mutex_unlock(tsdn, &arena->lock);
if (extent == NULL) {
extent = arena_chunk_alloc_huge_hard(tsdn, arena, &chunk_hooks,
@@ -1046,7 +1046,7 @@ arena_run_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t size, bool zero)
extent = arena_chunk_alloc(tsdn, arena);
if (extent != NULL) {
run = &arena_miscelm_get_mutable((arena_chunk_t *)
extent_addr_get(extent), map_bias)->run;
extent_base_get(extent), map_bias)->run;
if (arena_run_split_large(tsdn, arena, iealloc(tsdn, run), run,
size, zero))
run = NULL;
@@ -1095,7 +1095,7 @@ arena_run_alloc_small(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t binind)
extent = arena_chunk_alloc(tsdn, arena);
if (extent != NULL) {
run = &arena_miscelm_get_mutable(
(arena_chunk_t *)extent_addr_get(extent), map_bias)->run;
(arena_chunk_t *)extent_base_get(extent), map_bias)->run;
if (arena_run_split_small(tsdn, arena, iealloc(tsdn, run), run,
size, binind))
run = NULL;
@@ -1161,7 +1161,7 @@ arena_decay_deadline_init(arena_t *arena)
nstime_t jitter;
nstime_init(&jitter, prng_range(&arena->decay_jitter_state,
nstime_ns(&arena->decay_interval)));
nstime_ns(&arena->decay_interval), false));
nstime_add(&arena->decay_deadline, &jitter);
}
}
@@ -1428,7 +1428,7 @@ arena_dirty_count(tsdn_t *tsdn, arena_t *arena)
} else {
extent_t *extent = iealloc(tsdn, rdelm);
arena_chunk_t *chunk =
(arena_chunk_t *)extent_addr_get(extent);
(arena_chunk_t *)extent_base_get(extent);
arena_chunk_map_misc_t *miscelm =
arena_rd_to_miscelm(extent, rdelm);
size_t pageind = arena_miscelm_to_pageind(extent,
@@ -1476,8 +1476,9 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
/* Allocate. */
zero = false;
extent = arena_chunk_cache_alloc_locked(tsdn, arena,
chunk_hooks, extent_addr_get(chunkselm),
extent_size_get(chunkselm), PAGE, &zero, false);
chunk_hooks, extent_base_get(chunkselm),
extent_size_get(chunkselm), 0, CACHELINE, &zero,
false);
assert(extent == chunkselm);
assert(zero == extent_zeroed_get(chunkselm));
extent_dirty_insert(chunkselm, purge_runs_sentinel,
@@ -1494,7 +1495,7 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
arena_run_t *run = &miscelm->run;
size_t run_size =
arena_mapbits_unallocated_size_get((arena_chunk_t *)
extent_addr_get(extent), pageind);
extent_base_get(extent), pageind);
npages = run_size >> LG_PAGE;
if (opt_purge == purge_mode_decay && arena->ndirty -
@@ -1503,9 +1504,9 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
assert(pageind + npages <= chunk_npages);
assert(arena_mapbits_dirty_get((arena_chunk_t *)
extent_addr_get(extent), pageind) ==
extent_base_get(extent), pageind) ==
arena_mapbits_dirty_get((arena_chunk_t *)
extent_addr_get(extent), pageind+npages-1));
extent_base_get(extent), pageind+npages-1));
/*
* If purging the spare chunk's run, make it available
@@ -1572,7 +1573,7 @@ arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
bool decommitted;
extent_t *extent = iealloc(tsdn, rdelm);
arena_chunk_t *chunk =
(arena_chunk_t *)extent_addr_get(extent);
(arena_chunk_t *)extent_base_get(extent);
arena_chunk_map_misc_t *miscelm =
arena_rd_to_miscelm(extent, rdelm);
pageind = arena_miscelm_to_pageind(extent, miscelm);
@@ -1653,7 +1654,7 @@ arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
} else {
extent_t *extent = iealloc(tsdn, rdelm);
arena_chunk_t *chunk =
(arena_chunk_t *)extent_addr_get(extent);
(arena_chunk_t *)extent_base_get(extent);
arena_chunk_map_misc_t *miscelm =
arena_rd_to_miscelm(extent, rdelm);
size_t pageind = arena_miscelm_to_pageind(extent,
@@ -1734,7 +1735,7 @@ arena_purge(tsdn_t *tsdn, arena_t *arena, bool all)
static void
arena_achunk_prof_reset(tsd_t *tsd, arena_t *arena, extent_t *extent)
{
arena_chunk_t *chunk = (arena_chunk_t *)extent_addr_get(extent);
arena_chunk_t *chunk = (arena_chunk_t *)extent_base_get(extent);
size_t pageind, npages;
cassert(config_prof);
@@ -1809,7 +1810,7 @@ arena_reset(tsd_t *tsd, arena_t *arena)
malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
for (extent = ql_last(&arena->huge, ql_link); extent != NULL; extent =
ql_last(&arena->huge, ql_link)) {
void *ptr = extent_addr_get(extent);
void *ptr = extent_base_get(extent);
size_t usize;
malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);
@@ -1882,7 +1883,7 @@ arena_run_coalesce(arena_t *arena, extent_t *extent, size_t *p_size,
size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty,
size_t flag_decommitted)
{
arena_chunk_t *chunk = (arena_chunk_t *)extent_addr_get(extent);
arena_chunk_t *chunk = (arena_chunk_t *)extent_base_get(extent);
size_t size = *p_size;
size_t run_ind = *p_run_ind;
size_t run_pages = *p_run_pages;
@@ -2000,7 +2001,7 @@ arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
arena_chunk_map_misc_t *miscelm;
size_t size, run_ind, run_pages, flag_dirty, flag_decommitted;
chunk = (arena_chunk_t *)extent_addr_get(extent);
chunk = (arena_chunk_t *)extent_base_get(extent);
miscelm = arena_run_to_miscelm(extent, run);
run_ind = arena_miscelm_to_pageind(extent, miscelm);
assert(run_ind >= map_bias);
@@ -2260,7 +2261,7 @@ arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
* were just deallocated from the run.
*/
extent = iealloc(tsdn, run);
chunk = (arena_chunk_t *)extent_addr_get(extent);
chunk = (arena_chunk_t *)extent_base_get(extent);
if (run->nfree == bin_info->nregs) {
arena_dalloc_bin_run(tsdn, arena, chunk, extent,
run, bin);
@@ -2425,7 +2426,6 @@ arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
/* Large allocation. */
usize = index2size(binind);
malloc_mutex_lock(tsdn, &arena->lock);
if (config_cache_oblivious) {
uint64_t r;
@@ -2434,10 +2434,12 @@ arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
* that is a multiple of the cacheline size, e.g. [0 .. 63) * 64
* for 4 KiB pages and 64-byte cachelines.
*/
r = prng_lg_range(&arena->offset_state, LG_PAGE - LG_CACHELINE);
r = prng_lg_range(&arena->offset_state, LG_PAGE - LG_CACHELINE,
true);
random_offset = ((uintptr_t)r) << LG_CACHELINE;
} else
random_offset = 0;
malloc_mutex_lock(tsdn, &arena->lock);
run = arena_run_alloc_large(tsdn, arena, usize + large_pad, zero);
if (run == NULL) {
malloc_mutex_unlock(tsdn, &arena->lock);
@@ -2526,7 +2528,7 @@ arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
return (NULL);
}
extent = iealloc(tsdn, run);
chunk = (arena_chunk_t *)extent_addr_get(extent);
chunk = (arena_chunk_t *)extent_base_get(extent);
miscelm = arena_run_to_miscelm(extent, run);
rpages = arena_miscelm_to_rpages(extent, miscelm);
@@ -2616,7 +2618,7 @@ arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
if (likely(usize <= large_maxclass)) {
ret = arena_palloc_large(tsdn, arena, usize, alignment,
zero);
} else if (likely(alignment <= PAGE))
} else if (likely(alignment <= CACHELINE))
ret = huge_malloc(tsdn, arena, usize, zero);
else
ret = huge_palloc(tsdn, arena, usize, alignment, zero);
@@ -2634,12 +2636,11 @@ arena_prof_promoted(tsdn_t *tsdn, const extent_t *extent, const void *ptr,
cassert(config_prof);
assert(ptr != NULL);
assert(extent_addr_get(extent) != ptr);
assert(isalloc(tsdn, extent, ptr, false) == LARGE_MINCLASS);
assert(isalloc(tsdn, extent, ptr, true) == LARGE_MINCLASS);
assert(size <= SMALL_MAXCLASS);
chunk = (arena_chunk_t *)extent_addr_get(extent);
chunk = (arena_chunk_t *)extent_base_get(extent);
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
binind = size2index(size);
assert(binind < NBINS);
@@ -3030,7 +3031,7 @@ arena_ralloc_large(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize,
return (false);
}
chunk = (arena_chunk_t *)extent_addr_get(extent);
chunk = (arena_chunk_t *)extent_base_get(extent);
arena = extent_arena_get(extent);
if (oldsize < usize_max) {

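One detail worth noting in arena_malloc_large() above: the malloc_mutex_lock() call moves below the offset computation. Because prng_lg_range(..., true) now advances arena->offset_state with an atomic compare-and-swap, drawing the random offset no longer requires arena->lock, which shortens the critical section. A sketch of the resulting ordering, with a pthread mutex and a trivial atomic counter standing in for jemalloc's primitives (link with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define LG_CACHELINE 6

static pthread_mutex_t arena_lock = PTHREAD_MUTEX_INITIALIZER;
static _Atomic uint64_t offset_state;

int
main(void)
{
    /* Trivial stand-in for prng_lg_range(&arena->offset_state, 6, true):
     * any lock-free update of shared state can run outside the mutex. */
    uint64_t r = atomic_fetch_add(&offset_state, 1) & 63;
    uintptr_t random_offset = ((uintptr_t)r) << LG_CACHELINE;

    pthread_mutex_lock(&arena_lock);
    /* arena_run_alloc_large(tsdn, arena, usize + large_pad, zero) would
     * run here, under the now-shorter critical section. */
    pthread_mutex_unlock(&arena_lock);

    printf("offset %zu drawn before locking\n", (size_t)random_offset);
    return (0);
}
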
src/chunk.c

@@ -161,7 +161,7 @@ extent_rtree_acquire(tsdn_t *tsdn, const extent_t *extent, bool dependent,
{
*r_elm_a = rtree_elm_acquire(tsdn, &chunks_rtree,
(uintptr_t)extent_addr_get(extent), dependent, init_missing);
(uintptr_t)extent_base_get(extent), dependent, init_missing);
if (!dependent && *r_elm_a == NULL)
return (true);
assert(*r_elm_a != NULL);
@@ -207,7 +207,7 @@ chunk_interior_register(tsdn_t *tsdn, const extent_t *extent)
for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
rtree_write(tsdn, &chunks_rtree,
(uintptr_t)extent_addr_get(extent) + (uintptr_t)(i <<
(uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
LG_PAGE), extent);
}
}
@@ -252,7 +252,7 @@ chunk_interior_deregister(tsdn_t *tsdn, const extent_t *extent)
for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
rtree_clear(tsdn, &chunks_rtree,
(uintptr_t)extent_addr_get(extent) + (uintptr_t)(i <<
(uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
LG_PAGE));
}
}
@@ -315,14 +315,19 @@ chunk_leak(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, bool cache,
static extent_t *
chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
extent_heap_t extent_heaps[NPSIZES], bool cache, void *new_addr,
size_t size, size_t alignment, bool *zero, bool *commit, bool slab)
size_t usize, size_t pad, size_t alignment, bool *zero, bool *commit,
bool slab)
{
extent_t *extent;
size_t alloc_size, leadsize, trailsize;
size_t size, alloc_size, leadsize, trailsize;
alloc_size = s2u(size + alignment - PAGE);
assert(new_addr == NULL || !slab);
assert(pad == 0 || !slab);
size = usize + pad;
alloc_size = s2u(size + PAGE_CEILING(alignment) - PAGE);
/* Beware size_t wrap-around. */
if (alloc_size < size)
if (alloc_size < usize)
return (NULL);
malloc_mutex_lock(tsdn, &arena->chunks_mtx);
chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
@@ -350,8 +355,8 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
extent_heaps_remove(extent_heaps, extent);
arena_chunk_cache_maybe_remove(arena, extent, cache);
leadsize = ALIGNMENT_CEILING((uintptr_t)extent_addr_get(extent),
alignment) - (uintptr_t)extent_addr_get(extent);
leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(extent),
PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(extent);
assert(new_addr == NULL || leadsize == 0);
assert(extent_size_get(extent) >= leadsize + size);
trailsize = extent_size_get(extent) - leadsize - size;
@@ -388,7 +393,7 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
}
if (!extent_committed_get(extent) &&
chunk_hooks->commit(extent_addr_get(extent),
chunk_hooks->commit(extent_base_get(extent),
extent_size_get(extent), 0, extent_size_get(extent), arena->ind)) {
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
chunk_record(tsdn, arena, chunk_hooks, extent_heaps, cache,
@@ -396,6 +401,8 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
return (NULL);
}
if (pad != 0)
extent_addr_randomize(tsdn, extent, alignment);
extent_active_set(extent, true);
if (slab) {
extent_slab_set(extent, slab);
@@ -407,13 +414,13 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
if (*zero) {
if (!extent_zeroed_get(extent)) {
memset(extent_addr_get(extent), 0,
extent_size_get(extent));
extent_usize_get(extent));
} else if (config_debug) {
size_t i;
size_t *p = (size_t *)(uintptr_t)
extent_addr_get(extent);
for (i = 0; i < size / sizeof(size_t); i++)
for (i = 0; i < usize / sizeof(size_t); i++)
assert(p[i] == 0);
}
}
@@ -456,17 +463,18 @@ chunk_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
extent_t *
chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
void *new_addr, size_t size, size_t alignment, bool *zero, bool slab)
void *new_addr, size_t usize, size_t pad, size_t alignment, bool *zero,
bool slab)
{
extent_t *extent;
bool commit;
assert(size != 0);
assert(usize + pad != 0);
assert(alignment != 0);
commit = true;
extent = chunk_recycle(tsdn, arena, chunk_hooks, arena->chunks_cached,
true, new_addr, size, alignment, zero, &commit, slab);
true, new_addr, usize, pad, alignment, zero, &commit, slab);
if (extent == NULL)
return (NULL);
assert(commit);
@@ -507,31 +515,34 @@ chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
static extent_t *
chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
bool slab)
void *new_addr, size_t usize, size_t pad, size_t alignment, bool *zero,
bool *commit, bool slab)
{
extent_t *extent;
assert(size != 0);
assert(usize != 0);
assert(alignment != 0);
extent = chunk_recycle(tsdn, arena, chunk_hooks, arena->chunks_retained,
false, new_addr, size, alignment, zero, commit, slab);
if (config_stats && extent != NULL)
false, new_addr, usize, pad, alignment, zero, commit, slab);
if (extent != NULL && config_stats) {
size_t size = usize + pad;
arena->stats.retained -= size;
}
return (extent);
}
static extent_t *
chunk_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
bool *zero, bool *commit, bool slab)
chunk_hooks_t *chunk_hooks, void *new_addr, size_t usize, size_t pad,
size_t alignment, bool *zero, bool *commit, bool slab)
{
extent_t *extent;
size_t size;
void *addr;
size = usize + pad;
extent = extent_alloc(tsdn, arena);
if (extent == NULL)
return (NULL);
@@ -542,6 +553,8 @@ chunk_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
return (NULL);
}
extent_init(extent, arena, addr, size, true, false, zero, commit, slab);
if (pad != 0)
extent_addr_randomize(tsdn, extent, alignment);
if (chunk_register(tsdn, extent)) {
chunk_leak(tsdn, arena, chunk_hooks, false, extent);
return (NULL);
@@ -552,18 +565,18 @@ chunk_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
extent_t *
chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
bool slab)
void *new_addr, size_t usize, size_t pad, size_t alignment, bool *zero,
bool *commit, bool slab)
{
extent_t *extent;
chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
extent = chunk_alloc_retained(tsdn, arena, chunk_hooks, new_addr, size,
alignment, zero, commit, slab);
extent = chunk_alloc_retained(tsdn, arena, chunk_hooks, new_addr, usize,
pad, alignment, zero, commit, slab);
if (extent == NULL) {
extent = chunk_alloc_wrapper_hard(tsdn, arena, chunk_hooks,
new_addr, size, alignment, zero, commit, slab);
new_addr, usize, pad, alignment, zero, commit, slab);
}
return (extent);
@@ -629,7 +642,7 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
extent_slab_set(extent, false);
}
assert(chunk_lookup(tsdn, extent_addr_get(extent), true) == extent);
assert(chunk_lookup(tsdn, extent_base_get(extent), true) == extent);
extent_heaps_insert(extent_heaps, extent);
arena_chunk_cache_maybe_insert(arena, extent, cache);
@@ -657,9 +670,10 @@ chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
extent_t *extent)
{
assert(extent_addr_get(extent) != NULL);
assert(extent_base_get(extent) != NULL);
assert(extent_size_get(extent) != 0);
extent_addr_set(extent, extent_base_get(extent));
extent_zeroed_set(extent, false);
chunk_record(tsdn, arena, chunk_hooks, arena->chunks_cached, true,
@@ -681,12 +695,14 @@ chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
extent_t *extent)
{
assert(extent_addr_get(extent) != NULL);
assert(extent_base_get(extent) != NULL);
assert(extent_size_get(extent) != 0);
extent_addr_set(extent, extent_base_get(extent));
chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
/* Try to deallocate. */
if (!chunk_hooks->dalloc(extent_addr_get(extent),
if (!chunk_hooks->dalloc(extent_base_get(extent),
extent_size_get(extent), extent_committed_get(extent),
arena->ind)) {
chunk_deregister(tsdn, extent);
@@ -696,12 +712,12 @@ chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
/* Try to decommit; purge if that fails. */
if (extent_committed_get(extent)) {
extent_committed_set(extent,
chunk_hooks->decommit(extent_addr_get(extent),
chunk_hooks->decommit(extent_base_get(extent),
extent_size_get(extent), 0, extent_size_get(extent),
arena->ind));
}
extent_zeroed_set(extent, !extent_committed_get(extent) ||
!chunk_hooks->purge(extent_addr_get(extent),
!chunk_hooks->purge(extent_base_get(extent),
extent_size_get(extent), 0, extent_size_get(extent), arena->ind));
if (config_stats)
@@ -726,7 +742,7 @@ chunk_commit_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
{
chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
return (chunk_hooks->commit(extent_addr_get(extent),
return (chunk_hooks->commit(extent_base_get(extent),
extent_size_get(extent), offset, length, arena->ind));
}
@@ -745,7 +761,7 @@ chunk_decommit_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
{
chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
return (chunk_hooks->decommit(extent_addr_get(extent),
return (chunk_hooks->decommit(extent_base_get(extent),
extent_size_get(extent), offset, length, arena->ind));
}
@@ -769,7 +785,7 @@ chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
{
chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
return (chunk_hooks->purge(extent_addr_get(extent),
return (chunk_hooks->purge(extent_base_get(extent),
extent_size_get(extent), offset, length, arena->ind));
}
@@ -811,7 +827,7 @@ chunk_split_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
goto label_error_b;
}
extent_init(trail, arena, (void *)((uintptr_t)extent_addr_get(extent) +
extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) +
size_a), size_b, extent_active_get(extent),
extent_dirty_get(extent), extent_zeroed_get(extent),
extent_committed_get(extent), extent_slab_get(extent));
@@ -819,7 +835,7 @@ chunk_split_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
&trail_elm_b))
goto label_error_c;
if (chunk_hooks->split(extent_addr_get(extent), size_a + size_b, size_a,
if (chunk_hooks->split(extent_base_get(extent), size_a + size_b, size_a,
size_b, extent_committed_get(extent), arena->ind))
goto label_error_d;
@@ -865,8 +881,8 @@ chunk_merge_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
rtree_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b;
chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
if (chunk_hooks->merge(extent_addr_get(a), extent_size_get(a),
extent_addr_get(b), extent_size_get(b), extent_committed_get(a),
if (chunk_hooks->merge(extent_base_get(a), extent_size_get(a),
extent_base_get(b), extent_size_get(b), extent_committed_get(a),
arena->ind))
return (true);

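In chunk_recycle() above, alloc_size = s2u(size + PAGE_CEILING(alignment) - PAGE) over-asks by just enough that any cached extent of that size must contain a page-based sub-extent with the requested alignment, and the usize-based comparison guards against size_t wrap-around for requests near SIZE_MAX. A worked example under assumed sizes; s2u_stub stands in for jemalloc's size-class rounding:

#include <stddef.h>
#include <stdio.h>

#define PAGE ((size_t)4096)
#define PAGE_CEILING(s) (((s) + PAGE - 1) & ~(PAGE - 1))

/* Stand-in for s2u(): rounds up to a size class; identity suffices here. */
static size_t
s2u_stub(size_t s)
{
    return (s);
}

int
main(void)
{
    size_t usize = (size_t)4 << 20; /* hypothetical 4 MiB huge class */
    size_t pad = PAGE;              /* large_pad */
    size_t alignment = 64;          /* CACHELINE */
    size_t size = usize + pad;
    size_t alloc_size = s2u_stub(size + PAGE_CEILING(alignment) - PAGE);

    /* The wrap-around guard: an absurdly large request would overflow
     * the additions above and come out smaller than usize. */
    if (alloc_size < usize) {
        puts("size_t wrap-around; refuse");
        return (1);
    }
    printf("search the heaps for >= %zu bytes\n", alloc_size);
    return (0);
}
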
src/ckh.c

@@ -99,7 +99,8 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
* Cycle through the cells in the bucket, starting at a random position.
* The randomness avoids worst-case search overhead as buckets fill up.
*/
offset = (unsigned)prng_lg_range(&ckh->prng_state, LG_CKH_BUCKET_CELLS);
offset = (unsigned)prng_lg_range(&ckh->prng_state, LG_CKH_BUCKET_CELLS,
false);
for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) +
((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))];
@@ -142,7 +143,7 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
* bucket.
*/
i = (unsigned)prng_lg_range(&ckh->prng_state,
LG_CKH_BUCKET_CELLS);
LG_CKH_BUCKET_CELLS, false);
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
assert(cell->key != NULL);

src/huge.c

@@ -9,7 +9,7 @@ huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero)
assert(usize == s2u(usize));
return (huge_palloc(tsdn, arena, usize, PAGE, zero));
return (huge_palloc(tsdn, arena, usize, CACHELINE, zero));
}
void *
@@ -46,11 +46,11 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
if (zero || (config_fill && unlikely(opt_zero))) {
if (!is_zeroed) {
memset(extent_addr_get(extent), 0,
extent_size_get(extent));
extent_usize_get(extent));
}
} else if (config_fill && unlikely(opt_junk_alloc)) {
memset(extent_addr_get(extent), JEMALLOC_ALLOC_JUNK,
extent_size_get(extent));
extent_usize_get(extent));
}
arena_decay_tick(tsdn, arena);
@@ -84,28 +84,28 @@ static bool
huge_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize)
{
arena_t *arena = extent_arena_get(extent);
size_t oldsize = extent_size_get(extent);
size_t oldusize = extent_usize_get(extent);
chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
size_t diff = oldsize - usize;
size_t diff = extent_size_get(extent) - (usize + large_pad);
assert(oldsize > usize);
assert(oldusize > usize);
/* Split excess pages. */
if (diff != 0) {
extent_t *trail = chunk_split_wrapper(tsdn, arena, &chunk_hooks,
extent, usize, diff);
extent, usize + large_pad, diff);
if (trail == NULL)
return (true);
if (config_fill && unlikely(opt_junk_free)) {
huge_dalloc_junk(tsdn, extent_addr_get(trail),
extent_size_get(trail));
extent_usize_get(trail));
}
arena_chunk_cache_dalloc(tsdn, arena, &chunk_hooks, trail);
}
arena_chunk_ralloc_huge_shrink(tsdn, arena, extent, oldsize);
arena_chunk_ralloc_huge_shrink(tsdn, arena, extent, oldusize);
return (false);
}
@@ -115,19 +115,19 @@ huge_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
bool zero)
{
arena_t *arena = extent_arena_get(extent);
size_t oldsize = extent_size_get(extent);
size_t oldusize = extent_usize_get(extent);
bool is_zeroed_trail = false;
chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
size_t trailsize = usize - oldsize;
size_t trailsize = usize - extent_usize_get(extent);
extent_t *trail;
if ((trail = arena_chunk_cache_alloc(tsdn, arena, &chunk_hooks,
extent_past_get(extent), trailsize, PAGE, &is_zeroed_trail)) ==
NULL) {
extent_past_get(extent), trailsize, CACHELINE, &is_zeroed_trail))
== NULL) {
bool commit = true;
if ((trail = chunk_alloc_wrapper(tsdn, arena, &chunk_hooks,
extent_past_get(extent), trailsize, PAGE, &is_zeroed_trail,
&commit, false)) == NULL)
extent_past_get(extent), trailsize, 0, CACHELINE,
&is_zeroed_trail, &commit, false)) == NULL)
return (true);
}
@@ -137,16 +137,32 @@ huge_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
}
if (zero || (config_fill && unlikely(opt_zero))) {
if (config_cache_oblivious) {
/*
* Zero the trailing bytes of the original allocation's
* last page, since they are in an indeterminate state.
* There will always be trailing bytes, because ptr's
* offset from the beginning of the run is a multiple of
* CACHELINE in [0 .. PAGE).
*/
void *zbase = (void *)
((uintptr_t)extent_addr_get(extent) + oldusize);
void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase +
PAGE));
size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
assert(nzero > 0);
memset(zbase, 0, nzero);
}
if (!is_zeroed_trail) {
memset((void *)((uintptr_t)extent_addr_get(extent) +
oldsize), 0, usize - oldsize);
oldusize), 0, usize - oldusize);
}
} else if (config_fill && unlikely(opt_junk_alloc)) {
memset((void *)((uintptr_t)extent_addr_get(extent) + oldsize),
JEMALLOC_ALLOC_JUNK, usize - oldsize);
memset((void *)((uintptr_t)extent_addr_get(extent) + oldusize),
JEMALLOC_ALLOC_JUNK, usize - oldusize);
}
arena_chunk_ralloc_huge_expand(tsdn, arena, extent, oldsize);
arena_chunk_ralloc_huge_expand(tsdn, arena, extent, oldusize);
return (false);
}
@@ -156,13 +172,13 @@ huge_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
size_t usize_max, bool zero)
{
assert(s2u(extent_size_get(extent)) == extent_size_get(extent));
assert(s2u(extent_usize_get(extent)) == extent_usize_get(extent));
/* The following should have been caught by callers. */
assert(usize_min > 0 && usize_max <= HUGE_MAXCLASS);
/* Both allocation sizes must be huge to avoid a move. */
assert(extent_size_get(extent) >= chunksize && usize_max >= chunksize);
assert(extent_usize_get(extent) >= chunksize && usize_max >= chunksize);
if (usize_max > extent_size_get(extent)) {
if (usize_max > extent_usize_get(extent)) {
/* Attempt to expand the allocation in-place. */
if (!huge_ralloc_no_move_expand(tsdn, extent, usize_max,
zero)) {
@@ -170,9 +186,9 @@ huge_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
return (false);
}
/* Try again, this time with usize_min. */
if (usize_min < usize_max && usize_min > extent_size_get(extent)
&& huge_ralloc_no_move_expand(tsdn, extent, usize_min,
zero)) {
if (usize_min < usize_max && usize_min >
extent_usize_get(extent) && huge_ralloc_no_move_expand(tsdn,
extent, usize_min, zero)) {
arena_decay_tick(tsdn, extent_arena_get(extent));
return (false);
}
@@ -182,14 +198,14 @@ huge_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
* Avoid moving the allocation if the existing chunk size accommodates
* the new size.
*/
if (extent_size_get(extent) >= usize_min && extent_size_get(extent) <=
if (extent_usize_get(extent) >= usize_min && extent_usize_get(extent) <=
usize_max) {
arena_decay_tick(tsdn, extent_arena_get(extent));
return (false);
}
/* Attempt to shrink the allocation in-place. */
if (extent_size_get(extent) > usize_max) {
if (extent_usize_get(extent) > usize_max) {
if (!huge_ralloc_no_move_shrink(tsdn, extent, usize_max)) {
arena_decay_tick(tsdn, extent_arena_get(extent));
return (false);
@@ -203,7 +219,7 @@ huge_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
size_t alignment, bool zero)
{
if (alignment <= PAGE)
if (alignment <= CACHELINE)
return (huge_malloc(tsdn, arena, usize, zero));
return (huge_palloc(tsdn, arena, usize, alignment, zero));
}
@@ -218,7 +234,7 @@ huge_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize,
/* The following should have been caught by callers. */
assert(usize > 0 && usize <= HUGE_MAXCLASS);
/* Both allocation sizes must be huge to avoid a move. */
assert(extent_size_get(extent) >= chunksize && usize >= chunksize);
assert(extent_usize_get(extent) >= chunksize && usize >= chunksize);
/* Try to avoid moving the allocation. */
if (!huge_ralloc_no_move(tsdn, extent, usize, usize, zero))
@@ -233,11 +249,11 @@ huge_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize,
if (ret == NULL)
return (NULL);
copysize = (usize < extent_size_get(extent)) ? usize :
extent_size_get(extent);
copysize = (usize < extent_usize_get(extent)) ? usize :
extent_usize_get(extent);
memcpy(ret, extent_addr_get(extent), copysize);
isdalloct(tsdn, extent, extent_addr_get(extent),
extent_size_get(extent), tcache, true);
extent_usize_get(extent), tcache, true);
return (ret);
}
@@ -252,7 +268,7 @@ huge_dalloc(tsdn_t *tsdn, extent_t *extent)
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
huge_dalloc_junk(tsdn, extent_addr_get(extent),
extent_size_get(extent));
extent_usize_get(extent));
arena_chunk_dalloc_huge(tsdn, extent_arena_get(extent), extent);
arena_decay_tick(tsdn, arena);
@@ -261,15 +277,15 @@ huge_dalloc(tsdn_t *tsdn, extent_t *extent)
size_t
huge_salloc(tsdn_t *tsdn, const extent_t *extent)
{
size_t size;
size_t usize;
arena_t *arena;
arena = extent_arena_get(extent);
malloc_mutex_lock(tsdn, &arena->huge_mtx);
size = extent_size_get(extent);
usize = extent_usize_get(extent);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
return (size);
return (usize);
}
prof_tctx_t *

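The new block in huge_ralloc_no_move_expand() zeroes the tail of the original allocation's last page before the trail is appended: with a cache-oblivious offset, the old usable region ends partway through a page, and the bytes after it are in an indeterminate state. A standalone check of the zbase/zpast arithmetic under assumed values:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE ((uintptr_t)4096)
#define PAGE_ADDR2BASE(a) ((void *)((uintptr_t)(a) & ~(PAGE - 1)))

int
main(void)
{
    /* Hypothetical: user address randomized 192 bytes into the mapping,
     * old usable size 2 MiB. */
    uintptr_t addr = (uintptr_t)0x7f0000000000ULL + 192;
    size_t oldusize = (size_t)2 << 20;

    uintptr_t zbase = addr + oldusize; /* first indeterminate byte */
    uintptr_t zpast = (uintptr_t)PAGE_ADDR2BASE((void *)(zbase + PAGE));
    size_t nzero = zpast - zbase;

    /* addr's page offset is a cacheline multiple in [0..PAGE), so
     * nzero always falls in (0..PAGE]: here, 4096 - 192 == 3904. */
    assert(nzero > 0 && nzero <= PAGE);
    printf("zero %zu trailing bytes before reusing the page\n", nzero);
    return (0);
}
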
src/prof.c

@@ -879,7 +879,7 @@ prof_sample_threshold_update(prof_tdata_t *tdata)
* pp 500
* (http://luc.devroye.org/rnbookindex.html)
*/
r = prng_lg_range(&tdata->prng_state, 53);
r = prng_lg_range(&tdata->prng_state, 53, false);
u = (double)r * (1.0/9007199254740992.0L);
tdata->bytes_until_sample = (uint64_t)(log(u) /
log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample))))

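For context on the call above: prof_sample_threshold_update() draws 53 random bits, maps them to u uniform in [0, 1), and inverts the geometric CDF with p = 1/2^lg_prof_sample to get the next sampling interval (Devroye's method, per the citation in the source); atomic is false because prng_state lives in per-thread data. A standalone sketch with assumed inputs (the default lg_prof_sample of 19 gives a mean interval of 512 KiB; compile with -lm):

#include <math.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    unsigned lg_prof_sample = 19;           /* jemalloc's default */
    uint64_t r = UINT64_C(123456789012345);  /* a 53-bit prng draw */
    double u = (double)r * (1.0 / 9007199254740992.0); /* r / 2^53 */

    /* Inverse-CDF of the geometric distribution with
     * p = 1/2^lg_prof_sample. */
    uint64_t bytes_until_sample = (uint64_t)(log(u) /
        log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample)))) + 1;

    printf("next sample after %llu bytes\n",
        (unsigned long long)bytes_until_sample);
    return (0);
}
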
src/tcache.c

@@ -128,7 +128,7 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
extent = iealloc(tsd_tsdn(tsd), ptr);
if (extent_arena_get(extent) == bin_arena) {
arena_chunk_t *chunk =
(arena_chunk_t *)extent_addr_get(extent);
(arena_chunk_t *)extent_base_get(extent);
size_t pageind = ((uintptr_t)ptr -
(uintptr_t)chunk) >> LG_PAGE;
arena_chunk_map_bits_t *bitselm =
@@ -214,7 +214,7 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
extent = iealloc(tsd_tsdn(tsd), ptr);
if (extent_arena_get(extent) == locked_arena) {
arena_chunk_t *chunk =
(arena_chunk_t *)extent_addr_get(extent);
(arena_chunk_t *)extent_base_get(extent);
arena_dalloc_large_junked_locked(tsd_tsdn(tsd),
locked_arena, chunk, extent, ptr);
} else {

test/unit/prng.c

@@ -1,33 +1,34 @@
#include "test/jemalloc_test.h"
TEST_BEGIN(test_prng_lg_range)
static void
test_prng_lg_range(bool atomic)
{
uint64_t sa, sb, ra, rb;
unsigned lg_range;
sa = 42;
ra = prng_lg_range(&sa, 64);
ra = prng_lg_range(&sa, 64, atomic);
sa = 42;
rb = prng_lg_range(&sa, 64);
rb = prng_lg_range(&sa, 64, atomic);
assert_u64_eq(ra, rb,
"Repeated generation should produce repeated results");
sb = 42;
rb = prng_lg_range(&sb, 64);
rb = prng_lg_range(&sb, 64, atomic);
assert_u64_eq(ra, rb,
"Equivalent generation should produce equivalent results");
sa = 42;
ra = prng_lg_range(&sa, 64);
rb = prng_lg_range(&sa, 64);
ra = prng_lg_range(&sa, 64, atomic);
rb = prng_lg_range(&sa, 64, atomic);
assert_u64_ne(ra, rb,
"Full-width results must not immediately repeat");
sa = 42;
ra = prng_lg_range(&sa, 64);
ra = prng_lg_range(&sa, 64, atomic);
for (lg_range = 63; lg_range > 0; lg_range--) {
sb = 42;
rb = prng_lg_range(&sb, lg_range);
rb = prng_lg_range(&sb, lg_range, atomic);
assert_u64_eq((rb & (UINT64_C(0xffffffffffffffff) << lg_range)),
0, "High order bits should be 0, lg_range=%u", lg_range);
assert_u64_eq(rb, (ra >> (64 - lg_range)),
@@ -35,9 +36,23 @@ TEST_BEGIN(test_prng_lg_range)
"lg_range=%u", lg_range);
}
}
TEST_BEGIN(test_prng_lg_range_nonatomic)
{
test_prng_lg_range(false);
}
TEST_END
TEST_BEGIN(test_prng_range)
TEST_BEGIN(test_prng_lg_range_atomic)
{
test_prng_lg_range(true);
}
TEST_END
static void
test_prng_range(bool atomic)
{
uint64_t range;
#define MAX_RANGE 10000000
@@ -50,12 +65,25 @@ TEST_BEGIN(test_prng_range)
s = range;
for (rep = 0; rep < NREPS; rep++) {
uint64_t r = prng_range(&s, range);
uint64_t r = prng_range(&s, range, atomic);
assert_u64_lt(r, range, "Out of range");
}
}
}
TEST_BEGIN(test_prng_range_nonatomic)
{
test_prng_range(false);
}
TEST_END
TEST_BEGIN(test_prng_range_atomic)
{
test_prng_range(true);
}
TEST_END
int
@@ -63,6 +91,8 @@ main(void)
{
return (test(
test_prng_lg_range,
test_prng_range));
test_prng_lg_range_nonatomic,
test_prng_lg_range_atomic,
test_prng_range_nonatomic,
test_prng_range_atomic));
}