Convert extent_t's usize to szind.
Rather than storing usize only for large (and prof-promoted) allocations, store the size class index for allocations that reside within the extent, such that the size class index is valid for all extents that contain extant allocations, and invalid otherwise (mainly to make debugging simpler).
commit e8921cf2eb
parent bda12bd925
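As a rough illustration of the change (a standalone toy sketch, not jemalloc's real size-class machinery; the table, the `toy_` names, and `TOY_NSIZES` are invented here), the extent now carries a small size-class index, and the usable size is recomputed from that index on demand, with a reserved index marking extents that hold no extant allocations:

```c
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Toy size-class table; jemalloc's real table is generated and much larger. */
static const size_t index2size_tab[] = {8, 16, 32, 48, 64, 80, 96, 112, 128};
#define TOY_NSIZES (sizeof(index2size_tab) / sizeof(index2size_tab[0]))

typedef unsigned szind_t;

typedef struct {
	size_t size;	/* Extent size in bytes (still stored directly). */
	szind_t szind;	/* Size class index; TOY_NSIZES means "invalid". */
} toy_extent_t;

static size_t
toy_extent_usize_get(const toy_extent_t *extent) {
	assert(extent->szind < TOY_NSIZES);	/* Never call when invalid. */
	return index2size_tab[extent->szind];	/* usize is derived, not stored. */
}

int
main(void) {
	toy_extent_t extent = {4096, 3};	/* Index 3 -> 48-byte regions. */
	printf("usize = %zu\n", toy_extent_usize_get(&extent));
	extent.szind = (szind_t)TOY_NSIZES;	/* Retired: no live allocations. */
	return 0;
}
```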
@@ -147,15 +147,15 @@ arena_dalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, tcache_t *tcache,
extent, ptr);
}
} else {
size_t usize = extent_usize_get(extent);
szind_t szind = extent_szind_get(extent);

if (likely(tcache != NULL) && usize <= tcache_maxclass) {
if (config_prof && unlikely(usize <= SMALL_MAXCLASS)) {
if (likely(tcache != NULL) && szind < nhbins) {
if (config_prof && unlikely(szind < NBINS)) {
arena_dalloc_promoted(tsdn, extent, ptr,
tcache, slow_path);
} else {
tcache_dalloc_large(tsdn_tsd(tsdn), tcache,
ptr, usize, slow_path);
ptr, szind, slow_path);
}
} else {
large_dalloc(tsdn, extent);
@@ -169,25 +169,25 @@ arena_sdalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t size,
assert(!tsdn_null(tsdn) || tcache == NULL);
assert(ptr != NULL);

szind_t szind = size2index(size);
if (likely(extent_slab_get(extent))) {
/* Small allocation. */
if (likely(tcache != NULL)) {
szind_t binind = size2index(size);
assert(binind == extent_slab_data_get(extent)->binind);
tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, binind,
assert(szind == extent_slab_data_get(extent)->binind);
tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind,
slow_path);
} else {
arena_dalloc_small(tsdn, extent_arena_get(extent),
extent, ptr);
}
} else {
if (likely(tcache != NULL) && size <= tcache_maxclass) {
if (config_prof && unlikely(size <= SMALL_MAXCLASS)) {
if (likely(tcache != NULL) && szind < nhbins) {
if (config_prof && unlikely(szind < NBINS)) {
arena_dalloc_promoted(tsdn, extent, ptr,
tcache, slow_path);
} else {
tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
size, slow_path);
szind, slow_path);
}
} else {
large_dalloc(tsdn, extent);
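The two deallocation paths above now branch on index comparisons rather than byte-size comparisons. A hedged sketch of that decision shape follows; `TOY_NBINS` and `TOY_NHBINS` are invented stand-ins for jemalloc's NBINS and nhbins, whose real values are configuration-dependent:

```c
#include <stdbool.h>

typedef unsigned szind_t;

/* Invented stand-ins; jemalloc's NBINS and nhbins depend on configuration. */
#define TOY_NBINS  36u	/* Number of small size classes. */
#define TOY_NHBINS 41u	/* Number of size classes the tcache caches. */

/* Same shape as the rewritten arena_dalloc() branches: "can the tcache take
 * this back?" and "was this a prof-promoted small allocation?" are now both
 * answered by comparing size-class indices. */
static bool
toy_dalloc_via_tcache(bool have_tcache, szind_t szind) {
	return have_tcache && szind < TOY_NHBINS;
}

static bool
toy_dalloc_was_promoted(bool config_prof, szind_t szind) {
	return config_prof && szind < TOY_NBINS;
}
```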
@@ -4,15 +4,15 @@
extern rtree_t extents_rtree;
extern const extent_hooks_t extent_hooks_default;

extent_t *extent_alloc(tsdn_t *tsdn, arena_t *arena);
void extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
extent_t *extent_alloc(tsdn_t *tsdn, arena_t *arena);
void extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent);

extent_hooks_t *extent_hooks_get(arena_t *arena);
extent_hooks_t *extent_hooks_set(arena_t *arena, extent_hooks_t *extent_hooks);
extent_hooks_t *extent_hooks_get(arena_t *arena);
extent_hooks_t *extent_hooks_set(arena_t *arena, extent_hooks_t *extent_hooks);

#ifdef JEMALLOC_JET
size_t extent_size_quantize_floor(size_t size);
size_t extent_size_quantize_ceil(size_t size);
size_t extent_size_quantize_floor(size_t size);
size_t extent_size_quantize_ceil(size_t size);
#endif

ph_proto(, extent_heap_, extent_heap_t, extent_t)
@@ -23,8 +23,8 @@ extent_state_t extents_state_get(const extents_t *extents);
size_t extents_npages_get(extents_t *extents);
extent_t *extents_alloc(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr,
size_t usize, size_t pad, size_t alignment, bool *zero, bool *commit,
bool slab);
size_t size, size_t pad, size_t alignment, bool slab, szind_t szind,
bool *zero, bool *commit);
void extents_dalloc(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent);
extent_t *extents_evict(tsdn_t *tsdn, arena_t *arena,
@@ -32,32 +32,32 @@ extent_t *extents_evict(tsdn_t *tsdn, arena_t *arena,
void extents_prefork(tsdn_t *tsdn, extents_t *extents);
void extents_postfork_parent(tsdn_t *tsdn, extents_t *extents);
void extents_postfork_child(tsdn_t *tsdn, extents_t *extents);
extent_t *extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
size_t alignment, bool *zero, bool *commit, bool slab);
void extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
bool extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena,
extent_t *extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit);
void extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
bool extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent);
void extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
void extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent);
bool extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
bool extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
size_t length);
bool extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
bool extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
size_t length);
bool extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena,
bool extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
size_t length);
bool extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
bool extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
size_t length);
extent_t *extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_t *extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
size_t usize_a, size_t size_b, size_t usize_b);
bool extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
szind_t szind_a, size_t size_b, szind_t szind_b);
bool extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b);

bool extent_boot(void);
bool extent_boot(void);

#endif /* JEMALLOC_INTERNAL_EXTENT_EXTERNS_H */
@@ -2,37 +2,38 @@
#define JEMALLOC_INTERNAL_EXTENT_INLINES_H

#ifndef JEMALLOC_ENABLE_INLINE
extent_t *extent_lookup(tsdn_t *tsdn, const void *ptr, bool dependent);
arena_t *extent_arena_get(const extent_t *extent);
void *extent_base_get(const extent_t *extent);
void *extent_addr_get(const extent_t *extent);
size_t extent_size_get(const extent_t *extent);
size_t extent_usize_get(const extent_t *extent);
void *extent_before_get(const extent_t *extent);
void *extent_last_get(const extent_t *extent);
void *extent_past_get(const extent_t *extent);
size_t extent_sn_get(const extent_t *extent);
extent_state_t extent_state_get(const extent_t *extent);
bool extent_zeroed_get(const extent_t *extent);
bool extent_committed_get(const extent_t *extent);
bool extent_slab_get(const extent_t *extent);
arena_slab_data_t *extent_slab_data_get(extent_t *extent);
const arena_slab_data_t *extent_slab_data_get_const(const extent_t *extent);
prof_tctx_t *extent_prof_tctx_get(const extent_t *extent);
void extent_arena_set(extent_t *extent, arena_t *arena);
void extent_addr_set(extent_t *extent, void *addr);
void extent_addr_randomize(tsdn_t *tsdn, extent_t *extent, size_t alignment);
void extent_size_set(extent_t *extent, size_t size);
void extent_usize_set(extent_t *extent, size_t usize);
void extent_sn_set(extent_t *extent, size_t sn);
void extent_state_set(extent_t *extent, extent_state_t state);
void extent_zeroed_set(extent_t *extent, bool zeroed);
void extent_committed_set(extent_t *extent, bool committed);
void extent_slab_set(extent_t *extent, bool slab);
void extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx);
void extent_init(extent_t *extent, arena_t *arena, void *addr,
size_t size, size_t usize, size_t sn, extent_state_t state, bool zeroed,
bool committed, bool slab);
extent_t *extent_lookup(tsdn_t *tsdn, const void *ptr, bool dependent);
arena_t *extent_arena_get(const extent_t *extent);
void *extent_base_get(const extent_t *extent);
void *extent_addr_get(const extent_t *extent);
size_t extent_size_get(const extent_t *extent);
szind_t extent_szind_get(const extent_t *extent);
size_t extent_usize_get(const extent_t *extent);
void *extent_before_get(const extent_t *extent);
void *extent_last_get(const extent_t *extent);
void *extent_past_get(const extent_t *extent);
size_t extent_sn_get(const extent_t *extent);
extent_state_t extent_state_get(const extent_t *extent);
bool extent_zeroed_get(const extent_t *extent);
bool extent_committed_get(const extent_t *extent);
bool extent_slab_get(const extent_t *extent);
arena_slab_data_t *extent_slab_data_get(extent_t *extent);
const arena_slab_data_t *extent_slab_data_get_const(const extent_t *extent);
prof_tctx_t *extent_prof_tctx_get(const extent_t *extent);
void extent_arena_set(extent_t *extent, arena_t *arena);
void extent_addr_set(extent_t *extent, void *addr);
void extent_addr_randomize(tsdn_t *tsdn, extent_t *extent, size_t alignment);
void extent_size_set(extent_t *extent, size_t size);
void extent_szind_set(extent_t *extent, szind_t szind);
void extent_sn_set(extent_t *extent, size_t sn);
void extent_state_set(extent_t *extent, extent_state_t state);
void extent_zeroed_set(extent_t *extent, bool zeroed);
void extent_committed_set(extent_t *extent, bool committed);
void extent_slab_set(extent_t *extent, bool slab);
void extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx);
void extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
bool slab, szind_t szind, size_t sn, extent_state_t state, bool zeroed,
bool committed);
void extent_list_init(extent_list_t *list);
extent_t *extent_list_first(const extent_list_t *list);
extent_t *extent_list_last(const extent_list_t *list);
@@ -40,9 +41,9 @@ void extent_list_append(extent_list_t *list, extent_t *extent);
void extent_list_replace(extent_list_t *list, extent_t *to_remove,
extent_t *to_insert);
void extent_list_remove(extent_list_t *list, extent_t *extent);
int extent_sn_comp(const extent_t *a, const extent_t *b);
int extent_ad_comp(const extent_t *a, const extent_t *b);
int extent_snad_comp(const extent_t *a, const extent_t *b);
int extent_sn_comp(const extent_t *a, const extent_t *b);
int extent_ad_comp(const extent_t *a, const extent_t *b);
int extent_snad_comp(const extent_t *a, const extent_t *b);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_EXTENT_C_))
@@ -79,10 +80,15 @@ extent_size_get(const extent_t *extent) {
return extent->e_size;
}

JEMALLOC_INLINE szind_t
extent_szind_get(const extent_t *extent) {
assert(extent->e_szind < NSIZES); /* Never call when "invalid". */
return extent->e_szind;
}

JEMALLOC_INLINE size_t
extent_usize_get(const extent_t *extent) {
assert(!extent->e_slab);
return extent->e_usize;
return index2size(extent_szind_get(extent));
}

JEMALLOC_INLINE void *
@@ -180,8 +186,9 @@ extent_size_set(extent_t *extent, size_t size) {
}

JEMALLOC_INLINE void
extent_usize_set(extent_t *extent, size_t usize) {
extent->e_usize = usize;
extent_szind_set(extent_t *extent, szind_t szind) {
assert(szind <= NSIZES); /* NSIZES means "invalid". */
extent->e_szind = szind;
}

JEMALLOC_INLINE void
@@ -216,19 +223,19 @@ extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx) {

JEMALLOC_INLINE void
extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
size_t usize, size_t sn, extent_state_t state, bool zeroed, bool committed,
bool slab) {
bool slab, szind_t szind, size_t sn, extent_state_t state, bool zeroed,
bool committed) {
assert(addr == PAGE_ADDR2BASE(addr) || !slab);

extent_arena_set(extent, arena);
extent_addr_set(extent, addr);
extent_size_set(extent, size);
extent_usize_set(extent, usize);
extent_slab_set(extent, slab);
extent_szind_set(extent, szind);
extent_sn_set(extent, sn);
extent_state_set(extent, state);
extent_zeroed_set(extent, zeroed);
extent_committed_set(extent, committed);
extent_slab_set(extent, slab);
if (config_prof) {
extent_prof_tctx_set(extent, NULL);
}
@@ -20,10 +20,12 @@ struct extent_s {
size_t e_size;

/*
* Usable size, typically smaller than extent size due to large_pad or
* Usable size class index for allocations residing in this extent,
* regardless of whether the extent is a slab. Extent size and usable
* size often differ even for non-slabs, either due to large_pad or
* promotion of sampled small regions.
*/
size_t e_usize;
szind_t e_szind;

/*
* Serial number (potentially non-unique).
@@ -536,8 +536,6 @@ void jemalloc_postfork_child(void);
#include "jemalloc/internal/witness_inlines.h"
#include "jemalloc/internal/mutex_inlines.h"
#include "jemalloc/internal/rtree_inlines.h"
#include "jemalloc/internal/extent_inlines.h"
#include "jemalloc/internal/base_inlines.h"

#ifndef JEMALLOC_ENABLE_INLINE
pszind_t psz2ind(size_t psz);
@@ -565,7 +563,6 @@ ticker_t *decay_ticker_get(tsd_t *tsd, unsigned ind);
malloc_cpuid_t malloc_getcpu(void);
unsigned percpu_arena_choose(void);
unsigned percpu_arena_ind_limit(void);

#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
@@ -882,8 +879,6 @@ percpu_arena_ind_limit(void) {
}
}

JEMALLOC_INLINE arena_tdata_t *
arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing) {
arena_tdata_t *tdata;
@@ -938,6 +933,8 @@ decay_ticker_get(tsd_t *tsd, unsigned ind) {
}
#endif

#include "jemalloc/internal/extent_inlines.h"
#include "jemalloc/internal/base_inlines.h"
#include "jemalloc/internal/bitmap_inlines.h"
/*
* Include portions of arena code interleaved with tcache code in order to
@@ -191,8 +191,9 @@ extent_snad_comp
extent_split_wrapper
extent_state_get
extent_state_set
extent_szind_get
extent_szind_set
extent_usize_get
extent_usize_set
extent_zeroed_get
extent_zeroed_set
extents_alloc
@@ -15,7 +15,7 @@ void *tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
void tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr,
szind_t binind, bool slow_path);
void tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr,
size_t size, bool slow_path);
szind_t binind, bool slow_path);
tcache_t *tcaches_get(tsd_t *tsd, unsigned ind);
#endif

@@ -271,19 +271,16 @@ tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
}

JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size,
tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
bool slow_path) {
szind_t binind;
tcache_bin_t *tbin;
tcache_bin_info_t *tbin_info;

assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SMALL_MAXCLASS);
assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass);

binind = size2index(size);

if (slow_path && config_fill && unlikely(opt_junk_free)) {
large_dalloc_junk(ptr, size);
large_dalloc_junk(ptr, index2size(binind));
}

tbin = &tcache->tbins[binind];
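Callers of tcache_dalloc_large() now pass a size-class index instead of a byte size, and the byte size is recovered with index2size() only where it is needed (e.g. for junk filling, as in the hunk above). A hedged sketch of that index/size round trip with a toy table follows; the `toy_` names are invented, and jemalloc computes size2index() without a linear scan:

```c
#include <assert.h>
#include <stddef.h>

static const size_t toy_index2size[] = {8, 16, 32, 48, 64, 80, 96, 112, 128};
#define TOY_NSIZES (sizeof(toy_index2size) / sizeof(toy_index2size[0]))

/* Smallest class that fits the request; the round-trip property
 * toy_index2size[toy_size2index(sz)] >= sz is what the new
 * tcache_dalloc_large(..., binind, ...) signature relies on. */
static unsigned
toy_size2index(size_t size) {
	for (unsigned i = 0; i < TOY_NSIZES; i++) {
		if (toy_index2size[i] >= size) {
			return i;
		}
	}
	assert(0 && "request larger than the largest toy class");
	return (unsigned)TOY_NSIZES;
}
```

A caller that previously passed `size` would now pass `toy_size2index(size)`, mirroring the `size2index(size)` conversions visible in the hunks above.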
src/arena.c
@@ -449,25 +449,25 @@ arena_large_ralloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t oldusize,
extent_t *
arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
size_t alignment, bool *zero) {
extent_t *extent;
extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;

witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);

szind_t szind = size2index(usize);
size_t mapped_add;
bool commit = true;
extent = extents_alloc(tsdn, arena, &extent_hooks,
&arena->extents_dirty, NULL, usize, large_pad, alignment, zero,
&commit, false);
extent_t *extent = extents_alloc(tsdn, arena, &extent_hooks,
&arena->extents_dirty, NULL, usize, large_pad, alignment, false,
szind, zero, &commit);
if (extent == NULL) {
extent = extents_alloc(tsdn, arena, &extent_hooks,
&arena->extents_muzzy, NULL, usize, large_pad, alignment,
zero, &commit, false);
false, szind, zero, &commit);
}
size_t size = usize + large_pad;
if (extent == NULL) {
extent = extent_alloc_wrapper(tsdn, arena, &extent_hooks, NULL,
usize, large_pad, alignment, zero, &commit, false);
usize, large_pad, alignment, false, szind, zero, &commit);
if (config_stats) {
/*
* extent may be NULL on OOM, but in that case
@@ -1133,7 +1133,8 @@ arena_destroy(tsd_t *tsd, arena_t *arena) {

static extent_t *
arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, const arena_bin_info_t *bin_info) {
extent_hooks_t **r_extent_hooks, const arena_bin_info_t *bin_info,
szind_t szind) {
extent_t *slab;
bool zero, commit;

@@ -1142,7 +1143,7 @@ arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena,
zero = false;
commit = true;
slab = extent_alloc_wrapper(tsdn, arena, r_extent_hooks, NULL,
bin_info->slab_size, 0, PAGE, &zero, &commit, true);
bin_info->slab_size, 0, PAGE, true, szind, &zero, &commit);

if (config_stats && slab != NULL) {
arena_stats_mapped_add(tsdn, &arena->stats,
@@ -1158,19 +1159,20 @@ arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind,
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);

extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
szind_t szind = size2index(bin_info->reg_size);
bool zero = false;
bool commit = true;
extent_t *slab = extents_alloc(tsdn, arena, &extent_hooks,
&arena->extents_dirty, NULL, bin_info->slab_size, 0, PAGE, &zero,
&commit, true);
&arena->extents_dirty, NULL, bin_info->slab_size, 0, PAGE, true,
binind, &zero, &commit);
if (slab == NULL) {
slab = extents_alloc(tsdn, arena, &extent_hooks,
&arena->extents_muzzy, NULL, bin_info->slab_size, 0, PAGE,
&zero, &commit, true);
true, binind, &zero, &commit);
}
if (slab == NULL) {
slab = arena_slab_alloc_hard(tsdn, arena, &extent_hooks,
bin_info);
bin_info, szind);
if (slab == NULL) {
return NULL;
}
@@ -1467,7 +1469,7 @@ arena_prof_promote(tsdn_t *tsdn, extent_t *extent, const void *ptr,
assert(isalloc(tsdn, extent, ptr) == LARGE_MINCLASS);
assert(usize <= SMALL_MAXCLASS);

extent_usize_set(extent, usize);
extent_szind_set(extent, size2index(usize));

prof_accum_cancel(tsdn, &arena->prof_accum, usize);

@@ -1479,7 +1481,7 @@ arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) {
cassert(config_prof);
assert(ptr != NULL);

extent_usize_set(extent, LARGE_MINCLASS);
extent_szind_set(extent, NBINS);

assert(isalloc(tsdn, extent, ptr) == LARGE_MINCLASS);

@@ -1496,8 +1498,8 @@ arena_dalloc_promoted(tsdn_t *tsdn, extent_t *extent, void *ptr,

usize = arena_prof_demote(tsdn, extent, ptr);
if (usize <= tcache_maxclass) {
tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, usize,
slow_path);
tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
size2index(usize), slow_path);
} else {
large_dalloc(tsdn, extent);
}
@@ -87,8 +87,8 @@ base_extent_init(size_t *extent_sn_next, extent_t *extent, void *addr,
sn = *extent_sn_next;
(*extent_sn_next)++;

extent_init(extent, NULL, addr, size, 0, sn, extent_state_active, true,
true, false);
extent_init(extent, NULL, addr, size, false, NSIZES, sn,
extent_state_active, true, true);
}

static void *
@@ -104,8 +104,9 @@ base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size,
ret = (void *)((uintptr_t)extent_addr_get(extent) + *gap_size);
assert(extent_size_get(extent) >= *gap_size + size);
extent_init(extent, NULL, (void *)((uintptr_t)extent_addr_get(extent) +
*gap_size + size), extent_size_get(extent) - *gap_size - size, 0,
extent_sn_get(extent), extent_state_active, true, true, false);
*gap_size + size), extent_size_get(extent) - *gap_size - size,
false, NSIZES, extent_sn_get(extent), extent_state_active, true,
true);
return ret;
}
src/extent.c
@@ -71,8 +71,8 @@ static size_t highpages;
static void extent_deregister(tsdn_t *tsdn, extent_t *extent);
static extent_t *extent_recycle(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr,
size_t usize, size_t pad, size_t alignment, bool *zero, bool *commit,
bool slab);
size_t usize, size_t pad, size_t alignment, bool slab, szind_t szind,
bool *zero, bool *commit);
static extent_t *extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
extent_t *extent, bool *coalesced);
@@ -299,14 +299,14 @@ extent_try_delayed_coalesce(tsdn_t *tsdn, arena_t *arena,

extent_t *
extents_alloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
extents_t *extents, void *new_addr, size_t usize, size_t pad,
size_t alignment, bool *zero, bool *commit, bool slab) {
assert(usize + pad != 0);
extents_t *extents, void *new_addr, size_t size, size_t pad,
size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
assert(size + pad != 0);
assert(alignment != 0);
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);

return extent_recycle(tsdn, arena, r_extent_hooks, extents, new_addr,
usize, pad, alignment, zero, commit, slab);
size, pad, alignment, slab, szind, zero, commit);
}

void
@@ -615,8 +615,8 @@ extent_deregister(tsdn_t *tsdn, extent_t *extent) {
static extent_t *
extent_recycle_extract(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
bool locked, void *new_addr, size_t usize, size_t pad, size_t alignment,
bool *zero, bool *commit) {
bool locked, void *new_addr, size_t size, size_t pad, size_t alignment,
bool slab, bool *zero, bool *commit) {
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, locked ? 1 : 0);
if (locked) {
malloc_mutex_assert_owner(tsdn, &extents->mtx);
@@ -639,10 +639,10 @@ extent_recycle_extract(tsdn_t *tsdn, arena_t *arena,
assert(alignment <= PAGE);
}

size_t size = usize + pad;
size_t alloc_size = size + PAGE_CEILING(alignment) - PAGE;
size_t esize = size + pad;
size_t alloc_size = esize + PAGE_CEILING(alignment) - PAGE;
/* Beware size_t wrap-around. */
if (alloc_size < usize) {
if (alloc_size < esize) {
return NULL;
}
if (!locked) {
@@ -661,7 +661,7 @@ extent_recycle_extract(tsdn_t *tsdn, arena_t *arena,
if (extent != NULL) {
assert(extent_base_get(extent) == new_addr);
if (extent_arena_get(extent) != arena ||
extent_size_get(extent) < size ||
extent_size_get(extent) < esize ||
extent_state_get(extent) !=
extents_state_get(extents)) {
extent = NULL;
@@ -700,21 +700,20 @@ extent_recycle_extract(tsdn_t *tsdn, arena_t *arena,
static extent_t *
extent_recycle_split(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
void *new_addr, size_t usize, size_t pad, size_t alignment,
extent_t *extent) {
size_t size = usize + pad;
void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
szind_t szind, extent_t *extent) {
size_t esize = size + pad;
size_t leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(extent),
PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(extent);
assert(new_addr == NULL || leadsize == 0);
assert(extent_size_get(extent) >= leadsize + size);
size_t trailsize = extent_size_get(extent) - leadsize - size;
assert(extent_size_get(extent) >= leadsize + esize);
size_t trailsize = extent_size_get(extent) - leadsize - esize;

/* Split the lead. */
if (leadsize != 0) {
extent_t *lead = extent;
extent = extent_split_wrapper(tsdn, arena, r_extent_hooks,
lead, leadsize, leadsize, size + trailsize, usize +
trailsize);
lead, leadsize, NSIZES, esize + trailsize, szind);
if (extent == NULL) {
extent_deregister(tsdn, lead);
extents_leak(tsdn, arena, r_extent_hooks, extents,
@@ -727,7 +726,7 @@ extent_recycle_split(tsdn_t *tsdn, arena_t *arena,
/* Split the trail. */
if (trailsize != 0) {
extent_t *trail = extent_split_wrapper(tsdn, arena,
r_extent_hooks, extent, size, usize, trailsize, trailsize);
r_extent_hooks, extent, esize, szind, trailsize, NSIZES);
if (trail == NULL) {
extent_deregister(tsdn, extent);
extents_leak(tsdn, arena, r_extent_hooks, extents,
@@ -737,10 +736,10 @@ extent_recycle_split(tsdn_t *tsdn, arena_t *arena,
extent_deactivate(tsdn, arena, extents, trail, false);
} else if (leadsize == 0) {
/*
* Splitting causes usize to be set as a side effect, but no
* Splitting causes szind to be set as a side effect, but no
* splitting occurred.
*/
extent_usize_set(extent, usize);
extent_szind_set(extent, szind);
}

return extent;
@@ -748,24 +747,25 @@ extent_recycle_split(tsdn_t *tsdn, arena_t *arena,

static extent_t *
extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
extents_t *extents, void *new_addr, size_t usize, size_t pad,
size_t alignment, bool *zero, bool *commit, bool slab) {
extents_t *extents, void *new_addr, size_t size, size_t pad,
size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
assert(new_addr == NULL || !slab);
assert(pad == 0 || !slab);
assert(!*zero || !slab);

rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

extent_t *extent = extent_recycle_extract(tsdn, arena, r_extent_hooks,
rtree_ctx, extents, false, new_addr, usize, pad, alignment, zero,
commit);
rtree_ctx, extents, false, new_addr, size, pad, alignment, slab,
zero, commit);
if (extent == NULL) {
return NULL;
}

extent = extent_recycle_split(tsdn, arena, r_extent_hooks, rtree_ctx,
extents, new_addr, usize, pad, alignment, extent);
extents, new_addr, size, pad, alignment, slab, szind, extent);
if (extent == NULL) {
return NULL;
}
@@ -790,18 +790,15 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
}

if (*zero) {
void *addr = extent_base_get(extent);
size_t size = extent_size_get(extent);
if (!extent_zeroed_get(extent)) {
if (pages_purge_forced(extent_base_get(extent),
extent_size_get(extent))) {
memset(extent_addr_get(extent), 0,
extent_usize_get(extent));
if (pages_purge_forced(addr, size)) {
memset(addr, 0, size);
}
} else if (config_debug) {
size_t i;
size_t *p = (size_t *)(uintptr_t)
extent_addr_get(extent);

for (i = 0; i < usize / sizeof(size_t); i++) {
size_t *p = (size_t *)(uintptr_t)addr;
for (size_t i = 0; i < size / sizeof(size_t); i++) {
assert(p[i] == 0);
}
}
@@ -882,12 +879,10 @@ extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
*/
static extent_t *
extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
size_t alignment, bool *zero, bool *commit, bool slab) {
extent_t *extent;
void *ptr;
size_t size, alloc_size, alloc_size_min, leadsize, trailsize;
bool zeroed, committed;
extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
assert(pad == 0 || !slab);
assert(!*zero || !slab);

/*
* Check whether the next extent size in the series would be large
@@ -895,37 +890,37 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
* series of unsatisfiable allocation requests doesn't cause unused
* extent creation as a side effect.
*/
size = usize + pad;
alloc_size = pind2sz(atomic_read_u(&arena->extent_grow_next));
alloc_size_min = size + PAGE_CEILING(alignment) - PAGE;
size_t esize = size + pad;
size_t alloc_size = pind2sz(atomic_read_u(&arena->extent_grow_next));
size_t alloc_size_min = esize + PAGE_CEILING(alignment) - PAGE;
/* Beware size_t wrap-around. */
if (alloc_size_min < usize) {
if (alloc_size_min < esize) {
return NULL;
}
if (alloc_size < alloc_size_min) {
return NULL;
}
extent = extent_alloc(tsdn, arena);
extent_t *extent = extent_alloc(tsdn, arena);
if (extent == NULL) {
return NULL;
}
zeroed = false;
committed = false;
ptr = extent_alloc_core(tsdn, arena, new_addr, alloc_size, PAGE,
bool zeroed = false;
bool committed = false;
void *ptr = extent_alloc_core(tsdn, arena, new_addr, alloc_size, PAGE,
&zeroed, &committed, arena->dss_prec);
extent_init(extent, arena, ptr, alloc_size, alloc_size,
extent_init(extent, arena, ptr, alloc_size, false, NSIZES,
arena_extent_sn_next(arena), extent_state_active, zeroed,
committed, false);
committed);
if (ptr == NULL || extent_register_no_gdump_add(tsdn, extent)) {
extent_dalloc(tsdn, arena, extent);
return NULL;
}

leadsize = ALIGNMENT_CEILING((uintptr_t)ptr, PAGE_CEILING(alignment)) -
(uintptr_t)ptr;
size_t leadsize = ALIGNMENT_CEILING((uintptr_t)ptr,
PAGE_CEILING(alignment)) - (uintptr_t)ptr;
assert(new_addr == NULL || leadsize == 0);
assert(alloc_size >= leadsize + size);
trailsize = alloc_size - leadsize - size;
assert(alloc_size >= leadsize + esize);
size_t trailsize = alloc_size - leadsize - esize;
if (extent_zeroed_get(extent)) {
*zero = true;
}
@@ -937,7 +932,7 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
if (leadsize != 0) {
extent_t *lead = extent;
extent = extent_split_wrapper(tsdn, arena, r_extent_hooks, lead,
leadsize, leadsize, size + trailsize, usize + trailsize);
leadsize, NSIZES, esize + trailsize, szind);
if (extent == NULL) {
extent_deregister(tsdn, lead);
extents_leak(tsdn, arena, r_extent_hooks, false, lead);
@@ -950,7 +945,7 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
/* Split the trail. */
if (trailsize != 0) {
extent_t *trail = extent_split_wrapper(tsdn, arena,
r_extent_hooks, extent, size, usize, trailsize, trailsize);
r_extent_hooks, extent, esize, szind, trailsize, NSIZES);
if (trail == NULL) {
extent_deregister(tsdn, extent);
extents_leak(tsdn, arena, r_extent_hooks,
@@ -961,10 +956,10 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
&arena->extents_retained, trail);
} else if (leadsize == 0) {
/*
* Splitting causes usize to be set as a side effect, but no
* Splitting causes szind to be set as a side effect, but no
* splitting occurred.
*/
extent_usize_set(extent, usize);
extent_szind_set(extent, szind);
}

if (*commit && !extent_committed_get(extent)) {
@@ -993,10 +988,10 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
extent_interior_register(tsdn, rtree_ctx, extent);
}
if (*zero && !extent_zeroed_get(extent)) {
if (pages_purge_forced(extent_base_get(extent),
extent_size_get(extent))) {
memset(extent_addr_get(extent), 0,
extent_usize_get(extent));
void *addr = extent_base_get(extent);
size_t size = extent_size_get(extent);
if (pages_purge_forced(addr, size)) {
memset(addr, 0, size);
}
}
/*
@@ -1019,16 +1014,16 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,

static extent_t *
extent_alloc_retained(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
size_t alignment, bool *zero, bool *commit, bool slab) {
extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
extent_t *extent;

assert(usize != 0);
assert(size != 0);
assert(alignment != 0);

extent = extent_recycle(tsdn, arena, r_extent_hooks,
&arena->extents_retained, new_addr, usize, pad, alignment, zero,
commit, slab);
&arena->extents_retained, new_addr, size, pad, alignment, slab,
szind, zero, commit);
if (extent != NULL) {
if (config_prof) {
extent_gdump_add(tsdn, extent);
@@ -1036,7 +1031,7 @@ extent_alloc_retained(tsdn_t *tsdn, arena_t *arena,
}
if (!config_munmap && extent == NULL) {
extent = extent_grow_retained(tsdn, arena, r_extent_hooks,
new_addr, usize, pad, alignment, zero, commit, slab);
new_addr, size, pad, alignment, slab, szind, zero, commit);
}

return extent;
@@ -1044,32 +1039,28 @@ extent_alloc_retained(tsdn_t *tsdn, arena_t *arena,

static extent_t *
extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
size_t alignment, bool *zero, bool *commit, bool slab) {
extent_t *extent;
size_t size;
void *addr;

size = usize + pad;
extent = extent_alloc(tsdn, arena);
extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
size_t esize = size + pad;
extent_t *extent = extent_alloc(tsdn, arena);
if (extent == NULL) {
return NULL;
}
void *addr;
if (*r_extent_hooks == &extent_hooks_default) {
/* Call directly to propagate tsdn. */
addr = extent_alloc_default_impl(tsdn, arena, new_addr, size,
addr = extent_alloc_default_impl(tsdn, arena, new_addr, esize,
alignment, zero, commit);
} else {
addr = (*r_extent_hooks)->alloc(*r_extent_hooks, new_addr, size,
alignment, zero, commit, arena_ind_get(arena));
addr = (*r_extent_hooks)->alloc(*r_extent_hooks, new_addr,
esize, alignment, zero, commit, arena_ind_get(arena));
}
if (addr == NULL) {
extent_dalloc(tsdn, arena, extent);
return NULL;
}
extent_init(extent, arena, addr, size, usize,
arena_extent_sn_next(arena), extent_state_active, zero, commit,
slab);
extent_init(extent, arena, addr, esize, slab, szind,
arena_extent_sn_next(arena), extent_state_active, zero, commit);
if (pad != 0) {
extent_addr_randomize(tsdn, extent, alignment);
}
@@ -1084,17 +1075,17 @@ extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,

extent_t *
extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
size_t alignment, bool *zero, bool *commit, bool slab) {
extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);

extent_hooks_assure_initialized(arena, r_extent_hooks);

extent_t *extent = extent_alloc_retained(tsdn, arena, r_extent_hooks,
new_addr, usize, pad, alignment, zero, commit, slab);
new_addr, size, pad, alignment, slab, szind, zero, commit);
if (extent == NULL) {
extent = extent_alloc_wrapper_hard(tsdn, arena, r_extent_hooks,
new_addr, usize, pad, alignment, zero, commit, slab);
new_addr, size, pad, alignment, slab, szind, zero, commit);
}

return extent;
@@ -1232,7 +1223,7 @@ extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
malloc_mutex_lock(tsdn, &extents->mtx);
extent_hooks_assure_initialized(arena, r_extent_hooks);

extent_usize_set(extent, 0);
extent_szind_set(extent, NSIZES);
if (extent_slab_get(extent)) {
extent_interior_deregister(tsdn, rtree_ctx, extent);
extent_slab_set(extent, false);
@@ -1474,7 +1465,7 @@ extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
extent_t *
extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
size_t usize_a, size_t size_b, size_t usize_b) {
szind_t szind_a, size_t size_b, szind_t szind_b) {
assert(extent_size_get(extent) == size_a + size_b);
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);

@@ -1498,9 +1489,9 @@ extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_t lead;

extent_init(&lead, arena, extent_addr_get(extent), size_a,
usize_a, extent_sn_get(extent), extent_state_get(extent),
extent_zeroed_get(extent), extent_committed_get(extent),
extent_slab_get(extent));
extent_slab_get(extent), szind_a, extent_sn_get(extent),
extent_state_get(extent), extent_zeroed_get(extent),
extent_committed_get(extent));

if (extent_rtree_acquire(tsdn, rtree_ctx, &lead, false, true,
&lead_elm_a, &lead_elm_b)) {
@@ -1509,9 +1500,9 @@ extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
}

extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) +
size_a), size_b, usize_b, extent_sn_get(extent),
extent_state_get(extent), extent_zeroed_get(extent),
extent_committed_get(extent), extent_slab_get(extent));
size_a), size_b, extent_slab_get(extent), szind_b,
extent_sn_get(extent), extent_state_get(extent),
extent_zeroed_get(extent), extent_committed_get(extent));
if (extent_rtree_acquire(tsdn, rtree_ctx, trail, false, true,
&trail_elm_a, &trail_elm_b)) {
goto label_error_c;
@@ -1524,7 +1515,7 @@ extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
}

extent_size_set(extent, size_a);
extent_usize_set(extent, usize_a);
extent_szind_set(extent, szind_a);

extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent);
extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail);
@@ -1617,7 +1608,7 @@ extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
}

extent_size_set(a, extent_size_get(a) + extent_size_get(b));
extent_usize_set(a, extent_usize_get(a) + extent_usize_get(b));
extent_szind_set(a, NSIZES);
extent_sn_set(a, (extent_sn_get(a) < extent_sn_get(b)) ?
extent_sn_get(a) : extent_sn_get(b));
extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b));
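The split and merge wrappers above now carry size-class indices instead of usable sizes, with NSIZES acting as the "no extant allocations" sentinel. A hedged standalone sketch of that contract follows; the `toy_` types are invented, and the real wrappers also update the rtree and invoke the extent hooks:

```c
#include <assert.h>
#include <stddef.h>

typedef unsigned szind_t;
#define TOY_NSIZES 9u	/* Toy class count; index TOY_NSIZES means "invalid". */

typedef struct {
	size_t size;
	szind_t szind;
} toy_extent_t;

/* Mirrors the new extent_split_wrapper() contract: each half receives its own
 * index, and in the hunks above callers pass NSIZES for whichever half holds
 * no allocations (here hard-coded for the trimmed trail). */
static void
toy_split(toy_extent_t *live, toy_extent_t *trail, size_t size_a,
    szind_t szind_a) {
	assert(size_a <= live->size);
	trail->size = live->size - size_a;
	trail->szind = (szind_t)TOY_NSIZES;
	live->size = size_a;
	live->szind = szind_a;
}

/* Mirrors extent_merge_wrapper(): a merged extent cannot describe a single
 * size class, so its index is simply invalidated. */
static void
toy_merge(toy_extent_t *a, const toy_extent_t *b) {
	a->size += b->size;
	a->szind = (szind_t)TOY_NSIZES;
}
```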
@@ -139,9 +139,9 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
(uintptr_t)gap_addr_page;
if (gap_size_page != 0) {
extent_init(gap, arena, gap_addr_page,
gap_size_page, gap_size_page,
gap_size_page, false, NSIZES,
arena_extent_sn_next(arena),
extent_state_active, false, true, false);
extent_state_active, false, true);
}
/*
* Compute the address just past the end of the desired
@@ -189,8 +189,8 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
extent_t extent;

extent_init(&extent, arena, ret, size,
size, 0, extent_state_active, false,
true, false);
size, false, NSIZES,
extent_state_active, false, true);
if (extent_purge_forced_wrapper(tsdn,
arena, &extent_hooks, &extent, 0,
size)) {
src/large.c
@@ -114,15 +114,15 @@ large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) {
/* Split excess pages. */
if (diff != 0) {
extent_t *trail = extent_split_wrapper(tsdn, arena,
&extent_hooks, extent, usize + large_pad, usize, diff,
diff);
&extent_hooks, extent, usize + large_pad, size2index(usize),
diff, NSIZES);
if (trail == NULL) {
return true;
}

if (config_fill && unlikely(opt_junk_free)) {
large_dalloc_maybe_junk(extent_addr_get(trail),
extent_usize_get(trail));
extent_size_get(trail));
}

arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, trail);
@@ -139,7 +139,7 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
arena_t *arena = extent_arena_get(extent);
size_t oldusize = extent_usize_get(extent);
extent_hooks_t *extent_hooks = extent_hooks_get(arena);
size_t trailsize = usize - extent_usize_get(extent);
size_t trailsize = usize - oldusize;

if (extent_hooks->merge == NULL) {
return true;
@@ -160,17 +160,17 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
bool new_mapping;
if ((trail = extents_alloc(tsdn, arena, &extent_hooks,
&arena->extents_dirty, extent_past_get(extent), trailsize, 0,
CACHELINE, &is_zeroed_trail, &commit, false)) != NULL
CACHELINE, false, NSIZES, &is_zeroed_trail, &commit)) != NULL
|| (trail = extents_alloc(tsdn, arena, &extent_hooks,
&arena->extents_muzzy, extent_past_get(extent), trailsize, 0,
CACHELINE, &is_zeroed_trail, &commit, false)) != NULL) {
CACHELINE, false, NSIZES, &is_zeroed_trail, &commit)) != NULL) {
if (config_stats) {
new_mapping = false;
}
} else {
if ((trail = extent_alloc_wrapper(tsdn, arena, &extent_hooks,
extent_past_get(extent), trailsize, 0, CACHELINE,
&is_zeroed_trail, &commit, false)) == NULL) {
extent_past_get(extent), trailsize, 0, CACHELINE, false,
NSIZES, &is_zeroed_trail, &commit)) == NULL) {
return true;
}
if (config_stats) {
@@ -182,6 +182,7 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
extent_dalloc_wrapper(tsdn, arena, &extent_hooks, trail);
return true;
}
extent_szind_set(extent, size2index(usize));

if (config_stats && new_mapping) {
arena_stats_mapped_add(tsdn, &arena->stats, trailsize);
@@ -218,14 +219,14 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
bool
large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
size_t usize_max, bool zero) {
assert(s2u(extent_usize_get(extent)) == extent_usize_get(extent));
size_t oldusize = extent_usize_get(extent);

/* The following should have been caught by callers. */
assert(usize_min > 0 && usize_max <= LARGE_MAXCLASS);
/* Both allocation sizes must be large to avoid a move. */
assert(extent_usize_get(extent) >= LARGE_MINCLASS && usize_max >=
LARGE_MINCLASS);
assert(oldusize >= LARGE_MINCLASS && usize_max >= LARGE_MINCLASS);

if (usize_max > extent_usize_get(extent)) {
if (usize_max > oldusize) {
/* Attempt to expand the allocation in-place. */
if (!large_ralloc_no_move_expand(tsdn, extent, usize_max,
zero)) {
@@ -233,8 +234,7 @@ large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
return false;
}
/* Try again, this time with usize_min. */
if (usize_min < usize_max && usize_min >
extent_usize_get(extent) &&
if (usize_min < usize_max && usize_min > oldusize &&
large_ralloc_no_move_expand(tsdn, extent, usize_min,
zero)) {
arena_decay_tick(tsdn, extent_arena_get(extent));
@@ -246,14 +246,13 @@ large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
* Avoid moving the allocation if the existing extent size accommodates
* the new size.
*/
if (extent_usize_get(extent) >= usize_min && extent_usize_get(extent) <=
usize_max) {
if (oldusize >= usize_min && oldusize <= usize_max) {
arena_decay_tick(tsdn, extent_arena_get(extent));
return false;
}

/* Attempt to shrink the allocation in-place. */
if (extent_usize_get(extent) > usize_max) {
if (oldusize > usize_max) {
if (!large_ralloc_no_move_shrink(tsdn, extent, usize_max)) {
arena_decay_tick(tsdn, extent_arena_get(extent));
return false;
@@ -274,14 +273,12 @@ large_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
void *
large_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize,
size_t alignment, bool zero, tcache_t *tcache) {
void *ret;
size_t copysize;
size_t oldusize = extent_usize_get(extent);

/* The following should have been caught by callers. */
assert(usize > 0 && usize <= LARGE_MAXCLASS);
/* Both allocation sizes must be large to avoid a move. */
assert(extent_usize_get(extent) >= LARGE_MINCLASS && usize >=
LARGE_MINCLASS);
assert(oldusize >= LARGE_MINCLASS && usize >= LARGE_MINCLASS);

/* Try to avoid moving the allocation. */
if (!large_ralloc_no_move(tsdn, extent, usize, usize, zero)) {
@@ -293,16 +290,16 @@ large_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize,
* different size class. In that case, fall back to allocating new
* space and copying.
*/
ret = large_ralloc_move_helper(tsdn, arena, usize, alignment, zero);
void *ret = large_ralloc_move_helper(tsdn, arena, usize, alignment,
zero);
if (ret == NULL) {
return NULL;
}

copysize = (usize < extent_usize_get(extent)) ? usize :
extent_usize_get(extent);
size_t copysize = (usize < oldusize) ? usize : oldusize;
memcpy(ret, extent_addr_get(extent), copysize);
isdalloct(tsdn, extent, extent_addr_get(extent),
extent_usize_get(extent), tcache, true);
isdalloct(tsdn, extent, extent_addr_get(extent), oldusize, tcache,
true);
return ret;
}
@@ -8,8 +8,8 @@ TEST_BEGIN(test_arena_slab_regind) {
extent_t slab;
const arena_bin_info_t *bin_info = &arena_bin_info[binind];
extent_init(&slab, NULL, mallocx(bin_info->slab_size,
MALLOCX_LG_ALIGN(LG_PAGE)), bin_info->slab_size, 0, 0,
extent_state_active, false, true, true);
MALLOCX_LG_ALIGN(LG_PAGE)), bin_info->slab_size, true,
binind, 0, extent_state_active, false, true);
assert_ptr_not_null(extent_addr_get(&slab),
"Unexpected malloc() failure");
for (regind = 0; regind < bin_info->nregs; regind++) {