Rename extent_t to edata_t.
This frees us up from the unfortunate extent/extent2 naming collision.
parent 865debda22
commit a7862df616
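The rename is mechanical: the metadata struct and its accessors move from the extent_ prefix to edata_, while functions that operate on extents as a concept (extent_dalloc_wrapper(), extent_split_wrapper(), and so on) keep their names. A minimal sketch of a typical call site after this commit (assumes the jemalloc-internal headers; usable_size() is a hypothetical helper, not part of the tree):

static size_t
usable_size(tsdn_t *tsdn, const void *ptr) {
    /* Before this commit: extent_t *extent = iealloc(tsdn, ptr); */
    edata_t *edata = iealloc(tsdn, ptr);
    /* Before: return extent_usize_get(extent); */
    return edata_usize_get(edata);
}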
@@ -28,18 +28,18 @@ void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
     bin_stats_data_t *bstats, arena_stats_large_t *lstats,
     arena_stats_extents_t *estats);
 void arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
-    ehooks_t *ehooks, extent_t *extent);
+    ehooks_t *ehooks, edata_t *edata);
 #ifdef JEMALLOC_JET
-size_t arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr);
+size_t arena_slab_regind(edata_t *slab, szind_t binind, const void *ptr);
 #endif
-extent_t *arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena,
+edata_t *arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena,
     size_t usize, size_t alignment, bool *zero);
 void arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena,
-    extent_t *extent);
+    edata_t *edata);
 void arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena,
-    extent_t *extent, size_t oldsize);
+    edata_t *edata, size_t oldsize);
 void arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena,
-    extent_t *extent, size_t oldsize);
+    edata_t *edata, size_t oldsize);
 ssize_t arena_dirty_decay_ms_get(arena_t *arena);
 bool arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms);
 ssize_t arena_muzzy_decay_ms_get(arena_t *arena);
@@ -64,7 +64,7 @@ void arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize);
 void arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
     bool slow_path);
 void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
-    szind_t binind, extent_t *extent, void *ptr);
+    szind_t binind, edata_t *edata, void *ptr);
 void arena_dalloc_small(tsdn_t *tsdn, void *ptr);
 bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
     size_t extra, bool zero, size_t *newsize);
@@ -9,8 +9,8 @@
 #include "jemalloc/internal/ticker.h"
 
 static inline arena_t *
-arena_get_from_extent(extent_t *extent) {
-    return (arena_t *)atomic_load_p(&arenas[extent_arena_ind_get(extent)],
+arena_get_from_edata(edata_t *edata) {
+    return (arena_t *)atomic_load_p(&arenas[edata_arena_ind_get(edata)],
         ATOMIC_RELAXED);
 }
 
@@ -42,20 +42,20 @@ arena_prof_info_get(tsd_t *tsd, const void *ptr, alloc_ctx_t *alloc_ctx,
     assert(ptr != NULL);
     assert(prof_info != NULL);
 
-    const extent_t *extent;
+    const edata_t *edata;
     bool is_slab;
 
     /* Static check. */
     if (alloc_ctx == NULL) {
-        extent = iealloc(tsd_tsdn(tsd), ptr);
-        is_slab = extent_slab_get(extent);
+        edata = iealloc(tsd_tsdn(tsd), ptr);
+        is_slab = edata_slab_get(edata);
     } else if (!unlikely(is_slab = alloc_ctx->slab)) {
-        extent = iealloc(tsd_tsdn(tsd), ptr);
+        edata = iealloc(tsd_tsdn(tsd), ptr);
     }
 
     if (unlikely(!is_slab)) {
-        /* extent must have been initialized at this point. */
-        large_prof_info_get(extent, prof_info);
+        /* edata must have been initialized at this point. */
+        large_prof_info_get(edata, prof_info);
     } else {
         memset(prof_info, 0, sizeof(prof_info_t));
         prof_info->alloc_tctx = (prof_tctx_t *)(uintptr_t)1U;
@@ -69,9 +69,9 @@ arena_prof_tctx_reset(tsd_t *tsd, const void *ptr, alloc_ctx_t *alloc_ctx) {
 
     /* Static check. */
     if (alloc_ctx == NULL) {
-        extent_t *extent = iealloc(tsd_tsdn(tsd), ptr);
-        if (unlikely(!extent_slab_get(extent))) {
-            large_prof_tctx_reset(extent);
+        edata_t *edata = iealloc(tsd_tsdn(tsd), ptr);
+        if (unlikely(!edata_slab_get(edata))) {
+            large_prof_tctx_reset(edata);
         }
     } else {
         if (unlikely(!alloc_ctx->slab)) {
@@ -85,10 +85,10 @@ arena_prof_tctx_reset_sampled(tsd_t *tsd, const void *ptr) {
     cassert(config_prof);
     assert(ptr != NULL);
 
-    extent_t *extent = iealloc(tsd_tsdn(tsd), ptr);
-    assert(!extent_slab_get(extent));
+    edata_t *edata = iealloc(tsd_tsdn(tsd), ptr);
+    assert(!edata_slab_get(edata));
 
-    large_prof_tctx_reset(extent);
+    large_prof_tctx_reset(edata);
 }
 
 JEMALLOC_ALWAYS_INLINE void
@@ -96,9 +96,9 @@ arena_prof_info_set(tsd_t *tsd, const void *ptr, prof_tctx_t *tctx) {
     cassert(config_prof);
     assert(ptr != NULL);
 
-    extent_t *extent = iealloc(tsd_tsdn(tsd), ptr);
-    assert(!extent_slab_get(extent));
-    large_prof_info_set(extent, tctx);
+    edata_t *edata = iealloc(tsd_tsdn(tsd), ptr);
+    assert(!edata_slab_get(edata));
+    large_prof_info_set(edata, tctx);
 }
 
 JEMALLOC_ALWAYS_INLINE void
@@ -130,9 +130,9 @@ arena_decay_tick(tsdn_t *tsdn, arena_t *arena) {
 /* Purge a single extent to retained / unmapped directly. */
 JEMALLOC_ALWAYS_INLINE void
 arena_decay_extent(tsdn_t *tsdn,arena_t *arena, ehooks_t *ehooks,
-    extent_t *extent) {
-    size_t extent_size = extent_size_get(extent);
-    extent_dalloc_wrapper(tsdn, arena, ehooks, extent);
+    edata_t *edata) {
+    size_t extent_size = edata_size_get(edata);
+    extent_dalloc_wrapper(tsdn, arena, ehooks, edata);
     if (config_stats) {
         /* Update stats accordingly. */
         arena_stats_lock(tsdn, &arena->stats);
@@ -169,7 +169,7 @@ arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
 
 JEMALLOC_ALWAYS_INLINE arena_t *
 arena_aalloc(tsdn_t *tsdn, const void *ptr) {
-    return (arena_t *)atomic_load_p(&arenas[extent_arena_ind_get(
+    return (arena_t *)atomic_load_p(&arenas[edata_arena_ind_get(
         iealloc(tsdn, ptr))], ATOMIC_RELAXED);
 }
 
@@ -201,19 +201,19 @@ arena_vsalloc(tsdn_t *tsdn, const void *ptr) {
     rtree_ctx_t rtree_ctx_fallback;
     rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
 
-    extent_t *extent;
+    edata_t *edata;
     szind_t szind;
-    if (rtree_extent_szind_read(tsdn, &extents_rtree, rtree_ctx,
-        (uintptr_t)ptr, false, &extent, &szind)) {
+    if (rtree_edata_szind_read(tsdn, &extents_rtree, rtree_ctx,
+        (uintptr_t)ptr, false, &edata, &szind)) {
         return 0;
     }
 
-    if (extent == NULL) {
+    if (edata == NULL) {
         return 0;
     }
-    assert(extent_state_get(extent) == extent_state_active);
+    assert(edata_state_get(edata) == extent_state_active);
     /* Only slab members should be looked up via interior pointers. */
-    assert(extent_addr_get(extent) == ptr || extent_slab_get(extent));
+    assert(edata_addr_get(edata) == ptr || edata_slab_get(edata));
 
     assert(szind != SC_NSIZES);
 
@@ -225,8 +225,8 @@ arena_dalloc_large_no_tcache(tsdn_t *tsdn, void *ptr, szind_t szind) {
     if (config_prof && unlikely(szind < SC_NBINS)) {
         arena_dalloc_promoted(tsdn, ptr, NULL, true);
     } else {
-        extent_t *extent = iealloc(tsdn, ptr);
-        large_dalloc(tsdn, extent);
+        edata_t *edata = iealloc(tsdn, ptr);
+        large_dalloc(tsdn, edata);
     }
 }
 
@@ -243,11 +243,11 @@ arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr) {
         true, &szind, &slab);
 
     if (config_debug) {
-        extent_t *extent = rtree_extent_read(tsdn, &extents_rtree,
+        edata_t *edata = rtree_edata_read(tsdn, &extents_rtree,
             rtree_ctx, (uintptr_t)ptr, true);
-        assert(szind == extent_szind_get(extent));
+        assert(szind == edata_szind_get(edata));
         assert(szind < SC_NSIZES);
-        assert(slab == extent_slab_get(extent));
+        assert(slab == edata_slab_get(edata));
     }
 
     if (likely(slab)) {
@@ -269,8 +269,8 @@ arena_dalloc_large(tsdn_t *tsdn, void *ptr, tcache_t *tcache, szind_t szind,
                 slow_path);
         }
     } else {
-        extent_t *extent = iealloc(tsdn, ptr);
-        large_dalloc(tsdn, extent);
+        edata_t *edata = iealloc(tsdn, ptr);
+        large_dalloc(tsdn, edata);
     }
 }
 
@@ -300,11 +300,11 @@ arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
 
     if (config_debug) {
         rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
-        extent_t *extent = rtree_extent_read(tsdn, &extents_rtree,
+        edata_t *edata = rtree_edata_read(tsdn, &extents_rtree,
             rtree_ctx, (uintptr_t)ptr, true);
-        assert(szind == extent_szind_get(extent));
+        assert(szind == edata_szind_get(edata));
         assert(szind < SC_NSIZES);
-        assert(slab == extent_slab_get(extent));
+        assert(slab == edata_slab_get(edata));
     }
 
     if (likely(slab)) {
@@ -344,10 +344,10 @@ arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) {
         assert((config_prof && opt_prof) || slab == (szind < SC_NBINS));
 
         if (config_debug) {
-            extent_t *extent = rtree_extent_read(tsdn,
+            edata_t *edata = rtree_edata_read(tsdn,
                 &extents_rtree, rtree_ctx, (uintptr_t)ptr, true);
-            assert(szind == extent_szind_get(extent));
-            assert(slab == extent_slab_get(extent));
+            assert(szind == edata_szind_get(edata));
+            assert(slab == edata_slab_get(edata));
         }
     }
 
@@ -401,10 +401,10 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
         rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
         rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
             (uintptr_t)ptr, true, &szind, &slab);
-        extent_t *extent = rtree_extent_read(tsdn,
+        edata_t *edata = rtree_edata_read(tsdn,
             &extents_rtree, rtree_ctx, (uintptr_t)ptr, true);
-        assert(szind == extent_szind_get(extent));
-        assert(slab == extent_slab_get(extent));
+        assert(szind == edata_szind_get(edata));
+        assert(slab == edata_slab_get(edata));
     }
 
     if (likely(slab)) {
@@ -94,8 +94,8 @@ struct arena_stats_s {
      */
     atomic_zu_t retained; /* Derived. */
 
-    /* Number of extent_t structs allocated by base, but not being used. */
-    atomic_zu_t extent_avail;
+    /* Number of edata_t structs allocated by base, but not being used. */
+    atomic_zu_t edata_avail;
 
     arena_stats_decay_t decay_dirty;
     arena_stats_decay_t decay_muzzy;
@@ -144,7 +144,7 @@ struct arena_s {
      *
      * Synchronization: large_mtx.
      */
-    extent_list_t large;
+    edata_list_t large;
     /* Synchronizes all large allocation/update/deallocation. */
     malloc_mutex_t large_mtx;
 
@@ -185,14 +185,14 @@ struct arena_s {
     malloc_mutex_t extent_grow_mtx;
 
     /*
-     * Available extent structures that were allocated via
-     * base_alloc_extent().
+     * Available edata structures that were allocated via
+     * base_alloc_edata().
      *
-     * Synchronization: extent_avail_mtx.
+     * Synchronization: edata_avail_mtx.
      */
-    extent_tree_t extent_avail;
-    atomic_zu_t extent_avail_cnt;
-    malloc_mutex_t extent_avail_mtx;
+    edata_tree_t edata_avail;
+    atomic_zu_t edata_avail_cnt;
+    malloc_mutex_t edata_avail_mtx;
 
     /*
      * bins is used to store heaps of free regions.
@@ -11,7 +11,7 @@ ehooks_t *base_ehooks_get(base_t *base);
 extent_hooks_t *base_extent_hooks_set(base_t *base,
     extent_hooks_t *extent_hooks);
 void *base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment);
-extent_t *base_alloc_extent(tsdn_t *tsdn, base_t *base);
+edata_t *base_alloc_edata(tsdn_t *tsdn, base_t *base);
 void base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated,
     size_t *resident, size_t *mapped, size_t *n_thp);
 void base_prefork(tsdn_t *tsdn, base_t *base);
@@ -16,7 +16,7 @@ struct base_block_s {
     base_block_t *next;
 
     /* Tracks unused trailing space. */
-    extent_t extent;
+    edata_t edata;
 };
 
 struct base_s {
@@ -47,7 +47,7 @@ struct base_s {
     base_block_t *blocks;
 
     /* Heap of extents that track unused trailing space within blocks. */
-    extent_heap_t avail[SC_NSIZES];
+    edata_heap_t avail[SC_NSIZES];
 
     /* Stats, only maintained if config_stats. */
     size_t allocated;
@@ -22,17 +22,17 @@ struct bin_s {
      * slabcur is reassigned, the previous slab must be deallocated or
      * inserted into slabs_{nonfull,full}.
      */
-    extent_t *slabcur;
+    edata_t *slabcur;
 
     /*
      * Heap of non-full slabs. This heap is used to assure that new
      * allocations come from the non-full slab that is oldest/lowest in
      * memory.
      */
-    extent_heap_t slabs_nonfull;
+    edata_heap_t slabs_nonfull;
 
     /* List used to track full slabs. */
-    extent_list_t slabs_full;
+    edata_list_t slabs_full;
 
     /* Bin statistics. */
     bin_stats_t stats;
@@ -3,7 +3,7 @@
 
 #include "jemalloc/internal/sc.h"
 
-#define BIN_SHARDS_MAX (1 << EXTENT_BITS_BINSHARD_WIDTH)
+#define BIN_SHARDS_MAX (1 << EDATA_BITS_BINSHARD_WIDTH)
 #define N_BIN_SHARDS_DEFAULT 1
 
 /* Used in TSD static initializer only. Real init in arena_bind(). */
@@ -1,5 +1,5 @@
-#ifndef JEMALLOC_INTERNAL_EXTENT_H
-#define JEMALLOC_INTERNAL_EXTENT_H
+#ifndef JEMALLOC_INTERNAL_EDATA_H
+#define JEMALLOC_INTERNAL_EDATA_H
 
 #include "jemalloc/internal/atomic.h"
 #include "jemalloc/internal/bin_info.h"
@@ -26,11 +26,11 @@ enum extent_head_state_e {
 typedef enum extent_head_state_e extent_head_state_t;
 
 /* Extent (span of pages). Use accessor functions for e_* fields. */
-typedef struct extent_s extent_t;
-typedef ql_head(extent_t) extent_list_t;
-typedef ph(extent_t) extent_tree_t;
-typedef ph(extent_t) extent_heap_t;
-struct extent_s {
+typedef struct edata_s edata_t;
+typedef ql_head(edata_t) edata_list_t;
+typedef ph(edata_t) edata_tree_t;
+typedef ph(edata_t) edata_heap_t;
+struct edata_s {
     /*
      * Bitfield containing several fields:
      *
@@ -105,48 +105,48 @@ struct extent_s {
     uint64_t e_bits;
 #define MASK(CURRENT_FIELD_WIDTH, CURRENT_FIELD_SHIFT) ((((((uint64_t)0x1U) << (CURRENT_FIELD_WIDTH)) - 1)) << (CURRENT_FIELD_SHIFT))
 
-#define EXTENT_BITS_ARENA_WIDTH MALLOCX_ARENA_BITS
-#define EXTENT_BITS_ARENA_SHIFT 0
-#define EXTENT_BITS_ARENA_MASK MASK(EXTENT_BITS_ARENA_WIDTH, EXTENT_BITS_ARENA_SHIFT)
+#define EDATA_BITS_ARENA_WIDTH MALLOCX_ARENA_BITS
+#define EDATA_BITS_ARENA_SHIFT 0
+#define EDATA_BITS_ARENA_MASK MASK(EDATA_BITS_ARENA_WIDTH, EDATA_BITS_ARENA_SHIFT)
 
-#define EXTENT_BITS_SLAB_WIDTH 1
-#define EXTENT_BITS_SLAB_SHIFT (EXTENT_BITS_ARENA_WIDTH + EXTENT_BITS_ARENA_SHIFT)
-#define EXTENT_BITS_SLAB_MASK MASK(EXTENT_BITS_SLAB_WIDTH, EXTENT_BITS_SLAB_SHIFT)
+#define EDATA_BITS_SLAB_WIDTH 1
+#define EDATA_BITS_SLAB_SHIFT (EDATA_BITS_ARENA_WIDTH + EDATA_BITS_ARENA_SHIFT)
+#define EDATA_BITS_SLAB_MASK MASK(EDATA_BITS_SLAB_WIDTH, EDATA_BITS_SLAB_SHIFT)
 
-#define EXTENT_BITS_COMMITTED_WIDTH 1
-#define EXTENT_BITS_COMMITTED_SHIFT (EXTENT_BITS_SLAB_WIDTH + EXTENT_BITS_SLAB_SHIFT)
-#define EXTENT_BITS_COMMITTED_MASK MASK(EXTENT_BITS_COMMITTED_WIDTH, EXTENT_BITS_COMMITTED_SHIFT)
+#define EDATA_BITS_COMMITTED_WIDTH 1
+#define EDATA_BITS_COMMITTED_SHIFT (EDATA_BITS_SLAB_WIDTH + EDATA_BITS_SLAB_SHIFT)
+#define EDATA_BITS_COMMITTED_MASK MASK(EDATA_BITS_COMMITTED_WIDTH, EDATA_BITS_COMMITTED_SHIFT)
 
-#define EXTENT_BITS_DUMPABLE_WIDTH 1
-#define EXTENT_BITS_DUMPABLE_SHIFT (EXTENT_BITS_COMMITTED_WIDTH + EXTENT_BITS_COMMITTED_SHIFT)
-#define EXTENT_BITS_DUMPABLE_MASK MASK(EXTENT_BITS_DUMPABLE_WIDTH, EXTENT_BITS_DUMPABLE_SHIFT)
+#define EDATA_BITS_DUMPABLE_WIDTH 1
+#define EDATA_BITS_DUMPABLE_SHIFT (EDATA_BITS_COMMITTED_WIDTH + EDATA_BITS_COMMITTED_SHIFT)
+#define EDATA_BITS_DUMPABLE_MASK MASK(EDATA_BITS_DUMPABLE_WIDTH, EDATA_BITS_DUMPABLE_SHIFT)
 
-#define EXTENT_BITS_ZEROED_WIDTH 1
-#define EXTENT_BITS_ZEROED_SHIFT (EXTENT_BITS_DUMPABLE_WIDTH + EXTENT_BITS_DUMPABLE_SHIFT)
-#define EXTENT_BITS_ZEROED_MASK MASK(EXTENT_BITS_ZEROED_WIDTH, EXTENT_BITS_ZEROED_SHIFT)
+#define EDATA_BITS_ZEROED_WIDTH 1
+#define EDATA_BITS_ZEROED_SHIFT (EDATA_BITS_DUMPABLE_WIDTH + EDATA_BITS_DUMPABLE_SHIFT)
+#define EDATA_BITS_ZEROED_MASK MASK(EDATA_BITS_ZEROED_WIDTH, EDATA_BITS_ZEROED_SHIFT)
 
-#define EXTENT_BITS_STATE_WIDTH 2
-#define EXTENT_BITS_STATE_SHIFT (EXTENT_BITS_ZEROED_WIDTH + EXTENT_BITS_ZEROED_SHIFT)
-#define EXTENT_BITS_STATE_MASK MASK(EXTENT_BITS_STATE_WIDTH, EXTENT_BITS_STATE_SHIFT)
+#define EDATA_BITS_STATE_WIDTH 2
+#define EDATA_BITS_STATE_SHIFT (EDATA_BITS_ZEROED_WIDTH + EDATA_BITS_ZEROED_SHIFT)
+#define EDATA_BITS_STATE_MASK MASK(EDATA_BITS_STATE_WIDTH, EDATA_BITS_STATE_SHIFT)
 
-#define EXTENT_BITS_SZIND_WIDTH LG_CEIL(SC_NSIZES)
-#define EXTENT_BITS_SZIND_SHIFT (EXTENT_BITS_STATE_WIDTH + EXTENT_BITS_STATE_SHIFT)
-#define EXTENT_BITS_SZIND_MASK MASK(EXTENT_BITS_SZIND_WIDTH, EXTENT_BITS_SZIND_SHIFT)
+#define EDATA_BITS_SZIND_WIDTH LG_CEIL(SC_NSIZES)
+#define EDATA_BITS_SZIND_SHIFT (EDATA_BITS_STATE_WIDTH + EDATA_BITS_STATE_SHIFT)
+#define EDATA_BITS_SZIND_MASK MASK(EDATA_BITS_SZIND_WIDTH, EDATA_BITS_SZIND_SHIFT)
 
-#define EXTENT_BITS_NFREE_WIDTH (SC_LG_SLAB_MAXREGS + 1)
-#define EXTENT_BITS_NFREE_SHIFT (EXTENT_BITS_SZIND_WIDTH + EXTENT_BITS_SZIND_SHIFT)
-#define EXTENT_BITS_NFREE_MASK MASK(EXTENT_BITS_NFREE_WIDTH, EXTENT_BITS_NFREE_SHIFT)
+#define EDATA_BITS_NFREE_WIDTH (SC_LG_SLAB_MAXREGS + 1)
+#define EDATA_BITS_NFREE_SHIFT (EDATA_BITS_SZIND_WIDTH + EDATA_BITS_SZIND_SHIFT)
+#define EDATA_BITS_NFREE_MASK MASK(EDATA_BITS_NFREE_WIDTH, EDATA_BITS_NFREE_SHIFT)
 
-#define EXTENT_BITS_BINSHARD_WIDTH 6
-#define EXTENT_BITS_BINSHARD_SHIFT (EXTENT_BITS_NFREE_WIDTH + EXTENT_BITS_NFREE_SHIFT)
-#define EXTENT_BITS_BINSHARD_MASK MASK(EXTENT_BITS_BINSHARD_WIDTH, EXTENT_BITS_BINSHARD_SHIFT)
+#define EDATA_BITS_BINSHARD_WIDTH 6
+#define EDATA_BITS_BINSHARD_SHIFT (EDATA_BITS_NFREE_WIDTH + EDATA_BITS_NFREE_SHIFT)
+#define EDATA_BITS_BINSHARD_MASK MASK(EDATA_BITS_BINSHARD_WIDTH, EDATA_BITS_BINSHARD_SHIFT)
 
-#define EXTENT_BITS_IS_HEAD_WIDTH 1
-#define EXTENT_BITS_IS_HEAD_SHIFT (EXTENT_BITS_BINSHARD_WIDTH + EXTENT_BITS_BINSHARD_SHIFT)
-#define EXTENT_BITS_IS_HEAD_MASK MASK(EXTENT_BITS_IS_HEAD_WIDTH, EXTENT_BITS_IS_HEAD_SHIFT)
+#define EDATA_BITS_IS_HEAD_WIDTH 1
+#define EDATA_BITS_IS_HEAD_SHIFT (EDATA_BITS_BINSHARD_WIDTH + EDATA_BITS_BINSHARD_SHIFT)
+#define EDATA_BITS_IS_HEAD_MASK MASK(EDATA_BITS_IS_HEAD_WIDTH, EDATA_BITS_IS_HEAD_SHIFT)
 
-#define EXTENT_BITS_SN_SHIFT (EXTENT_BITS_IS_HEAD_WIDTH + EXTENT_BITS_IS_HEAD_SHIFT)
-#define EXTENT_BITS_SN_MASK (UINT64_MAX << EXTENT_BITS_SN_SHIFT)
+#define EDATA_BITS_SN_SHIFT (EDATA_BITS_IS_HEAD_WIDTH + EDATA_BITS_IS_HEAD_SHIFT)
+#define EDATA_BITS_SN_MASK (UINT64_MAX << EDATA_BITS_SN_SHIFT)
 
     /* Pointer to the extent that this structure is responsible for. */
     void *e_addr;
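Every get accessor further down reads a field as (e_bits & MASK) >> SHIFT, and every set accessor writes it as (e_bits & ~MASK) | (value << SHIFT); each field's SHIFT is the previous field's WIDTH + SHIFT, so the fields tile the 64-bit word. A self-contained sketch of this packing with just the first two fields (the 12-bit arena width here is an assumption for illustration; the real width is MALLOCX_ARENA_BITS):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same construction as the MASK macro above. */
#define MASK(width, shift) (((((uint64_t)0x1U) << (width)) - 1) << (shift))

#define ARENA_WIDTH 12
#define ARENA_SHIFT 0
#define ARENA_MASK MASK(ARENA_WIDTH, ARENA_SHIFT)

#define SLAB_WIDTH 1
#define SLAB_SHIFT (ARENA_WIDTH + ARENA_SHIFT) /* starts where arena ends */
#define SLAB_MASK MASK(SLAB_WIDTH, SLAB_SHIFT)

int main(void) {
    uint64_t e_bits = 0;
    /* Pack arena index 5 and the slab flag, clearing each field first. */
    e_bits = (e_bits & ~ARENA_MASK) | ((uint64_t)5 << ARENA_SHIFT);
    e_bits = (e_bits & ~SLAB_MASK) | ((uint64_t)1 << SLAB_SHIFT);
    /* Read them back the way the edata_*_get accessors do. */
    assert(((e_bits & ARENA_MASK) >> ARENA_SHIFT) == 5);
    assert(((e_bits & SLAB_MASK) >> SLAB_SHIFT) == 1);
    printf("e_bits = 0x%llx\n", (unsigned long long)e_bits); /* 0x1005 */
    return 0;
}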
@@ -160,8 +160,8 @@ struct extent_s {
      * ssssssss [...] ssssssss ssssnnnn nnnnnnnn
      */
     size_t e_size_esn;
-#define EXTENT_SIZE_MASK ((size_t)~(PAGE-1))
-#define EXTENT_ESN_MASK ((size_t)PAGE-1)
+#define EDATA_SIZE_MASK ((size_t)~(PAGE-1))
+#define EDATA_ESN_MASK ((size_t)PAGE-1)
     /* Base extent size, which may not be a multiple of PAGE. */
     size_t e_bsize;
 };
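The e_size_esn packing works because extent sizes are multiples of PAGE, which leaves the low lg(PAGE) bits free for the extent serial number. A self-contained worked example, assuming a 4 KiB page for illustration:

#include <assert.h>
#include <stddef.h>

#define PAGE ((size_t)4096) /* assumed page size for the example */
#define EDATA_SIZE_MASK ((size_t)~(PAGE-1))
#define EDATA_ESN_MASK ((size_t)PAGE-1)

int main(void) {
    size_t e_size_esn = 0;
    /* edata_size_set: size must be a multiple of PAGE. */
    e_size_esn = (2 * PAGE) | (e_size_esn & ~EDATA_SIZE_MASK);
    /* edata_esn_set: the esn lives in the low bits. */
    e_size_esn = (e_size_esn & ~EDATA_ESN_MASK) | ((size_t)5 & EDATA_ESN_MASK);
    assert((e_size_esn & EDATA_SIZE_MASK) == 2 * PAGE); /* edata_size_get */
    assert((e_size_esn & EDATA_ESN_MASK) == 5);         /* edata_esn_get */
    return 0;
}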
@@ -173,13 +173,13 @@ struct extent_s {
      * - stashed dirty extents
      * - arena's large allocations
      */
-    ql_elm(extent_t) ql_link;
+    ql_elm(edata_t) ql_link;
 
     /*
      * Linkage for per size class sn/address-ordered heaps, and
      * for extent_avail
      */
-    phn(extent_t) ph_link;
+    phn(edata_t) ph_link;
 
     union {
         /* Small region slab metadata. */
@@ -196,398 +196,397 @@ struct extent_s {
 };
 
 static inline unsigned
-extent_arena_ind_get(const extent_t *extent) {
-    unsigned arena_ind = (unsigned)((extent->e_bits &
-        EXTENT_BITS_ARENA_MASK) >> EXTENT_BITS_ARENA_SHIFT);
+edata_arena_ind_get(const edata_t *edata) {
+    unsigned arena_ind = (unsigned)((edata->e_bits &
+        EDATA_BITS_ARENA_MASK) >> EDATA_BITS_ARENA_SHIFT);
     assert(arena_ind < MALLOCX_ARENA_LIMIT);
 
     return arena_ind;
 }
 
 static inline szind_t
-extent_szind_get_maybe_invalid(const extent_t *extent) {
-    szind_t szind = (szind_t)((extent->e_bits & EXTENT_BITS_SZIND_MASK) >>
-        EXTENT_BITS_SZIND_SHIFT);
+edata_szind_get_maybe_invalid(const edata_t *edata) {
+    szind_t szind = (szind_t)((edata->e_bits & EDATA_BITS_SZIND_MASK) >>
+        EDATA_BITS_SZIND_SHIFT);
     assert(szind <= SC_NSIZES);
     return szind;
 }
 
 static inline szind_t
-extent_szind_get(const extent_t *extent) {
-    szind_t szind = extent_szind_get_maybe_invalid(extent);
+edata_szind_get(const edata_t *edata) {
+    szind_t szind = edata_szind_get_maybe_invalid(edata);
     assert(szind < SC_NSIZES); /* Never call when "invalid". */
     return szind;
 }
 
 static inline size_t
-extent_usize_get(const extent_t *extent) {
-    return sz_index2size(extent_szind_get(extent));
+edata_usize_get(const edata_t *edata) {
+    return sz_index2size(edata_szind_get(edata));
 }
 
 static inline unsigned
-extent_binshard_get(const extent_t *extent) {
-    unsigned binshard = (unsigned)((extent->e_bits &
-        EXTENT_BITS_BINSHARD_MASK) >> EXTENT_BITS_BINSHARD_SHIFT);
-    assert(binshard < bin_infos[extent_szind_get(extent)].n_shards);
+edata_binshard_get(const edata_t *edata) {
+    unsigned binshard = (unsigned)((edata->e_bits &
+        EDATA_BITS_BINSHARD_MASK) >> EDATA_BITS_BINSHARD_SHIFT);
+    assert(binshard < bin_infos[edata_szind_get(edata)].n_shards);
     return binshard;
 }
 
 static inline size_t
-extent_sn_get(const extent_t *extent) {
-    return (size_t)((extent->e_bits & EXTENT_BITS_SN_MASK) >>
-        EXTENT_BITS_SN_SHIFT);
+edata_sn_get(const edata_t *edata) {
+    return (size_t)((edata->e_bits & EDATA_BITS_SN_MASK) >>
+        EDATA_BITS_SN_SHIFT);
 }
 
 static inline extent_state_t
-extent_state_get(const extent_t *extent) {
-    return (extent_state_t)((extent->e_bits & EXTENT_BITS_STATE_MASK) >>
-        EXTENT_BITS_STATE_SHIFT);
+edata_state_get(const edata_t *edata) {
+    return (extent_state_t)((edata->e_bits & EDATA_BITS_STATE_MASK) >>
+        EDATA_BITS_STATE_SHIFT);
 }
 
 static inline bool
-extent_zeroed_get(const extent_t *extent) {
-    return (bool)((extent->e_bits & EXTENT_BITS_ZEROED_MASK) >>
-        EXTENT_BITS_ZEROED_SHIFT);
+edata_zeroed_get(const edata_t *edata) {
+    return (bool)((edata->e_bits & EDATA_BITS_ZEROED_MASK) >>
+        EDATA_BITS_ZEROED_SHIFT);
 }
 
 static inline bool
-extent_committed_get(const extent_t *extent) {
-    return (bool)((extent->e_bits & EXTENT_BITS_COMMITTED_MASK) >>
-        EXTENT_BITS_COMMITTED_SHIFT);
+edata_committed_get(const edata_t *edata) {
+    return (bool)((edata->e_bits & EDATA_BITS_COMMITTED_MASK) >>
+        EDATA_BITS_COMMITTED_SHIFT);
 }
 
 static inline bool
-extent_dumpable_get(const extent_t *extent) {
-    return (bool)((extent->e_bits & EXTENT_BITS_DUMPABLE_MASK) >>
-        EXTENT_BITS_DUMPABLE_SHIFT);
+edata_dumpable_get(const edata_t *edata) {
+    return (bool)((edata->e_bits & EDATA_BITS_DUMPABLE_MASK) >>
+        EDATA_BITS_DUMPABLE_SHIFT);
 }
 
 static inline bool
-extent_slab_get(const extent_t *extent) {
-    return (bool)((extent->e_bits & EXTENT_BITS_SLAB_MASK) >>
-        EXTENT_BITS_SLAB_SHIFT);
+edata_slab_get(const edata_t *edata) {
+    return (bool)((edata->e_bits & EDATA_BITS_SLAB_MASK) >>
+        EDATA_BITS_SLAB_SHIFT);
 }
 
 static inline unsigned
-extent_nfree_get(const extent_t *extent) {
-    assert(extent_slab_get(extent));
-    return (unsigned)((extent->e_bits & EXTENT_BITS_NFREE_MASK) >>
-        EXTENT_BITS_NFREE_SHIFT);
+edata_nfree_get(const edata_t *edata) {
+    assert(edata_slab_get(edata));
+    return (unsigned)((edata->e_bits & EDATA_BITS_NFREE_MASK) >>
+        EDATA_BITS_NFREE_SHIFT);
 }
 
 static inline void *
-extent_base_get(const extent_t *extent) {
-    assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
-        !extent_slab_get(extent));
-    return PAGE_ADDR2BASE(extent->e_addr);
+edata_base_get(const edata_t *edata) {
+    assert(edata->e_addr == PAGE_ADDR2BASE(edata->e_addr) ||
+        !edata_slab_get(edata));
+    return PAGE_ADDR2BASE(edata->e_addr);
 }
 
 static inline void *
-extent_addr_get(const extent_t *extent) {
-    assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
-        !extent_slab_get(extent));
-    return extent->e_addr;
+edata_addr_get(const edata_t *edata) {
+    assert(edata->e_addr == PAGE_ADDR2BASE(edata->e_addr) ||
+        !edata_slab_get(edata));
+    return edata->e_addr;
 }
 
 static inline size_t
-extent_size_get(const extent_t *extent) {
-    return (extent->e_size_esn & EXTENT_SIZE_MASK);
+edata_size_get(const edata_t *edata) {
+    return (edata->e_size_esn & EDATA_SIZE_MASK);
 }
 
 static inline size_t
-extent_esn_get(const extent_t *extent) {
-    return (extent->e_size_esn & EXTENT_ESN_MASK);
+edata_esn_get(const edata_t *edata) {
+    return (edata->e_size_esn & EDATA_ESN_MASK);
 }
 
 static inline size_t
-extent_bsize_get(const extent_t *extent) {
-    return extent->e_bsize;
+edata_bsize_get(const edata_t *edata) {
+    return edata->e_bsize;
 }
 
 static inline void *
-extent_before_get(const extent_t *extent) {
-    return (void *)((uintptr_t)extent_base_get(extent) - PAGE);
+edata_before_get(const edata_t *edata) {
+    return (void *)((uintptr_t)edata_base_get(edata) - PAGE);
 }
 
 static inline void *
-extent_last_get(const extent_t *extent) {
-    return (void *)((uintptr_t)extent_base_get(extent) +
-        extent_size_get(extent) - PAGE);
+edata_last_get(const edata_t *edata) {
+    return (void *)((uintptr_t)edata_base_get(edata) +
+        edata_size_get(edata) - PAGE);
 }
 
 static inline void *
-extent_past_get(const extent_t *extent) {
-    return (void *)((uintptr_t)extent_base_get(extent) +
-        extent_size_get(extent));
+edata_past_get(const edata_t *edata) {
+    return (void *)((uintptr_t)edata_base_get(edata) +
+        edata_size_get(edata));
 }
 
 static inline slab_data_t *
-extent_slab_data_get(extent_t *extent) {
-    assert(extent_slab_get(extent));
-    return &extent->e_slab_data;
+edata_slab_data_get(edata_t *edata) {
+    assert(edata_slab_get(edata));
+    return &edata->e_slab_data;
 }
 
 static inline const slab_data_t *
-extent_slab_data_get_const(const extent_t *extent) {
-    assert(extent_slab_get(extent));
-    return &extent->e_slab_data;
+edata_slab_data_get_const(const edata_t *edata) {
+    assert(edata_slab_get(edata));
+    return &edata->e_slab_data;
 }
 
 static inline void
-extent_prof_info_get(const extent_t *extent, prof_info_t *prof_info) {
+edata_prof_info_get(const edata_t *edata, prof_info_t *prof_info) {
     assert(prof_info != NULL);
     prof_info->alloc_tctx = (prof_tctx_t *)atomic_load_p(
-        &extent->e_prof_tctx, ATOMIC_ACQUIRE);
-    prof_info->alloc_time = extent->e_alloc_time;
+        &edata->e_prof_tctx, ATOMIC_ACQUIRE);
+    prof_info->alloc_time = edata->e_alloc_time;
 }
 
 static inline void
-extent_arena_ind_set(extent_t *extent, unsigned arena_ind) {
-    extent->e_bits = (extent->e_bits & ~EXTENT_BITS_ARENA_MASK) |
-        ((uint64_t)arena_ind << EXTENT_BITS_ARENA_SHIFT);
+edata_arena_ind_set(edata_t *edata, unsigned arena_ind) {
+    edata->e_bits = (edata->e_bits & ~EDATA_BITS_ARENA_MASK) |
+        ((uint64_t)arena_ind << EDATA_BITS_ARENA_SHIFT);
 }
 
 static inline void
-extent_binshard_set(extent_t *extent, unsigned binshard) {
+edata_binshard_set(edata_t *edata, unsigned binshard) {
     /* The assertion assumes szind is set already. */
-    assert(binshard < bin_infos[extent_szind_get(extent)].n_shards);
-    extent->e_bits = (extent->e_bits & ~EXTENT_BITS_BINSHARD_MASK) |
-        ((uint64_t)binshard << EXTENT_BITS_BINSHARD_SHIFT);
+    assert(binshard < bin_infos[edata_szind_get(edata)].n_shards);
+    edata->e_bits = (edata->e_bits & ~EDATA_BITS_BINSHARD_MASK) |
+        ((uint64_t)binshard << EDATA_BITS_BINSHARD_SHIFT);
 }
 
 static inline void
-extent_addr_set(extent_t *extent, void *addr) {
-    extent->e_addr = addr;
+edata_addr_set(edata_t *edata, void *addr) {
+    edata->e_addr = addr;
 }
 
 static inline void
-extent_size_set(extent_t *extent, size_t size) {
-    assert((size & ~EXTENT_SIZE_MASK) == 0);
-    extent->e_size_esn = size | (extent->e_size_esn & ~EXTENT_SIZE_MASK);
+edata_size_set(edata_t *edata, size_t size) {
+    assert((size & ~EDATA_SIZE_MASK) == 0);
+    edata->e_size_esn = size | (edata->e_size_esn & ~EDATA_SIZE_MASK);
 }
 
 static inline void
-extent_esn_set(extent_t *extent, size_t esn) {
-    extent->e_size_esn = (extent->e_size_esn & ~EXTENT_ESN_MASK) | (esn &
-        EXTENT_ESN_MASK);
+edata_esn_set(edata_t *edata, size_t esn) {
+    edata->e_size_esn = (edata->e_size_esn & ~EDATA_ESN_MASK) | (esn &
+        EDATA_ESN_MASK);
 }
 
 static inline void
-extent_bsize_set(extent_t *extent, size_t bsize) {
-    extent->e_bsize = bsize;
+edata_bsize_set(edata_t *edata, size_t bsize) {
+    edata->e_bsize = bsize;
 }
 
 static inline void
-extent_szind_set(extent_t *extent, szind_t szind) {
+edata_szind_set(edata_t *edata, szind_t szind) {
     assert(szind <= SC_NSIZES); /* SC_NSIZES means "invalid". */
-    extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SZIND_MASK) |
-        ((uint64_t)szind << EXTENT_BITS_SZIND_SHIFT);
+    edata->e_bits = (edata->e_bits & ~EDATA_BITS_SZIND_MASK) |
+        ((uint64_t)szind << EDATA_BITS_SZIND_SHIFT);
 }
 
 static inline void
-extent_nfree_set(extent_t *extent, unsigned nfree) {
-    assert(extent_slab_get(extent));
-    extent->e_bits = (extent->e_bits & ~EXTENT_BITS_NFREE_MASK) |
-        ((uint64_t)nfree << EXTENT_BITS_NFREE_SHIFT);
+edata_nfree_set(edata_t *edata, unsigned nfree) {
+    assert(edata_slab_get(edata));
+    edata->e_bits = (edata->e_bits & ~EDATA_BITS_NFREE_MASK) |
+        ((uint64_t)nfree << EDATA_BITS_NFREE_SHIFT);
 }
 
 static inline void
-extent_nfree_binshard_set(extent_t *extent, unsigned nfree, unsigned binshard) {
+edata_nfree_binshard_set(edata_t *edata, unsigned nfree, unsigned binshard) {
     /* The assertion assumes szind is set already. */
-    assert(binshard < bin_infos[extent_szind_get(extent)].n_shards);
-    extent->e_bits = (extent->e_bits &
-        (~EXTENT_BITS_NFREE_MASK & ~EXTENT_BITS_BINSHARD_MASK)) |
-        ((uint64_t)binshard << EXTENT_BITS_BINSHARD_SHIFT) |
-        ((uint64_t)nfree << EXTENT_BITS_NFREE_SHIFT);
+    assert(binshard < bin_infos[edata_szind_get(edata)].n_shards);
+    edata->e_bits = (edata->e_bits &
+        (~EDATA_BITS_NFREE_MASK & ~EDATA_BITS_BINSHARD_MASK)) |
+        ((uint64_t)binshard << EDATA_BITS_BINSHARD_SHIFT) |
+        ((uint64_t)nfree << EDATA_BITS_NFREE_SHIFT);
 }
 
 static inline void
-extent_nfree_inc(extent_t *extent) {
-    assert(extent_slab_get(extent));
-    extent->e_bits += ((uint64_t)1U << EXTENT_BITS_NFREE_SHIFT);
+edata_nfree_inc(edata_t *edata) {
+    assert(edata_slab_get(edata));
+    edata->e_bits += ((uint64_t)1U << EDATA_BITS_NFREE_SHIFT);
 }
 
 static inline void
-extent_nfree_dec(extent_t *extent) {
-    assert(extent_slab_get(extent));
-    extent->e_bits -= ((uint64_t)1U << EXTENT_BITS_NFREE_SHIFT);
+edata_nfree_dec(edata_t *edata) {
+    assert(edata_slab_get(edata));
+    edata->e_bits -= ((uint64_t)1U << EDATA_BITS_NFREE_SHIFT);
 }
 
 static inline void
-extent_nfree_sub(extent_t *extent, uint64_t n) {
-    assert(extent_slab_get(extent));
-    extent->e_bits -= (n << EXTENT_BITS_NFREE_SHIFT);
+edata_nfree_sub(edata_t *edata, uint64_t n) {
+    assert(edata_slab_get(edata));
+    edata->e_bits -= (n << EDATA_BITS_NFREE_SHIFT);
 }
 
 static inline void
-extent_sn_set(extent_t *extent, size_t sn) {
-    extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SN_MASK) |
-        ((uint64_t)sn << EXTENT_BITS_SN_SHIFT);
+edata_sn_set(edata_t *edata, size_t sn) {
+    edata->e_bits = (edata->e_bits & ~EDATA_BITS_SN_MASK) |
+        ((uint64_t)sn << EDATA_BITS_SN_SHIFT);
 }
 
 static inline void
-extent_state_set(extent_t *extent, extent_state_t state) {
-    extent->e_bits = (extent->e_bits & ~EXTENT_BITS_STATE_MASK) |
-        ((uint64_t)state << EXTENT_BITS_STATE_SHIFT);
+edata_state_set(edata_t *edata, extent_state_t state) {
+    edata->e_bits = (edata->e_bits & ~EDATA_BITS_STATE_MASK) |
+        ((uint64_t)state << EDATA_BITS_STATE_SHIFT);
 }
 
 static inline void
-extent_zeroed_set(extent_t *extent, bool zeroed) {
-    extent->e_bits = (extent->e_bits & ~EXTENT_BITS_ZEROED_MASK) |
-        ((uint64_t)zeroed << EXTENT_BITS_ZEROED_SHIFT);
+edata_zeroed_set(edata_t *edata, bool zeroed) {
+    edata->e_bits = (edata->e_bits & ~EDATA_BITS_ZEROED_MASK) |
+        ((uint64_t)zeroed << EDATA_BITS_ZEROED_SHIFT);
 }
 
 static inline void
-extent_committed_set(extent_t *extent, bool committed) {
-    extent->e_bits = (extent->e_bits & ~EXTENT_BITS_COMMITTED_MASK) |
-        ((uint64_t)committed << EXTENT_BITS_COMMITTED_SHIFT);
+edata_committed_set(edata_t *edata, bool committed) {
+    edata->e_bits = (edata->e_bits & ~EDATA_BITS_COMMITTED_MASK) |
+        ((uint64_t)committed << EDATA_BITS_COMMITTED_SHIFT);
 }
 
 static inline void
-extent_dumpable_set(extent_t *extent, bool dumpable) {
-    extent->e_bits = (extent->e_bits & ~EXTENT_BITS_DUMPABLE_MASK) |
-        ((uint64_t)dumpable << EXTENT_BITS_DUMPABLE_SHIFT);
+edata_dumpable_set(edata_t *edata, bool dumpable) {
+    edata->e_bits = (edata->e_bits & ~EDATA_BITS_DUMPABLE_MASK) |
+        ((uint64_t)dumpable << EDATA_BITS_DUMPABLE_SHIFT);
 }
 
 static inline void
-extent_slab_set(extent_t *extent, bool slab) {
-    extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SLAB_MASK) |
-        ((uint64_t)slab << EXTENT_BITS_SLAB_SHIFT);
+edata_slab_set(edata_t *edata, bool slab) {
+    edata->e_bits = (edata->e_bits & ~EDATA_BITS_SLAB_MASK) |
+        ((uint64_t)slab << EDATA_BITS_SLAB_SHIFT);
 }
 
 static inline void
-extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx) {
-    atomic_store_p(&extent->e_prof_tctx, tctx, ATOMIC_RELEASE);
+edata_prof_tctx_set(edata_t *edata, prof_tctx_t *tctx) {
+    atomic_store_p(&edata->e_prof_tctx, tctx, ATOMIC_RELEASE);
 }
 
 static inline void
-extent_prof_alloc_time_set(extent_t *extent, nstime_t *t) {
-    nstime_copy(&extent->e_alloc_time, t);
+edata_prof_alloc_time_set(edata_t *edata, nstime_t *t) {
+    nstime_copy(&edata->e_alloc_time, t);
 }
 
 static inline bool
-extent_is_head_get(extent_t *extent) {
+edata_is_head_get(edata_t *edata) {
     if (maps_coalesce) {
         not_reached();
     }
 
-    return (bool)((extent->e_bits & EXTENT_BITS_IS_HEAD_MASK) >>
-        EXTENT_BITS_IS_HEAD_SHIFT);
+    return (bool)((edata->e_bits & EDATA_BITS_IS_HEAD_MASK) >>
+        EDATA_BITS_IS_HEAD_SHIFT);
 }
 
 static inline void
-extent_is_head_set(extent_t *extent, bool is_head) {
+edata_is_head_set(edata_t *edata, bool is_head) {
     if (maps_coalesce) {
         not_reached();
     }
 
-    extent->e_bits = (extent->e_bits & ~EXTENT_BITS_IS_HEAD_MASK) |
-        ((uint64_t)is_head << EXTENT_BITS_IS_HEAD_SHIFT);
+    edata->e_bits = (edata->e_bits & ~EDATA_BITS_IS_HEAD_MASK) |
+        ((uint64_t)is_head << EDATA_BITS_IS_HEAD_SHIFT);
 }
 
 static inline void
-extent_init(extent_t *extent, unsigned arena_ind, void *addr, size_t size,
+edata_init(edata_t *edata, unsigned arena_ind, void *addr, size_t size,
     bool slab, szind_t szind, size_t sn, extent_state_t state, bool zeroed,
     bool committed, bool dumpable, extent_head_state_t is_head) {
     assert(addr == PAGE_ADDR2BASE(addr) || !slab);
 
-    extent_arena_ind_set(extent, arena_ind);
-    extent_addr_set(extent, addr);
-    extent_size_set(extent, size);
-    extent_slab_set(extent, slab);
-    extent_szind_set(extent, szind);
-    extent_sn_set(extent, sn);
-    extent_state_set(extent, state);
-    extent_zeroed_set(extent, zeroed);
-    extent_committed_set(extent, committed);
-    extent_dumpable_set(extent, dumpable);
-    ql_elm_new(extent, ql_link);
+    edata_arena_ind_set(edata, arena_ind);
+    edata_addr_set(edata, addr);
+    edata_size_set(edata, size);
+    edata_slab_set(edata, slab);
+    edata_szind_set(edata, szind);
+    edata_sn_set(edata, sn);
+    edata_state_set(edata, state);
+    edata_zeroed_set(edata, zeroed);
+    edata_committed_set(edata, committed);
+    edata_dumpable_set(edata, dumpable);
+    ql_elm_new(edata, ql_link);
     if (!maps_coalesce) {
-        extent_is_head_set(extent, (is_head == EXTENT_IS_HEAD) ? true :
-            false);
+        edata_is_head_set(edata, is_head == EXTENT_IS_HEAD);
     }
     if (config_prof) {
-        extent_prof_tctx_set(extent, NULL);
+        edata_prof_tctx_set(edata, NULL);
     }
 }
 
 static inline void
-extent_binit(extent_t *extent, void *addr, size_t bsize, size_t sn) {
-    extent_arena_ind_set(extent, (1U << MALLOCX_ARENA_BITS) - 1);
-    extent_addr_set(extent, addr);
-    extent_bsize_set(extent, bsize);
-    extent_slab_set(extent, false);
-    extent_szind_set(extent, SC_NSIZES);
-    extent_sn_set(extent, sn);
-    extent_state_set(extent, extent_state_active);
-    extent_zeroed_set(extent, true);
-    extent_committed_set(extent, true);
-    extent_dumpable_set(extent, true);
+edata_binit(edata_t *edata, void *addr, size_t bsize, size_t sn) {
+    edata_arena_ind_set(edata, (1U << MALLOCX_ARENA_BITS) - 1);
+    edata_addr_set(edata, addr);
+    edata_bsize_set(edata, bsize);
+    edata_slab_set(edata, false);
+    edata_szind_set(edata, SC_NSIZES);
+    edata_sn_set(edata, sn);
+    edata_state_set(edata, extent_state_active);
+    edata_zeroed_set(edata, true);
+    edata_committed_set(edata, true);
+    edata_dumpable_set(edata, true);
 }
 
 static inline void
-extent_list_init(extent_list_t *list) {
+edata_list_init(edata_list_t *list) {
     ql_new(list);
 }
 
-static inline extent_t *
-extent_list_first(const extent_list_t *list) {
+static inline edata_t *
+edata_list_first(const edata_list_t *list) {
     return ql_first(list);
 }
 
-static inline extent_t *
-extent_list_last(const extent_list_t *list) {
+static inline edata_t *
+edata_list_last(const edata_list_t *list) {
     return ql_last(list, ql_link);
 }
 
 static inline void
-extent_list_append(extent_list_t *list, extent_t *extent) {
-    ql_tail_insert(list, extent, ql_link);
+edata_list_append(edata_list_t *list, edata_t *edata) {
+    ql_tail_insert(list, edata, ql_link);
 }
 
 static inline void
-extent_list_prepend(extent_list_t *list, extent_t *extent) {
-    ql_head_insert(list, extent, ql_link);
+edata_list_prepend(edata_list_t *list, edata_t *edata) {
+    ql_head_insert(list, edata, ql_link);
 }
 
 static inline void
-extent_list_replace(extent_list_t *list, extent_t *to_remove,
-    extent_t *to_insert) {
+edata_list_replace(edata_list_t *list, edata_t *to_remove,
+    edata_t *to_insert) {
     ql_after_insert(to_remove, to_insert, ql_link);
     ql_remove(list, to_remove, ql_link);
 }
 
 static inline void
-extent_list_remove(extent_list_t *list, extent_t *extent) {
-    ql_remove(list, extent, ql_link);
+edata_list_remove(edata_list_t *list, edata_t *edata) {
+    ql_remove(list, edata, ql_link);
 }
 
 static inline int
-extent_sn_comp(const extent_t *a, const extent_t *b) {
-    size_t a_sn = extent_sn_get(a);
-    size_t b_sn = extent_sn_get(b);
+edata_sn_comp(const edata_t *a, const edata_t *b) {
+    size_t a_sn = edata_sn_get(a);
+    size_t b_sn = edata_sn_get(b);
 
     return (a_sn > b_sn) - (a_sn < b_sn);
 }
 
 static inline int
-extent_esn_comp(const extent_t *a, const extent_t *b) {
-    size_t a_esn = extent_esn_get(a);
-    size_t b_esn = extent_esn_get(b);
+edata_esn_comp(const edata_t *a, const edata_t *b) {
+    size_t a_esn = edata_esn_get(a);
+    size_t b_esn = edata_esn_get(b);
 
     return (a_esn > b_esn) - (a_esn < b_esn);
 }
 
 static inline int
-extent_ad_comp(const extent_t *a, const extent_t *b) {
-    uintptr_t a_addr = (uintptr_t)extent_addr_get(a);
-    uintptr_t b_addr = (uintptr_t)extent_addr_get(b);
+edata_ad_comp(const edata_t *a, const edata_t *b) {
+    uintptr_t a_addr = (uintptr_t)edata_addr_get(a);
+    uintptr_t b_addr = (uintptr_t)edata_addr_get(b);
 
     return (a_addr > b_addr) - (a_addr < b_addr);
 }
 
 static inline int
-extent_ead_comp(const extent_t *a, const extent_t *b) {
+edata_ead_comp(const edata_t *a, const edata_t *b) {
     uintptr_t a_eaddr = (uintptr_t)a;
     uintptr_t b_eaddr = (uintptr_t)b;
 
@@ -595,32 +594,32 @@ extent_ead_comp(const extent_t *a, const extent_t *b) {
 }
 
 static inline int
-extent_snad_comp(const extent_t *a, const extent_t *b) {
+edata_snad_comp(const edata_t *a, const edata_t *b) {
     int ret;
 
-    ret = extent_sn_comp(a, b);
+    ret = edata_sn_comp(a, b);
     if (ret != 0) {
         return ret;
     }
 
-    ret = extent_ad_comp(a, b);
+    ret = edata_ad_comp(a, b);
     return ret;
 }
 
 static inline int
-extent_esnead_comp(const extent_t *a, const extent_t *b) {
+edata_esnead_comp(const edata_t *a, const edata_t *b) {
     int ret;
 
-    ret = extent_esn_comp(a, b);
+    ret = edata_esn_comp(a, b);
     if (ret != 0) {
         return ret;
     }
 
-    ret = extent_ead_comp(a, b);
+    ret = edata_ead_comp(a, b);
     return ret;
 }
 
-ph_proto(, extent_avail_, extent_tree_t, extent_t)
-ph_proto(, extent_heap_, extent_heap_t, extent_t)
+ph_proto(, edata_avail_, edata_tree_t, edata_t)
+ph_proto(, edata_heap_, edata_heap_t, edata_t)
 
-#endif /* JEMALLOC_INTERNAL_EXTENT_H */
+#endif /* JEMALLOC_INTERNAL_EDATA_H */
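A sketch of how the renamed header composes, using only functions declared above. It assumes the jemalloc-internal build environment; the include path is a guess at this commit's layout, EXTENT_NOT_HEAD is the assumed counterpart of EXTENT_IS_HEAD, and size must be a multiple of PAGE to satisfy edata_size_set()'s assertion:

#include "jemalloc/internal/edata.h" /* assumed path for the renamed header */

static void
edata_demo(edata_t *edata, void *pages, size_t size, unsigned arena_ind) {
    /* Describe a committed, non-slab run of pages. */
    edata_init(edata, arena_ind, pages, size, /* slab */ false,
        SC_NSIZES, /* sn */ 0, extent_state_active, /* zeroed */ false,
        /* committed */ true, /* dumpable */ true, EXTENT_NOT_HEAD);
    assert(edata_size_get(edata) == size);
    assert(!edata_slab_get(edata));

    /* Thread it onto an edata_list_t (a ql_head of edata_t). */
    edata_list_t list;
    edata_list_init(&list);
    edata_list_append(&list, edata);
    assert(edata_list_first(&list) == edata);
}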
@@ -19,7 +19,7 @@ struct eset_s {
      *
      * Synchronization: mtx.
      */
-    extent_heap_t heaps[SC_NPSIZES + 1];
+    edata_heap_t heaps[SC_NPSIZES + 1];
     atomic_zu_t nextents[SC_NPSIZES + 1];
     atomic_zu_t nbytes[SC_NPSIZES + 1];
 
@@ -35,7 +35,7 @@ struct eset_s {
      *
      * Synchronization: mtx.
      */
-    extent_list_t lru;
+    edata_list_t lru;
 
     /*
      * Page sum for all extents in heaps.
@@ -67,13 +67,13 @@ size_t eset_nextents_get(eset_t *eset, pszind_t ind);
 /* Get the sum total bytes of the extents in the given page size index. */
 size_t eset_nbytes_get(eset_t *eset, pszind_t ind);
 
-void eset_insert_locked(tsdn_t *tsdn, eset_t *eset, extent_t *extent);
-void eset_remove_locked(tsdn_t *tsdn, eset_t *eset, extent_t *extent);
+void eset_insert_locked(tsdn_t *tsdn, eset_t *eset, edata_t *edata);
+void eset_remove_locked(tsdn_t *tsdn, eset_t *eset, edata_t *edata);
 /*
  * Select an extent from this eset of the given size and alignment. Returns
  * null if no such item could be found.
  */
-extent_t *eset_fit_locked(tsdn_t *tsdn, eset_t *eset, size_t esize,
+edata_t *eset_fit_locked(tsdn_t *tsdn, eset_t *eset, size_t esize,
     size_t alignment);
 
 void eset_prefork(tsdn_t *tsdn, eset_t *eset);
@@ -26,38 +26,38 @@ extern size_t opt_lg_extent_max_active_fit;
 
 extern rtree_t extents_rtree;
 
-extent_t *extent_alloc(tsdn_t *tsdn, arena_t *arena);
-void extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
+edata_t *extent_alloc(tsdn_t *tsdn, arena_t *arena);
+void extent_dalloc(tsdn_t *tsdn, arena_t *arena, edata_t *edata);
 
-extent_t *extents_alloc(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
+edata_t *extents_alloc(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
     eset_t *eset, void *new_addr, size_t size, size_t pad, size_t alignment,
     bool slab, szind_t szind, bool *zero, bool *commit);
 void extents_dalloc(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
-    eset_t *eset, extent_t *extent);
-extent_t *extents_evict(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
+    eset_t *eset, edata_t *edata);
+edata_t *extents_evict(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
     eset_t *eset, size_t npages_min);
-extent_t *extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
+edata_t *extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
     void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
     szind_t szind, bool *zero, bool *commit);
-void extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
+void extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, edata_t *edata);
 void extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
-    extent_t *extent);
+    edata_t *edata);
 void extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
-    extent_t *extent);
+    edata_t *edata);
 bool extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
-    extent_t *extent, size_t offset, size_t length);
+    edata_t *edata, size_t offset, size_t length);
 bool extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
-    extent_t *extent, size_t offset, size_t length);
+    edata_t *edata, size_t offset, size_t length);
 bool extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
-    extent_t *extent, size_t offset, size_t length);
+    edata_t *edata, size_t offset, size_t length);
 bool extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
-    extent_t *extent, size_t offset, size_t length);
-extent_t *extent_split_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
-    extent_t *extent, size_t size_a, szind_t szind_a, bool slab_a,
+    edata_t *edata, size_t offset, size_t length);
+edata_t *extent_split_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
+    edata_t *edata, size_t size_a, szind_t szind_a, bool slab_a,
     size_t size_b, szind_t szind_b, bool slab_b);
 bool extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
-    extent_t *a, extent_t *b);
-bool extent_head_no_merge(extent_t *a, extent_t *b);
+    edata_t *a, edata_t *b);
+bool extent_head_no_merge(edata_t *a, edata_t *b);
 
 bool extent_boot(void);
 
@@ -76,12 +76,12 @@ arena_is_auto(arena_t *arena) {
	return (arena_ind_get(arena) < manual_arena_base);
 }

-JEMALLOC_ALWAYS_INLINE extent_t *
+JEMALLOC_ALWAYS_INLINE edata_t *
 iealloc(tsdn_t *tsdn, const void *ptr) {
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

-	return rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
+	return rtree_edata_read(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)ptr, true);
 }

@@ -6,7 +6,7 @@
 void *large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero);
 void *large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
     bool zero);
-bool large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
+bool large_ralloc_no_move(tsdn_t *tsdn, edata_t *edata, size_t usize_min,
     size_t usize_max, bool zero);
 void *large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
     size_t alignment, bool zero, tcache_t *tcache,
@@ -18,12 +18,12 @@ extern large_dalloc_junk_t *JET_MUTABLE large_dalloc_junk;
 typedef void (large_dalloc_maybe_junk_t)(void *, size_t);
 extern large_dalloc_maybe_junk_t *JET_MUTABLE large_dalloc_maybe_junk;

-void large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent);
-void large_dalloc_finish(tsdn_t *tsdn, extent_t *extent);
-void large_dalloc(tsdn_t *tsdn, extent_t *extent);
-size_t large_salloc(tsdn_t *tsdn, const extent_t *extent);
-void large_prof_info_get(const extent_t *extent, prof_info_t *prof_info);
-void large_prof_tctx_reset(extent_t *extent);
-void large_prof_info_set(extent_t *extent, prof_tctx_t *tctx);
+void large_dalloc_prep_junked_locked(tsdn_t *tsdn, edata_t *edata);
+void large_dalloc_finish(tsdn_t *tsdn, edata_t *edata);
+void large_dalloc(tsdn_t *tsdn, edata_t *edata);
+size_t large_salloc(tsdn_t *tsdn, const edata_t *edata);
+void large_prof_info_get(const edata_t *edata, prof_info_t *prof_info);
+void large_prof_tctx_reset(edata_t *edata);
+void large_prof_info_set(edata_t *edata, prof_tctx_t *tctx);

 #endif /* JEMALLOC_INTERNAL_LARGE_EXTERNS_H */
@@ -48,18 +48,18 @@ struct rtree_leaf_elm_s {
	/*
	 * Single pointer-width field containing all three leaf element fields.
	 * For example, on a 64-bit x64 system with 48 significant virtual
-	 * memory address bits, the index, extent, and slab fields are packed as
+	 * memory address bits, the index, edata, and slab fields are packed as
	 * such:
	 *
	 * x: index
-	 * e: extent
+	 * e: edata
	 * b: slab
	 *
	 *   00000000 xxxxxxxx eeeeeeee [...] eeeeeeee eeee000b
	 */
	atomic_p_t	le_bits;
 #else
-	atomic_p_t	le_extent; /* (extent_t *) */
+	atomic_p_t	le_edata; /* (edata_t *) */
	atomic_u_t	le_szind; /* (szind_t) */
	atomic_b_t	le_slab; /* (bool) */
 #endif
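The packed layout documented above can be exercised in isolation. Below is a minimal standalone model, assuming a 64-bit target whose user-space pointers fit in 48 bits and whose low bit is clear due to alignment; the pack/unpack names and the LG_VADDR constant here are illustrative stand-ins, not jemalloc's actual macros:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define LG_VADDR 48 /* assumed: 48 significant virtual address bits */

    /* Pack: index in the high bits, pointer in the middle, slab in bit 0. */
    static uintptr_t
    pack(unsigned szind, void *edata, bool slab) {
    	return ((uintptr_t)szind << LG_VADDR)
    	    | ((uintptr_t)edata & (((uintptr_t)1 << LG_VADDR) - 1))
    	    | (uintptr_t)slab;
    }

    static unsigned
    unpack_szind(uintptr_t bits) {
    	return (unsigned)(bits >> LG_VADDR);
    }

    static void *
    unpack_edata(uintptr_t bits) {
    	/* Restore sign-extended high bits, mask off the slab bit. */
    	return (void *)((uintptr_t)((intptr_t)(bits << (64 - LG_VADDR))
    	    >> (64 - LG_VADDR)) & ~(uintptr_t)1);
    }

    int
    main(void) {
    	long buf[4]; /* stand-in allocation; &buf has its low bit clear */
    	uintptr_t bits = pack(7, buf, true);
    	assert(unpack_szind(bits) == 7);
    	assert(unpack_edata(bits) == (void *)buf);
    	assert((bits & 1) == 1);
    	return 0;
    }

This mirrors the round trip performed by rtree_leaf_elm_write and rtree_leaf_elm_bits_edata_get in the compact representation.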
@@ -176,8 +176,8 @@ rtree_leaf_elm_bits_read(tsdn_t *tsdn, rtree_t *rtree,
	    ? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
 }

-JEMALLOC_ALWAYS_INLINE extent_t *
-rtree_leaf_elm_bits_extent_get(uintptr_t bits) {
+JEMALLOC_ALWAYS_INLINE edata_t *
+rtree_leaf_elm_bits_edata_get(uintptr_t bits) {
 #  ifdef __aarch64__
	/*
	 * aarch64 doesn't sign extend the highest virtual address bit to set
@@ -187,10 +187,10 @@ rtree_leaf_elm_bits_extent_get(uintptr_t bits) {
	/* Mask off the slab bit. */
	uintptr_t low_bit_mask = ~(uintptr_t)1;
	uintptr_t mask = high_bit_mask & low_bit_mask;
-	return (extent_t *)(bits & mask);
+	return (edata_t *)(bits & mask);
 #  else
	/* Restore sign-extended high bits, mask slab bit. */
-	return (extent_t *)((uintptr_t)((intptr_t)(bits << RTREE_NHIB) >>
+	return (edata_t *)((uintptr_t)((intptr_t)(bits << RTREE_NHIB) >>
	    RTREE_NHIB) & ~((uintptr_t)0x1));
 #  endif
 }
@@ -207,16 +207,16 @@ rtree_leaf_elm_bits_slab_get(uintptr_t bits) {

 #  endif

-JEMALLOC_ALWAYS_INLINE extent_t *
-rtree_leaf_elm_extent_read(tsdn_t *tsdn, rtree_t *rtree,
+JEMALLOC_ALWAYS_INLINE edata_t *
+rtree_leaf_elm_edata_read(tsdn_t *tsdn, rtree_t *rtree,
     rtree_leaf_elm_t *elm, bool dependent) {
 #ifdef RTREE_LEAF_COMPACT
	uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
-	return rtree_leaf_elm_bits_extent_get(bits);
+	return rtree_leaf_elm_bits_edata_get(bits);
 #else
-	extent_t *extent = (extent_t *)atomic_load_p(&elm->le_extent, dependent
+	edata_t *edata = (edata_t *)atomic_load_p(&elm->le_edata, dependent
	    ? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
-	return extent;
+	return edata;
 #endif
 }

@@ -245,16 +245,16 @@ rtree_leaf_elm_slab_read(tsdn_t *tsdn, rtree_t *rtree,
 }

 static inline void
-rtree_leaf_elm_extent_write(tsdn_t *tsdn, rtree_t *rtree,
-    rtree_leaf_elm_t *elm, extent_t *extent) {
+rtree_leaf_elm_edata_write(tsdn_t *tsdn, rtree_t *rtree,
+    rtree_leaf_elm_t *elm, edata_t *edata) {
 #ifdef RTREE_LEAF_COMPACT
	uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, true);
	uintptr_t bits = ((uintptr_t)rtree_leaf_elm_bits_szind_get(old_bits) <<
-	    LG_VADDR) | ((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1))
+	    LG_VADDR) | ((uintptr_t)edata & (((uintptr_t)0x1 << LG_VADDR) - 1))
	    | ((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits));
	atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
 #else
-	atomic_store_p(&elm->le_extent, extent, ATOMIC_RELEASE);
+	atomic_store_p(&elm->le_edata, edata, ATOMIC_RELEASE);
 #endif
 }

@@ -267,7 +267,7 @@ rtree_leaf_elm_szind_write(tsdn_t *tsdn, rtree_t *rtree,
	uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm,
	    true);
	uintptr_t bits = ((uintptr_t)szind << LG_VADDR) |
-	    ((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) &
+	    ((uintptr_t)rtree_leaf_elm_bits_edata_get(old_bits) &
	    (((uintptr_t)0x1 << LG_VADDR) - 1)) |
	    ((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits));
	atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
@@ -283,7 +283,7 @@ rtree_leaf_elm_slab_write(tsdn_t *tsdn, rtree_t *rtree,
	uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm,
	    true);
	uintptr_t bits = ((uintptr_t)rtree_leaf_elm_bits_szind_get(old_bits) <<
-	    LG_VADDR) | ((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) &
+	    LG_VADDR) | ((uintptr_t)rtree_leaf_elm_bits_edata_get(old_bits) &
	    (((uintptr_t)0x1 << LG_VADDR) - 1)) | ((uintptr_t)slab);
	atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
 #else
@@ -293,20 +293,20 @@ rtree_leaf_elm_slab_write(tsdn_t *tsdn, rtree_t *rtree,

 static inline void
 rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree,
-    rtree_leaf_elm_t *elm, extent_t *extent, szind_t szind, bool slab) {
+    rtree_leaf_elm_t *elm, edata_t *edata, szind_t szind, bool slab) {
 #ifdef RTREE_LEAF_COMPACT
	uintptr_t bits = ((uintptr_t)szind << LG_VADDR) |
-	    ((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1)) |
+	    ((uintptr_t)edata & (((uintptr_t)0x1 << LG_VADDR) - 1)) |
	    ((uintptr_t)slab);
	atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
 #else
	rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab);
	rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind);
	/*
-	 * Write extent last, since the element is atomically considered valid
-	 * as soon as the extent field is non-NULL.
+	 * Write edata last, since the element is atomically considered valid
+	 * as soon as the edata field is non-NULL.
	 */
-	rtree_leaf_elm_extent_write(tsdn, rtree, elm, extent);
+	rtree_leaf_elm_edata_write(tsdn, rtree, elm, edata);
 #endif
 }

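The "write edata last" rule in the non-compact branch is the standard release-publication pattern: every field a reader may consume is stored before the field that signals validity, and that final store uses release ordering so an acquire load of it makes the earlier stores visible. A minimal sketch of the same idea with C11 atomics, assuming a single writer per element (hypothetical field names, not jemalloc's wrappers):

    #include <stdatomic.h>
    #include <stddef.h>

    typedef struct { int payload; } edata_t;

    typedef struct {
    	unsigned szind;
    	_Bool slab;
    	_Atomic(edata_t *) ptr; /* non-NULL means "element is valid" */
    } elm_t;

    /* Writer: fill in the plain fields first, publish the pointer last. */
    static void
    publish(elm_t *elm, edata_t *e, unsigned szind, _Bool slab) {
    	elm->szind = szind;
    	elm->slab = slab;
    	/* Release: earlier stores are visible once ptr reads non-NULL. */
    	atomic_store_explicit(&elm->ptr, e, memory_order_release);
    }

    /* Reader: an acquire load of ptr makes the other fields safe to read. */
    static edata_t *
    consume(elm_t *elm, unsigned *szind, _Bool *slab) {
    	edata_t *e = atomic_load_explicit(&elm->ptr, memory_order_acquire);
    	if (e != NULL) {
    		*szind = elm->szind;
    		*slab = elm->slab;
    	}
    	return e;
    }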
@@ -317,7 +317,7 @@ rtree_leaf_elm_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree,

	/*
	 * The caller implicitly assures that it is the only writer to the szind
-	 * and slab fields, and that the extent field cannot currently change.
+	 * and slab fields, and that the edata field cannot currently change.
	 */
	rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab);
	rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind);
@@ -384,9 +384,9 @@ rtree_leaf_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,

 static inline bool
 rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
-    extent_t *extent, szind_t szind, bool slab) {
-	/* Use rtree_clear() to set the extent to NULL. */
-	assert(extent != NULL);
+    edata_t *edata, szind_t szind, bool slab) {
+	/* Use rtree_clear() to set the edata to NULL. */
+	assert(edata != NULL);

	rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
	    key, false, true);
@@ -394,8 +394,8 @@ rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
		return true;
	}

-	assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) == NULL);
-	rtree_leaf_elm_write(tsdn, rtree, elm, extent, szind, slab);
+	assert(rtree_leaf_elm_edata_read(tsdn, rtree, elm, false) == NULL);
+	rtree_leaf_elm_write(tsdn, rtree, elm, edata, szind, slab);

	return false;
 }
@@ -412,15 +412,15 @@ rtree_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
	return elm;
 }

-JEMALLOC_ALWAYS_INLINE extent_t *
-rtree_extent_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
+JEMALLOC_ALWAYS_INLINE edata_t *
+rtree_edata_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
     uintptr_t key, bool dependent) {
	rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
	    dependent);
	if (!dependent && elm == NULL) {
		return NULL;
	}
-	return rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent);
+	return rtree_leaf_elm_edata_read(tsdn, rtree, elm, dependent);
 }

 JEMALLOC_ALWAYS_INLINE szind_t
@@ -440,14 +440,14 @@ rtree_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
  */

 JEMALLOC_ALWAYS_INLINE bool
-rtree_extent_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
-    uintptr_t key, bool dependent, extent_t **r_extent, szind_t *r_szind) {
+rtree_edata_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
+    uintptr_t key, bool dependent, edata_t **r_edata, szind_t *r_szind) {
	rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
	    dependent);
	if (!dependent && elm == NULL) {
		return true;
	}
-	*r_extent = rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent);
+	*r_edata = rtree_leaf_elm_edata_read(tsdn, rtree, elm, dependent);
	*r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
	return false;
 }
@@ -520,7 +520,7 @@ static inline void
 rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
     uintptr_t key) {
	rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true);
-	assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) !=
+	assert(rtree_leaf_elm_edata_read(tsdn, rtree, elm, false) !=
	    NULL);
	rtree_leaf_elm_write(tsdn, rtree, elm, NULL, SC_NSIZES, false);
 }
@@ -43,7 +43,7 @@
 #define WITNESS_RANK_TCACHE_QL		13U
 #define WITNESS_RANK_EXTENT_GROW	14U
 #define WITNESS_RANK_EXTENTS		15U
-#define WITNESS_RANK_EXTENT_AVAIL	16U
+#define WITNESS_RANK_EDATA_AVAIL	16U

 #define WITNESS_RANK_EXTENT_POOL	17U
 #define WITNESS_RANK_RTREE		18U
296	src/arena.c
@@ -60,9 +60,9 @@ static void arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena,
     size_t npages_decay_max, bool is_background_thread);
 static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena,
     bool is_background_thread, bool all);
-static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
+static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, edata_t *slab,
     bin_t *bin);
-static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
+static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, edata_t *slab,
     bin_t *bin);

 /******************************************************************************/
@@ -102,8 +102,8 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
	arena_stats_accum_zu(&astats->retained,
	    eset_npages_get(&arena->eset_retained) << LG_PAGE);

-	atomic_store_zu(&astats->extent_avail,
-	    atomic_load_zu(&arena->extent_avail_cnt, ATOMIC_RELAXED),
+	atomic_store_zu(&astats->edata_avail,
+	    atomic_load_zu(&arena->edata_avail_cnt, ATOMIC_RELAXED),
	    ATOMIC_RELAXED);

	arena_stats_accum_u64(&astats->decay_dirty.npurge,
@@ -224,7 +224,7 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,

	/* Gather per arena mutex profiling data. */
	READ_ARENA_MUTEX_PROF_DATA(large_mtx, arena_prof_mutex_large);
-	READ_ARENA_MUTEX_PROF_DATA(extent_avail_mtx,
+	READ_ARENA_MUTEX_PROF_DATA(edata_avail_mtx,
	    arena_prof_mutex_extent_avail)
	READ_ARENA_MUTEX_PROF_DATA(eset_dirty.mtx,
	    arena_prof_mutex_extents_dirty)
@@ -254,11 +254,11 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,

 void
 arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
-    extent_t *extent) {
+    edata_t *edata) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

-	extents_dalloc(tsdn, arena, ehooks, &arena->eset_dirty, extent);
+	extents_dalloc(tsdn, arena, ehooks, &arena->eset_dirty, edata);
	if (arena_dirty_decay_ms_get(arena) == 0) {
		arena_decay_dirty(tsdn, arena, false, true);
	} else {
@@ -267,34 +267,34 @@ arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 }

 static void *
-arena_slab_reg_alloc(extent_t *slab, const bin_info_t *bin_info) {
+arena_slab_reg_alloc(edata_t *slab, const bin_info_t *bin_info) {
	void *ret;
-	slab_data_t *slab_data = extent_slab_data_get(slab);
+	slab_data_t *slab_data = edata_slab_data_get(slab);
	size_t regind;

-	assert(extent_nfree_get(slab) > 0);
+	assert(edata_nfree_get(slab) > 0);
	assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));

	regind = bitmap_sfu(slab_data->bitmap, &bin_info->bitmap_info);
-	ret = (void *)((uintptr_t)extent_addr_get(slab) +
+	ret = (void *)((uintptr_t)edata_addr_get(slab) +
	    (uintptr_t)(bin_info->reg_size * regind));
-	extent_nfree_dec(slab);
+	edata_nfree_dec(slab);
	return ret;
 }

 static void
-arena_slab_reg_alloc_batch(extent_t *slab, const bin_info_t *bin_info,
+arena_slab_reg_alloc_batch(edata_t *slab, const bin_info_t *bin_info,
     unsigned cnt, void** ptrs) {
-	slab_data_t *slab_data = extent_slab_data_get(slab);
+	slab_data_t *slab_data = edata_slab_data_get(slab);

-	assert(extent_nfree_get(slab) >= cnt);
+	assert(edata_nfree_get(slab) >= cnt);
	assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));

 #if (! defined JEMALLOC_INTERNAL_POPCOUNTL) || (defined BITMAP_USE_TREE)
	for (unsigned i = 0; i < cnt; i++) {
		size_t regind = bitmap_sfu(slab_data->bitmap,
		    &bin_info->bitmap_info);
-		*(ptrs + i) = (void *)((uintptr_t)extent_addr_get(slab) +
+		*(ptrs + i) = (void *)((uintptr_t)edata_addr_get(slab) +
		    (uintptr_t)(bin_info->reg_size * regind));
	}
 #else
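Region allocation here reduces to "set the first unset bit, then scale the bit index by the region size". A self-contained sketch of that idea on a single-word bitmap, assuming a GCC/Clang builtin for count-trailing-zeros (jemalloc's real bitmap_t is multi-level; bitmap_sfu64 below is a hypothetical helper):

    #include <assert.h>
    #include <stdint.h>

    /* Set-first-unset: returns the index of the lowest 0 bit and sets it. */
    static unsigned
    bitmap_sfu64(uint64_t *bm) {
    	assert(*bm != UINT64_MAX); /* slab must not be full */
    	unsigned bit = (unsigned)__builtin_ctzll(~*bm);
    	*bm |= (uint64_t)1 << bit;
    	return bit;
    }

    /* Turn the free region index into an address within the slab. */
    static void *
    reg_alloc(void *slab_base, size_t reg_size, uint64_t *bm) {
    	unsigned regind = bitmap_sfu64(bm);
    	return (void *)((uintptr_t)slab_base + reg_size * regind);
    }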
@@ -315,7 +315,7 @@ arena_slab_reg_alloc_batch(extent_t *slab, const bin_info_t *bin_info,
		 * Load from memory locations only once, outside the
		 * hot loop below.
		 */
-		uintptr_t base = (uintptr_t)extent_addr_get(slab);
+		uintptr_t base = (uintptr_t)edata_addr_get(slab);
		uintptr_t regsize = (uintptr_t)bin_info->reg_size;
		while (pop--) {
			size_t bit = cfs_lu(&g);
@@ -327,24 +327,24 @@ arena_slab_reg_alloc_batch(extent_t *slab, const bin_info_t *bin_info,
		slab_data->bitmap[group] = g;
	}
 #endif
-	extent_nfree_sub(slab, cnt);
+	edata_nfree_sub(slab, cnt);
 }

 #ifndef JEMALLOC_JET
 static
 #endif
 size_t
-arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) {
+arena_slab_regind(edata_t *slab, szind_t binind, const void *ptr) {
	size_t diff, regind;

	/* Freeing a pointer outside the slab can cause assertion failure. */
-	assert((uintptr_t)ptr >= (uintptr_t)extent_addr_get(slab));
-	assert((uintptr_t)ptr < (uintptr_t)extent_past_get(slab));
+	assert((uintptr_t)ptr >= (uintptr_t)edata_addr_get(slab));
+	assert((uintptr_t)ptr < (uintptr_t)edata_past_get(slab));
	/* Freeing an interior pointer can cause assertion failure. */
-	assert(((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)) %
+	assert(((uintptr_t)ptr - (uintptr_t)edata_addr_get(slab)) %
	    (uintptr_t)bin_infos[binind].reg_size == 0);

-	diff = (size_t)((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab));
+	diff = (size_t)((uintptr_t)ptr - (uintptr_t)edata_addr_get(slab));

	/* Avoid doing division with a variable divisor. */
	regind = div_compute(&arena_binind_div_info[binind], diff);
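The div_compute call above implements the usual trick for dividing by a divisor that is only known at table-build time: precompute a fixed-point reciprocal once per bin, then replace each runtime division with a multiply and a shift. A hedged sketch of one such scheme, valid for dividends that are multiples of d within the tested range (simplified; jemalloc's actual div_info_t differs in detail):

    #include <assert.h>
    #include <stdint.h>

    typedef struct {
    	uint64_t magic; /* ceil(2^32 / d), for 32-bit dividends */
    	uint32_t d;
    } div_info_t;

    static void
    div_init(div_info_t *info, uint32_t d) {
    	info->d = d;
    	info->magic = ((uint64_t)1 << 32) / d + 1;
    }

    static uint32_t
    div_compute(const div_info_t *info, uint32_t n) {
    	/* n / d == (n * magic) >> 32 for the ranges used here. */
    	return (uint32_t)(((uint64_t)n * info->magic) >> 32);
    }

    int
    main(void) {
    	div_info_t info;
    	div_init(&info, 48); /* e.g. a 48-byte size class */
    	for (uint32_t n = 0; n < (uint32_t)1 << 20; n += 48) {
    		assert(div_compute(&info, n) == n / 48);
    	}
    	return 0;
    }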
@@ -355,17 +355,17 @@ arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) {
 }

 static void
-arena_slab_reg_dalloc(extent_t *slab, slab_data_t *slab_data, void *ptr) {
-	szind_t binind = extent_szind_get(slab);
+arena_slab_reg_dalloc(edata_t *slab, slab_data_t *slab_data, void *ptr) {
+	szind_t binind = edata_szind_get(slab);
	const bin_info_t *bin_info = &bin_infos[binind];
	size_t regind = arena_slab_regind(slab, binind, ptr);

-	assert(extent_nfree_get(slab) < bin_info->nregs);
+	assert(edata_nfree_get(slab) < bin_info->nregs);
	/* Freeing an unallocated pointer can cause assertion failure. */
	assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind));

	bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind);
-	extent_nfree_inc(slab);
+	edata_nfree_inc(slab);
 }

 static void
@@ -423,7 +423,7 @@ arena_may_have_muzzy(arena_t *arena) {
	return arena_muzzy_decay_ms_get(arena) != 0;
 }

-extent_t *
+edata_t *
 arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
     size_t alignment, bool *zero) {
	ehooks_t *ehooks = arena_get_ehooks(arena);
@@ -434,23 +434,22 @@ arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
	szind_t szind = sz_size2index(usize);
	size_t mapped_add;
	bool commit = true;
-	extent_t *extent = extents_alloc(tsdn, arena, ehooks,
-	    &arena->eset_dirty, NULL, usize, sz_large_pad, alignment, false,
-	    szind, zero, &commit);
-	if (extent == NULL && arena_may_have_muzzy(arena)) {
-		extent = extents_alloc(tsdn, arena, ehooks, &arena->eset_muzzy,
+	edata_t *edata = extents_alloc(tsdn, arena, ehooks, &arena->eset_dirty,
+	    NULL, usize, sz_large_pad, alignment, false, szind, zero, &commit);
+	if (edata == NULL && arena_may_have_muzzy(arena)) {
+		edata = extents_alloc(tsdn, arena, ehooks, &arena->eset_muzzy,
		    NULL, usize, sz_large_pad, alignment, false, szind, zero,
		    &commit);
	}
	size_t size = usize + sz_large_pad;
-	if (extent == NULL) {
-		extent = extent_alloc_wrapper(tsdn, arena, ehooks, NULL, usize,
+	if (edata == NULL) {
+		edata = extent_alloc_wrapper(tsdn, arena, ehooks, NULL, usize,
		    sz_large_pad, alignment, false, szind, zero, &commit);
		if (config_stats) {
			/*
-			 * extent may be NULL on OOM, but in that case
-			 * mapped_add isn't used below, so there's no need to
-			 * conditionlly set it to 0 here.
+			 * edata may be NULL on OOM, but in that case mapped_add
+			 * isn't used below, so there's no need to conditionally
+			 * set it to 0 here.
			 */
			mapped_add = size;
		}
@@ -458,7 +457,7 @@ arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
		mapped_add = 0;
	}

-	if (extent != NULL) {
+	if (edata != NULL) {
		if (config_stats) {
			arena_stats_lock(tsdn, &arena->stats);
			arena_large_malloc_stats_update(tsdn, arena, usize);
@@ -471,24 +470,24 @@ arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
		arena_nactive_add(arena, size >> LG_PAGE);
	}

-	return extent;
+	return edata;
 }

 void
-arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
+arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, edata_t *edata) {
	if (config_stats) {
		arena_stats_lock(tsdn, &arena->stats);
		arena_large_dalloc_stats_update(tsdn, arena,
-		    extent_usize_get(extent));
+		    edata_usize_get(edata));
		arena_stats_unlock(tsdn, &arena->stats);
	}
-	arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE);
+	arena_nactive_sub(arena, edata_size_get(edata) >> LG_PAGE);
 }

 void
-arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
+arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
     size_t oldusize) {
-	size_t usize = extent_usize_get(extent);
+	size_t usize = edata_usize_get(edata);
	size_t udiff = oldusize - usize;

	if (config_stats) {
@@ -500,9 +499,9 @@ arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
 }

 void
-arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
+arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
     size_t oldusize) {
-	size_t usize = extent_usize_get(extent);
+	size_t usize = edata_usize_get(edata);
	size_t udiff = usize - oldusize;

	if (config_stats) {
@@ -819,25 +818,25 @@ arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
 static size_t
 arena_stash_decayed(tsdn_t *tsdn, arena_t *arena,
     ehooks_t *ehooks, eset_t *eset, size_t npages_limit,
-    size_t npages_decay_max, extent_list_t *decay_extents) {
+    size_t npages_decay_max, edata_list_t *decay_extents) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	/* Stash extents according to npages_limit. */
	size_t nstashed = 0;
-	extent_t *extent;
+	edata_t *edata;
	while (nstashed < npages_decay_max &&
-	    (extent = extents_evict(tsdn, arena, ehooks, eset, npages_limit))
+	    (edata = extents_evict(tsdn, arena, ehooks, eset, npages_limit))
	    != NULL) {
-		extent_list_append(decay_extents, extent);
-		nstashed += extent_size_get(extent) >> LG_PAGE;
+		edata_list_append(decay_extents, edata);
+		nstashed += edata_size_get(edata) >> LG_PAGE;
	}
	return nstashed;
 }

 static size_t
 arena_decay_stashed(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
-    arena_decay_t *decay, eset_t *eset, bool all, extent_list_t *decay_extents,
+    arena_decay_t *decay, eset_t *eset, bool all, edata_list_t *decay_extents,
     bool is_background_thread) {
	size_t nmadvise, nunmapped;
	size_t npurged;
@@ -849,31 +848,30 @@ arena_decay_stashed(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
	npurged = 0;

	ssize_t muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
-	for (extent_t *extent = extent_list_first(decay_extents); extent !=
-	    NULL; extent = extent_list_first(decay_extents)) {
+	for (edata_t *edata = edata_list_first(decay_extents); edata !=
+	    NULL; edata = edata_list_first(decay_extents)) {
		if (config_stats) {
			nmadvise++;
		}
-		size_t npages = extent_size_get(extent) >> LG_PAGE;
+		size_t npages = edata_size_get(edata) >> LG_PAGE;
		npurged += npages;
-		extent_list_remove(decay_extents, extent);
+		edata_list_remove(decay_extents, edata);
		switch (eset_state_get(eset)) {
		case extent_state_active:
			not_reached();
		case extent_state_dirty:
			if (!all && muzzy_decay_ms != 0 &&
			    !extent_purge_lazy_wrapper(tsdn, arena,
-			    ehooks, extent, 0,
-			    extent_size_get(extent))) {
+			    ehooks, edata, 0, edata_size_get(edata))) {
				extents_dalloc(tsdn, arena, ehooks,
-				    &arena->eset_muzzy, extent);
+				    &arena->eset_muzzy, edata);
				arena_background_thread_inactivity_check(tsdn,
				    arena, is_background_thread);
				break;
			}
			JEMALLOC_FALLTHROUGH;
		case extent_state_muzzy:
-			extent_dalloc_wrapper(tsdn, arena, ehooks, extent);
+			extent_dalloc_wrapper(tsdn, arena, ehooks, edata);
			if (config_stats) {
				nunmapped += npages;
			}
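The switch above is a two-stage decay pipeline: dirty extents are first demoted to the muzzy set via a cheap lazy purge (an MADV_FREE-style hint), and only if that is skipped or fails, or when starting from the muzzy state, are they unmapped outright. A hedged sketch of just the state-to-action decision (names simplified, the muzzy_decay_ms condition folded into lazy_purge_succeeded):

    #include <stdbool.h>

    typedef enum { EXTENT_DIRTY, EXTENT_MUZZY } extent_state_t;
    typedef enum { ACTION_MOVE_TO_MUZZY, ACTION_UNMAP } decay_action_t;

    /*
     * One decay step: dirty extents try a cheap lazy purge and move to the
     * muzzy set; otherwise they are unmapped just like muzzy extents.
     */
    static decay_action_t
    decay_step(extent_state_t state, bool all, bool lazy_purge_succeeded) {
    	if (state == EXTENT_DIRTY && !all && lazy_purge_succeeded) {
    		return ACTION_MOVE_TO_MUZZY;
    	}
    	return ACTION_UNMAP;
    }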
@@ -923,8 +921,8 @@ arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,

	ehooks_t *ehooks = arena_get_ehooks(arena);

-	extent_list_t decay_extents;
-	extent_list_init(&decay_extents);
+	edata_list_t decay_extents;
+	edata_list_init(&decay_extents);

	size_t npurge = arena_stash_decayed(tsdn, arena, ehooks, eset,
	    npages_limit, npages_decay_max, &decay_extents);
@@ -1000,33 +998,33 @@ arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) {
 }

 static void
-arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab) {
-	arena_nactive_sub(arena, extent_size_get(slab) >> LG_PAGE);
+arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, edata_t *slab) {
+	arena_nactive_sub(arena, edata_size_get(slab) >> LG_PAGE);

	ehooks_t *ehooks = arena_get_ehooks(arena);
	arena_extents_dirty_dalloc(tsdn, arena, ehooks, slab);
 }

 static void
-arena_bin_slabs_nonfull_insert(bin_t *bin, extent_t *slab) {
-	assert(extent_nfree_get(slab) > 0);
-	extent_heap_insert(&bin->slabs_nonfull, slab);
+arena_bin_slabs_nonfull_insert(bin_t *bin, edata_t *slab) {
+	assert(edata_nfree_get(slab) > 0);
+	edata_heap_insert(&bin->slabs_nonfull, slab);
	if (config_stats) {
		bin->stats.nonfull_slabs++;
	}
 }

 static void
-arena_bin_slabs_nonfull_remove(bin_t *bin, extent_t *slab) {
-	extent_heap_remove(&bin->slabs_nonfull, slab);
+arena_bin_slabs_nonfull_remove(bin_t *bin, edata_t *slab) {
+	edata_heap_remove(&bin->slabs_nonfull, slab);
	if (config_stats) {
		bin->stats.nonfull_slabs--;
	}
 }

-static extent_t *
+static edata_t *
 arena_bin_slabs_nonfull_tryget(bin_t *bin) {
-	extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull);
+	edata_t *slab = edata_heap_remove_first(&bin->slabs_nonfull);
	if (slab == NULL) {
		return NULL;
	}
@@ -1038,30 +1036,30 @@ arena_bin_slabs_nonfull_tryget(bin_t *bin) {
 }

 static void
-arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, extent_t *slab) {
-	assert(extent_nfree_get(slab) == 0);
+arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, edata_t *slab) {
+	assert(edata_nfree_get(slab) == 0);
	/*
	 * Tracking extents is required by arena_reset, which is not allowed
-	 * for auto arenas.  Bypass this step to avoid touching the extent
+	 * for auto arenas.  Bypass this step to avoid touching the edata
	 * linkage (often results in cache misses) for auto arenas.
	 */
	if (arena_is_auto(arena)) {
		return;
	}
-	extent_list_append(&bin->slabs_full, slab);
+	edata_list_append(&bin->slabs_full, slab);
 }

 static void
-arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, extent_t *slab) {
+arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, edata_t *slab) {
	if (arena_is_auto(arena)) {
		return;
	}
-	extent_list_remove(&bin->slabs_full, slab);
+	edata_list_remove(&bin->slabs_full, slab);
 }

 static void
 arena_bin_reset(tsd_t *tsd, arena_t *arena, bin_t *bin) {
-	extent_t *slab;
+	edata_t *slab;

	malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
	if (bin->slabcur != NULL) {
@@ -1071,13 +1069,13 @@ arena_bin_reset(tsd_t *tsd, arena_t *arena, bin_t *bin) {
		arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
	}
-	while ((slab = extent_heap_remove_first(&bin->slabs_nonfull)) != NULL) {
+	while ((slab = edata_heap_remove_first(&bin->slabs_nonfull)) != NULL) {
		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
		arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
	}
-	for (slab = extent_list_first(&bin->slabs_full); slab != NULL;
-	    slab = extent_list_first(&bin->slabs_full)) {
+	for (slab = edata_list_first(&bin->slabs_full); slab != NULL;
+	    slab = edata_list_first(&bin->slabs_full)) {
		arena_bin_slabs_full_remove(arena, bin, slab);
		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
		arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
@@ -1109,9 +1107,9 @@ arena_reset(tsd_t *tsd, arena_t *arena) {
	/* Large allocations. */
	malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);

-	for (extent_t *extent = extent_list_first(&arena->large); extent !=
-	    NULL; extent = extent_list_first(&arena->large)) {
-		void *ptr = extent_base_get(extent);
+	for (edata_t *edata = edata_list_first(&arena->large); edata !=
+	    NULL; edata = edata_list_first(&arena->large)) {
+		void *ptr = edata_base_get(edata);
		size_t usize;

		malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
@@ -1129,7 +1127,7 @@ arena_reset(tsd_t *tsd, arena_t *arena) {
		if (config_prof && opt_prof) {
			prof_free(tsd, ptr, usize, &alloc_ctx);
		}
-		large_dalloc(tsd_tsdn(tsd), extent);
+		large_dalloc(tsd_tsdn(tsd), edata);
		malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
@@ -1157,10 +1155,10 @@ arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) {
	 * dss-based extents for later reuse.
	 */
	ehooks_t *ehooks = arena_get_ehooks(arena);
-	extent_t *extent;
-	while ((extent = extents_evict(tsdn, arena, ehooks,
+	edata_t *edata;
+	while ((edata = extents_evict(tsdn, arena, ehooks,
	    &arena->eset_retained, 0)) != NULL) {
-		extent_destroy_wrapper(tsdn, arena, ehooks, extent);
+		extent_destroy_wrapper(tsdn, arena, ehooks, edata);
	}
 }

@@ -1200,10 +1198,10 @@ arena_destroy(tsd_t *tsd, arena_t *arena) {
	base_delete(tsd_tsdn(tsd), arena->base);
 }

-static extent_t *
+static edata_t *
 arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
     const bin_info_t *bin_info, szind_t szind) {
-	extent_t *slab;
+	edata_t *slab;
	bool zero, commit;

	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
@@ -1222,7 +1220,7 @@ arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
	return slab;
 }

-static extent_t *
+static edata_t *
 arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned binshard,
     const bin_info_t *bin_info) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
@@ -1232,7 +1230,7 @@ arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned binshard
	szind_t szind = sz_size2index(bin_info->reg_size);
	bool zero = false;
	bool commit = true;
-	extent_t *slab = extents_alloc(tsdn, arena, ehooks, &arena->eset_dirty,
+	edata_t *slab = extents_alloc(tsdn, arena, ehooks, &arena->eset_dirty,
	    NULL, bin_info->slab_size, 0, PAGE, true, binind, &zero, &commit);
	if (slab == NULL && arena_may_have_muzzy(arena)) {
		slab = extents_alloc(tsdn, arena, ehooks, &arena->eset_muzzy,
@@ -1246,22 +1244,22 @@ arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned binshard
			return NULL;
		}
	}
-	assert(extent_slab_get(slab));
+	assert(edata_slab_get(slab));

	/* Initialize slab internals. */
-	slab_data_t *slab_data = extent_slab_data_get(slab);
-	extent_nfree_binshard_set(slab, bin_info->nregs, binshard);
+	slab_data_t *slab_data = edata_slab_data_get(slab);
+	edata_nfree_binshard_set(slab, bin_info->nregs, binshard);
	bitmap_init(slab_data->bitmap, &bin_info->bitmap_info, false);

-	arena_nactive_add(arena, extent_size_get(slab) >> LG_PAGE);
+	arena_nactive_add(arena, edata_size_get(slab) >> LG_PAGE);

	return slab;
 }

-static extent_t *
+static edata_t *
 arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
     szind_t binind, unsigned binshard) {
-	extent_t *slab;
+	edata_t *slab;
	const bin_info_t *bin_info;

	/* Look for a usable slab. */
@@ -1307,14 +1305,14 @@ arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, bin_t *bin,

	if (bin->slabcur != NULL) {
		/* Only attempted when current slab is full. */
-		assert(extent_nfree_get(bin->slabcur) == 0);
+		assert(edata_nfree_get(bin->slabcur) == 0);
	}

	const bin_info_t *bin_info = &bin_infos[binind];
-	extent_t *slab = arena_bin_nonfull_slab_get(tsdn, arena, bin, binind,
+	edata_t *slab = arena_bin_nonfull_slab_get(tsdn, arena, bin, binind,
	    binshard);
	if (bin->slabcur != NULL) {
-		if (extent_nfree_get(bin->slabcur) > 0) {
+		if (edata_nfree_get(bin->slabcur) > 0) {
			/*
			 * Another thread updated slabcur while this one ran
			 * without the bin lock in arena_bin_nonfull_slab_get().
@@ -1331,7 +1329,7 @@ arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
			 * arena_bin_lower_slab() must be called, as if
			 * a region were just deallocated from the slab.
			 */
-			if (extent_nfree_get(slab) == bin_info->nregs) {
+			if (edata_nfree_get(slab) == bin_info->nregs) {
				arena_dalloc_bin_slab(tsdn, arena, slab,
				    bin);
			} else {
@@ -1350,7 +1348,7 @@ arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
		return NULL;
	}
	bin->slabcur = slab;
-	assert(extent_nfree_get(bin->slabcur) > 0);
+	assert(edata_nfree_get(bin->slabcur) > 0);

	return arena_slab_reg_alloc(slab, bin_info);
 }
@@ -1386,12 +1384,12 @@ arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
	void **empty_position = cache_bin_empty_position_get(tbin, binind);
	for (i = 0, nfill = (cache_bin_ncached_max_get(binind) >>
	    tcache->lg_fill_div[binind]); i < nfill; i += cnt) {
-		extent_t *slab;
-		if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) >
+		edata_t *slab;
+		if ((slab = bin->slabcur) != NULL && edata_nfree_get(slab) >
		    0) {
			unsigned tofill = nfill - i;
-			cnt = tofill < extent_nfree_get(slab) ?
-			    tofill : extent_nfree_get(slab);
+			cnt = tofill < edata_nfree_get(slab) ?
+			    tofill : edata_nfree_get(slab);
			arena_slab_reg_alloc_batch(
			    slab, &bin_infos[binind], cnt,
			    empty_position - nfill + i);
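The refill loop above takes min(remaining demand, free regions in the current slab) per iteration, so a single fill may drain several slabs. A hedged sketch of just that batching logic, with hypothetical slab_t/slab_take stand-ins and no locking or slab replacement:

    typedef struct {
    	unsigned nfree; /* free regions left in this slab */
    } slab_t;

    /* Hypothetical: pop up to cnt regions from slab into ptrs. */
    static unsigned
    slab_take(slab_t *slab, void **ptrs, unsigned cnt) {
    	for (unsigned i = 0; i < cnt; i++) {
    		ptrs[i] = (void *)0; /* stand-in for a real region address */
    	}
    	slab->nfree -= cnt;
    	return cnt;
    }

    /* Fill want pointers, draining slabs batch by batch. */
    static unsigned
    cache_fill(slab_t *slabs, unsigned nslabs, void **ptrs, unsigned want) {
    	unsigned filled = 0;
    	for (unsigned s = 0; s < nslabs && filled < want; s++) {
    		unsigned tofill = want - filled;
    		unsigned cnt = tofill < slabs[s].nfree ? tofill
    		    : slabs[s].nfree;
    		filled += slab_take(&slabs[s], ptrs + filled, cnt);
    	}
    	return filled;
    }

    int
    main(void) {
    	slab_t slabs[2] = { {3}, {5} };
    	void *ptrs[6];
    	return cache_fill(slabs, 2, ptrs, 6) == 6 ? 0 : 1;
    }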
@@ -1454,14 +1452,14 @@ arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {
	void *ret;
	bin_t *bin;
	size_t usize;
-	extent_t *slab;
+	edata_t *slab;

	assert(binind < SC_NBINS);
	usize = sz_index2size(binind);
	unsigned binshard;
	bin = arena_bin_choose_lock(tsdn, arena, binind, &binshard);

-	if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > 0) {
+	if ((slab = bin->slabcur) != NULL && edata_nfree_get(slab) > 0) {
		ret = arena_slab_reg_alloc(slab, &bin_infos[binind]);
	} else {
		ret = arena_bin_malloc_hard(tsdn, arena, bin, binind, binshard);
@@ -1554,11 +1552,11 @@ arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize) {
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

-	extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
+	edata_t *edata = rtree_edata_read(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)ptr, true);

	szind_t szind = sz_size2index(usize);
-	extent_szind_set(extent, szind);
+	edata_szind_set(edata, szind);
	rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
	    szind, false);

@@ -1568,11 +1566,11 @@ arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize) {
 }

 static size_t
-arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) {
+arena_prof_demote(tsdn_t *tsdn, edata_t *edata, const void *ptr) {
	cassert(config_prof);
	assert(ptr != NULL);

-	extent_szind_set(extent, SC_NBINS);
+	edata_szind_set(edata, SC_NBINS);
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
@@ -1589,9 +1587,9 @@ arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
	cassert(config_prof);
	assert(opt_prof);

-	extent_t *extent = iealloc(tsdn, ptr);
-	size_t usize = extent_usize_get(extent);
-	size_t bumped_usize = arena_prof_demote(tsdn, extent, ptr);
+	edata_t *edata = iealloc(tsdn, ptr);
+	size_t usize = edata_usize_get(edata);
+	size_t bumped_usize = arena_prof_demote(tsdn, edata, ptr);
	if (config_opt_safety_checks && usize < SC_LARGE_MINCLASS) {
		/*
		 * Currently, we only do redzoning for small sampled
@@ -1604,17 +1602,17 @@ arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
		tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
		    sz_size2index(bumped_usize), slow_path);
	} else {
-		large_dalloc(tsdn, extent);
+		large_dalloc(tsdn, edata);
	}
 }

 static void
-arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, bin_t *bin) {
+arena_dissociate_bin_slab(arena_t *arena, edata_t *slab, bin_t *bin) {
	/* Dissociate slab from bin. */
	if (slab == bin->slabcur) {
		bin->slabcur = NULL;
	} else {
-		szind_t binind = extent_szind_get(slab);
+		szind_t binind = edata_szind_get(slab);
		const bin_info_t *bin_info = &bin_infos[binind];

		/*
@ -1631,7 +1629,7 @@ arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, bin_t *bin) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
|
arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, edata_t *slab,
|
||||||
bin_t *bin) {
|
bin_t *bin) {
|
||||||
assert(slab != bin->slabcur);
|
assert(slab != bin->slabcur);
|
||||||
|
|
||||||
@ -1646,9 +1644,9 @@ arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
|
|||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
|
arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, edata_t *slab,
|
||||||
bin_t *bin) {
|
bin_t *bin) {
|
||||||
assert(extent_nfree_get(slab) > 0);
|
assert(edata_nfree_get(slab) > 0);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Make sure that if bin->slabcur is non-NULL, it refers to the
|
* Make sure that if bin->slabcur is non-NULL, it refers to the
|
||||||
@ -1656,9 +1654,9 @@ arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
|
|||||||
* than proactively keeping it pointing at the oldest/lowest non-full
|
* than proactively keeping it pointing at the oldest/lowest non-full
|
||||||
* slab.
|
* slab.
|
||||||
*/
|
*/
|
||||||
if (bin->slabcur != NULL && extent_snad_comp(bin->slabcur, slab) > 0) {
|
if (bin->slabcur != NULL && edata_snad_comp(bin->slabcur, slab) > 0) {
|
||||||
/* Switch slabcur. */
|
/* Switch slabcur. */
|
||||||
if (extent_nfree_get(bin->slabcur) > 0) {
|
if (edata_nfree_get(bin->slabcur) > 0) {
|
||||||
arena_bin_slabs_nonfull_insert(bin, bin->slabcur);
|
arena_bin_slabs_nonfull_insert(bin, bin->slabcur);
|
||||||
} else {
|
} else {
|
||||||
arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
|
arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
|
||||||
@ -1674,8 +1672,8 @@ arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
|
|||||||
|
|
||||||
static void
|
static void
|
||||||
arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
|
arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
|
||||||
szind_t binind, extent_t *slab, void *ptr, bool junked) {
|
szind_t binind, edata_t *slab, void *ptr, bool junked) {
|
||||||
slab_data_t *slab_data = extent_slab_data_get(slab);
|
slab_data_t *slab_data = edata_slab_data_get(slab);
|
||||||
const bin_info_t *bin_info = &bin_infos[binind];
|
const bin_info_t *bin_info = &bin_infos[binind];
|
||||||
|
|
||||||
if (!junked && config_fill && unlikely(opt_junk_free)) {
|
if (!junked && config_fill && unlikely(opt_junk_free)) {
|
||||||
@ -1683,7 +1681,7 @@ arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
|
|||||||
}
|
}
|
||||||
|
|
||||||
arena_slab_reg_dalloc(slab, slab_data, ptr);
|
arena_slab_reg_dalloc(slab, slab_data, ptr);
|
||||||
unsigned nfree = extent_nfree_get(slab);
|
unsigned nfree = edata_nfree_get(slab);
|
||||||
if (nfree == bin_info->nregs) {
|
if (nfree == bin_info->nregs) {
|
||||||
arena_dissociate_bin_slab(arena, slab, bin);
|
arena_dissociate_bin_slab(arena, slab, bin);
|
||||||
arena_dalloc_bin_slab(tsdn, arena, slab, bin);
|
arena_dalloc_bin_slab(tsdn, arena, slab, bin);
|
||||||
@ -1700,29 +1698,29 @@ arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
|
|||||||
|
|
||||||
void
|
void
|
||||||
arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
|
arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
|
||||||
szind_t binind, extent_t *extent, void *ptr) {
|
szind_t binind, edata_t *edata, void *ptr) {
|
||||||
arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, extent, ptr,
|
arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, edata, ptr,
|
||||||
true);
|
true);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) {
|
arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, edata_t *edata, void *ptr) {
|
||||||
szind_t binind = extent_szind_get(extent);
|
szind_t binind = edata_szind_get(edata);
|
||||||
unsigned binshard = extent_binshard_get(extent);
|
unsigned binshard = edata_binshard_get(edata);
|
||||||
bin_t *bin = &arena->bins[binind].bin_shards[binshard];
|
bin_t *bin = &arena->bins[binind].bin_shards[binshard];
|
||||||
|
|
||||||
malloc_mutex_lock(tsdn, &bin->lock);
|
malloc_mutex_lock(tsdn, &bin->lock);
|
||||||
arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, extent, ptr,
|
arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, edata, ptr,
|
||||||
false);
|
false);
|
||||||
malloc_mutex_unlock(tsdn, &bin->lock);
|
malloc_mutex_unlock(tsdn, &bin->lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
void
|
void
|
||||||
arena_dalloc_small(tsdn_t *tsdn, void *ptr) {
|
arena_dalloc_small(tsdn_t *tsdn, void *ptr) {
|
||||||
extent_t *extent = iealloc(tsdn, ptr);
|
edata_t *edata = iealloc(tsdn, ptr);
|
||||||
arena_t *arena = arena_get_from_extent(extent);
|
arena_t *arena = arena_get_from_edata(edata);
|
||||||
|
|
||||||
arena_dalloc_bin(tsdn, arena, extent, ptr);
|
arena_dalloc_bin(tsdn, arena, edata, ptr);
|
||||||
arena_decay_tick(tsdn, arena);
|
arena_decay_tick(tsdn, arena);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1733,7 +1731,7 @@ arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
|
|||||||
/* Calls with non-zero extra had to clamp extra. */
|
/* Calls with non-zero extra had to clamp extra. */
|
||||||
assert(extra == 0 || size + extra <= SC_LARGE_MAXCLASS);
|
assert(extra == 0 || size + extra <= SC_LARGE_MAXCLASS);
|
||||||
|
|
||||||
extent_t *extent = iealloc(tsdn, ptr);
|
edata_t *edata = iealloc(tsdn, ptr);
|
||||||
if (unlikely(size > SC_LARGE_MAXCLASS)) {
|
if (unlikely(size > SC_LARGE_MAXCLASS)) {
|
||||||
ret = true;
|
ret = true;
|
||||||
goto done;
|
goto done;
|
||||||
@ -1756,19 +1754,19 @@ arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
|
|||||||
goto done;
|
goto done;
|
||||||
}
|
}
|
||||||
|
|
||||||
arena_t *arena = arena_get_from_extent(extent);
|
arena_t *arena = arena_get_from_edata(edata);
|
||||||
arena_decay_tick(tsdn, arena);
|
arena_decay_tick(tsdn, arena);
|
||||||
ret = false;
|
ret = false;
|
||||||
} else if (oldsize >= SC_LARGE_MINCLASS
|
} else if (oldsize >= SC_LARGE_MINCLASS
|
||||||
&& usize_max >= SC_LARGE_MINCLASS) {
|
&& usize_max >= SC_LARGE_MINCLASS) {
|
||||||
ret = large_ralloc_no_move(tsdn, extent, usize_min, usize_max,
|
ret = large_ralloc_no_move(tsdn, edata, usize_min, usize_max,
|
||||||
zero);
|
zero);
|
||||||
} else {
|
} else {
|
||||||
ret = true;
|
ret = true;
|
||||||
}
|
}
|
||||||
done:
|
done:
|
||||||
assert(extent == iealloc(tsdn, ptr));
|
assert(edata == iealloc(tsdn, ptr));
|
||||||
*newsize = extent_usize_get(extent);
|
*newsize = edata_usize_get(edata);
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
@ -2006,7 +2004,7 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
|
|||||||
|
|
||||||
atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);
|
atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);
|
||||||
|
|
||||||
extent_list_init(&arena->large);
|
edata_list_init(&arena->large);
|
||||||
if (malloc_mutex_init(&arena->large_mtx, "arena_large",
|
if (malloc_mutex_init(&arena->large_mtx, "arena_large",
|
||||||
WITNESS_RANK_ARENA_LARGE, malloc_mutex_rank_exclusive)) {
|
WITNESS_RANK_ARENA_LARGE, malloc_mutex_rank_exclusive)) {
|
||||||
goto label_error;
|
goto label_error;
|
||||||
@ -2055,9 +2053,9 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
|
|||||||
goto label_error;
|
goto label_error;
|
||||||
}
|
}
|
||||||
|
|
||||||
extent_avail_new(&arena->extent_avail);
|
edata_avail_new(&arena->edata_avail);
|
||||||
if (malloc_mutex_init(&arena->extent_avail_mtx, "extent_avail",
|
if (malloc_mutex_init(&arena->edata_avail_mtx, "edata_avail",
|
||||||
WITNESS_RANK_EXTENT_AVAIL, malloc_mutex_rank_exclusive)) {
|
WITNESS_RANK_EDATA_AVAIL, malloc_mutex_rank_exclusive)) {
|
||||||
goto label_error;
|
goto label_error;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2203,7 +2201,7 @@ arena_prefork3(tsdn_t *tsdn, arena_t *arena) {
|
|||||||
|
|
||||||
void
|
void
|
||||||
arena_prefork4(tsdn_t *tsdn, arena_t *arena) {
|
arena_prefork4(tsdn_t *tsdn, arena_t *arena) {
|
||||||
malloc_mutex_prefork(tsdn, &arena->extent_avail_mtx);
|
malloc_mutex_prefork(tsdn, &arena->edata_avail_mtx);
|
||||||
}
|
}
|
||||||
|
|
||||||
void
|
void
|
||||||
@ -2237,7 +2235,7 @@ arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
|
|||||||
}
|
}
|
||||||
malloc_mutex_postfork_parent(tsdn, &arena->large_mtx);
|
malloc_mutex_postfork_parent(tsdn, &arena->large_mtx);
|
||||||
base_postfork_parent(tsdn, arena->base);
|
base_postfork_parent(tsdn, arena->base);
|
||||||
malloc_mutex_postfork_parent(tsdn, &arena->extent_avail_mtx);
|
malloc_mutex_postfork_parent(tsdn, &arena->edata_avail_mtx);
|
||||||
eset_postfork_parent(tsdn, &arena->eset_dirty);
|
eset_postfork_parent(tsdn, &arena->eset_dirty);
|
||||||
eset_postfork_parent(tsdn, &arena->eset_muzzy);
|
eset_postfork_parent(tsdn, &arena->eset_muzzy);
|
||||||
eset_postfork_parent(tsdn, &arena->eset_retained);
|
eset_postfork_parent(tsdn, &arena->eset_retained);
|
||||||
@ -2283,7 +2281,7 @@ arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
|
|||||||
}
|
}
|
||||||
malloc_mutex_postfork_child(tsdn, &arena->large_mtx);
|
malloc_mutex_postfork_child(tsdn, &arena->large_mtx);
|
||||||
base_postfork_child(tsdn, arena->base);
|
base_postfork_child(tsdn, arena->base);
|
||||||
malloc_mutex_postfork_child(tsdn, &arena->extent_avail_mtx);
|
malloc_mutex_postfork_child(tsdn, &arena->edata_avail_mtx);
|
||||||
eset_postfork_child(tsdn, &arena->eset_dirty);
|
eset_postfork_child(tsdn, &arena->eset_dirty);
|
||||||
eset_postfork_child(tsdn, &arena->eset_muzzy);
|
eset_postfork_child(tsdn, &arena->eset_muzzy);
|
||||||
eset_postfork_child(tsdn, &arena->eset_retained);
|
eset_postfork_child(tsdn, &arena->eset_retained);
|
||||||
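
The arena hunks above are purely mechanical: each extent_t local and each extent_*() accessor becomes the edata_t / edata_*() equivalent with unchanged behavior. A minimal sketch of the post-rename lookup pattern, built only from calls that appear in this commit (it compiles only inside the jemalloc tree, and the helper name is hypothetical):

    #include "jemalloc/internal/jemalloc_preamble.h"
    #include "jemalloc/internal/jemalloc_internal_includes.h"

    /* Hypothetical helper: map a pointer to its metadata and owning arena,
     * the way arena_dalloc_small() does after this rename. */
    static size_t
    ptr_usize_and_tick(tsdn_t *tsdn, void *ptr) {
    	edata_t *edata = iealloc(tsdn, ptr);
    	arena_t *arena = arena_get_from_edata(edata);

    	arena_decay_tick(tsdn, arena);
    	return edata_usize_get(edata);
    }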

src/base.c (76 changed lines)

@@ -105,14 +105,14 @@ label_done:
 }

 static void
-base_extent_init(size_t *extent_sn_next, extent_t *extent, void *addr,
+base_edata_init(size_t *extent_sn_next, edata_t *edata, void *addr,
     size_t size) {
 	size_t sn;

 	sn = *extent_sn_next;
 	(*extent_sn_next)++;

-	extent_binit(extent, addr, size, sn);
+	edata_binit(edata, addr, size, sn);
 }

 static size_t
@@ -158,7 +158,7 @@ base_auto_thp_switch(tsdn_t *tsdn, base_t *base) {
 		pages_huge(block, block->size);
 		if (config_stats) {
 			base->n_thp += HUGEPAGE_CEILING(block->size -
-			    extent_bsize_get(&block->extent)) >> LG_HUGEPAGE;
+			    edata_bsize_get(&block->edata)) >> LG_HUGEPAGE;
 		}
 		block = block->next;
 		assert(block == NULL || (base_ind_get(base) == 0));
@@ -166,34 +166,34 @@ base_auto_thp_switch(tsdn_t *tsdn, base_t *base) {
 }

 static void *
-base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size,
+base_extent_bump_alloc_helper(edata_t *edata, size_t *gap_size, size_t size,
     size_t alignment) {
 	void *ret;

 	assert(alignment == ALIGNMENT_CEILING(alignment, QUANTUM));
 	assert(size == ALIGNMENT_CEILING(size, alignment));

-	*gap_size = ALIGNMENT_CEILING((uintptr_t)extent_addr_get(extent),
-	    alignment) - (uintptr_t)extent_addr_get(extent);
-	ret = (void *)((uintptr_t)extent_addr_get(extent) + *gap_size);
-	assert(extent_bsize_get(extent) >= *gap_size + size);
-	extent_binit(extent, (void *)((uintptr_t)extent_addr_get(extent) +
-	    *gap_size + size), extent_bsize_get(extent) - *gap_size - size,
-	    extent_sn_get(extent));
+	*gap_size = ALIGNMENT_CEILING((uintptr_t)edata_addr_get(edata),
+	    alignment) - (uintptr_t)edata_addr_get(edata);
+	ret = (void *)((uintptr_t)edata_addr_get(edata) + *gap_size);
+	assert(edata_bsize_get(edata) >= *gap_size + size);
+	edata_binit(edata, (void *)((uintptr_t)edata_addr_get(edata) +
+	    *gap_size + size), edata_bsize_get(edata) - *gap_size - size,
+	    edata_sn_get(edata));
 	return ret;
 }

 static void
-base_extent_bump_alloc_post(base_t *base, extent_t *extent, size_t gap_size,
+base_extent_bump_alloc_post(base_t *base, edata_t *edata, size_t gap_size,
     void *addr, size_t size) {
-	if (extent_bsize_get(extent) > 0) {
+	if (edata_bsize_get(edata) > 0) {
 		/*
 		 * Compute the index for the largest size class that does not
 		 * exceed extent's size.
 		 */
 		szind_t index_floor =
-		    sz_size2index(extent_bsize_get(extent) + 1) - 1;
-		extent_heap_insert(&base->avail[index_floor], extent);
+		    sz_size2index(edata_bsize_get(edata) + 1) - 1;
+		edata_heap_insert(&base->avail[index_floor], edata);
 	}

 	if (config_stats) {
@@ -218,13 +218,13 @@ base_extent_bump_alloc_post(base_t *base, extent_t *extent, size_t gap_size,
 }

 static void *
-base_extent_bump_alloc(base_t *base, extent_t *extent, size_t size,
+base_extent_bump_alloc(base_t *base, edata_t *edata, size_t size,
     size_t alignment) {
 	void *ret;
 	size_t gap_size;

-	ret = base_extent_bump_alloc_helper(extent, &gap_size, size, alignment);
-	base_extent_bump_alloc_post(base, extent, gap_size, ret, size);
+	ret = base_extent_bump_alloc_helper(edata, &gap_size, size, alignment);
+	base_extent_bump_alloc_post(base, edata, gap_size, ret, size);
 	return ret;
 }

@@ -284,7 +284,7 @@ base_block_alloc(tsdn_t *tsdn, base_t *base, ehooks_t *ehooks, unsigned ind,
 	block->size = block_size;
 	block->next = NULL;
 	assert(block_size >= header_size);
-	base_extent_init(extent_sn_next, &block->extent,
+	base_edata_init(extent_sn_next, &block->edata,
 	    (void *)((uintptr_t)block + header_size), block_size - header_size);
 	return block;
 }
@@ -293,7 +293,7 @@ base_block_alloc(tsdn_t *tsdn, base_t *base, ehooks_t *ehooks, unsigned ind,
  * Allocate an extent that is at least as large as specified size, with
  * specified alignment.
  */
-static extent_t *
+static edata_t *
 base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
 	malloc_mutex_assert_owner(tsdn, &base->mtx);

@@ -327,7 +327,7 @@ base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
 		assert(base->resident <= base->mapped);
 		assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
 	}
-	return &block->extent;
+	return &block->edata;
 }

 base_t *
@@ -357,7 +357,7 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
 	size_t gap_size;
 	size_t base_alignment = CACHELINE;
 	size_t base_size = ALIGNMENT_CEILING(sizeof(base_t), base_alignment);
-	base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->extent,
+	base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->edata,
 	    &gap_size, base_size, base_alignment);
 	base->ind = ind;
 	ehooks_init(&base->ehooks, extent_hooks);
@@ -371,7 +371,7 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
 	base->blocks = block;
 	base->auto_thp_switched = false;
 	for (szind_t i = 0; i < SC_NSIZES; i++) {
-		extent_heap_new(&base->avail[i]);
+		edata_heap_new(&base->avail[i]);
 	}
 	if (config_stats) {
 		base->allocated = sizeof(base_block_t);
@@ -384,7 +384,7 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
 		assert(base->resident <= base->mapped);
 		assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
 	}
-	base_extent_bump_alloc_post(base, &block->extent, gap_size, base,
+	base_extent_bump_alloc_post(base, &block->edata, gap_size, base,
 	    base_size);

 	return base;
@@ -422,28 +422,28 @@ base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment,
 	size_t usize = ALIGNMENT_CEILING(size, alignment);
 	size_t asize = usize + alignment - QUANTUM;

-	extent_t *extent = NULL;
+	edata_t *edata = NULL;
 	malloc_mutex_lock(tsdn, &base->mtx);
 	for (szind_t i = sz_size2index(asize); i < SC_NSIZES; i++) {
-		extent = extent_heap_remove_first(&base->avail[i]);
-		if (extent != NULL) {
+		edata = edata_heap_remove_first(&base->avail[i]);
+		if (edata != NULL) {
 			/* Use existing space. */
 			break;
 		}
 	}
-	if (extent == NULL) {
+	if (edata == NULL) {
 		/* Try to allocate more space. */
-		extent = base_extent_alloc(tsdn, base, usize, alignment);
+		edata = base_extent_alloc(tsdn, base, usize, alignment);
 	}
 	void *ret;
-	if (extent == NULL) {
+	if (edata == NULL) {
 		ret = NULL;
 		goto label_return;
 	}

-	ret = base_extent_bump_alloc(base, extent, usize, alignment);
+	ret = base_extent_bump_alloc(base, edata, usize, alignment);
 	if (esn != NULL) {
-		*esn = extent_sn_get(extent);
+		*esn = edata_sn_get(edata);
 	}
 label_return:
 	malloc_mutex_unlock(tsdn, &base->mtx);
@@ -463,16 +463,16 @@ base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
 	return base_alloc_impl(tsdn, base, size, alignment, NULL);
 }

-extent_t *
-base_alloc_extent(tsdn_t *tsdn, base_t *base) {
+edata_t *
+base_alloc_edata(tsdn_t *tsdn, base_t *base) {
 	size_t esn;
-	extent_t *extent = base_alloc_impl(tsdn, base, sizeof(extent_t),
+	edata_t *edata = base_alloc_impl(tsdn, base, sizeof(edata_t),
 	    CACHELINE, &esn);
-	if (extent == NULL) {
+	if (edata == NULL) {
 		return NULL;
 	}
-	extent_esn_set(extent, esn);
-	return extent;
+	edata_esn_set(edata, esn);
+	return edata;
 }

 void
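
base_extent_bump_alloc_helper() above carves an aligned chunk off the front of an edata and reinitializes the edata to the remainder. A self-contained sketch of the same gap arithmetic; ALIGNMENT_CEILING here is a simplified stand-in for jemalloc's macro and assumes power-of-two alignments:

    #include <assert.h>
    #include <stdint.h>

    #define ALIGNMENT_CEILING(s, a) (((s) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

    /* Bump-allocate 'size' bytes at 'alignment' out of the block
     * [*addr, *addr + *bsize), returning the aligned address and shrinking
     * the block to the remainder, as edata_binit() does above. */
    static uintptr_t
    bump_alloc(uintptr_t *addr, size_t *bsize, size_t size, size_t alignment) {
    	size_t gap_size = ALIGNMENT_CEILING(*addr, alignment) - *addr;
    	uintptr_t ret = *addr + gap_size;

    	assert(*bsize >= gap_size + size);
    	*addr += gap_size + size;
    	*bsize -= gap_size + size;
    	return ret;
    }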
@@ -45,8 +45,8 @@ bin_init(bin_t *bin) {
 		return true;
 	}
 	bin->slabcur = NULL;
-	extent_heap_new(&bin->slabs_nonfull);
-	extent_list_init(&bin->slabs_full);
+	edata_heap_new(&bin->slabs_nonfull);
+	edata_list_init(&bin->slabs_full);
 	if (config_stats) {
 		memset(&bin->stats, 0, sizeof(bin_stats_t));
 	}

src/ctl.c (16 changed lines)

@@ -855,8 +855,8 @@ ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
 		    &astats->astats.mapped);
 		accum_atomic_zu(&sdstats->astats.retained,
 		    &astats->astats.retained);
-		accum_atomic_zu(&sdstats->astats.extent_avail,
-		    &astats->astats.extent_avail);
+		accum_atomic_zu(&sdstats->astats.edata_avail,
+		    &astats->astats.edata_avail);
 	}

 	ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.npurge,
@@ -2603,18 +2603,18 @@ arenas_lookup_ctl(tsd_t *tsd, const size_t *mib,
 	int ret;
 	unsigned arena_ind;
 	void *ptr;
-	extent_t *extent;
+	edata_t *edata;
 	arena_t *arena;

 	ptr = NULL;
 	ret = EINVAL;
 	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
 	WRITE(ptr, void *);
-	extent = iealloc(tsd_tsdn(tsd), ptr);
-	if (extent == NULL)
+	edata = iealloc(tsd_tsdn(tsd), ptr);
+	if (edata == NULL)
 		goto label_return;

-	arena = arena_get_from_extent(extent);
+	arena = arena_get_from_edata(edata);
 	if (arena == NULL)
 		goto label_return;

@@ -2860,7 +2860,7 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_retained,
     atomic_load_zu(&arenas_i(mib[2])->astats->astats.retained, ATOMIC_RELAXED),
     size_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_extent_avail,
-    atomic_load_zu(&arenas_i(mib[2])->astats->astats.extent_avail,
+    atomic_load_zu(&arenas_i(mib[2])->astats->astats.edata_avail,
     ATOMIC_RELAXED),
     size_t)

@@ -3010,7 +3010,7 @@ stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib,
 			continue;
 		}
 		MUTEX_PROF_RESET(arena->large_mtx);
-		MUTEX_PROF_RESET(arena->extent_avail_mtx);
+		MUTEX_PROF_RESET(arena->edata_avail_mtx);
 		MUTEX_PROF_RESET(arena->eset_dirty.mtx);
 		MUTEX_PROF_RESET(arena->eset_muzzy.mtx);
 		MUTEX_PROF_RESET(arena->eset_retained.mtx);
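
arenas_lookup_ctl() above backs the public "arenas.lookup" mallctl. A usage sketch, assuming the stock jemalloc public API (error handling elided):

    #include <stdlib.h>
    #include <jemalloc/jemalloc.h>

    int main(void) {
    	void *p = malloc(64);
    	unsigned arena_ind;
    	size_t sz = sizeof(arena_ind);

    	/* Write the pointer; read back the index of its owning arena. */
    	mallctl("arenas.lookup", &arena_ind, &sz, &p, sizeof(p));
    	free(p);
    	return 0;
    }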

@@ -1,6 +1,6 @@
 #include "jemalloc/internal/jemalloc_preamble.h"
 #include "jemalloc/internal/jemalloc_internal_includes.h"

-ph_gen(, extent_avail_, extent_tree_t, extent_t, ph_link,
-    extent_esnead_comp)
-ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp)
+ph_gen(, edata_avail_, edata_tree_t, edata_t, ph_link,
+    edata_esnead_comp)
+ph_gen(, edata_heap_, edata_heap_t, edata_t, ph_link, edata_snad_comp)
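
ph_gen() expands to a complete pairing-heap implementation whose functions take their names from the given prefix, so this one-line rename is what turns the extent_heap_*/extent_avail_* calls elsewhere in the commit into edata_heap_*/edata_avail_*. A sketch exercising the generated family (compiles only inside the jemalloc tree; the wrapper is hypothetical):

    #include "jemalloc/internal/jemalloc_preamble.h"
    #include "jemalloc/internal/jemalloc_internal_includes.h"

    /* Pop the minimum element (by edata_snad_comp order), or NULL. */
    static edata_t *
    edata_heap_pop_min(edata_heap_t *heap) {
    	if (edata_heap_empty(heap)) {
    		return NULL;
    	}
    	assert(edata_heap_first(heap) != NULL);
    	return edata_heap_remove_first(heap);
    }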

@@ -200,8 +200,8 @@ ehooks_default_merge(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
     void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
 	if (!maps_coalesce) {
 		tsdn_t *tsdn = tsdn_fetch();
-		extent_t *a = iealloc(tsdn, addr_a);
-		extent_t *b = iealloc(tsdn, addr_b);
+		edata_t *a = iealloc(tsdn, addr_a);
+		edata_t *b = iealloc(tsdn, addr_b);
 		if (extent_head_no_merge(a, b)) {
 			return true;
 		}

src/eset.c (68 changed lines)

@@ -16,10 +16,10 @@ eset_init(tsdn_t *tsdn, eset_t *eset, extent_state_t state,
 		return true;
 	}
 	for (unsigned i = 0; i < SC_NPSIZES + 1; i++) {
-		extent_heap_new(&eset->heaps[i]);
+		edata_heap_new(&eset->heaps[i]);
 	}
 	bitmap_init(eset->bitmap, &eset_bitmap_info, true);
-	extent_list_init(&eset->lru);
+	edata_list_init(&eset->lru);
 	atomic_store_zu(&eset->npages, 0, ATOMIC_RELAXED);
 	eset->state = state;
 	eset->delay_coalesce = delay_coalesce;
@@ -63,24 +63,24 @@ eset_stats_sub(eset_t *eset, pszind_t pind, size_t sz) {
 }

 void
-eset_insert_locked(tsdn_t *tsdn, eset_t *eset, extent_t *extent) {
+eset_insert_locked(tsdn_t *tsdn, eset_t *eset, edata_t *edata) {
 	malloc_mutex_assert_owner(tsdn, &eset->mtx);
-	assert(extent_state_get(extent) == eset->state);
+	assert(edata_state_get(edata) == eset->state);

-	size_t size = extent_size_get(extent);
+	size_t size = edata_size_get(edata);
 	size_t psz = sz_psz_quantize_floor(size);
 	pszind_t pind = sz_psz2ind(psz);
-	if (extent_heap_empty(&eset->heaps[pind])) {
+	if (edata_heap_empty(&eset->heaps[pind])) {
 		bitmap_unset(eset->bitmap, &eset_bitmap_info,
 		    (size_t)pind);
 	}
-	extent_heap_insert(&eset->heaps[pind], extent);
+	edata_heap_insert(&eset->heaps[pind], edata);

 	if (config_stats) {
 		eset_stats_add(eset, pind, size);
 	}

-	extent_list_append(&eset->lru, extent);
+	edata_list_append(&eset->lru, edata);
 	size_t npages = size >> LG_PAGE;
 	/*
 	 * All modifications to npages hold the mutex (as asserted above), so we
@@ -94,24 +94,24 @@ eset_insert_locked(tsdn_t *tsdn, eset_t *eset, extent_t *extent) {
 }

 void
-eset_remove_locked(tsdn_t *tsdn, eset_t *eset, extent_t *extent) {
+eset_remove_locked(tsdn_t *tsdn, eset_t *eset, edata_t *edata) {
 	malloc_mutex_assert_owner(tsdn, &eset->mtx);
-	assert(extent_state_get(extent) == eset->state);
+	assert(edata_state_get(edata) == eset->state);

-	size_t size = extent_size_get(extent);
+	size_t size = edata_size_get(edata);
 	size_t psz = sz_psz_quantize_floor(size);
 	pszind_t pind = sz_psz2ind(psz);
-	extent_heap_remove(&eset->heaps[pind], extent);
+	edata_heap_remove(&eset->heaps[pind], edata);

 	if (config_stats) {
 		eset_stats_sub(eset, pind, size);
 	}

-	if (extent_heap_empty(&eset->heaps[pind])) {
+	if (edata_heap_empty(&eset->heaps[pind])) {
 		bitmap_set(eset->bitmap, &eset_bitmap_info,
 		    (size_t)pind);
 	}
-	extent_list_remove(&eset->lru, extent);
+	edata_list_remove(&eset->lru, edata);
 	size_t npages = size >> LG_PAGE;
 	/*
 	 * As in eset_insert_locked, we hold eset->mtx and so don't need atomic
@@ -128,7 +128,7 @@ eset_remove_locked(tsdn_t *tsdn, eset_t *eset, extent_t *extent) {
  * Find an extent with size [min_size, max_size) to satisfy the alignment
  * requirement. For each size, try only the first extent in the heap.
  */
-static extent_t *
+static edata_t *
 eset_fit_alignment(eset_t *eset, size_t min_size, size_t max_size,
     size_t alignment) {
 	pszind_t pind = sz_psz2ind(sz_psz_quantize_ceil(min_size));
@@ -139,10 +139,10 @@ eset_fit_alignment(eset_t *eset, size_t min_size, size_t max_size,
 	    (pszind_t)bitmap_ffu(eset->bitmap, &eset_bitmap_info,
 	    (size_t)i+1)) {
 		assert(i < SC_NPSIZES);
-		assert(!extent_heap_empty(&eset->heaps[i]));
-		extent_t *extent = extent_heap_first(&eset->heaps[i]);
-		uintptr_t base = (uintptr_t)extent_base_get(extent);
-		size_t candidate_size = extent_size_get(extent);
+		assert(!edata_heap_empty(&eset->heaps[i]));
+		edata_t *edata = edata_heap_first(&eset->heaps[i]);
+		uintptr_t base = (uintptr_t)edata_base_get(edata);
+		size_t candidate_size = edata_size_get(edata);
 		assert(candidate_size >= min_size);

 		uintptr_t next_align = ALIGNMENT_CEILING((uintptr_t)base,
@@ -154,7 +154,7 @@ eset_fit_alignment(eset_t *eset, size_t min_size, size_t max_size,

 		size_t leadsize = next_align - base;
 		if (candidate_size - leadsize >= min_size) {
-			return extent;
+			return edata;
 		}
 	}

@@ -165,9 +165,9 @@ eset_fit_alignment(eset_t *eset, size_t min_size, size_t max_size,
  * Do first-fit extent selection, i.e. select the oldest/lowest extent that is
  * large enough.
  */
-static extent_t *
+static edata_t *
 eset_first_fit_locked(tsdn_t *tsdn, eset_t *eset, size_t size) {
-	extent_t *ret = NULL;
+	edata_t *ret = NULL;

 	pszind_t pind = sz_psz2ind(sz_psz_quantize_ceil(size));

@@ -176,8 +176,8 @@ eset_first_fit_locked(tsdn_t *tsdn, eset_t *eset, size_t size) {
 		 * No split / merge allowed (Windows w/o retain). Try exact fit
 		 * only.
 		 */
-		return extent_heap_empty(&eset->heaps[pind]) ? NULL :
-		    extent_heap_first(&eset->heaps[pind]);
+		return edata_heap_empty(&eset->heaps[pind]) ? NULL :
+		    edata_heap_first(&eset->heaps[pind]);
 	}

 	for (pszind_t i = (pszind_t)bitmap_ffu(eset->bitmap,
@@ -185,9 +185,9 @@ eset_first_fit_locked(tsdn_t *tsdn, eset_t *eset, size_t size) {
 	    i < SC_NPSIZES + 1;
 	    i = (pszind_t)bitmap_ffu(eset->bitmap, &eset_bitmap_info,
 	    (size_t)i+1)) {
-		assert(!extent_heap_empty(&eset->heaps[i]));
-		extent_t *extent = extent_heap_first(&eset->heaps[i]);
-		assert(extent_size_get(extent) >= size);
+		assert(!edata_heap_empty(&eset->heaps[i]));
+		edata_t *edata = edata_heap_first(&eset->heaps[i]);
+		assert(edata_size_get(edata) >= size);
 		/*
 		 * In order to reduce fragmentation, avoid reusing and splitting
 		 * large eset for much smaller sizes.
@@ -198,8 +198,8 @@ eset_first_fit_locked(tsdn_t *tsdn, eset_t *eset, size_t size) {
 		    (sz_pind2sz(i) >> opt_lg_extent_max_active_fit) > size) {
 			break;
 		}
-		if (ret == NULL || extent_snad_comp(extent, ret) < 0) {
-			ret = extent;
+		if (ret == NULL || edata_snad_comp(edata, ret) < 0) {
+			ret = edata;
 		}
 		if (i == SC_NPSIZES) {
 			break;
@@ -210,7 +210,7 @@ eset_first_fit_locked(tsdn_t *tsdn, eset_t *eset, size_t size) {
 	return ret;
 }

-extent_t *
+edata_t *
 eset_fit_locked(tsdn_t *tsdn, eset_t *eset, size_t esize, size_t alignment) {
 	malloc_mutex_assert_owner(tsdn, &eset->mtx);

@@ -220,18 +220,18 @@ eset_fit_locked(tsdn_t *tsdn, eset_t *eset, size_t esize, size_t alignment) {
 		return NULL;
 	}

-	extent_t *extent = eset_first_fit_locked(tsdn, eset, max_size);
+	edata_t *edata = eset_first_fit_locked(tsdn, eset, max_size);

-	if (alignment > PAGE && extent == NULL) {
+	if (alignment > PAGE && edata == NULL) {
 		/*
 		 * max_size guarantees the alignment requirement but is rather
 		 * pessimistic. Next we try to satisfy the aligned allocation
 		 * with sizes in [esize, max_size).
 		 */
-		extent = eset_fit_alignment(eset, esize, max_size, alignment);
+		edata = eset_fit_alignment(eset, esize, max_size, alignment);
 	}

-	return extent;
+	return edata;
 }

 void
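
eset_first_fit_locked() above scans upward from the smallest page-size class that can hold the request, inspects only the head of each non-empty heap, and keeps the oldest/lowest candidate. A self-contained analogue, with a plain array standing in for the bitmap_ffu() walk and a serial number standing in for edata_snad_comp():

    #include <stddef.h>

    #define NCLASSES 8

    typedef struct block_s {
    	size_t size;
    	size_t sn;	/* allocation-order serial number; lower is older */
    } block_t;

    /* First fit: among all classes large enough, take the oldest head. */
    static block_t *
    first_fit(block_t *heads[NCLASSES], const size_t class_size[NCLASSES],
        size_t size) {
    	block_t *ret = NULL;
    	for (int i = 0; i < NCLASSES; i++) {
    		if (class_size[i] < size || heads[i] == NULL) {
    			continue;
    		}
    		if (ret == NULL || heads[i]->sn < ret->sn) {
    			ret = heads[i];
    		}
    	}
    	return ret;
    }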

src/extent2.c (835 changed lines; diff suppressed because it is too large)

@@ -109,7 +109,7 @@ extent_dss_max_update(void *new_addr) {
 void *
 extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
     size_t alignment, bool *zero, bool *commit) {
-	extent_t *gap;
+	edata_t *gap;

 	cassert(have_dss);
 	assert(size > 0);
@@ -153,7 +153,7 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
 			size_t gap_size_page = (uintptr_t)ret -
 			    (uintptr_t)gap_addr_page;
 			if (gap_size_page != 0) {
-				extent_init(gap, arena_ind_get(arena),
+				edata_init(gap, arena_ind_get(arena),
 				    gap_addr_page, gap_size_page, false,
 				    SC_NSIZES, arena_extent_sn_next(arena),
 				    extent_state_active, false, true, true,
@@ -194,17 +194,17 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
 					*commit = pages_decommit(ret, size);
 				}
 				if (*zero && *commit) {
-					extent_t extent;
+					edata_t edata;
 					ehooks_t *ehooks = arena_get_ehooks(
 					    arena);

-					extent_init(&extent,
+					edata_init(&edata,
 					    arena_ind_get(arena), ret, size,
 					    size, false, SC_NSIZES,
 					    extent_state_active, false, true,
 					    true, EXTENT_NOT_HEAD);
 					if (extent_purge_forced_wrapper(tsdn,
-					    arena, ehooks, &extent, 0, size)) {
+					    arena, ehooks, &edata, 0, size)) {
 						memset(ret, 0, size);
 					}
 				}
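
When the DSS break is not sufficiently aligned, extent_alloc_dss() above records the skipped pages as a gap edata instead of leaking them. A self-contained sketch of that arithmetic, with hypothetical input values and simplified stand-ins for jemalloc's PAGE and ALIGNMENT_CEILING:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE ((uintptr_t)4096)
    #define ALIGNMENT_CEILING(s, a) (((s) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

    int main(void) {
    	/* Hypothetical: current break, a 64 KiB request, 32 KiB alignment. */
    	uintptr_t max_cur = 0x100001234;
    	size_t alignment = 0x8000;

    	uintptr_t gap_addr_page = ALIGNMENT_CEILING(max_cur, PAGE);
    	uintptr_t ret = ALIGNMENT_CEILING(gap_addr_page, alignment);
    	size_t gap_size_page = ret - gap_addr_page;

    	/* A nonzero gap becomes its own edata so the pages stay reusable. */
    	printf("gap at %#llx, %zu bytes; allocation at %#llx\n",
    	    (unsigned long long)gap_addr_page, gap_size_page,
    	    (unsigned long long)ret);
    	return 0;
    }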

@@ -6,21 +6,21 @@ inspect_extent_util_stats_get(tsdn_t *tsdn, const void *ptr, size_t *nfree,
     size_t *nregs, size_t *size) {
 	assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL);

-	const extent_t *extent = iealloc(tsdn, ptr);
-	if (unlikely(extent == NULL)) {
+	const edata_t *edata = iealloc(tsdn, ptr);
+	if (unlikely(edata == NULL)) {
 		*nfree = *nregs = *size = 0;
 		return;
 	}

-	*size = extent_size_get(extent);
-	if (!extent_slab_get(extent)) {
+	*size = edata_size_get(edata);
+	if (!edata_slab_get(edata)) {
 		*nfree = 0;
 		*nregs = 1;
 	} else {
-		*nfree = extent_nfree_get(extent);
-		*nregs = bin_infos[extent_szind_get(extent)].nregs;
+		*nfree = edata_nfree_get(edata);
+		*nregs = bin_infos[edata_szind_get(edata)].nregs;
 		assert(*nfree <= *nregs);
-		assert(*nfree * extent_usize_get(extent) <= *size);
+		assert(*nfree * edata_usize_get(edata) <= *size);
 	}
 }

@@ -31,31 +31,31 @@ inspect_extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr,
 	assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL
 	    && bin_nfree != NULL && bin_nregs != NULL && slabcur_addr != NULL);

-	const extent_t *extent = iealloc(tsdn, ptr);
-	if (unlikely(extent == NULL)) {
+	const edata_t *edata = iealloc(tsdn, ptr);
+	if (unlikely(edata == NULL)) {
 		*nfree = *nregs = *size = *bin_nfree = *bin_nregs = 0;
 		*slabcur_addr = NULL;
 		return;
 	}

-	*size = extent_size_get(extent);
-	if (!extent_slab_get(extent)) {
+	*size = edata_size_get(edata);
+	if (!edata_slab_get(edata)) {
 		*nfree = *bin_nfree = *bin_nregs = 0;
 		*nregs = 1;
 		*slabcur_addr = NULL;
 		return;
 	}

-	*nfree = extent_nfree_get(extent);
-	const szind_t szind = extent_szind_get(extent);
+	*nfree = edata_nfree_get(edata);
+	const szind_t szind = edata_szind_get(edata);
 	*nregs = bin_infos[szind].nregs;
 	assert(*nfree <= *nregs);
-	assert(*nfree * extent_usize_get(extent) <= *size);
+	assert(*nfree * edata_usize_get(edata) <= *size);

 	const arena_t *arena = (arena_t *)atomic_load_p(
-	    &arenas[extent_arena_ind_get(extent)], ATOMIC_RELAXED);
+	    &arenas[edata_arena_ind_get(edata)], ATOMIC_RELAXED);
 	assert(arena != NULL);
-	const unsigned binshard = extent_binshard_get(extent);
+	const unsigned binshard = edata_binshard_get(edata);
 	bin_t *bin = &arena->bins[szind].bin_shards[binshard];

 	malloc_mutex_lock(tsdn, &bin->lock);
@@ -66,12 +66,12 @@ inspect_extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr,
 	} else {
 		*bin_nfree = *bin_nregs = 0;
 	}
-	extent_t *slab;
+	edata_t *slab;
 	if (bin->slabcur != NULL) {
 		slab = bin->slabcur;
 	} else {
-		slab = extent_heap_first(&bin->slabs_nonfull);
+		slab = edata_heap_first(&bin->slabs_nonfull);
 	}
-	*slabcur_addr = slab != NULL ? extent_addr_get(slab) : NULL;
+	*slabcur_addr = slab != NULL ? edata_addr_get(slab) : NULL;
 	malloc_mutex_unlock(tsdn, &bin->lock);
 }
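
inspect_extent_util_stats_get() above reports size/nfree/nregs for whatever backs a pointer: region counts for a slab, nregs == 1 and nfree == 0 for a non-slab extent, and all zeros when the lookup fails. A sketch of consuming those counters (compiles only inside the jemalloc tree; the helper and its bytes-free estimate are illustrative):

    #include "jemalloc/internal/jemalloc_preamble.h"
    #include "jemalloc/internal/jemalloc_internal_includes.h"

    /* Rough estimate of free bytes in the extent backing ptr; size/nregs
     * slightly overstates the usable region size, so this is approximate. */
    static size_t
    ptr_bytes_free_in_extent(tsdn_t *tsdn, const void *ptr) {
    	size_t nfree, nregs, size;

    	inspect_extent_util_stats_get(tsdn, ptr, &nfree, &nregs, &size);
    	if (nregs == 0) {
    		return 0;	/* Lookup failed; every output was zeroed. */
    	}
    	return nfree * (size / nregs);
    }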

src/large.c (144 changed lines)

@@ -21,7 +21,7 @@ void *
 large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
     bool zero) {
 	size_t ausize;
-	extent_t *extent;
+	edata_t *edata;
 	bool is_zeroed;
 	UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);

@@ -44,28 +44,28 @@ large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
 	if (likely(!tsdn_null(tsdn))) {
 		arena = arena_choose_maybe_huge(tsdn_tsd(tsdn), arena, usize);
 	}
-	if (unlikely(arena == NULL) || (extent = arena_extent_alloc_large(tsdn,
+	if (unlikely(arena == NULL) || (edata = arena_extent_alloc_large(tsdn,
 	    arena, usize, alignment, &is_zeroed)) == NULL) {
 		return NULL;
 	}

 	/* See comments in arena_bin_slabs_full_insert(). */
 	if (!arena_is_auto(arena)) {
-		/* Insert extent into large. */
+		/* Insert edata into large. */
 		malloc_mutex_lock(tsdn, &arena->large_mtx);
-		extent_list_append(&arena->large, extent);
+		edata_list_append(&arena->large, edata);
 		malloc_mutex_unlock(tsdn, &arena->large_mtx);
 	}

 	if (zero) {
 		assert(is_zeroed);
 	} else if (config_fill && unlikely(opt_junk_alloc)) {
-		memset(extent_addr_get(extent), JEMALLOC_ALLOC_JUNK,
-		    extent_usize_get(extent));
+		memset(edata_addr_get(edata), JEMALLOC_ALLOC_JUNK,
+		    edata_usize_get(edata));
 	}

 	arena_decay_tick(tsdn, arena);
-	return extent_addr_get(extent);
+	return edata_addr_get(edata);
 }

 static void
@@ -90,11 +90,11 @@ large_dalloc_maybe_junk_t *JET_MUTABLE large_dalloc_maybe_junk =
     large_dalloc_maybe_junk_impl;

 static bool
-large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) {
-	arena_t *arena = arena_get_from_extent(extent);
-	size_t oldusize = extent_usize_get(extent);
+large_ralloc_no_move_shrink(tsdn_t *tsdn, edata_t *edata, size_t usize) {
+	arena_t *arena = arena_get_from_edata(edata);
+	size_t oldusize = edata_usize_get(edata);
 	ehooks_t *ehooks = arena_get_ehooks(arena);
-	size_t diff = extent_size_get(extent) - (usize + sz_large_pad);
+	size_t diff = edata_size_get(edata) - (usize + sz_large_pad);

 	assert(oldusize > usize);

@@ -104,31 +104,31 @@ large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) {

 	/* Split excess pages. */
 	if (diff != 0) {
-		extent_t *trail = extent_split_wrapper(tsdn, arena,
-		    ehooks, extent, usize + sz_large_pad, sz_size2index(usize),
+		edata_t *trail = extent_split_wrapper(tsdn, arena,
+		    ehooks, edata, usize + sz_large_pad, sz_size2index(usize),
 		    false, diff, SC_NSIZES, false);
 		if (trail == NULL) {
 			return true;
 		}

 		if (config_fill && unlikely(opt_junk_free)) {
-			large_dalloc_maybe_junk(extent_addr_get(trail),
-			    extent_size_get(trail));
+			large_dalloc_maybe_junk(edata_addr_get(trail),
+			    edata_size_get(trail));
 		}

 		arena_extents_dirty_dalloc(tsdn, arena, ehooks, trail);
 	}

-	arena_extent_ralloc_large_shrink(tsdn, arena, extent, oldusize);
+	arena_extent_ralloc_large_shrink(tsdn, arena, edata, oldusize);

 	return false;
 }

 static bool
-large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
+large_ralloc_no_move_expand(tsdn_t *tsdn, edata_t *edata, size_t usize,
     bool zero) {
-	arena_t *arena = arena_get_from_extent(extent);
-	size_t oldusize = extent_usize_get(extent);
+	arena_t *arena = arena_get_from_edata(edata);
+	size_t oldusize = edata_usize_get(edata);
 	ehooks_t *ehooks = arena_get_ehooks(arena);
 	size_t trailsize = usize - oldusize;

@@ -147,20 +147,20 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
 	 */
 	bool is_zeroed_trail = zero;
 	bool commit = true;
-	extent_t *trail;
+	edata_t *trail;
 	bool new_mapping;
 	if ((trail = extents_alloc(tsdn, arena, ehooks, &arena->eset_dirty,
-	    extent_past_get(extent), trailsize, 0, CACHELINE, false, SC_NSIZES,
+	    edata_past_get(edata), trailsize, 0, CACHELINE, false, SC_NSIZES,
 	    &is_zeroed_trail, &commit)) != NULL
 	    || (trail = extents_alloc(tsdn, arena, ehooks, &arena->eset_muzzy,
-	    extent_past_get(extent), trailsize, 0, CACHELINE, false, SC_NSIZES,
+	    edata_past_get(edata), trailsize, 0, CACHELINE, false, SC_NSIZES,
 	    &is_zeroed_trail, &commit)) != NULL) {
 		if (config_stats) {
 			new_mapping = false;
 		}
 	} else {
 		if ((trail = extent_alloc_wrapper(tsdn, arena, ehooks,
-		    extent_past_get(extent), trailsize, 0, CACHELINE, false,
+		    edata_past_get(edata), trailsize, 0, CACHELINE, false,
 		    SC_NSIZES, &is_zeroed_trail, &commit)) == NULL) {
 			return true;
 		}
@@ -169,16 +169,16 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
 		}
 	}

-	if (extent_merge_wrapper(tsdn, arena, ehooks, extent, trail)) {
+	if (extent_merge_wrapper(tsdn, arena, ehooks, edata, trail)) {
 		extent_dalloc_wrapper(tsdn, arena, ehooks, trail);
 		return true;
 	}
 	rtree_ctx_t rtree_ctx_fallback;
 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
 	szind_t szind = sz_size2index(usize);
-	extent_szind_set(extent, szind);
+	edata_szind_set(edata, szind);
 	rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
-	    (uintptr_t)extent_addr_get(extent), szind, false);
+	    (uintptr_t)edata_addr_get(edata), szind, false);

 	if (config_stats && new_mapping) {
 		arena_stats_mapped_add(tsdn, &arena->stats, trailsize);
@@ -194,7 +194,7 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
 		 * of CACHELINE in [0 .. PAGE).
 		 */
 		void *zbase = (void *)
-		    ((uintptr_t)extent_addr_get(extent) + oldusize);
+		    ((uintptr_t)edata_addr_get(edata) + oldusize);
 		void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase +
 		    PAGE));
 		size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
@@ -203,19 +203,19 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
 		}
 		assert(is_zeroed_trail);
 	} else if (config_fill && unlikely(opt_junk_alloc)) {
-		memset((void *)((uintptr_t)extent_addr_get(extent) + oldusize),
+		memset((void *)((uintptr_t)edata_addr_get(edata) + oldusize),
 		    JEMALLOC_ALLOC_JUNK, usize - oldusize);
 	}

-	arena_extent_ralloc_large_expand(tsdn, arena, extent, oldusize);
+	arena_extent_ralloc_large_expand(tsdn, arena, edata, oldusize);

 	return false;
 }

 bool
-large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
+large_ralloc_no_move(tsdn_t *tsdn, edata_t *edata, size_t usize_min,
     size_t usize_max, bool zero) {
-	size_t oldusize = extent_usize_get(extent);
+	size_t oldusize = edata_usize_get(edata);

 	/* The following should have been caught by callers. */
 	assert(usize_min > 0 && usize_max <= SC_LARGE_MAXCLASS);
@@ -225,16 +225,15 @@ large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,

 	if (usize_max > oldusize) {
 		/* Attempt to expand the allocation in-place. */
-		if (!large_ralloc_no_move_expand(tsdn, extent, usize_max,
+		if (!large_ralloc_no_move_expand(tsdn, edata, usize_max,
 		    zero)) {
-			arena_decay_tick(tsdn, arena_get_from_extent(extent));
+			arena_decay_tick(tsdn, arena_get_from_edata(edata));
 			return false;
 		}
 		/* Try again, this time with usize_min. */
 		if (usize_min < usize_max && usize_min > oldusize &&
-		    large_ralloc_no_move_expand(tsdn, extent, usize_min,
-		    zero)) {
-			arena_decay_tick(tsdn, arena_get_from_extent(extent));
+		    large_ralloc_no_move_expand(tsdn, edata, usize_min, zero)) {
+			arena_decay_tick(tsdn, arena_get_from_edata(edata));
 			return false;
 		}
 	}
@@ -244,14 +243,14 @@ large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
 	 * the new size.
 	 */
 	if (oldusize >= usize_min && oldusize <= usize_max) {
-		arena_decay_tick(tsdn, arena_get_from_extent(extent));
+		arena_decay_tick(tsdn, arena_get_from_edata(edata));
 		return false;
 	}

 	/* Attempt to shrink the allocation in-place. */
 	if (oldusize > usize_max) {
-		if (!large_ralloc_no_move_shrink(tsdn, extent, usize_max)) {
-			arena_decay_tick(tsdn, arena_get_from_extent(extent));
+		if (!large_ralloc_no_move_shrink(tsdn, edata, usize_max)) {
+			arena_decay_tick(tsdn, arena_get_from_edata(edata));
 			return false;
 		}
 	}
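
large_ralloc_no_move() above tries the in-place options in a fixed order (expand to usize_max, expand to usize_min, accept the current size, then shrink), ticking the owning arena's decay clock on every success. A sketch of driving it from a non-moving caller, mirroring how large_ralloc() below uses it (compiles only inside the jemalloc tree; the wrapper is hypothetical and valid only for large allocations):

    #include "jemalloc/internal/jemalloc_preamble.h"
    #include "jemalloc/internal/jemalloc_internal_includes.h"

    /* Returns true iff ptr was resized to usize without moving. */
    static bool
    try_large_ralloc_in_place(tsdn_t *tsdn, void *ptr, size_t usize) {
    	edata_t *edata = iealloc(tsdn, ptr);

    	/* large_ralloc_no_move() returns false on success. */
    	return !large_ralloc_no_move(tsdn, edata, usize, usize, false);
    }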
@ -271,9 +270,9 @@ void *
|
|||||||
large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
|
large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
|
||||||
size_t alignment, bool zero, tcache_t *tcache,
|
size_t alignment, bool zero, tcache_t *tcache,
|
||||||
hook_ralloc_args_t *hook_args) {
|
hook_ralloc_args_t *hook_args) {
|
||||||
extent_t *extent = iealloc(tsdn, ptr);
|
edata_t *edata = iealloc(tsdn, ptr);
|
||||||
|
|
||||||
size_t oldusize = extent_usize_get(extent);
|
size_t oldusize = edata_usize_get(edata);
|
||||||
/* The following should have been caught by callers. */
|
/* The following should have been caught by callers. */
|
||||||
assert(usize > 0 && usize <= SC_LARGE_MAXCLASS);
|
assert(usize > 0 && usize <= SC_LARGE_MAXCLASS);
|
||||||
/* Both allocation sizes must be large to avoid a move. */
|
/* Both allocation sizes must be large to avoid a move. */
|
||||||
@@ -281,11 +280,11 @@ large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
 	    && usize >= SC_LARGE_MINCLASS);

 	/* Try to avoid moving the allocation. */
-	if (!large_ralloc_no_move(tsdn, extent, usize, usize, zero)) {
+	if (!large_ralloc_no_move(tsdn, edata, usize, usize, zero)) {
 		hook_invoke_expand(hook_args->is_realloc
 		    ? hook_expand_realloc : hook_expand_rallocx, ptr, oldusize,
 		    usize, (uintptr_t)ptr, hook_args->args);
-		return extent_addr_get(extent);
+		return edata_addr_get(edata);
 	}

 	/*
@@ -306,8 +305,8 @@ large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
 	    ? hook_dalloc_realloc : hook_dalloc_rallocx, ptr, hook_args->args);

 	size_t copysize = (usize < oldusize) ? usize : oldusize;
-	memcpy(ret, extent_addr_get(extent), copysize);
-	isdalloct(tsdn, extent_addr_get(extent), oldusize, tcache, NULL, true);
+	memcpy(ret, edata_addr_get(edata), copysize);
+	isdalloct(tsdn, edata_addr_get(edata), oldusize, tcache, NULL, true);
 	return ret;
 }

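When every in-place option fails, large_ralloc() falls back to the allocate-copy-free path shown above: copy min(usize, oldusize) bytes into the new allocation, then release the old one (jemalloc routes the release through isdalloct() so the old usable size travels with it). The same shape in plain libc terms, as an illustrative sketch rather than the jemalloc code:

#include <stdlib.h>
#include <string.h>

/* Move-based resize: new allocation, partial copy, free the original. */
static void *
demo_realloc_by_move(void *ptr, size_t oldusize, size_t usize) {
	void *ret = malloc(usize);
	if (ret == NULL) {
		return NULL;	/* original allocation left untouched */
	}
	size_t copysize = (usize < oldusize) ? usize : oldusize;
	memcpy(ret, ptr, copysize);
	free(ptr);
	return ret;
}

int
main(void) {
	void *p = malloc(8);
	if (p != NULL) {
		p = demo_realloc_by_move(p, 8, 16);
	}
	free(p);
	return 0;
}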
@@ -316,76 +315,75 @@ large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
  * whether the arena's large_mtx is currently held.
  */
 static void
-large_dalloc_prep_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
+large_dalloc_prep_impl(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
     bool junked_locked) {
 	if (!junked_locked) {
 		/* See comments in arena_bin_slabs_full_insert(). */
 		if (!arena_is_auto(arena)) {
 			malloc_mutex_lock(tsdn, &arena->large_mtx);
-			extent_list_remove(&arena->large, extent);
+			edata_list_remove(&arena->large, edata);
 			malloc_mutex_unlock(tsdn, &arena->large_mtx);
 		}
-		large_dalloc_maybe_junk(extent_addr_get(extent),
-		    extent_usize_get(extent));
+		large_dalloc_maybe_junk(edata_addr_get(edata),
+		    edata_usize_get(edata));
 	} else {
 		/* Only hold the large_mtx if necessary. */
 		if (!arena_is_auto(arena)) {
 			malloc_mutex_assert_owner(tsdn, &arena->large_mtx);
-			extent_list_remove(&arena->large, extent);
+			edata_list_remove(&arena->large, edata);
 		}
 	}
-	arena_extent_dalloc_large_prep(tsdn, arena, extent);
+	arena_extent_dalloc_large_prep(tsdn, arena, edata);
 }

 static void
-large_dalloc_finish_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
+large_dalloc_finish_impl(tsdn_t *tsdn, arena_t *arena, edata_t *edata) {
 	ehooks_t *ehooks = arena_get_ehooks(arena);
-	arena_extents_dirty_dalloc(tsdn, arena, ehooks, extent);
+	arena_extents_dirty_dalloc(tsdn, arena, ehooks, edata);
 }

 void
-large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent) {
-	large_dalloc_prep_impl(tsdn, arena_get_from_extent(extent), extent,
-	    true);
+large_dalloc_prep_junked_locked(tsdn_t *tsdn, edata_t *edata) {
+	large_dalloc_prep_impl(tsdn, arena_get_from_edata(edata), edata, true);
 }

 void
-large_dalloc_finish(tsdn_t *tsdn, extent_t *extent) {
-	large_dalloc_finish_impl(tsdn, arena_get_from_extent(extent), extent);
+large_dalloc_finish(tsdn_t *tsdn, edata_t *edata) {
+	large_dalloc_finish_impl(tsdn, arena_get_from_edata(edata), edata);
 }

 void
-large_dalloc(tsdn_t *tsdn, extent_t *extent) {
-	arena_t *arena = arena_get_from_extent(extent);
-	large_dalloc_prep_impl(tsdn, arena, extent, false);
-	large_dalloc_finish_impl(tsdn, arena, extent);
+large_dalloc(tsdn_t *tsdn, edata_t *edata) {
+	arena_t *arena = arena_get_from_edata(edata);
+	large_dalloc_prep_impl(tsdn, arena, edata, false);
+	large_dalloc_finish_impl(tsdn, arena, edata);
 	arena_decay_tick(tsdn, arena);
 }

 size_t
-large_salloc(tsdn_t *tsdn, const extent_t *extent) {
-	return extent_usize_get(extent);
+large_salloc(tsdn_t *tsdn, const edata_t *edata) {
+	return edata_usize_get(edata);
 }

 void
-large_prof_info_get(const extent_t *extent, prof_info_t *prof_info) {
-	extent_prof_info_get(extent, prof_info);
+large_prof_info_get(const edata_t *edata, prof_info_t *prof_info) {
+	edata_prof_info_get(edata, prof_info);
 }

 static void
-large_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx) {
-	extent_prof_tctx_set(extent, tctx);
+large_prof_tctx_set(edata_t *edata, prof_tctx_t *tctx) {
+	edata_prof_tctx_set(edata, tctx);
 }

 void
-large_prof_tctx_reset(extent_t *extent) {
-	large_prof_tctx_set(extent, (prof_tctx_t *)(uintptr_t)1U);
+large_prof_tctx_reset(edata_t *edata) {
+	large_prof_tctx_set(edata, (prof_tctx_t *)(uintptr_t)1U);
 }

 void
-large_prof_info_set(extent_t *extent, prof_tctx_t *tctx) {
-	large_prof_tctx_set(extent, tctx);
+large_prof_info_set(edata_t *edata, prof_tctx_t *tctx) {
+	large_prof_tctx_set(edata, tctx);
 	nstime_t t;
 	nstime_init_update(&t);
-	extent_prof_alloc_time_set(extent, &t);
+	edata_prof_alloc_time_set(edata, &t);
 }
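The hunk above also shows why deallocation is split into a prep and a finish phase: large_dalloc() runs both back to back, while large_dalloc_prep_junked_locked() and large_dalloc_finish() let the tcache flush path (further down in this commit) run every prep under one arena's lock and defer the finishes until after unlocking. A schematic of that composition with hypothetical demo_ stand-ins, not the jemalloc API:

#include <stdio.h>

typedef struct { int id; } demo_edata_t;

/* Phase 1: unlink from per-arena bookkeeping, optionally junk-fill. */
static void
demo_dalloc_prep(demo_edata_t *e) {
	printf("prep %d\n", e->id);
}

/* Phase 2: hand the pages back to the arena's dirty extents. */
static void
demo_dalloc_finish(demo_edata_t *e) {
	printf("finish %d\n", e->id);
}

/* The immediate path composes both phases, as large_dalloc() does. */
static void
demo_dalloc(demo_edata_t *e) {
	demo_dalloc_prep(e);
	demo_dalloc_finish(e);
}

int
main(void) {
	demo_edata_t e = {1};
	demo_dalloc(&e);
	return 0;
}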
src/tcache.c (64 lines changed)
@@ -114,8 +114,8 @@ tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,

 /* Enabled with --enable-extra-size-check. */
 static void
-tbin_extents_lookup_size_check(tsdn_t *tsdn, cache_bin_t *tbin, szind_t binind,
-    size_t nflush, extent_t **extents){
+tbin_edatas_lookup_size_check(tsdn_t *tsdn, cache_bin_t *tbin, szind_t binind,
+    size_t nflush, edata_t **edatas){
 	rtree_ctx_t rtree_ctx_fallback;
 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

@@ -129,9 +129,9 @@ tbin_extents_lookup_size_check(tsdn_t *tsdn, cache_bin_t *tbin, szind_t binind,
 	size_t sz_sum = binind * nflush;
 	void **bottom_item = cache_bin_bottom_item_get(tbin, binind);
 	for (unsigned i = 0 ; i < nflush; i++) {
-		rtree_extent_szind_read(tsdn, &extents_rtree,
+		rtree_edata_szind_read(tsdn, &extents_rtree,
 		    rtree_ctx, (uintptr_t)*(bottom_item - i), true,
-		    &extents[i], &szind);
+		    &edatas[i], &szind);
 		sz_sum -= szind;
 	}
 	if (sz_sum != 0) {
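The check above exploits a simple invariant: every pointer flushed from bin binind must map to size-class index binind in the rtree, so starting a counter at binind * nflush and subtracting each looked-up index must land exactly on zero. The arithmetic in isolation, with a plain array standing in for the rtree lookups (demo_ names are illustrative only):

#include <stdbool.h>
#include <stddef.h>

/* Returns true when all looked-up indices equal the expected binind. */
static bool
demo_sizes_consistent(size_t binind, const size_t *szinds, size_t nflush) {
	size_t sz_sum = binind * nflush;
	for (size_t i = 0; i < nflush; i++) {
		sz_sum -= szinds[i];
	}
	return sz_sum == 0;
}

int
main(void) {
	const size_t szinds[] = {3, 3, 3};
	return demo_sizes_consistent(3, szinds, 3) ? 0 : 1;
}

As with the original, two corrupted entries whose errors cancel would slip past a sum check; it is a cheap screen, not a proof of integrity.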
@@ -154,26 +154,26 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
 	arena_t *arena = tcache->arena;
 	assert(arena != NULL);
 	unsigned nflush = ncached - rem;
-	VARIABLE_ARRAY(extent_t *, item_extent, nflush);
+	VARIABLE_ARRAY(edata_t *, item_edata, nflush);

 	void **bottom_item = cache_bin_bottom_item_get(tbin, binind);
-	/* Look up extent once per item. */
+	/* Look up edata once per item. */
 	if (config_opt_safety_checks) {
-		tbin_extents_lookup_size_check(tsd_tsdn(tsd), tbin, binind,
-		    nflush, item_extent);
+		tbin_edatas_lookup_size_check(tsd_tsdn(tsd), tbin, binind,
+		    nflush, item_edata);
 	} else {
 		for (unsigned i = 0 ; i < nflush; i++) {
-			item_extent[i] = iealloc(tsd_tsdn(tsd),
+			item_edata[i] = iealloc(tsd_tsdn(tsd),
 			    *(bottom_item - i));
 		}
 	}
 	while (nflush > 0) {
 		/* Lock the arena bin associated with the first object. */
-		extent_t *extent = item_extent[0];
-		unsigned bin_arena_ind = extent_arena_ind_get(extent);
+		edata_t *edata = item_edata[0];
+		unsigned bin_arena_ind = edata_arena_ind_get(edata);
 		arena_t *bin_arena = arena_get(tsd_tsdn(tsd), bin_arena_ind,
 		    false);
-		unsigned binshard = extent_binshard_get(extent);
+		unsigned binshard = edata_binshard_get(edata);
 		assert(binshard < bin_infos[binind].n_shards);
 		bin_t *bin = &bin_arena->bins[binind].bin_shards[binshard];

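Before any freeing starts, the flush resolves each cached pointer to its owning metadata exactly once and stashes the result alongside the pointers, so the per-arena passes below never touch the rtree again. The setup step in isolation (demo_ names are stand-ins; demo_lookup() plays the role of iealloc()):

#include <stddef.h>

typedef struct { unsigned arena_ind; } demo_edata_t;

static demo_edata_t demo_pool[1];

/* Stand-in for iealloc(): pointer -> metadata lookup. */
static demo_edata_t *
demo_lookup(void *ptr) {
	(void)ptr;
	return &demo_pool[0];
}

/* One lookup per item, cached for all later passes. */
static void
demo_flush_setup(void **items, demo_edata_t **item_edata, size_t nflush) {
	for (size_t i = 0; i < nflush; i++) {
		item_edata[i] = demo_lookup(items[i]);
	}
}

int
main(void) {
	void *items[2] = {NULL, NULL};
	demo_edata_t *item_edata[2];
	demo_flush_setup(items, item_edata, 2);
	return item_edata[0] == &demo_pool[0] ? 0 : 1;
}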
@@ -187,13 +187,13 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
 		unsigned ndeferred = 0;
 		for (unsigned i = 0; i < nflush; i++) {
 			void *ptr = *(bottom_item - i);
-			extent = item_extent[i];
-			assert(ptr != NULL && extent != NULL);
+			edata = item_edata[i];
+			assert(ptr != NULL && edata != NULL);

-			if (extent_arena_ind_get(extent) == bin_arena_ind
-			    && extent_binshard_get(extent) == binshard) {
+			if (edata_arena_ind_get(edata) == bin_arena_ind
+			    && edata_binshard_get(edata) == binshard) {
 				arena_dalloc_bin_junked_locked(tsd_tsdn(tsd),
-				    bin_arena, bin, binind, extent, ptr);
+				    bin_arena, bin, binind, edata, ptr);
 			} else {
 				/*
 				 * This object was allocated via a different
@@ -202,7 +202,7 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
 				 * handled in a future pass.
 				 */
 				*(bottom_item - ndeferred) = ptr;
-				item_extent[ndeferred] = extent;
+				item_edata[ndeferred] = edata;
 				ndeferred++;
 			}
 		}
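The else branch above is the flush's deferral scheme: each pass frees the items owned by the currently locked arena bin and compacts everything else to the front of the stack, so the outer while loop runs once per distinct owner instead of relocking per item. The compaction skeleton on its own, with integer keys standing in for (arena, shard) pairs (illustrative code, not the tcache implementation):

#include <stddef.h>
#include <stdio.h>

/* Stand-in for the locked-bin free path. */
static void
demo_free_in_group(int key) {
	printf("free item with key %d\n", key);
}

static void
demo_flush_groups(int *keys, size_t n) {
	while (n > 0) {
		int group = keys[0];	/* "lock" the first item's owner */
		size_t ndeferred = 0;
		for (size_t i = 0; i < n; i++) {
			if (keys[i] == group) {
				demo_free_in_group(keys[i]);
			} else {
				/* Defer to a later pass. */
				keys[ndeferred++] = keys[i];
			}
		}
		n = ndeferred;	/* next pass sees only deferred items */
	}
}

int
main(void) {
	int keys[] = {0, 1, 0, 2, 1};
	demo_flush_groups(keys, sizeof(keys) / sizeof(keys[0]));
	return 0;
}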
@@ -244,22 +244,22 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin, szind_t
 	arena_t *tcache_arena = tcache->arena;
 	assert(tcache_arena != NULL);
 	unsigned nflush = ncached - rem;
-	VARIABLE_ARRAY(extent_t *, item_extent, nflush);
+	VARIABLE_ARRAY(edata_t *, item_edata, nflush);

 	void **bottom_item = cache_bin_bottom_item_get(tbin, binind);
 #ifndef JEMALLOC_EXTRA_SIZE_CHECK
-	/* Look up extent once per item. */
+	/* Look up edata once per item. */
 	for (unsigned i = 0 ; i < nflush; i++) {
-		item_extent[i] = iealloc(tsd_tsdn(tsd), *(bottom_item - i));
+		item_edata[i] = iealloc(tsd_tsdn(tsd), *(bottom_item - i));
 	}
 #else
 	tbin_extents_lookup_size_check(tsd_tsdn(tsd), tbin, binind, nflush,
-	    item_extent);
+	    item_edata);
 #endif
 	while (nflush > 0) {
 		/* Lock the arena associated with the first object. */
-		extent_t *extent = item_extent[0];
-		unsigned locked_arena_ind = extent_arena_ind_get(extent);
+		edata_t *edata = item_edata[0];
+		unsigned locked_arena_ind = edata_arena_ind_get(edata);
 		arena_t *locked_arena = arena_get(tsd_tsdn(tsd),
 		    locked_arena_ind, false);

@@ -270,10 +270,10 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin, szind_t
 		for (unsigned i = 0; i < nflush; i++) {
 			void *ptr = *(bottom_item - i);
 			assert(ptr != NULL);
-			extent = item_extent[i];
-			if (extent_arena_ind_get(extent) == locked_arena_ind) {
+			edata = item_edata[i];
+			if (edata_arena_ind_get(edata) == locked_arena_ind) {
 				large_dalloc_prep_junked_locked(tsd_tsdn(tsd),
-				    extent);
+				    edata);
 			}
 		}
 		if ((config_prof || config_stats) &&
@@ -293,11 +293,11 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin, szind_t
 		unsigned ndeferred = 0;
 		for (unsigned i = 0; i < nflush; i++) {
 			void *ptr = *(bottom_item - i);
-			extent = item_extent[i];
-			assert(ptr != NULL && extent != NULL);
+			edata = item_edata[i];
+			assert(ptr != NULL && edata != NULL);

-			if (extent_arena_ind_get(extent) == locked_arena_ind) {
-				large_dalloc_finish(tsd_tsdn(tsd), extent);
+			if (edata_arena_ind_get(edata) == locked_arena_ind) {
+				large_dalloc_finish(tsd_tsdn(tsd), edata);
 			} else {
 				/*
 				 * This object was allocated via a different
@@ -306,7 +306,7 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin, szind_t
 				 * in a future pass.
 				 */
 				*(bottom_item - ndeferred) = ptr;
-				item_extent[ndeferred] = extent;
+				item_edata[ndeferred] = edata;
 				ndeferred++;
 			}
 		}
@@ -63,17 +63,17 @@ vsalloc(tsdn_t *tsdn, const void *ptr) {
 	rtree_ctx_t rtree_ctx_fallback;
 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

-	extent_t *extent;
+	edata_t *edata;
 	szind_t szind;
-	if (rtree_extent_szind_read(tsdn, &extents_rtree, rtree_ctx,
-	    (uintptr_t)ptr, false, &extent, &szind)) {
+	if (rtree_edata_szind_read(tsdn, &extents_rtree, rtree_ctx,
+	    (uintptr_t)ptr, false, &edata, &szind)) {
 		return 0;
 	}

-	if (extent == NULL) {
+	if (edata == NULL) {
 		return 0;
 	}
-	if (extent_state_get(extent) != extent_state_active) {
+	if (edata_state_get(edata) != extent_state_active) {
 		return 0;
 	}

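vsalloc()'s structure above is what makes it safe to probe arbitrary pointers: every failure mode (lookup miss, untracked address, non-active extent state) collapses to a size of 0 rather than an assertion. The same shape with a hypothetical lookup result standing in for the rtree (demo_ names are illustrative only):

#include <stddef.h>

typedef enum { DEMO_ACTIVE, DEMO_RETAINED } demo_state_t;
typedef struct { demo_state_t state; size_t usize; } demo_edata_t;

/* e is NULL when the address is not tracked at all. */
static size_t
demo_vsalloc(const demo_edata_t *e) {
	if (e == NULL) {
		return 0;	/* unknown pointer */
	}
	if (e->state != DEMO_ACTIVE) {
		return 0;	/* mapped, but not a live allocation */
	}
	return e->usize;
}

int
main(void) {
	demo_edata_t live = {DEMO_ACTIVE, 4096};
	demo_edata_t retained = {DEMO_RETAINED, 4096};
	return (demo_vsalloc(&live) == 4096 && demo_vsalloc(&retained) == 0 &&
	    demo_vsalloc(NULL) == 0) ? 0 : 1;
}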
@@ -168,14 +168,14 @@ TEST_BEGIN(test_base_hooks_not_null) {
 	 * that the first block's remaining space is considered for subsequent
 	 * allocation.
 	 */
-	assert_zu_ge(extent_bsize_get(&base->blocks->extent), QUANTUM,
+	assert_zu_ge(edata_bsize_get(&base->blocks->edata), QUANTUM,
 	    "Remainder insufficient for test");
 	/* Use up all but one quantum of block. */
-	while (extent_bsize_get(&base->blocks->extent) > QUANTUM) {
+	while (edata_bsize_get(&base->blocks->edata) > QUANTUM) {
 		p = base_alloc(tsdn, base, QUANTUM, QUANTUM);
 		assert_ptr_not_null(p, "Unexpected base_alloc() failure");
 	}
-	r_exp = extent_addr_get(&base->blocks->extent);
+	r_exp = edata_addr_get(&base->blocks->edata);
 	assert_zu_eq(base->extent_sn_next, 1, "One extant block expected");
 	q = base_alloc(tsdn, base, QUANTUM + 1, QUANTUM);
 	assert_ptr_not_null(q, "Unexpected base_alloc() failure");
@@ -53,7 +53,7 @@ TEST_END
 static void *
 thd_start(void *varg) {
 	void *ptr, *ptr2;
-	extent_t *extent;
+	edata_t *edata;
 	unsigned shard1, shard2;

 	tsdn_t *tsdn = tsdn_fetch();
@@ -62,13 +62,13 @@ thd_start(void *varg) {
 	ptr = mallocx(1, MALLOCX_TCACHE_NONE);
 	ptr2 = mallocx(129, MALLOCX_TCACHE_NONE);

-	extent = iealloc(tsdn, ptr);
-	shard1 = extent_binshard_get(extent);
+	edata = iealloc(tsdn, ptr);
+	shard1 = edata_binshard_get(edata);
 	dallocx(ptr, 0);
 	assert_u_lt(shard1, 16, "Unexpected bin shard used");

-	extent = iealloc(tsdn, ptr2);
-	shard2 = extent_binshard_get(extent);
+	edata = iealloc(tsdn, ptr2);
+	shard2 = edata_binshard_get(edata);
 	dallocx(ptr2, 0);
 	assert_u_lt(shard2, 4, "Unexpected bin shard used");

@@ -75,8 +75,8 @@ TEST_BEGIN(test_rtree_read_empty) {
 	rtree_ctx_t rtree_ctx;
 	rtree_ctx_data_init(&rtree_ctx);
 	assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure");
-	assert_ptr_null(rtree_extent_read(tsdn, rtree, &rtree_ctx, PAGE,
-	    false), "rtree_extent_read() should return NULL for empty tree");
+	assert_ptr_null(rtree_edata_read(tsdn, rtree, &rtree_ctx, PAGE,
+	    false), "rtree_edata_read() should return NULL for empty tree");
 	rtree_delete(tsdn, rtree);
 }
 TEST_END
@@ -86,11 +86,11 @@ TEST_END
 #undef SEED

 TEST_BEGIN(test_rtree_extrema) {
-	extent_t extent_a, extent_b;
-	extent_init(&extent_a, INVALID_ARENA_IND, NULL, SC_LARGE_MINCLASS,
+	edata_t edata_a, edata_b;
+	edata_init(&edata_a, INVALID_ARENA_IND, NULL, SC_LARGE_MINCLASS,
 	    false, sz_size2index(SC_LARGE_MINCLASS), 0,
 	    extent_state_active, false, false, true, EXTENT_NOT_HEAD);
-	extent_init(&extent_b, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0,
+	edata_init(&edata_b, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0,
 	    extent_state_active, false, false, true, EXTENT_NOT_HEAD);

 	tsdn_t *tsdn = tsdn_fetch();
@@ -100,21 +100,21 @@ TEST_BEGIN(test_rtree_extrema) {
 	rtree_ctx_data_init(&rtree_ctx);
 	assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure");

-	assert_false(rtree_write(tsdn, rtree, &rtree_ctx, PAGE, &extent_a,
-	    extent_szind_get(&extent_a), extent_slab_get(&extent_a)),
+	assert_false(rtree_write(tsdn, rtree, &rtree_ctx, PAGE, &edata_a,
+	    edata_szind_get(&edata_a), edata_slab_get(&edata_a)),
 	    "Unexpected rtree_write() failure");
 	rtree_szind_slab_update(tsdn, rtree, &rtree_ctx, PAGE,
-	    extent_szind_get(&extent_a), extent_slab_get(&extent_a));
-	assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx, PAGE, true),
-	    &extent_a,
-	    "rtree_extent_read() should return previously set value");
+	    edata_szind_get(&edata_a), edata_slab_get(&edata_a));
+	assert_ptr_eq(rtree_edata_read(tsdn, rtree, &rtree_ctx, PAGE, true),
+	    &edata_a,
+	    "rtree_edata_read() should return previously set value");

 	assert_false(rtree_write(tsdn, rtree, &rtree_ctx, ~((uintptr_t)0),
-	    &extent_b, extent_szind_get_maybe_invalid(&extent_b),
-	    extent_slab_get(&extent_b)), "Unexpected rtree_write() failure");
-	assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx,
-	    ~((uintptr_t)0), true), &extent_b,
-	    "rtree_extent_read() should return previously set value");
+	    &edata_b, edata_szind_get_maybe_invalid(&edata_b),
+	    edata_slab_get(&edata_b)), "Unexpected rtree_write() failure");
+	assert_ptr_eq(rtree_edata_read(tsdn, rtree, &rtree_ctx,
+	    ~((uintptr_t)0), true), &edata_b,
+	    "rtree_edata_read() should return previously set value");

 	rtree_delete(tsdn, rtree);
 }
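The assertions above amount to a write/read round trip at the extreme keys (PAGE and ~0): whatever pointer was written for a key must be exactly what a later read returns. The contract in miniature, shrunk to a two-slot toy map so it runs standalone (demo_ names are stand-ins, not the rtree API):

#include <assert.h>
#include <stdint.h>
#include <stddef.h>

#define DEMO_SLOTS 2

static void *demo_map[DEMO_SLOTS];

/* Toy keying: lowest key to slot 0, everything else to the last slot. */
static size_t
demo_slot(uintptr_t key) {
	return key == 0 ? 0 : DEMO_SLOTS - 1;
}

static void
demo_write(uintptr_t key, void *val) {
	demo_map[demo_slot(key)] = val;
}

static void *
demo_read(uintptr_t key) {
	return demo_map[demo_slot(key)];
}

int
main(void) {
	int a, b;
	demo_write(0, &a);
	demo_write(~(uintptr_t)0, &b);
	assert(demo_read(0) == &a);
	assert(demo_read(~(uintptr_t)0) == &b);
	return 0;
}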
@@ -126,8 +126,8 @@ TEST_BEGIN(test_rtree_bits) {
 	uintptr_t keys[] = {PAGE, PAGE + 1,
 	    PAGE + (((uintptr_t)1) << LG_PAGE) - 1};

-	extent_t extent;
-	extent_init(&extent, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0,
+	edata_t edata;
+	edata_init(&edata, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0,
 	    extent_state_active, false, false, true, EXTENT_NOT_HEAD);

 	rtree_t *rtree = &test_rtree;
@@ -137,17 +137,17 @@ TEST_BEGIN(test_rtree_bits) {

 	for (unsigned i = 0; i < sizeof(keys)/sizeof(uintptr_t); i++) {
 		assert_false(rtree_write(tsdn, rtree, &rtree_ctx, keys[i],
-		    &extent, SC_NSIZES, false),
+		    &edata, SC_NSIZES, false),
 		    "Unexpected rtree_write() failure");
 		for (unsigned j = 0; j < sizeof(keys)/sizeof(uintptr_t); j++) {
-			assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx,
-			    keys[j], true), &extent,
-			    "rtree_extent_read() should return previously set "
+			assert_ptr_eq(rtree_edata_read(tsdn, rtree, &rtree_ctx,
+			    keys[j], true), &edata,
+			    "rtree_edata_read() should return previously set "
 			    "value and ignore insignificant key bits; i=%u, "
 			    "j=%u, set key=%#"FMTxPTR", get key=%#"FMTxPTR, i,
 			    j, keys[i], keys[j]);
 		}
-		assert_ptr_null(rtree_extent_read(tsdn, rtree, &rtree_ctx,
+		assert_ptr_null(rtree_edata_read(tsdn, rtree, &rtree_ctx,
 		    (((uintptr_t)2) << LG_PAGE), false),
 		    "Only leftmost rtree leaf should be set; i=%u", i);
 		rtree_clear(tsdn, rtree, &rtree_ctx, keys[i]);
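The "insignificant key bits" the test exercises are the low LG_PAGE bits: lookups are keyed by page, so PAGE, PAGE + 1, and PAGE + 2^LG_PAGE − 1 must all resolve to the same leaf. A one-line model of that masking, with LG_PAGE fixed at 12 purely for illustration:

#include <assert.h>
#include <stdint.h>

#define DEMO_LG_PAGE 12

/* Drop the in-page offset bits, as an rtree-style lookup effectively does. */
static uintptr_t
demo_leaf_key(uintptr_t addr) {
	return addr & ~((((uintptr_t)1) << DEMO_LG_PAGE) - 1);
}

int
main(void) {
	uintptr_t page = ((uintptr_t)1) << DEMO_LG_PAGE;
	assert(demo_leaf_key(page) == demo_leaf_key(page + 1));
	assert(demo_leaf_key(page) ==
	    demo_leaf_key(page + (((uintptr_t)1) << DEMO_LG_PAGE) - 1));
	assert(demo_leaf_key(page) != demo_leaf_key(2 * page));
	return 0;
}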
@@ -167,8 +167,8 @@ TEST_BEGIN(test_rtree_random) {
 	rtree_ctx_t rtree_ctx;
 	rtree_ctx_data_init(&rtree_ctx);

-	extent_t extent;
-	extent_init(&extent, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0,
+	edata_t edata;
+	edata_init(&edata, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0,
 	    extent_state_active, false, false, true, EXTENT_NOT_HEAD);

 	assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure");
@@ -179,29 +179,29 @@ TEST_BEGIN(test_rtree_random) {
 		    &rtree_ctx, keys[i], false, true);
 		assert_ptr_not_null(elm,
 		    "Unexpected rtree_leaf_elm_lookup() failure");
-		rtree_leaf_elm_write(tsdn, rtree, elm, &extent, SC_NSIZES,
+		rtree_leaf_elm_write(tsdn, rtree, elm, &edata, SC_NSIZES,
 		    false);
-		assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx,
-		    keys[i], true), &extent,
-		    "rtree_extent_read() should return previously set value");
+		assert_ptr_eq(rtree_edata_read(tsdn, rtree, &rtree_ctx,
+		    keys[i], true), &edata,
+		    "rtree_edata_read() should return previously set value");
 	}
 	for (unsigned i = 0; i < NSET; i++) {
-		assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx,
-		    keys[i], true), &extent,
-		    "rtree_extent_read() should return previously set value, "
+		assert_ptr_eq(rtree_edata_read(tsdn, rtree, &rtree_ctx,
+		    keys[i], true), &edata,
+		    "rtree_edata_read() should return previously set value, "
 		    "i=%u", i);
 	}

 	for (unsigned i = 0; i < NSET; i++) {
 		rtree_clear(tsdn, rtree, &rtree_ctx, keys[i]);
-		assert_ptr_null(rtree_extent_read(tsdn, rtree, &rtree_ctx,
+		assert_ptr_null(rtree_edata_read(tsdn, rtree, &rtree_ctx,
 		    keys[i], true),
-		    "rtree_extent_read() should return previously set value");
+		    "rtree_edata_read() should return previously set value");
 	}
 	for (unsigned i = 0; i < NSET; i++) {
-		assert_ptr_null(rtree_extent_read(tsdn, rtree, &rtree_ctx,
+		assert_ptr_null(rtree_edata_read(tsdn, rtree, &rtree_ctx,
 		    keys[i], true),
-		    "rtree_extent_read() should return previously set value");
+		    "rtree_edata_read() should return previously set value");
 	}

 	rtree_delete(tsdn, rtree);
@@ -7,24 +7,24 @@ TEST_BEGIN(test_arena_slab_regind) {

 	for (binind = 0; binind < SC_NBINS; binind++) {
 		size_t regind;
-		extent_t slab;
+		edata_t slab;
 		const bin_info_t *bin_info = &bin_infos[binind];
-		extent_init(&slab, INVALID_ARENA_IND,
+		edata_init(&slab, INVALID_ARENA_IND,
 		    mallocx(bin_info->slab_size, MALLOCX_LG_ALIGN(LG_PAGE)),
 		    bin_info->slab_size, true,
 		    binind, 0, extent_state_active, false, true, true,
 		    EXTENT_NOT_HEAD);
-		assert_ptr_not_null(extent_addr_get(&slab),
+		assert_ptr_not_null(edata_addr_get(&slab),
 		    "Unexpected malloc() failure");
 		for (regind = 0; regind < bin_info->nregs; regind++) {
-			void *reg = (void *)((uintptr_t)extent_addr_get(&slab) +
+			void *reg = (void *)((uintptr_t)edata_addr_get(&slab) +
 			    (bin_info->reg_size * regind));
 			assert_zu_eq(arena_slab_regind(&slab, binind, reg),
 			    regind,
 			    "Incorrect region index computed for size %zu",
 			    bin_info->reg_size);
 		}
-		free(extent_addr_get(&slab));
+		free(edata_addr_get(&slab));
 	}
 }
 TEST_END
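The property this test pins down is the slab layout invariant: region i starts at slab_base + i * reg_size, so arena_slab_regind() must invert that mapping (internally it avoids the runtime division with precomputed constants; the straightforward reference formula is sketched below, with demo_ names as stand-ins):

#include <assert.h>
#include <stdint.h>
#include <stddef.h>

/* Reference computation: recover the region index by plain division. */
static size_t
demo_regind(uintptr_t slab_base, size_t reg_size, const void *ptr) {
	return ((uintptr_t)ptr - slab_base) / reg_size;
}

int
main(void) {
	static char slab[4096];
	size_t reg_size = 64;
	for (size_t i = 0; i < sizeof(slab) / reg_size; i++) {
		const void *reg = slab + i * reg_size;
		assert(demo_regind((uintptr_t)slab, reg_size, reg) == i);
	}
	return 0;
}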