diff --git a/Makefile.in b/Makefile.in
index eda9c7a9..984bd724 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -110,6 +110,7 @@ C_SRCS := $(srcroot)src/jemalloc.c \
 	$(srcroot)src/edata.c \
 	$(srcroot)src/edata_cache.c \
 	$(srcroot)src/ehooks.c \
+	$(srcroot)src/emap.c \
 	$(srcroot)src/eset.c \
 	$(srcroot)src/extent.c \
 	$(srcroot)src/extent_dss.c \
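
The Makefile change just adds the new translation unit to the build. Most of the hunks that follow are a mechanical retargeting: every read or write that used to go through the old extents_rtree global now goes through the rtree embedded in emap_global. Schematically (a sketch of the before/after shape, not a hunk from this patch):

	/* Before: the rtree was a bare global. */
	szind_t szind = rtree_szind_read(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)ptr, true);
	/* After: the same rtree, now owned by the emap. */
	szind_t szind = rtree_szind_read(tsdn, &emap_global.rtree, rtree_ctx,
	    (uintptr_t)ptr, true);
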
diff --git a/include/jemalloc/internal/arena_inlines_b.h b/include/jemalloc/internal/arena_inlines_b.h
index 844e045d..b39578c9 100644
--- a/include/jemalloc/internal/arena_inlines_b.h
+++ b/include/jemalloc/internal/arena_inlines_b.h
@@ -188,7 +188,7 @@ arena_salloc(tsdn_t *tsdn, const void *ptr) {
 	rtree_ctx_t rtree_ctx_fallback;
 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
 
-	szind_t szind = rtree_szind_read(tsdn, &extents_rtree, rtree_ctx,
+	szind_t szind = rtree_szind_read(tsdn, &emap_global.rtree, rtree_ctx,
 	    (uintptr_t)ptr, true);
 	assert(szind != SC_NSIZES);
 
@@ -211,7 +211,7 @@ arena_vsalloc(tsdn_t *tsdn, const void *ptr) {
 	edata_t *edata;
 	szind_t szind;
-	if (rtree_edata_szind_read(tsdn, &extents_rtree, rtree_ctx,
+	if (rtree_edata_szind_read(tsdn, &emap_global.rtree, rtree_ctx,
 	    (uintptr_t)ptr, false, &edata, &szind)) {
 		return 0;
 	}
@@ -247,11 +247,11 @@ arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr) {
 	szind_t szind;
 	bool slab;
-	rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
-	    true, &szind, &slab);
+	rtree_szind_slab_read(tsdn, &emap_global.rtree, rtree_ctx,
+	    (uintptr_t)ptr, true, &szind, &slab);
 
 	if (config_debug) {
-		edata_t *edata = rtree_edata_read(tsdn, &extents_rtree,
+		edata_t *edata = rtree_edata_read(tsdn, &emap_global.rtree,
 		    rtree_ctx, (uintptr_t)ptr, true);
 		assert(szind == edata_szind_get(edata));
 		assert(szind < SC_NSIZES);
@@ -302,13 +302,13 @@ arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
 		assert(szind != SC_NSIZES);
 	} else {
 		rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
-		rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
+		rtree_szind_slab_read(tsdn, &emap_global.rtree, rtree_ctx,
 		    (uintptr_t)ptr, true, &szind, &slab);
 	}
 
 	if (config_debug) {
 		rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
-		edata_t *edata = rtree_edata_read(tsdn, &extents_rtree,
+		edata_t *edata = rtree_edata_read(tsdn, &emap_global.rtree,
 		    rtree_ctx, (uintptr_t)ptr, true);
 		assert(szind == edata_szind_get(edata));
 		assert(szind < SC_NSIZES);
@@ -345,7 +345,7 @@ arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) {
 		rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
 		    &rtree_ctx_fallback);
-		rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
+		rtree_szind_slab_read(tsdn, &emap_global.rtree, rtree_ctx,
 		    (uintptr_t)ptr, true, &szind, &slab);
 
 		assert(szind == sz_size2index(size));
@@ -353,7 +353,7 @@ arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) {
 	if (config_debug) {
 		edata_t *edata = rtree_edata_read(tsdn,
-		    &extents_rtree, rtree_ctx, (uintptr_t)ptr, true);
+		    &emap_global.rtree, rtree_ctx, (uintptr_t)ptr, true);
 		assert(szind == edata_szind_get(edata));
 		assert(slab == edata_slab_get(edata));
 	}
@@ -388,8 +388,8 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
 		rtree_ctx_t rtree_ctx_fallback;
 		rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
 		    &rtree_ctx_fallback);
-		rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
-		    (uintptr_t)ptr, true, &local_ctx.szind,
+		rtree_szind_slab_read(tsdn, &emap_global.rtree,
+		    rtree_ctx, (uintptr_t)ptr, true, &local_ctx.szind,
 		    &local_ctx.slab);
 		assert(local_ctx.szind == sz_size2index(size));
 		alloc_ctx = &local_ctx;
@@ -407,10 +407,10 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
 	if (config_debug) {
 		rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
-		rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
+		rtree_szind_slab_read(tsdn, &emap_global.rtree, rtree_ctx,
 		    (uintptr_t)ptr, true, &szind, &slab);
 		edata_t *edata = rtree_edata_read(tsdn,
-		    &extents_rtree, rtree_ctx, (uintptr_t)ptr, true);
+		    &emap_global.rtree, rtree_ctx, (uintptr_t)ptr, true);
 		assert(szind == edata_szind_get(edata));
 		assert(slab == edata_slab_get(edata));
 	}
diff --git a/include/jemalloc/internal/emap.h b/include/jemalloc/internal/emap.h
new file mode 100644
index 00000000..9a37b943
--- /dev/null
+++ b/include/jemalloc/internal/emap.h
@@ -0,0 +1,33 @@
+#ifndef JEMALLOC_INTERNAL_EMAP_H
+#define JEMALLOC_INTERNAL_EMAP_H
+
+#include "jemalloc/internal/mutex_pool.h"
+#include "jemalloc/internal/rtree.h"
+
+typedef struct emap_s emap_t;
+struct emap_s {
+	rtree_t rtree;
+	/* Keyed by the address of the edata_t being protected. */
+	mutex_pool_t mtx_pool;
+};
+
+extern emap_t emap_global;
+
+bool emap_init(emap_t *emap);
+
+void emap_lock_edata(tsdn_t *tsdn, emap_t *emap, edata_t *edata);
+void emap_unlock_edata(tsdn_t *tsdn, emap_t *emap, edata_t *edata);
+
+void emap_lock_edata2(tsdn_t *tsdn, emap_t *emap, edata_t *edata1,
+    edata_t *edata2);
+void emap_unlock_edata2(tsdn_t *tsdn, emap_t *emap, edata_t *edata1,
+    edata_t *edata2);
+
+edata_t *emap_lock_edata_from_addr(tsdn_t *tsdn, emap_t *emap,
+    rtree_ctx_t *rtree_ctx, void *addr, bool inactive_only);
+
+bool emap_rtree_leaf_elms_lookup(tsdn_t *tsdn, emap_t *emap,
+    rtree_ctx_t *rtree_ctx, const edata_t *edata, bool dependent,
+    bool init_missing, rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b);
+
+#endif /* JEMALLOC_INTERNAL_EMAP_H */
diff --git a/include/jemalloc/internal/extent.h b/include/jemalloc/internal/extent.h
index b89708a4..d0ba70b8 100644
--- a/include/jemalloc/internal/extent.h
+++ b/include/jemalloc/internal/extent.h
@@ -19,8 +19,6 @@
 #define LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT 6
 extern size_t opt_lg_extent_max_active_fit;
 
-extern rtree_t extents_rtree;
-
 edata_t *ecache_alloc(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
     ecache_t *ecache, void *new_addr, size_t size, size_t pad,
     size_t alignment, bool slab, szind_t szind, bool *zero);
diff --git a/include/jemalloc/internal/jemalloc_internal_inlines_b.h b/include/jemalloc/internal/jemalloc_internal_inlines_b.h
index ebfb331b..00fb6042 100644
--- a/include/jemalloc/internal/jemalloc_internal_inlines_b.h
+++ b/include/jemalloc/internal/jemalloc_internal_inlines_b.h
@@ -1,6 +1,7 @@
 #ifndef JEMALLOC_INTERNAL_INLINES_B_H
 #define JEMALLOC_INTERNAL_INLINES_B_H
 
+#include "jemalloc/internal/emap.h"
 #include "jemalloc/internal/extent.h"
 #include "jemalloc/internal/rtree.h"
 
@@ -81,7 +82,7 @@ iealloc(tsdn_t *tsdn, const void *ptr) {
 	rtree_ctx_t rtree_ctx_fallback;
 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
 
-	return rtree_edata_read(tsdn, &extents_rtree, rtree_ctx,
+	return rtree_edata_read(tsdn, &emap_global.rtree, rtree_ctx,
 	    (uintptr_t)ptr, true);
 }
diff --git a/include/jemalloc/internal/witness.h b/include/jemalloc/internal/witness.h
index 083bdcc9..b5fa1c02 100644
--- a/include/jemalloc/internal/witness.h
+++ b/include/jemalloc/internal/witness.h
@@ -45,7 +45,7 @@
 #define WITNESS_RANK_EXTENTS 15U
 #define WITNESS_RANK_EDATA_CACHE 16U
 
-#define WITNESS_RANK_EXTENT_POOL 17U
+#define WITNESS_RANK_EMAP 17U
 #define WITNESS_RANK_RTREE 18U
 #define WITNESS_RANK_BASE 19U
 #define WITNESS_RANK_ARENA_LARGE 20U
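
emap.h introduces the emap_t abstraction: the rtree mapping page addresses to edata_t metadata, bundled with the mutex pool that serializes metadata updates, so both halves of the extent map travel together. WITNESS_RANK_EXTENT_POOL is renamed to WITNESS_RANK_EMAP to match. A minimal usage sketch (not part of the patch; assumes edata is already registered in the map):

	/* Pin the edata's identity against concurrent split/merge/coalesce. */
	emap_lock_edata(tsdn, &emap_global, edata);
	/* ... inspect or update edata and its rtree mappings ... */
	emap_unlock_edata(tsdn, &emap_global, edata);
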
diff --git a/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj b/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj
index d8b48986..d98bb858 100644
--- a/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj
+++ b/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj
@@ -50,6 +50,7 @@
     <ClCompile Include="..\..\..\..\src\edata.c" />
    <ClCompile Include="..\..\..\..\src\edata_cache.c" />
    <ClCompile Include="..\..\..\..\src\ehooks.c" />
+    <ClCompile Include="..\..\..\..\src\emap.c" />
     <ClCompile Include="..\..\..\..\src\eset.c" />
     <ClCompile Include="..\..\..\..\src\extent.c" />
     <ClCompile Include="..\..\..\..\src\extent_dss.c" />
diff --git a/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters b/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters
index 9b0445f6..fd3e11c0 100644
--- a/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters
+++ b/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters
@@ -37,6 +37,9 @@
     <ClCompile Include="..\..\..\..\src\ehooks.c">
       <Filter>Source Files</Filter>
     </ClCompile>
+    <ClCompile Include="..\..\..\..\src\emap.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
     <ClCompile Include="..\..\..\..\src\eset.c">
       <Filter>Source Files</Filter>
     </ClCompile>
diff --git a/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj b/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj
index b0d32d93..b59d411f 100644
--- a/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj
+++ b/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj
@@ -50,6 +50,7 @@
     <ClCompile Include="..\..\..\..\src\edata.c" />
    <ClCompile Include="..\..\..\..\src\edata_cache.c" />
    <ClCompile Include="..\..\..\..\src\ehooks.c" />
+    <ClCompile Include="..\..\..\..\src\emap.c" />
     <ClCompile Include="..\..\..\..\src\eset.c" />
     <ClCompile Include="..\..\..\..\src\extent.c" />
     <ClCompile Include="..\..\..\..\src\extent_dss.c" />
diff --git a/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj.filters b/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj.filters
index 9b0445f6..fd3e11c0 100644
--- a/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj.filters
+++ b/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj.filters
@@ -37,6 +37,9 @@
     <ClCompile Include="..\..\..\..\src\ehooks.c">
       <Filter>Source Files</Filter>
     </ClCompile>
+    <ClCompile Include="..\..\..\..\src\emap.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
     <ClCompile Include="..\..\..\..\src\eset.c">
       <Filter>Source Files</Filter>
     </ClCompile>
diff --git a/src/arena.c b/src/arena.c
index 22348949..3206a9a6 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -1112,8 +1112,9 @@ arena_reset(tsd_t *tsd, arena_t *arena) {
 		malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
 		alloc_ctx_t alloc_ctx;
 		rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
-		rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
-		    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
+		rtree_szind_slab_read(tsd_tsdn(tsd), &emap_global.rtree,
+		    rtree_ctx, (uintptr_t)ptr, true, &alloc_ctx.szind,
+		    &alloc_ctx.slab);
 		assert(alloc_ctx.szind != SC_NSIZES);
 
 		if (config_stats || (config_prof && opt_prof)) {
@@ -1601,13 +1602,13 @@ arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize) {
 	rtree_ctx_t rtree_ctx_fallback;
 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-	edata_t *edata = rtree_edata_read(tsdn, &extents_rtree, rtree_ctx,
+	edata_t *edata = rtree_edata_read(tsdn, &emap_global.rtree, rtree_ctx,
 	    (uintptr_t)ptr, true);
 
 	szind_t szind = sz_size2index(usize);
 	edata_szind_set(edata, szind);
-	rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
-	    szind, false);
+	rtree_szind_slab_update(tsdn, &emap_global.rtree, rtree_ctx,
+	    (uintptr_t)ptr, szind, false);
 
 	prof_idump_rollback(tsdn, usize);
 
@@ -1622,8 +1623,8 @@ arena_prof_demote(tsdn_t *tsdn, edata_t *edata, const void *ptr) {
 	edata_szind_set(edata, SC_NBINS);
 	rtree_ctx_t rtree_ctx_fallback;
 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-	rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
-	    SC_NBINS, false);
+	rtree_szind_slab_update(tsdn, &emap_global.rtree, rtree_ctx,
+	    (uintptr_t)ptr, SC_NBINS, false);
 
 	assert(isalloc(tsdn, ptr) == SC_LARGE_MINCLASS);
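
The new src/emap.c below is a near-verbatim move of extent.c's locking helpers. The underlying mutex_pool guards an unbounded number of edata_t objects with a fixed set of mutexes chosen by hashing the edata_t address, roughly as in this hypothetical sketch (POOL_SIZE and pool_mutex_for are illustrative names, not jemalloc's; the real hashing in mutex_pool.h may differ):

	/* Hypothetical: a fixed pool of mutexes indexed by a hash of the key. */
	#define POOL_SIZE 256	/* assumed power of two */
	static malloc_mutex_t pool[POOL_SIZE];

	static malloc_mutex_t *
	pool_mutex_for(uintptr_t key) {
		/* Mix the bits so neighboring edata_t addresses spread out. */
		key ^= key >> 16;
		key *= (uintptr_t)0x9e3779b97f4a7c15ULL;	/* assumed constant */
		return &pool[(key >> 8) & (POOL_SIZE - 1)];
	}
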
diff --git a/src/emap.c b/src/emap.c
new file mode 100644
index 00000000..ea3cce0f
--- /dev/null
+++ b/src/emap.c
@@ -0,0 +1,127 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/emap.h"
+
+emap_t emap_global;
+
+enum emap_lock_result_e {
+	emap_lock_result_success,
+	emap_lock_result_failure,
+	emap_lock_result_no_extent
+};
+typedef enum emap_lock_result_e emap_lock_result_t;
+
+bool
+emap_init(emap_t *emap) {
+	bool err;
+	err = rtree_new(&emap->rtree, true);
+	if (err) {
+		return true;
+	}
+	err = mutex_pool_init(&emap->mtx_pool, "emap_mutex_pool",
+	    WITNESS_RANK_EMAP);
+	if (err) {
+		return true;
+	}
+	return false;
+}
+
+void
+emap_lock_edata(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
+	assert(edata != NULL);
+	mutex_pool_lock(tsdn, &emap->mtx_pool, (uintptr_t)edata);
+}
+
+void
+emap_unlock_edata(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
+	assert(edata != NULL);
+	mutex_pool_unlock(tsdn, &emap->mtx_pool, (uintptr_t)edata);
+}
+
+void
+emap_lock_edata2(tsdn_t *tsdn, emap_t *emap, edata_t *edata1,
+    edata_t *edata2) {
+	assert(edata1 != NULL && edata2 != NULL);
+	mutex_pool_lock2(tsdn, &emap->mtx_pool, (uintptr_t)edata1,
+	    (uintptr_t)edata2);
+}
+
+void
+emap_unlock_edata2(tsdn_t *tsdn, emap_t *emap, edata_t *edata1,
+    edata_t *edata2) {
+	assert(edata1 != NULL && edata2 != NULL);
+	mutex_pool_unlock2(tsdn, &emap->mtx_pool, (uintptr_t)edata1,
+	    (uintptr_t)edata2);
+}
+
+static inline emap_lock_result_t
+emap_try_lock_rtree_leaf_elm(tsdn_t *tsdn, emap_t *emap, rtree_leaf_elm_t *elm,
+    edata_t **result, bool inactive_only) {
+	edata_t *edata1 = rtree_leaf_elm_edata_read(tsdn, &emap->rtree,
+	    elm, true);
+
+	/* Slab implies active extents and should be skipped. */
+	if (edata1 == NULL || (inactive_only && rtree_leaf_elm_slab_read(tsdn,
+	    &emap->rtree, elm, true))) {
+		return emap_lock_result_no_extent;
+	}
+
+	/*
+	 * It's possible that the extent changed out from under us, and with it
+	 * the leaf->edata mapping.  We have to recheck while holding the lock.
+	 */
+	emap_lock_edata(tsdn, emap, edata1);
+	edata_t *edata2 = rtree_leaf_elm_edata_read(tsdn, &emap->rtree, elm,
+	    true);
+
+	if (edata1 == edata2) {
+		*result = edata1;
+		return emap_lock_result_success;
+	} else {
+		emap_unlock_edata(tsdn, emap, edata1);
+		return emap_lock_result_failure;
+	}
+}
+
+/*
+ * Returns a pool-locked edata_t * if there's one associated with the given
+ * address, and NULL otherwise.
+ */
+edata_t *
+emap_lock_edata_from_addr(tsdn_t *tsdn, emap_t *emap, rtree_ctx_t *rtree_ctx,
+    void *addr, bool inactive_only) {
+	edata_t *ret = NULL;
+	rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &emap->rtree,
+	    rtree_ctx, (uintptr_t)addr, false, false);
+	if (elm == NULL) {
+		return NULL;
+	}
+	emap_lock_result_t lock_result;
+	do {
+		lock_result = emap_try_lock_rtree_leaf_elm(tsdn, emap, elm,
+		    &ret, inactive_only);
+	} while (lock_result == emap_lock_result_failure);
+	return ret;
+}
+
+bool
+emap_rtree_leaf_elms_lookup(tsdn_t *tsdn, emap_t *emap, rtree_ctx_t *rtree_ctx,
+    const edata_t *edata, bool dependent, bool init_missing,
+    rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b) {
+	*r_elm_a = rtree_leaf_elm_lookup(tsdn, &emap->rtree, rtree_ctx,
+	    (uintptr_t)edata_base_get(edata), dependent, init_missing);
+	if (!dependent && *r_elm_a == NULL) {
+		return true;
+	}
+	assert(*r_elm_a != NULL);
+
+	*r_elm_b = rtree_leaf_elm_lookup(tsdn, &emap->rtree, rtree_ctx,
+	    (uintptr_t)edata_last_get(edata), dependent, init_missing);
+	if (!dependent && *r_elm_b == NULL) {
+		return true;
+	}
+	assert(*r_elm_b != NULL);
+
+	return false;
+}
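
The lookup-lock-recheck loop above (emap_lock_edata_from_addr driving emap_try_lock_rtree_leaf_elm) retries only when the leaf's edata changed between the unlocked read and the locked recheck, so it either returns a stably locked edata or NULL. A hedged sketch of a typical caller, modeled on extent_recycle_extract() in the extent.c hunks below (use_edata is a hypothetical placeholder):

	edata_t *edata = emap_lock_edata_from_addr(tsdn, &emap_global,
	    rtree_ctx, addr, /* inactive_only */ false);
	if (edata != NULL) {
		/* edata cannot be split/merged away while we hold its lock. */
		use_edata(edata);
		emap_unlock_edata(tsdn, &emap_global, edata);
	}
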
diff --git a/src/extent.c b/src/extent.c
index 07c0bd21..bbebf9ed 100644
--- a/src/extent.c
+++ b/src/extent.c
@@ -2,20 +2,15 @@
 #include "jemalloc/internal/jemalloc_internal_includes.h"
 
 #include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/emap.h"
 #include "jemalloc/internal/extent_dss.h"
 #include "jemalloc/internal/extent_mmap.h"
 #include "jemalloc/internal/ph.h"
-#include "jemalloc/internal/rtree.h"
 #include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/mutex_pool.h"
 
 /******************************************************************************/
 /* Data. */
 
-rtree_t extents_rtree;
-/* Keyed by the address of the edata_t being protected. */
-mutex_pool_t extent_mutex_pool;
-
 size_t opt_lg_extent_max_active_fit = LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT;
 
 static bool extent_commit_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
@@ -58,88 +53,6 @@ static edata_t *extent_alloc_retained(tsdn_t *tsdn, arena_t *arena,
 
 /******************************************************************************/
 
-typedef enum {
-	lock_result_success,
-	lock_result_failure,
-	lock_result_no_extent
-} lock_result_t;
-
-static inline void
-extent_lock_edata(tsdn_t *tsdn, edata_t *edata) {
-	assert(edata != NULL);
-	mutex_pool_lock(tsdn, &extent_mutex_pool, (uintptr_t)edata);
-}
-
-static inline void
-extent_unlock_edata(tsdn_t *tsdn, edata_t *edata) {
-	assert(edata != NULL);
-	mutex_pool_unlock(tsdn, &extent_mutex_pool, (uintptr_t)edata);
-}
-
-static inline void
-extent_lock_edata2(tsdn_t *tsdn, edata_t *edata1, edata_t *edata2) {
-	assert(edata1 != NULL && edata2 != NULL);
-	mutex_pool_lock2(tsdn, &extent_mutex_pool, (uintptr_t)edata1,
-	    (uintptr_t)edata2);
-}
-
-static inline void
-extent_unlock_edata2(tsdn_t *tsdn, edata_t *edata1, edata_t *edata2) {
-	assert(edata1 != NULL && edata2 != NULL);
-	mutex_pool_unlock2(tsdn, &extent_mutex_pool, (uintptr_t)edata1,
-	    (uintptr_t)edata2);
-}
-
-static lock_result_t
-extent_rtree_leaf_elm_try_lock(tsdn_t *tsdn, rtree_leaf_elm_t *elm,
-    edata_t **result, bool inactive_only) {
-	edata_t *edata1 = rtree_leaf_elm_edata_read(tsdn, &extents_rtree,
-	    elm, true);
-
-	/* Slab implies active extents and should be skipped. */
-	if (edata1 == NULL || (inactive_only && rtree_leaf_elm_slab_read(tsdn,
-	    &extents_rtree, elm, true))) {
-		return lock_result_no_extent;
-	}
-
-	/*
-	 * It's possible that the extent changed out from under us, and with it
-	 * the leaf->edata mapping.  We have to recheck while holding the lock.
-	 */
-	extent_lock_edata(tsdn, edata1);
-	edata_t *edata2 = rtree_leaf_elm_edata_read(tsdn, &extents_rtree, elm,
-	    true);
-
-	if (edata1 == edata2) {
-		*result = edata1;
-		return lock_result_success;
-	} else {
-		extent_unlock_edata(tsdn, edata1);
-		return lock_result_failure;
-	}
-}
-
-/*
- * Returns a pool-locked edata_t * if there's one associated with the given
- * address, and NULL otherwise.
- */
-static edata_t *
-extent_lock_edata_from_addr(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, void *addr,
-    bool inactive_only) {
-	edata_t *ret = NULL;
-	rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &extents_rtree,
-	    rtree_ctx, (uintptr_t)addr, false, false);
-	if (elm == NULL) {
-		return NULL;
-	}
-	lock_result_t lock_result;
-	do {
-		lock_result = extent_rtree_leaf_elm_try_lock(tsdn, elm, &ret,
-		    inactive_only);
-	} while (lock_result == lock_result_failure);
-	return ret;
-}
-
 static void
 extent_addr_randomize(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
     size_t alignment) {
@@ -357,34 +270,14 @@ extent_activate_locked(tsdn_t *tsdn, ecache_t *ecache, edata_t *edata) {
 	edata_state_set(edata, extent_state_active);
 }
 
-static bool
-extent_rtree_leaf_elms_lookup(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
-    const edata_t *edata, bool dependent, bool init_missing,
-    rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b) {
-	*r_elm_a = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
-	    (uintptr_t)edata_base_get(edata), dependent, init_missing);
-	if (!dependent && *r_elm_a == NULL) {
-		return true;
-	}
-	assert(*r_elm_a != NULL);
-
-	*r_elm_b = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
-	    (uintptr_t)edata_last_get(edata), dependent, init_missing);
-	if (!dependent && *r_elm_b == NULL) {
-		return true;
-	}
-	assert(*r_elm_b != NULL);
-
-	return false;
-}
-
 static void
 extent_rtree_write_acquired(tsdn_t *tsdn, rtree_leaf_elm_t *elm_a,
     rtree_leaf_elm_t *elm_b, edata_t *edata, szind_t szind, bool slab) {
-	rtree_leaf_elm_write(tsdn, &extents_rtree, elm_a, edata, szind, slab);
+	rtree_leaf_elm_write(tsdn, &emap_global.rtree, elm_a, edata, szind,
+	    slab);
 	if (elm_b != NULL) {
-		rtree_leaf_elm_write(tsdn, &extents_rtree, elm_b, edata, szind,
-		    slab);
+		rtree_leaf_elm_write(tsdn, &emap_global.rtree, elm_b, edata,
+		    szind, slab);
 	}
 }
 
@@ -395,7 +288,7 @@ extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, edata_t *edata,
 
 	/* Register interior. */
 	for (size_t i = 1; i < (edata_size_get(edata) >> LG_PAGE) - 1; i++) {
-		rtree_write(tsdn, &extents_rtree, rtree_ctx,
+		rtree_write(tsdn, &emap_global.rtree, rtree_ctx,
 		    (uintptr_t)edata_base_get(edata) + (uintptr_t)(i <<
 		    LG_PAGE), edata, szind, true);
 	}
@@ -448,11 +341,11 @@ extent_register_impl(tsdn_t *tsdn, edata_t *edata, bool gdump_add) {
 	 * We need to hold the lock to protect against a concurrent coalesce
 	 * operation that sees us in a partial state.
 	 */
-	extent_lock_edata(tsdn, edata);
+	emap_lock_edata(tsdn, &emap_global, edata);
 
-	if (extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, edata, false, true,
-	    &elm_a, &elm_b)) {
-		extent_unlock_edata(tsdn, edata);
+	if (emap_rtree_leaf_elms_lookup(tsdn, &emap_global, rtree_ctx, edata,
+	    false, true, &elm_a, &elm_b)) {
+		emap_unlock_edata(tsdn, &emap_global, edata);
 		return true;
 	}
 
@@ -463,7 +356,7 @@ extent_register_impl(tsdn_t *tsdn, edata_t *edata, bool gdump_add) {
 		extent_interior_register(tsdn, rtree_ctx, edata, szind);
 	}
 
-	extent_unlock_edata(tsdn, edata);
+	emap_unlock_edata(tsdn, &emap_global, edata);
 
 	if (config_prof && gdump_add) {
 		extent_gdump_add(tsdn, edata);
@@ -503,7 +396,7 @@ extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
 	assert(edata_slab_get(edata));
 
 	for (i = 1; i < (edata_size_get(edata) >> LG_PAGE) - 1; i++) {
-		rtree_clear(tsdn, &extents_rtree, rtree_ctx,
+		rtree_clear(tsdn, &emap_global.rtree, rtree_ctx,
 		    (uintptr_t)edata_base_get(edata) + (uintptr_t)(i <<
 		    LG_PAGE));
 	}
@@ -517,10 +410,10 @@ extent_deregister_impl(tsdn_t *tsdn, edata_t *edata, bool gdump) {
 	rtree_ctx_t rtree_ctx_fallback;
 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
 	rtree_leaf_elm_t *elm_a, *elm_b;
-	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, edata, true, false,
-	    &elm_a, &elm_b);
+	emap_rtree_leaf_elms_lookup(tsdn, &emap_global, rtree_ctx, edata,
+	    true, false, &elm_a, &elm_b);
 
-	extent_lock_edata(tsdn, edata);
+	emap_lock_edata(tsdn, &emap_global, edata);
 
 	extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL, SC_NSIZES, false);
 	if (edata_slab_get(edata)) {
@@ -528,7 +421,7 @@ extent_deregister_impl(tsdn_t *tsdn, edata_t *edata, bool gdump) {
 		edata_slab_set(edata, false);
 	}
 
-	extent_unlock_edata(tsdn, edata);
+	emap_unlock_edata(tsdn, &emap_global, edata);
 
 	if (config_prof && gdump) {
 		extent_gdump_sub(tsdn, edata);
@@ -577,8 +470,8 @@ extent_recycle_extract(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 	malloc_mutex_lock(tsdn, &ecache->mtx);
 	edata_t *edata;
 	if (new_addr != NULL) {
-		edata = extent_lock_edata_from_addr(tsdn, rtree_ctx, new_addr,
-		    false);
+		edata = emap_lock_edata_from_addr(tsdn, &emap_global, rtree_ctx,
+		    new_addr, false);
 		if (edata != NULL) {
 			/*
 			 * We might null-out edata to report an error, but we
@@ -592,7 +485,7 @@ extent_recycle_extract(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 			    != ecache->state) {
 				edata = NULL;
 			}
-			extent_unlock_edata(tsdn, unlock_edata);
+			emap_unlock_edata(tsdn, &emap_global, unlock_edata);
 		}
 	} else {
 		edata = eset_fit(&ecache->eset, esize, alignment,
@@ -692,11 +585,12 @@ extent_split_interior(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 	 */
 	edata_szind_set(*edata, szind);
 	if (szind != SC_NSIZES) {
-		rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
-		    (uintptr_t)edata_addr_get(*edata), szind, slab);
+		rtree_szind_slab_update(tsdn, &emap_global.rtree,
+		    rtree_ctx, (uintptr_t)edata_addr_get(*edata), szind,
+		    slab);
 		if (slab && edata_size_get(*edata) > PAGE) {
-			rtree_szind_slab_update(tsdn, &extents_rtree,
-			    rtree_ctx,
+			rtree_szind_slab_update(tsdn,
+			    &emap_global.rtree, rtree_ctx,
 			    (uintptr_t)edata_past_get(*edata) -
 			    (uintptr_t)PAGE, szind, slab);
 		}
@@ -760,8 +654,8 @@ extent_recycle_split(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 		extent_deregister_no_gdump_sub(tsdn, to_leak);
 		extents_abandon_vm(tsdn, arena, ehooks, ecache, to_leak,
 		    growing_retained);
-		assert(extent_lock_edata_from_addr(tsdn, rtree_ctx, leak,
-		    false) == NULL);
+		assert(emap_lock_edata_from_addr(tsdn, &emap_global,
+		    rtree_ctx, leak, false) == NULL);
 	}
 	return NULL;
 }
@@ -1119,8 +1013,8 @@ extent_try_coalesce_impl(tsdn_t *tsdn, edata_cache_t *edata_cache,
 		again = false;
 
 		/* Try to coalesce forward. */
-		edata_t *next = extent_lock_edata_from_addr(tsdn, rtree_ctx,
-		    edata_past_get(edata), inactive_only);
+		edata_t *next = emap_lock_edata_from_addr(tsdn, &emap_global,
+		    rtree_ctx, edata_past_get(edata), inactive_only);
 		if (next != NULL) {
 			/*
 			 * ecache->mtx only protects against races for
@@ -1130,7 +1024,7 @@ extent_try_coalesce_impl(tsdn_t *tsdn, edata_cache_t *edata_cache,
 			bool can_coalesce = extent_can_coalesce(ecache,
 			    edata, next);
 
-			extent_unlock_edata(tsdn, next);
+			emap_unlock_edata(tsdn, &emap_global, next);
 
 			if (can_coalesce && !extent_coalesce(tsdn,
 			    edata_cache, ehooks, ecache, edata, next, true,
@@ -1145,12 +1039,12 @@ extent_try_coalesce_impl(tsdn_t *tsdn, edata_cache_t *edata_cache,
 		}
 
 		/* Try to coalesce backward. */
-		edata_t *prev = extent_lock_edata_from_addr(tsdn, rtree_ctx,
-		    edata_before_get(edata), inactive_only);
+		edata_t *prev = emap_lock_edata_from_addr(tsdn, &emap_global,
+		    rtree_ctx, edata_before_get(edata), inactive_only);
 		if (prev != NULL) {
 			bool can_coalesce = extent_can_coalesce(ecache, edata,
 			    prev);
-			extent_unlock_edata(tsdn, prev);
+			emap_unlock_edata(tsdn, &emap_global, prev);
 
 			if (can_coalesce && !extent_coalesce(tsdn,
 			    edata_cache, ehooks, ecache, edata, prev, false,
@@ -1210,7 +1104,7 @@ extent_record(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, ecache_t *ecache,
 		edata_slab_set(edata, false);
 	}
 
-	assert(rtree_edata_read(tsdn, &extents_rtree, rtree_ctx,
+	assert(rtree_edata_read(tsdn, &emap_global.rtree, rtree_ctx,
 	    (uintptr_t)edata_base_get(edata), true) == edata);
 
 	if (!ecache->delay_coalesce) {
@@ -1449,19 +1343,19 @@ extent_split_impl(tsdn_t *tsdn, edata_cache_t *edata_cache, ehooks_t *ehooks,
 		    edata_committed_get(edata), edata_dumpable_get(edata),
 		    EXTENT_NOT_HEAD);
 
-		extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, &lead, false,
-		    true, &lead_elm_a, &lead_elm_b);
+		emap_rtree_leaf_elms_lookup(tsdn, &emap_global, rtree_ctx,
+		    &lead, false, true, &lead_elm_a, &lead_elm_b);
 	}
 	rtree_leaf_elm_t *trail_elm_a, *trail_elm_b;
-	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, trail, false, true,
-	    &trail_elm_a, &trail_elm_b);
+	emap_rtree_leaf_elms_lookup(tsdn, &emap_global, rtree_ctx, trail, false,
+	    true, &trail_elm_a, &trail_elm_b);
 
 	if (lead_elm_a == NULL || lead_elm_b == NULL || trail_elm_a == NULL
 	    || trail_elm_b == NULL) {
 		goto label_error_b;
 	}
 
-	extent_lock_edata2(tsdn, edata, trail);
+	emap_lock_edata2(tsdn, &emap_global, edata, trail);
 
 	bool err = ehooks_split(tsdn, ehooks, edata_base_get(edata),
 	    size_a + size_b, size_a, size_b, edata_committed_get(edata));
@@ -1478,11 +1372,11 @@ extent_split_impl(tsdn_t *tsdn, edata_cache_t *edata_cache, ehooks_t *ehooks,
 	extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail,
 	    szind_b, slab_b);
 
-	extent_unlock_edata2(tsdn, edata, trail);
+	emap_unlock_edata2(tsdn, &emap_global, edata, trail);
 
 	return trail;
 label_error_c:
-	extent_unlock_edata2(tsdn, edata, trail);
+	emap_unlock_edata2(tsdn, &emap_global, edata, trail);
 label_error_b:
 	edata_cache_put(tsdn, edata_cache, trail);
 label_error_a:
@@ -1523,19 +1417,19 @@ extent_merge_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_cache_t *edata_cache,
 	rtree_ctx_t rtree_ctx_fallback;
 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
 	rtree_leaf_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b;
-	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, a, true, false, &a_elm_a,
-	    &a_elm_b);
-	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, b, true, false, &b_elm_a,
-	    &b_elm_b);
+	emap_rtree_leaf_elms_lookup(tsdn, &emap_global, rtree_ctx, a, true,
+	    false, &a_elm_a, &a_elm_b);
+	emap_rtree_leaf_elms_lookup(tsdn, &emap_global, rtree_ctx, b, true,
+	    false, &b_elm_a, &b_elm_b);
 
-	extent_lock_edata2(tsdn, a, b);
+	emap_lock_edata2(tsdn, &emap_global, a, b);
 
 	if (a_elm_b != NULL) {
-		rtree_leaf_elm_write(tsdn, &extents_rtree, a_elm_b, NULL,
+		rtree_leaf_elm_write(tsdn, &emap_global.rtree, a_elm_b, NULL,
 		    SC_NSIZES, false);
 	}
 	if (b_elm_b != NULL) {
-		rtree_leaf_elm_write(tsdn, &extents_rtree, b_elm_a, NULL,
+		rtree_leaf_elm_write(tsdn, &emap_global.rtree, b_elm_a, NULL,
 		    SC_NSIZES, false);
 	} else {
 		b_elm_b = b_elm_a;
@@ -1550,7 +1444,7 @@ extent_merge_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_cache_t *edata_cache,
 	extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a, SC_NSIZES,
 	    false);
 
-	extent_unlock_edata2(tsdn, a, b);
+	emap_unlock_edata2(tsdn, &emap_global, a, b);
 
 	edata_cache_put(tsdn, edata_cache, b);
 
@@ -1567,15 +1461,6 @@ bool
 extent_boot(void) {
 	assert(sizeof(slab_data_t) >= sizeof(e_prof_info_t));
 
-	if (rtree_new(&extents_rtree, true)) {
-		return true;
-	}
-
-	if (mutex_pool_init(&extent_mutex_pool, "extent_mutex_pool",
-	    WITNESS_RANK_EXTENT_POOL)) {
-		return true;
-	}
-
 	if (have_dss) {
 		extent_dss_boot();
 	}
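
Note that extent_boot() above loses its rtree_new()/mutex_pool_init() calls; the same state is now created by emap_init(), which src/jemalloc.c below invokes before extent_boot(), so the map exists before any extent code runs. Also, extent_split_impl() and extent_merge_impl() take both pool locks through emap_lock_edata2(); the deadlock-avoidance idea behind mutex_pool_lock2() is canonical ordering, roughly as in this hypothetical sketch (it reuses pool_mutex_for from the earlier sketch; the real code lives in mutex_pool.h and may differ):

	static void
	lock2_sketch(tsdn_t *tsdn, uintptr_t k1, uintptr_t k2) {
		uintptr_t lo = k1 < k2 ? k1 : k2;
		uintptr_t hi = k1 < k2 ? k2 : k1;
		malloc_mutex_t *m_lo = pool_mutex_for(lo);
		malloc_mutex_t *m_hi = pool_mutex_for(hi);
		malloc_mutex_lock(tsdn, m_lo);	/* lower key always first */
		if (m_hi != m_lo) {
			malloc_mutex_lock(tsdn, m_hi);	/* skip if shared */
		}
	}
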
diff --git a/src/jemalloc.c b/src/jemalloc.c
index ddb29e38..8f34989a 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -6,6 +6,7 @@
 #include "jemalloc/internal/atomic.h"
 #include "jemalloc/internal/buf_writer.h"
 #include "jemalloc/internal/ctl.h"
+#include "jemalloc/internal/emap.h"
 #include "jemalloc/internal/extent_dss.h"
 #include "jemalloc/internal/extent_mmap.h"
 #include "jemalloc/internal/hook.h"
@@ -1571,6 +1572,9 @@ malloc_init_hard_a0_locked() {
 	if (base_boot(TSDN_NULL)) {
 		return true;
 	}
+	if (emap_init(&emap_global)) {
+		return true;
+	}
 	if (extent_boot()) {
 		return true;
 	}
@@ -2565,7 +2569,7 @@ ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
 
 	alloc_ctx_t alloc_ctx;
 	rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
-	rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
+	rtree_szind_slab_read(tsd_tsdn(tsd), &emap_global.rtree, rtree_ctx,
 	    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
 	assert(alloc_ctx.szind != SC_NSIZES);
 
@@ -2619,15 +2623,16 @@ isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) {
 			alloc_ctx_t dbg_ctx;
 			rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
 			rtree_szind_slab_read(tsd_tsdn(tsd),
-			    &extents_rtree, rtree_ctx, (uintptr_t)ptr,
-			    true, &dbg_ctx.szind, &dbg_ctx.slab);
+			    &emap_global.rtree, rtree_ctx,
+			    (uintptr_t)ptr, true, &dbg_ctx.szind,
+			    &dbg_ctx.slab);
 			assert(dbg_ctx.szind == ctx->szind);
 			assert(dbg_ctx.slab == ctx->slab);
 		}
 	} else if (opt_prof) {
 		ctx = &alloc_ctx;
 		rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
-		rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree,
+		rtree_szind_slab_read(tsd_tsdn(tsd), &emap_global.rtree,
 		    rtree_ctx, (uintptr_t)ptr, true, &ctx->szind, &ctx->slab);
 		/* Small alloc may have !slab (sampled). */
@@ -2699,7 +2704,8 @@ bool free_fastpath(void *ptr, size_t size, bool size_hint) {
 		bool slab;
 		rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
 		bool res = rtree_szind_slab_read_fast(tsd_tsdn(tsd),
-		    &extents_rtree, rtree_ctx, (uintptr_t)ptr, &szind, &slab);
+		    &emap_global.rtree, rtree_ctx, (uintptr_t)ptr, &szind,
+		    &slab);
 
 		/* Note: profiled objects will have alloc_ctx.slab set */
 		if (unlikely(!res || !slab)) {
@@ -3142,7 +3148,7 @@ do_rallocx(void *ptr, size_t size, int flags, bool is_realloc) {
 
 	alloc_ctx_t alloc_ctx;
 	rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
-	rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
+	rtree_szind_slab_read(tsd_tsdn(tsd), &emap_global.rtree, rtree_ctx,
 	    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
 	assert(alloc_ctx.szind != SC_NSIZES);
 	old_usize = sz_index2size(alloc_ctx.szind);
@@ -3421,7 +3427,7 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
 
 	alloc_ctx_t alloc_ctx;
 	rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
-	rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
+	rtree_szind_slab_read(tsd_tsdn(tsd), &emap_global.rtree, rtree_ctx,
 	    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
 	assert(alloc_ctx.szind != SC_NSIZES);
 	old_usize = sz_index2size(alloc_ctx.szind);
diff --git a/src/large.c b/src/large.c
index e133e193..2e520981 100644
--- a/src/large.c
+++ b/src/large.c
@@ -179,7 +179,7 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, edata_t *edata, size_t usize,
 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
 	szind_t szind = sz_size2index(usize);
 	edata_szind_set(edata, szind);
-	rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
+	rtree_szind_slab_update(tsdn, &emap_global.rtree, rtree_ctx,
 	    (uintptr_t)edata_addr_get(edata), szind, false);
 
 	if (config_stats && new_mapping) {
diff --git a/src/tcache.c b/src/tcache.c
index e8a4cc5f..9146f244 100644
--- a/src/tcache.c
+++ b/src/tcache.c
@@ -129,7 +129,7 @@ tbin_edatas_lookup_size_check(tsdn_t *tsdn, cache_bin_t *tbin, szind_t binind,
 	size_t sz_sum = binind * nflush;
 	void **bottom_item = cache_bin_bottom_item_get(tbin, binind);
 	for (unsigned i = 0 ; i < nflush; i++) {
-		rtree_edata_szind_read(tsdn, &extents_rtree,
+		rtree_edata_szind_read(tsdn, &emap_global.rtree,
 		    rtree_ctx, (uintptr_t)*(bottom_item - i), true,
 		    &edatas[i], &szind);
 		sz_sum -= szind;
diff --git a/test/unit/arena_reset.c b/test/unit/arena_reset.c
index 854799da..a1f1d07c 100644
--- a/test/unit/arena_reset.c
+++ b/test/unit/arena_reset.c
@@ -65,7 +65,7 @@ vsalloc(tsdn_t *tsdn, const void *ptr) {
 	edata_t *edata;
 	szind_t szind;
-	if (rtree_edata_szind_read(tsdn, &extents_rtree, rtree_ctx,
+	if (rtree_edata_szind_read(tsdn, &emap_global.rtree, rtree_ctx,
 	    (uintptr_t)ptr, false, &edata, &szind)) {
 		return 0;
 	}
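
One invariant worth keeping in mind across these hunks: lookups passing dependent=false (arena_vsalloc(), the test's vsalloc()) are allowed to fail and must be checked, while dependent=true lookups assert that the mapping exists because the caller owns the pointer. The probing pattern, as used in the hunks above:

	edata_t *edata;
	szind_t szind;
	if (rtree_edata_szind_read(tsdn, &emap_global.rtree, rtree_ctx,
	    (uintptr_t)ptr, false, &edata, &szind)) {
		return 0;	/* ptr is not an allocation we track */
	}
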