Emap: Remove direct access to emap internals.
In the process, we do a few local cleanups and optimizations. In particular, the size safety check on tcache flush no longer does a redundant load.
parent 06e42090f7
commit ac50c1e44b
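Note: the API shape this commit enforces, condensed into two hypothetical helpers (lookup_szind_old/lookup_szind_new are names invented for illustration; the calls inside them are the ones appearing in the hunks below):

/* Before: callers reached through emap_global into the rtree directly. */
static szind_t
lookup_szind_old(tsdn_t *tsdn, const void *ptr) {
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	return rtree_szind_read(tsdn, &emap_global.rtree, rtree_ctx,
	    (uintptr_t)ptr, true);
}

/* After: a single emap call hides the rtree context plumbing. */
static szind_t
lookup_szind_new(tsdn_t *tsdn, const void *ptr) {
	alloc_ctx_t alloc_ctx;
	emap_alloc_info_lookup(tsdn, &emap_global, ptr, &alloc_ctx);
	return alloc_ctx.szind;
}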
@@ -188,15 +188,11 @@ arena_aalloc(tsdn_t *tsdn, const void *ptr) {
 JEMALLOC_ALWAYS_INLINE size_t
 arena_salloc(tsdn_t *tsdn, const void *ptr) {
 	assert(ptr != NULL);
-
-	rtree_ctx_t rtree_ctx_fallback;
-	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-
-	szind_t szind = rtree_szind_read(tsdn, &emap_global.rtree, rtree_ctx,
-	    (uintptr_t)ptr, true);
-	assert(szind != SC_NSIZES);
+	alloc_ctx_t alloc_ctx;
+	emap_alloc_info_lookup(tsdn, &emap_global, ptr, &alloc_ctx);
+	assert(alloc_ctx.szind != SC_NSIZES);
 
-	return sz_index2size(szind);
+	return sz_index2size(alloc_ctx.szind);
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
@@ -210,26 +206,24 @@ arena_vsalloc(tsdn_t *tsdn, const void *ptr) {
 	 * failure.
 	 */
 
-	rtree_ctx_t rtree_ctx_fallback;
-	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-
-	edata_t *edata;
-	szind_t szind;
-	if (rtree_edata_szind_read(tsdn, &emap_global.rtree, rtree_ctx,
-	    (uintptr_t)ptr, false, &edata, &szind)) {
+	emap_full_alloc_ctx_t full_alloc_ctx;
+	bool missing = emap_full_alloc_info_try_lookup(tsdn, &emap_global, ptr,
+	    &full_alloc_ctx);
+	if (missing) {
 		return 0;
 	}
 
-	if (edata == NULL) {
+	if (full_alloc_ctx.edata == NULL) {
 		return 0;
 	}
-	assert(edata_state_get(edata) == extent_state_active);
+	assert(edata_state_get(full_alloc_ctx.edata) == extent_state_active);
 	/* Only slab members should be looked up via interior pointers. */
-	assert(edata_addr_get(edata) == ptr || edata_slab_get(edata));
+	assert(edata_addr_get(full_alloc_ctx.edata) == ptr
+	    || edata_slab_get(full_alloc_ctx.edata));
 
-	assert(szind != SC_NSIZES);
+	assert(full_alloc_ctx.szind != SC_NSIZES);
 
-	return sz_index2size(szind);
+	return sz_index2size(full_alloc_ctx.szind);
 }
 
 static inline void
@@ -246,27 +240,21 @@ static inline void
 arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr) {
 	assert(ptr != NULL);
 
-	rtree_ctx_t rtree_ctx_fallback;
-	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-
-	szind_t szind;
-	bool slab;
-	rtree_szind_slab_read(tsdn, &emap_global.rtree, rtree_ctx,
-	    (uintptr_t)ptr, true, &szind, &slab);
+	alloc_ctx_t alloc_ctx;
+	emap_alloc_info_lookup(tsdn, &emap_global, ptr, &alloc_ctx);
 
 	if (config_debug) {
-		edata_t *edata = rtree_edata_read(tsdn, &emap_global.rtree,
-		    rtree_ctx, (uintptr_t)ptr, true);
-		assert(szind == edata_szind_get(edata));
-		assert(szind < SC_NSIZES);
-		assert(slab == edata_slab_get(edata));
+		edata_t *edata = emap_lookup(tsdn, &emap_global, ptr);
+		assert(alloc_ctx.szind == edata_szind_get(edata));
+		assert(alloc_ctx.szind < SC_NSIZES);
+		assert(alloc_ctx.slab == edata_slab_get(edata));
 	}
 
-	if (likely(slab)) {
+	if (likely(alloc_ctx.slab)) {
 		/* Small allocation. */
 		arena_dalloc_small(tsdn, ptr);
 	} else {
-		arena_dalloc_large_no_tcache(tsdn, ptr, szind);
+		arena_dalloc_large_no_tcache(tsdn, ptr, alloc_ctx.szind);
 	}
 }
 
@@ -288,7 +276,7 @@ arena_dalloc_large(tsdn_t *tsdn, void *ptr, tcache_t *tcache, szind_t szind,
 
 JEMALLOC_ALWAYS_INLINE void
 arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
-    alloc_ctx_t *alloc_ctx, bool slow_path) {
+    alloc_ctx_t *caller_alloc_ctx, bool slow_path) {
 	assert(!tsdn_null(tsdn) || tcache == NULL);
 	assert(ptr != NULL);
 
@@ -297,34 +285,28 @@ arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
 		return;
 	}
 
-	szind_t szind;
-	bool slab;
-	rtree_ctx_t *rtree_ctx;
-	if (alloc_ctx != NULL) {
-		szind = alloc_ctx->szind;
-		slab = alloc_ctx->slab;
-		assert(szind != SC_NSIZES);
+	alloc_ctx_t alloc_ctx;
+	if (caller_alloc_ctx != NULL) {
+		alloc_ctx = *caller_alloc_ctx;
 	} else {
-		rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
-		rtree_szind_slab_read(tsdn, &emap_global.rtree, rtree_ctx,
-		    (uintptr_t)ptr, true, &szind, &slab);
+		util_assume(!tsdn_null(tsdn));
+		emap_alloc_info_lookup(tsdn, &emap_global, ptr, &alloc_ctx);
 	}
 
 	if (config_debug) {
-		rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
-		edata_t *edata = rtree_edata_read(tsdn, &emap_global.rtree,
-		    rtree_ctx, (uintptr_t)ptr, true);
-		assert(szind == edata_szind_get(edata));
-		assert(szind < SC_NSIZES);
-		assert(slab == edata_slab_get(edata));
+		edata_t *edata = emap_lookup(tsdn, &emap_global, ptr);
+		assert(alloc_ctx.szind == edata_szind_get(edata));
+		assert(alloc_ctx.szind < SC_NSIZES);
+		assert(alloc_ctx.slab == edata_slab_get(edata));
 	}
 
-	if (likely(slab)) {
+	if (likely(alloc_ctx.slab)) {
 		/* Small allocation. */
-		tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind,
-		    slow_path);
+		tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
+		    alloc_ctx.szind, slow_path);
 	} else {
-		arena_dalloc_large(tsdn, ptr, tcache, szind, slow_path);
+		arena_dalloc_large(tsdn, ptr, tcache, alloc_ctx.szind,
+		    slow_path);
 	}
 }
 
@@ -333,47 +315,41 @@ arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) {
 	assert(ptr != NULL);
 	assert(size <= SC_LARGE_MAXCLASS);
 
-	szind_t szind;
-	bool slab;
+	alloc_ctx_t alloc_ctx;
 	if (!config_prof || !opt_prof) {
 		/*
 		 * There is no risk of being confused by a promoted sampled
 		 * object, so base szind and slab on the given size.
 		 */
-		szind = sz_size2index(size);
-		slab = (szind < SC_NBINS);
+		alloc_ctx.szind = sz_size2index(size);
+		alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS);
 	}
 
 	if ((config_prof && opt_prof) || config_debug) {
-		rtree_ctx_t rtree_ctx_fallback;
-		rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
-		    &rtree_ctx_fallback);
-
-		rtree_szind_slab_read(tsdn, &emap_global.rtree, rtree_ctx,
-		    (uintptr_t)ptr, true, &szind, &slab);
+		emap_alloc_info_lookup(tsdn, &emap_global, ptr, &alloc_ctx);
 
-		assert(szind == sz_size2index(size));
-		assert((config_prof && opt_prof) || slab == (szind < SC_NBINS));
+		assert(alloc_ctx.szind == sz_size2index(size));
+		assert((config_prof && opt_prof)
+		    || alloc_ctx.slab == (alloc_ctx.szind < SC_NBINS));
 
 		if (config_debug) {
-			edata_t *edata = rtree_edata_read(tsdn,
-			    &emap_global.rtree, rtree_ctx, (uintptr_t)ptr, true);
-			assert(szind == edata_szind_get(edata));
-			assert(slab == edata_slab_get(edata));
+			edata_t *edata = emap_lookup(tsdn, &emap_global, ptr);
+			assert(alloc_ctx.szind == edata_szind_get(edata));
+			assert(alloc_ctx.slab == edata_slab_get(edata));
 		}
 	}
 
-	if (likely(slab)) {
+	if (likely(alloc_ctx.slab)) {
 		/* Small allocation. */
 		arena_dalloc_small(tsdn, ptr);
 	} else {
-		arena_dalloc_large_no_tcache(tsdn, ptr, szind);
+		arena_dalloc_large_no_tcache(tsdn, ptr, alloc_ctx.szind);
 	}
 }
 
 JEMALLOC_ALWAYS_INLINE void
 arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
-    alloc_ctx_t *alloc_ctx, bool slow_path) {
+    alloc_ctx_t *caller_alloc_ctx, bool slow_path) {
 	assert(!tsdn_null(tsdn) || tcache == NULL);
 	assert(ptr != NULL);
 	assert(size <= SC_LARGE_MAXCLASS);
@@ -383,48 +359,38 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
 		return;
 	}
 
-	szind_t szind;
-	bool slab;
-	alloc_ctx_t local_ctx;
+	alloc_ctx_t alloc_ctx;
 	if (config_prof && opt_prof) {
-		if (alloc_ctx == NULL) {
+		if (caller_alloc_ctx == NULL) {
 			/* Uncommon case and should be a static check. */
-			rtree_ctx_t rtree_ctx_fallback;
-			rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
-			    &rtree_ctx_fallback);
-			rtree_szind_slab_read(tsdn, &emap_global.rtree,
-			    rtree_ctx, (uintptr_t)ptr, true, &local_ctx.szind,
-			    &local_ctx.slab);
-			assert(local_ctx.szind == sz_size2index(size));
-			alloc_ctx = &local_ctx;
+			emap_alloc_info_lookup(tsdn, &emap_global, ptr,
+			    &alloc_ctx);
+			assert(alloc_ctx.szind == sz_size2index(size));
+		} else {
+			alloc_ctx = *caller_alloc_ctx;
 		}
-		slab = alloc_ctx->slab;
-		szind = alloc_ctx->szind;
 	} else {
 		/*
 		 * There is no risk of being confused by a promoted sampled
 		 * object, so base szind and slab on the given size.
 		 */
-		szind = sz_size2index(size);
-		slab = (szind < SC_NBINS);
+		alloc_ctx.szind = sz_size2index(size);
+		alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS);
 	}
 
 	if (config_debug) {
-		rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
-		rtree_szind_slab_read(tsdn, &emap_global.rtree, rtree_ctx,
-		    (uintptr_t)ptr, true, &szind, &slab);
-		edata_t *edata = rtree_edata_read(tsdn,
-		    &emap_global.rtree, rtree_ctx, (uintptr_t)ptr, true);
-		assert(szind == edata_szind_get(edata));
-		assert(slab == edata_slab_get(edata));
+		edata_t *edata = emap_lookup(tsdn, &emap_global, ptr);
+		assert(alloc_ctx.szind == edata_szind_get(edata));
+		assert(alloc_ctx.slab == edata_slab_get(edata));
 	}
 
-	if (likely(slab)) {
+	if (likely(alloc_ctx.slab)) {
 		/* Small allocation. */
-		tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind,
-		    slow_path);
+		tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
+		    alloc_ctx.szind, slow_path);
 	} else {
-		arena_dalloc_large(tsdn, ptr, tcache, szind, slow_path);
+		arena_dalloc_large(tsdn, ptr, tcache, alloc_ctx.szind,
+		    slow_path);
 	}
 }
 
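Design note on the deallocation paths above: the old arena_dalloc/arena_sdalloc took a pointer-typed alloc_ctx parameter and, on the slow path, patched in a stack local_ctx and repointed the parameter at it; the new code copies the caller's context into a local alloc_ctx_t by value. The struct is two small fields, so the copy is effectively free and the pointer juggling disappears. A condensed sketch of the shared pattern (ctx_for_dalloc is a hypothetical name, not a function in this commit):

/* Resolve the alloc context: trust the caller's lookup if one was passed. */
static alloc_ctx_t
ctx_for_dalloc(tsdn_t *tsdn, const void *ptr,
    const alloc_ctx_t *caller_alloc_ctx) {
	alloc_ctx_t ctx;
	if (caller_alloc_ctx != NULL) {
		ctx = *caller_alloc_ctx;
	} else {
		emap_alloc_info_lookup(tsdn, &emap_global, ptr, &ctx);
	}
	return ctx;
}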
@@ -18,6 +18,13 @@ struct alloc_ctx_t {
 	bool slab;
 };
 
+typedef struct emap_full_alloc_ctx_s emap_full_alloc_ctx_t;
+struct emap_full_alloc_ctx_s {
+	szind_t szind;
+	bool slab;
+	edata_t *edata;
+};
+
 extern emap_t emap_global;
 
 bool emap_init(emap_t *emap);
@@ -136,7 +143,7 @@ emap_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr) {
 
 /* Fills in alloc_ctx with the info in the map. */
 JEMALLOC_ALWAYS_INLINE void
-emap_alloc_info_lookup(tsdn_t *tsdn, emap_t *emap, void *ptr,
+emap_alloc_info_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr,
     alloc_ctx_t *alloc_ctx) {
 	rtree_ctx_t rtree_ctx_fallback;
 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
@@ -145,6 +152,34 @@ emap_alloc_info_lookup(tsdn_t *tsdn, emap_t *emap, void *ptr,
 	    true, &alloc_ctx->szind, &alloc_ctx->slab);
 }
 
+/* The pointer must be mapped. */
+JEMALLOC_ALWAYS_INLINE void
+emap_full_alloc_info_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr,
+    emap_full_alloc_ctx_t *full_alloc_ctx) {
+	rtree_ctx_t rtree_ctx_fallback;
+	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+
+	rtree_edata_szind_slab_read(tsdn, &emap->rtree, rtree_ctx,
+	    (uintptr_t)ptr, true, &full_alloc_ctx->edata,
+	    &full_alloc_ctx->szind, &full_alloc_ctx->slab);
+}
+
+/*
+ * The pointer is allowed to not be mapped.
+ *
+ * Returns true when the pointer is not present.
+ */
+JEMALLOC_ALWAYS_INLINE bool
+emap_full_alloc_info_try_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr,
+    emap_full_alloc_ctx_t *full_alloc_ctx) {
+	rtree_ctx_t rtree_ctx_fallback;
+	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+
+	return rtree_edata_szind_slab_read(tsdn, &emap->rtree, rtree_ctx,
+	    (uintptr_t)ptr, false, &full_alloc_ctx->edata,
+	    &full_alloc_ctx->szind, &full_alloc_ctx->slab);
+}
+
 /*
  * Fills in alloc_ctx, but only if it can be done easily (i.e. with a hit in the
  * L1 rtree cache.
@@ -152,7 +187,7 @@ emap_alloc_info_lookup(tsdn_t *tsdn, emap_t *emap, void *ptr,
  * Returns whether or not alloc_ctx was filled in.
  */
 JEMALLOC_ALWAYS_INLINE bool
-emap_alloc_info_try_lookup_fast(tsd_t *tsd, emap_t *emap, void *ptr,
+emap_alloc_info_try_lookup_fast(tsd_t *tsd, emap_t *emap, const void *ptr,
     alloc_ctx_t *alloc_ctx) {
 	rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
 	bool res = rtree_szind_slab_read_fast(tsd_tsdn(tsd), &emap->rtree,
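Usage sketch for the two lookups just added (the surrounding caller code here is invented for illustration; only the emap calls are from this commit). The plain variant is for pointers known to be mapped; the try variant tolerates unmapped pointers and reports a miss:

	emap_full_alloc_ctx_t ctx;

	/* Pointer known to be jemalloc-owned (e.g. a dalloc path). */
	emap_full_alloc_info_lookup(tsdn, &emap_global, ptr, &ctx);

	/* Pointer of unknown provenance (e.g. arena_vsalloc above): may miss. */
	if (emap_full_alloc_info_try_lookup(tsdn, &emap_global, ptr, &ctx)) {
		/* Not present in the map; treat ctx as uninitialized. */
	}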
@@ -440,15 +440,24 @@ rtree_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
  */
 
 JEMALLOC_ALWAYS_INLINE bool
-rtree_edata_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
-    uintptr_t key, bool dependent, edata_t **r_edata, szind_t *r_szind) {
+rtree_edata_szind_slab_read(tsdn_t *tsdn, rtree_t *rtree,
+    rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, edata_t **r_edata,
+    szind_t *r_szind, bool *r_slab) {
 	rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
 	    dependent);
 	if (!dependent && elm == NULL) {
 		return true;
 	}
+#ifdef RTREE_LEAF_COMPACT
+	uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
+	*r_edata = rtree_leaf_elm_bits_edata_get(bits);
+	*r_szind = rtree_leaf_elm_bits_szind_get(bits);
+	*r_slab = rtree_leaf_elm_bits_slab_get(bits);
+#else
 	*r_edata = rtree_leaf_elm_edata_read(tsdn, rtree, elm, dependent);
 	*r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
+	*r_slab = rtree_leaf_elm_slab_read(tsdn, rtree, elm, dependent);
+#endif
 	return false;
 }
 
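Note on the RTREE_LEAF_COMPACT branch added above: with a compact leaf, edata, szind, and slab are all decoded from the one word returned by rtree_leaf_elm_bits_read, so filling all three outputs costs a single load rather than separate leaf reads per field. A stand-alone illustration of the idea (the bit layout below is invented, not jemalloc's actual encoding):

#include <stdbool.h>
#include <stdint.h>

/* One load produces 'bits'; the decodes are pure bit manipulation. */
static inline bool
demo_slab_get(uint64_t bits) {
	return (bits & 1) != 0;
}
static inline unsigned
demo_szind_get(uint64_t bits) {
	return (unsigned)((bits >> 1) & 0xff);
}
static inline uintptr_t
demo_edata_get(uint64_t bits) {
	return (uintptr_t)(bits >> 16);
}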
@@ -62,6 +62,13 @@ get_errno(void) {
 #endif
 }
 
+JEMALLOC_ALWAYS_INLINE void
+util_assume(bool b) {
+	if (!b) {
+		unreachable();
+	}
+}
+
 #undef UTIL_INLINE
 
 #endif /* JEMALLOC_INTERNAL_UTIL_H */
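util_assume(b), added above, is the usual "assume" idiom: if b were false, control would reach unreachable() (jemalloc's wrapper around the compiler's unreachable builtin), so the optimizer is entitled to treat b as always true and delete the checks it guards; the branch itself compiles away. The two call sites in this commit, with the intended effect noted:

/* In arena_dalloc: lets the tsdn-null fallback inside the lookup fold away. */
util_assume(!tsdn_null(tsdn));

/* In tbin_edatas_lookup_size_check: drops tsd null paths inside the loop. */
util_assume(tsd != NULL);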
@@ -1111,10 +1111,8 @@ arena_reset(tsd_t *tsd, arena_t *arena) {
 
 		malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
 		alloc_ctx_t alloc_ctx;
-		rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
-		rtree_szind_slab_read(tsd_tsdn(tsd), &emap_global.rtree,
-		    rtree_ctx, (uintptr_t)ptr, true, &alloc_ctx.szind,
-		    &alloc_ctx.slab);
+		emap_alloc_info_lookup(tsd_tsdn(tsd), &emap_global, ptr,
+		    &alloc_ctx);
 		assert(alloc_ctx.szind != SC_NSIZES);
 
 		if (config_stats || (config_prof && opt_prof)) {
src/tcache.c (24 lines changed)
@@ -114,10 +114,10 @@ tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
 
 /* Enabled with --enable-extra-size-check. */
 static void
-tbin_edatas_lookup_size_check(tsdn_t *tsdn, cache_bin_t *tbin, szind_t binind,
-    size_t nflush, edata_t **edatas){
-	rtree_ctx_t rtree_ctx_fallback;
-	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+tbin_edatas_lookup_size_check(tsd_t *tsd, cache_bin_t *tbin, szind_t binind,
+    size_t nflush, edata_t **edatas) {
+	/* Avoids null-checking tsdn in the loop below. */
+	util_assume(tsd != NULL);
 
 	/*
 	 * Verify that the items in the tcache all have the correct size; this
@@ -125,16 +125,16 @@ tbin_edatas_lookup_size_check(tsdn_t *tsdn, cache_bin_t *tbin, szind_t binind,
 	 * instead of corrupting metadata. Since this can be turned on for opt
 	 * builds, avoid the branch in the loop.
 	 */
-	szind_t szind;
-	size_t sz_sum = binind * nflush;
+	size_t szind_sum = binind * nflush;
 	void **bottom_item = cache_bin_bottom_item_get(tbin, binind);
 	for (unsigned i = 0 ; i < nflush; i++) {
-		rtree_edata_szind_read(tsdn, &emap_global.rtree,
-		    rtree_ctx, (uintptr_t)*(bottom_item - i), true,
-		    &edatas[i], &szind);
-		sz_sum -= szind;
+		emap_full_alloc_ctx_t full_alloc_ctx;
+		emap_full_alloc_info_lookup(tsd_tsdn(tsd), &emap_global,
+		    *(bottom_item - i), &full_alloc_ctx);
+		edatas[i] = full_alloc_ctx.edata;
+		szind_sum -= full_alloc_ctx.szind;
 	}
-	if (sz_sum != 0) {
+	if (szind_sum != 0) {
 		safety_check_fail_sized_dealloc(false);
 	}
 }
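Two things happen in the rewritten check above. First, the per-item lookup now goes through emap_full_alloc_info_lookup, which (via rtree_edata_szind_slab_read) fills edata and szind from one leaf read under RTREE_LEAF_COMPACT; the old rtree_edata_szind_read read the leaf element once per field, which is the redundant load the commit message mentions. Second, the size validation stays branch-free: szind_sum starts at binind * nflush and each item's actual szind is subtracted, so the result is zero exactly when the szinds sum to the expected total (offsetting mismatches could cancel in principle, an accepted trade-off for a cheap safety check). A stand-alone model of the arithmetic:

#include <stdbool.h>
#include <stddef.h>

/*
 * E.g. binind 7, n 3, szinds {7, 7, 9}: 21 - (7 + 7 + 9) wraps to a nonzero
 * value, so the mismatch is caught; {7, 7, 7} yields exactly 0.
 */
static bool
demo_sizes_ok(size_t binind, const size_t *szinds, size_t n) {
	size_t sum = binind * n;
	for (size_t i = 0; i < n; i++) {
		sum -= szinds[i];
	}
	return sum == 0;
}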
@@ -156,7 +156,7 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
 	tsdn_t *tsdn = tsd_tsdn(tsd);
 	/* Look up edata once per item. */
 	if (config_opt_safety_checks) {
-		tbin_edatas_lookup_size_check(tsdn, tbin, binind, nflush,
+		tbin_edatas_lookup_size_check(tsd, tbin, binind, nflush,
 		    item_edata);
 	} else {
 		for (unsigned i = 0 ; i < nflush; i++) {
@@ -60,28 +60,25 @@ get_large_size(size_t ind) {
 /* Like ivsalloc(), but safe to call on discarded allocations. */
 static size_t
 vsalloc(tsdn_t *tsdn, const void *ptr) {
-	rtree_ctx_t rtree_ctx_fallback;
-	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-
-	edata_t *edata;
-	szind_t szind;
-	if (rtree_edata_szind_read(tsdn, &emap_global.rtree, rtree_ctx,
-	    (uintptr_t)ptr, false, &edata, &szind)) {
+	emap_full_alloc_ctx_t full_alloc_ctx;
+	bool missing = emap_full_alloc_info_try_lookup(tsdn, &emap_global,
+	    ptr, &full_alloc_ctx);
+	if (missing) {
 		return 0;
 	}
 
-	if (edata == NULL) {
+	if (full_alloc_ctx.edata == NULL) {
 		return 0;
 	}
-	if (edata_state_get(edata) != extent_state_active) {
+	if (edata_state_get(full_alloc_ctx.edata) != extent_state_active) {
 		return 0;
 	}
 
-	if (szind == SC_NSIZES) {
+	if (full_alloc_ctx.szind == SC_NSIZES) {
 		return 0;
 	}
 
-	return sz_index2size(szind);
+	return sz_index2size(full_alloc_ctx.szind);
 }
 
 static unsigned