Pass dalloc_ctx down the sdalloc path.

This avoids redundant rtree lookups.
Author: Qi Wang
Date: 2017-04-11 14:56:43 -07:00
Committed by: Qi Wang
Parent: e709fae1d7
Commit: f35213bae4
5 changed files with 41 additions and 24 deletions
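
The change threads a dalloc_ctx_t through the sized-deallocation path so that a caller which has already read the extent metadata out of the rtree can hand it down, rather than arena_sdalloc() repeating the lookup. dalloc_ctx_t itself is not defined in the hunks shown below; judging from the fields the new code reads (dalloc_ctx->szind, dalloc_ctx->slab), it is roughly this pair of cached rtree results:

typedef struct dalloc_ctx_s dalloc_ctx_t;
struct dalloc_ctx_s {
    szind_t szind;  /* Size-class index, as read from the rtree leaf. */
    bool slab;      /* True if the extent backs small (slab) allocations. */
};

A NULL dalloc_ctx means "no cached metadata": arena_sdalloc() then falls back to doing the rtree lookup itself, as the arena_sdalloc() hunk below shows.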


@@ -19,7 +19,7 @@ void arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
     dalloc_ctx_t *dalloc_ctx, bool slow_path);
 void arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size);
 void arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
-    bool slow_path);
+    dalloc_ctx_t *dalloc_ctx, bool slow_path);
 #endif

 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
@@ -293,7 +293,7 @@ arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) {
 JEMALLOC_ALWAYS_INLINE void
 arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
-    bool slow_path) {
+    dalloc_ctx_t *dalloc_ctx, bool slow_path) {
     assert(!tsdn_null(tsdn) || tcache == NULL);
     assert(ptr != NULL);
     assert(size <= LARGE_MAXCLASS);
@@ -305,7 +305,22 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
     szind_t szind;
     bool slab;
-    if (!config_prof || !opt_prof) {
+    UNUSED dalloc_ctx_t local_ctx;
+    if (config_prof && opt_prof) {
+        if (dalloc_ctx == NULL) {
+            /* Uncommon case and should be a static check. */
+            rtree_ctx_t rtree_ctx_fallback;
+            rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
+                &rtree_ctx_fallback);
+            rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
+                (uintptr_t)ptr, true, &local_ctx.szind,
+                &local_ctx.slab);
+            assert(local_ctx.szind == size2index(size));
+            dalloc_ctx = &local_ctx;
+        }
+        slab = dalloc_ctx->slab;
+        szind = dalloc_ctx->szind;
+    } else {
         /*
          * There is no risk of being confused by a promoted sampled
          * object, so base szind and slab on the given size.
@@ -314,21 +329,14 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
         slab = (szind < NBINS);
     }

-    if ((config_prof && opt_prof) || config_debug) {
+    if (config_debug) {
         rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
         rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
             (uintptr_t)ptr, true, &szind, &slab);
-        assert(szind == size2index(size));
-        assert((config_prof && opt_prof) || slab == (szind < NBINS));
-
-        if (config_debug) {
-            extent_t *extent = rtree_extent_read(tsdn,
-                &extents_rtree, rtree_ctx, (uintptr_t)ptr, true);
-            assert(szind == extent_szind_get(extent));
-            assert(slab == extent_slab_get(extent));
-        }
+        extent_t *extent = rtree_extent_read(tsdn,
+            &extents_rtree, rtree_ctx, (uintptr_t)ptr, true);
+        assert(szind == extent_szind_get(extent));
+        assert(slab == extent_slab_get(extent));
     }

     if (likely(slab)) {
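
For illustration, a hypothetical caller on the prof-enabled path (example_sized_dalloc is made up, and tsd_tsdn() is jemalloc's usual tsd-to-tsdn conversion; the remaining identifiers appear in the hunks above): it pays for one rtree read and reuses the result for the deallocation itself.

/* Hypothetical caller: a single rtree read supplies szind/slab, so
 * arena_sdalloc() (reached via isdalloct()) skips its own lookup. */
static void
example_sized_dalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache) {
    dalloc_ctx_t ctx;
    rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);

    rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
        (uintptr_t)ptr, true, &ctx.szind, &ctx.slab);
    isdalloct(tsd_tsdn(tsd), ptr, size, tcache, &ctx, false);
}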


@@ -18,7 +18,7 @@ void idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
     dalloc_ctx_t *dalloc_ctx, bool is_internal, bool slow_path);
 void idalloc(tsd_t *tsd, void *ptr);
 void isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
-    bool slow_path);
+    dalloc_ctx_t *dalloc_ctx, bool slow_path);
 void *iralloct_realign(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
     size_t extra, size_t alignment, bool zero, tcache_t *tcache,
     arena_t *arena);
@@ -129,10 +129,10 @@ idalloc(tsd_t *tsd, void *ptr) {
 }

 JEMALLOC_ALWAYS_INLINE void
-isdalloct(tsdn_t *tsdn, void *ptr, size_t size,
-    tcache_t *tcache, bool slow_path) {
+isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
+    dalloc_ctx_t *dalloc_ctx, bool slow_path) {
     witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
-    arena_sdalloc(tsdn, ptr, size, tcache, slow_path);
+    arena_sdalloc(tsdn, ptr, size, tcache, dalloc_ctx, slow_path);
 }

 JEMALLOC_ALWAYS_INLINE void *
@@ -168,7 +168,7 @@ iralloct_realign(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
      */
     copysize = (size < oldsize) ? size : oldsize;
     memcpy(p, ptr, copysize);
-    isdalloct(tsdn, ptr, oldsize, tcache, true);
+    isdalloct(tsdn, ptr, oldsize, tcache, NULL, true);
     return p;
 }
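
The NULL at the iralloct_realign() call site is the fallback convention in action: that path has no rtree metadata on hand, so arena_sdalloc() performs the lookup itself (the local_ctx branch above). Since the non-prof build derives szind and slab from the passed-in size alone, the fallback lookup is only ever paid on the prof-enabled path, and only when no context was supplied, debug assertions aside.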