Pass dealloc_ctx down free() fast path.

This gets rid of the redundant rtree lookup down the fast path.
Qi Wang, 2017-04-07 14:12:30 -07:00, committed by Qi Wang
parent 8209df24ea
commit bfa530b75b
8 changed files with 62 additions and 34 deletions
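To make the change easier to follow, here is a minimal, self-contained sketch of the pattern (illustrative only, not jemalloc source; the helper names are hypothetical). A lookup_metadata() stand-in plays the role of the rtree lookup: the outer free path performs that lookup once, caches the result in a dalloc_ctx_t, and passes it down, so the inner dalloc path only falls back to its own lookup when no context is supplied.

/*
 * Sketch only -- illustrative, not jemalloc source.
 */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned szind_t;

typedef struct {
	szind_t szind;	/* size class index */
	bool slab;	/* small (slab-backed) allocation? */
} dalloc_ctx_t;

/* Stand-in for the expensive rtree metadata lookup. */
static void
lookup_metadata(const void *ptr, szind_t *szind, bool *slab) {
	(void)ptr;
	*szind = 3;	/* pretend result */
	*slab = true;
}

/* Inner path: reuses the caller's lookup when a context is supplied. */
static void
arena_dalloc_sketch(void *ptr, dalloc_ctx_t *dalloc_ctx) {
	szind_t szind;
	bool slab;
	if (dalloc_ctx != NULL) {
		szind = dalloc_ctx->szind;
		slab = dalloc_ctx->slab;
	} else {
		lookup_metadata(ptr, &szind, &slab);	/* slow-path fallback */
	}
	printf("dalloc: szind=%u slab=%d\n", szind, (int)slab);
}

/* Fast path: one lookup, reused for stats and for the dalloc call. */
static void
ifree_sketch(void *ptr) {
	dalloc_ctx_t dalloc_ctx;
	lookup_metadata(ptr, &dalloc_ctx.szind, &dalloc_ctx.slab);
	/* usize would be derived from dalloc_ctx.szind here (cf. index2size). */
	arena_dalloc_sketch(ptr, &dalloc_ctx);
}

int
main(void) {
	int x = 0;
	ifree_sketch(&x);
	return 0;
}

The real diff below follows the same shape: ifree() fills a dalloc_ctx_t from a single rtree_szind_slab_read() call and threads it through idalloctm() into arena_dalloc().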


@@ -15,7 +15,8 @@ arena_t *arena_aalloc(tsdn_t *tsdn, const void *ptr);
size_t arena_salloc(tsdn_t *tsdn, const void *ptr);
size_t arena_vsalloc(tsdn_t *tsdn, const void *ptr);
void arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr);
void arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path);
void arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
dalloc_ctx_t *dalloc_ctx, bool slow_path);
void arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size);
void arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
bool slow_path);
@@ -194,7 +195,8 @@ arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr) {
}
JEMALLOC_ALWAYS_INLINE void
arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path) {
arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
dalloc_ctx_t *dalloc_ctx, bool slow_path) {
assert(!tsdn_null(tsdn) || tcache == NULL);
assert(ptr != NULL);
@@ -203,13 +205,21 @@ arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path) {
return;
}
rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
szind_t szind;
bool slab;
rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
true, &szind, &slab);
rtree_ctx_t *rtree_ctx;
if (dalloc_ctx != NULL) {
szind = dalloc_ctx->szind;
slab = dalloc_ctx->slab;
assert(szind != NSIZES);
} else {
rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true, &szind, &slab);
}
if (config_debug) {
rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
extent_t *extent = rtree_extent_read(tsdn, &extents_rtree,
rtree_ctx, (uintptr_t)ptr, true);
assert(szind == extent_szind_get(extent));


@@ -256,4 +256,10 @@ struct arena_tdata_s {
ticker_t decay_ticker;
};
/* Used to pass rtree lookup context down the deallocation path. */
struct dalloc_ctx_s {
szind_t szind;
bool slab;
};
#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H */


@@ -19,6 +19,7 @@ typedef struct arena_decay_s arena_decay_t;
typedef struct arena_bin_s arena_bin_t;
typedef struct arena_s arena_t;
typedef struct arena_tdata_s arena_tdata_t;
typedef struct dalloc_ctx_s dalloc_ctx_t;
typedef enum {
percpu_arena_disabled = 0,


@@ -1095,8 +1095,8 @@ void *ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
tcache_t *tcache, arena_t *arena);
void *ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero);
size_t ivsalloc(tsdn_t *tsdn, const void *ptr);
void idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_internal,
bool slow_path);
void idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
dalloc_ctx_t *dalloc_ctx, bool is_internal, bool slow_path);
void idalloc(tsd_t *tsd, void *ptr);
void isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
bool slow_path);
@@ -1188,8 +1188,8 @@ ivsalloc(tsdn_t *tsdn, const void *ptr) {
}
JEMALLOC_ALWAYS_INLINE void
idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_internal,
bool slow_path) {
idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, dalloc_ctx_t *dalloc_ctx,
bool is_internal, bool slow_path) {
assert(ptr != NULL);
assert(!is_internal || tcache == NULL);
assert(!is_internal || arena_ind_get(iaalloc(tsdn, ptr)) <
@@ -1201,12 +1201,12 @@ idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_internal,
if (!is_internal && *tsd_reentrancy_levelp_get(tsdn_tsd(tsdn)) != 0) {
tcache = NULL;
}
arena_dalloc(tsdn, ptr, tcache, slow_path);
arena_dalloc(tsdn, ptr, tcache, dalloc_ctx, slow_path);
}
JEMALLOC_ALWAYS_INLINE void
idalloc(tsd_t *tsd, void *ptr) {
idalloctm(tsd_tsdn(tsd), ptr, tcache_get(tsd), false, true);
idalloctm(tsd_tsdn(tsd), ptr, tcache_get(tsd), NULL, false, true);
}
JEMALLOC_ALWAYS_INLINE void


@@ -282,12 +282,12 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh) {
ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
if (!ckh_rebuild(ckh, tab)) {
idalloctm(tsd_tsdn(tsd), tab, NULL, true, true);
idalloctm(tsd_tsdn(tsd), tab, NULL, NULL, true, true);
break;
}
/* Rebuilding failed, so back out partially rebuilt table. */
idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, true, true);
idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true);
ckh->tab = tab;
ckh->lg_curbuckets = lg_prevbuckets;
}
@@ -329,7 +329,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh) {
ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
if (!ckh_rebuild(ckh, tab)) {
idalloctm(tsd_tsdn(tsd), tab, NULL, true, true);
idalloctm(tsd_tsdn(tsd), tab, NULL, NULL, true, true);
#ifdef CKH_COUNT
ckh->nshrinks++;
#endif
@@ -337,7 +337,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh) {
}
/* Rebuilding failed, so back out partially rebuilt table. */
idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, true, true);
idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true);
ckh->tab = tab;
ckh->lg_curbuckets = lg_prevbuckets;
#ifdef CKH_COUNT
@@ -418,7 +418,7 @@ ckh_delete(tsd_t *tsd, ckh_t *ckh) {
(unsigned long long)ckh->nrelocs);
#endif
idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, true, true);
idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true);
if (config_debug) {
memset(ckh, JEMALLOC_FREE_JUNK, sizeof(ckh_t));
}


@@ -313,7 +313,7 @@ a0ialloc(size_t size, bool zero, bool is_internal) {
static void
a0idalloc(void *ptr, bool is_internal) {
idalloctm(TSDN_NULL, ptr, false, is_internal, true);
idalloctm(TSDN_NULL, ptr, NULL, NULL, is_internal, true);
}
void *
@@ -2045,21 +2045,29 @@ ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
assert(ptr != NULL);
assert(malloc_initialized() || IS_INITIALIZER);
dalloc_ctx_t dalloc_ctx;
rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true, &dalloc_ctx.szind, &dalloc_ctx.slab);
assert(dalloc_ctx.szind != NSIZES);
size_t usize;
if (config_prof && opt_prof) {
usize = isalloc(tsd_tsdn(tsd), ptr);
usize = index2size(dalloc_ctx.szind);
prof_free(tsd, ptr, usize);
} else if (config_stats) {
usize = isalloc(tsd_tsdn(tsd), ptr);
usize = index2size(dalloc_ctx.szind);
}
if (config_stats) {
*tsd_thread_deallocatedp_get(tsd) += usize;
}
if (likely(!slow_path)) {
idalloctm(tsd_tsdn(tsd), ptr, tcache, false, false);
idalloctm(tsd_tsdn(tsd), ptr, tcache, &dalloc_ctx, false,
false);
} else {
idalloctm(tsd_tsdn(tsd), ptr, tcache, false, true);
idalloctm(tsd_tsdn(tsd), ptr, tcache, &dalloc_ctx, false,
true);
}
}


@@ -590,7 +590,7 @@ prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx,
prof_leave(tsd, tdata_self);
/* Destroy gctx. */
malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
idalloctm(tsd_tsdn(tsd), gctx, NULL, true, true);
idalloctm(tsd_tsdn(tsd), gctx, NULL, NULL, true, true);
} else {
/*
* Compensate for increment in prof_tctx_destroy() or
@@ -704,7 +704,7 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) {
}
if (destroy_tctx) {
idalloctm(tsd_tsdn(tsd), tctx, NULL, true, true);
idalloctm(tsd_tsdn(tsd), tctx, NULL, NULL, true, true);
}
}
@@ -736,8 +736,8 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) {
/* OOM. */
prof_leave(tsd, tdata);
idalloctm(tsd_tsdn(tsd), gctx.v, NULL, true,
true);
idalloctm(tsd_tsdn(tsd), gctx.v, NULL, NULL,
true, true);
return true;
}
new_gctx = true;
@@ -761,7 +761,8 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
if (tgctx.v != NULL) {
/* Lost race to insert. */
idalloctm(tsd_tsdn(tsd), tgctx.v, NULL, true, true);
idalloctm(tsd_tsdn(tsd), tgctx.v, NULL, NULL, true,
true);
}
}
prof_leave(tsd, tdata);
@@ -833,7 +834,7 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt) {
if (new_gctx) {
prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
}
idalloctm(tsd_tsdn(tsd), ret.v, NULL, true, true);
idalloctm(tsd_tsdn(tsd), ret.v, NULL, NULL, true, true);
return NULL;
}
malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
@@ -1245,7 +1246,7 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) {
tctx_tree_remove(&gctx->tctxs,
to_destroy);
idalloctm(tsd_tsdn(tsd), to_destroy,
NULL, true, true);
NULL, NULL, true, true);
} else {
next = NULL;
}
@@ -1915,7 +1916,7 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS, prof_bt_hash,
prof_bt_keycomp)) {
idalloctm(tsd_tsdn(tsd), tdata, NULL, true, true);
idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true);
return NULL;
}
@@ -1971,10 +1972,11 @@ prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata,
assert(prof_tdata_should_destroy_unlocked(tdata, even_if_attached));
if (tdata->thread_name != NULL) {
idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, true, true);
idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true,
true);
}
ckh_delete(tsd, &tdata->bt2tctx);
idalloctm(tsd_tsdn(tsd), tdata, NULL, true, true);
idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true);
}
static void
@@ -2171,7 +2173,8 @@ prof_thread_name_set(tsd_t *tsd, const char *thread_name) {
}
if (tdata->thread_name != NULL) {
idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, true, true);
idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true,
true);
tdata->thread_name = NULL;
}
if (strlen(s) > 0) {


@@ -499,10 +499,10 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache, bool tsd_tcache) {
void *avail_array =
(void *)((uintptr_t)tcache_small_bin_get(tcache, 0)->avail -
(uintptr_t)tcache_bin_info[0].ncached_max * sizeof(void *));
idalloctm(tsd_tsdn(tsd), avail_array, NULL, true, true);
idalloctm(tsd_tsdn(tsd), avail_array, NULL, NULL, true, true);
} else {
/* Release both the tcache struct and avail array. */
idalloctm(tsd_tsdn(tsd), tcache, NULL, true, true);
idalloctm(tsd_tsdn(tsd), tcache, NULL, NULL, true, true);
}
}