Remove extent arg from isalloc() and arena_salloc().

Jason Evans 2017-03-17 01:25:12 -07:00
parent 0ee0e0c155
commit 4f341412e5
6 changed files with 29 additions and 51 deletions
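In call-site terms, the commit collapses the former two-step lookup (resolve the owning extent, then ask for the size) into a single pointer-keyed query. A minimal sketch of the before/after pattern, using the jemalloc-internal names that appear in the hunks below:

/* Before this commit: callers first resolved the owning extent, then
 * passed it to isalloc() alongside the pointer. */
extent_t *extent = iealloc(tsdn, ptr);
size_t usize = isalloc(tsdn, extent, ptr);

/* After this commit: isalloc() takes only the pointer; arena_salloc()
 * reads the size class straight out of the extents rtree, so no extent
 * argument is needed. */
size_t usize = isalloc(tsdn, ptr);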

View File

@@ -14,7 +14,7 @@ void arena_decay_tick(tsdn_t *tsdn, arena_t *arena);
 void *arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
     bool zero, tcache_t *tcache, bool slow_path);
 arena_t *arena_aalloc(tsdn_t *tsdn, const void *ptr);
-size_t arena_salloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr);
+size_t arena_salloc(tsdn_t *tsdn, const void *ptr);
 size_t arena_vsalloc(tsdn_t *tsdn, const void *ptr);
 void arena_dalloc(tsdn_t *tsdn, extent_t *extent, void *ptr,
     tcache_t *tcache, bool slow_path);
@@ -116,7 +116,7 @@ arena_aalloc(tsdn_t *tsdn, const void *ptr) {
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
-arena_salloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr) {
+arena_salloc(tsdn_t *tsdn, const void *ptr) {
     assert(ptr != NULL);
 
     rtree_ctx_t rtree_ctx_fallback;
@@ -126,15 +126,6 @@ arena_salloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr) {
         (uintptr_t)ptr, true);
     assert(szind != NSIZES);
 
-    if (config_debug && unlikely(extent != NULL)) {
-        rtree_leaf_elm_t elm;
-        rtree_leaf_elm_read(rtree_read(tsdn, &extents_rtree, rtree_ctx,
-            (uintptr_t)ptr, true), true, &elm);
-
-        assert(extent == rtree_leaf_elm_extent_get(&elm));
-        assert(szind == extent_szind_get(extent));
-    }
-
     return index2size(szind);
 }
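For reference, a reconstruction of arena_salloc() as it stands after this change. The rtree_ctx initialization and the rtree_szind_read() call are assumptions filled in from elided context (only the continuation line "(uintptr_t)ptr, true);" survives in the hunk above):

JEMALLOC_ALWAYS_INLINE size_t
arena_salloc(tsdn_t *tsdn, const void *ptr) {
    assert(ptr != NULL);

    rtree_ctx_t rtree_ctx_fallback;
    rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

    /* Assumed from elided context: fetch the size class (szind) for ptr
     * from the extents rtree; no extent_t lookup is required. */
    szind_t szind = rtree_szind_read(tsdn, &extents_rtree, rtree_ctx,
        (uintptr_t)ptr, true);
    assert(szind != NSIZES);

    return index2size(szind);
}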

View File

@@ -1008,7 +1008,7 @@ iealloc(tsdn_t *tsdn, const void *ptr) {
 
 #ifndef JEMALLOC_ENABLE_INLINE
 arena_t *iaalloc(tsdn_t *tsdn, const void *ptr);
-size_t isalloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr);
+size_t isalloc(tsdn_t *tsdn, const void *ptr);
 void *iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero,
     tcache_t *tcache, bool is_internal, arena_t *arena, bool slow_path);
 void *ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero,
@@ -1043,18 +1043,11 @@ iaalloc(tsdn_t *tsdn, const void *ptr) {
     return arena_aalloc(tsdn, ptr);
 }
 
-/*
- * Typical usage:
- *   tsdn_t *tsdn = [...]
- *   void *ptr = [...]
- *   extent_t *extent = iealloc(tsdn, ptr);
- *   size_t sz = isalloc(tsdn, extent, ptr);
- */
 JEMALLOC_ALWAYS_INLINE size_t
-isalloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr) {
+isalloc(tsdn_t *tsdn, const void *ptr) {
     assert(ptr != NULL);
 
-    return arena_salloc(tsdn, extent, ptr);
+    return arena_salloc(tsdn, ptr);
 }
 
 JEMALLOC_ALWAYS_INLINE void *
@@ -1070,8 +1063,7 @@ iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
     ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path);
     if (config_stats && is_internal && likely(ret != NULL)) {
-        arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn,
-            iealloc(tsdn, ret), ret));
+        arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn, ret));
     }
     return ret;
 }
@@ -1097,8 +1089,7 @@ ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
     ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache);
     assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
     if (config_stats && is_internal && likely(ret != NULL)) {
-        arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn,
-            iealloc(tsdn, ret), ret));
+        arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn, ret));
     }
     return ret;
 }
@@ -1129,8 +1120,7 @@ idalloctm(tsdn_t *tsdn, extent_t *extent, void *ptr, tcache_t *tcache,
         narenas_auto);
     witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
     if (config_stats && is_internal) {
-        arena_internal_sub(iaalloc(tsdn, ptr), isalloc(tsdn, extent,
-            ptr));
+        arena_internal_sub(iaalloc(tsdn, ptr), isalloc(tsdn, ptr));
     }
 
     arena_dalloc(tsdn, extent, ptr, tcache, slow_path);

View File

@@ -155,7 +155,7 @@ prof_malloc(tsdn_t *tsdn, extent_t *extent, const void *ptr, size_t usize,
     prof_tctx_t *tctx) {
     cassert(config_prof);
     assert(ptr != NULL);
-    assert(usize == isalloc(tsdn, extent, ptr));
+    assert(usize == isalloc(tsdn, ptr));
 
     if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) {
         prof_malloc_sample_object(tsdn, extent, ptr, usize, tctx);
@@ -175,7 +175,7 @@ prof_realloc(tsd_t *tsd, extent_t *extent, const void *ptr, size_t usize,
     assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);
 
     if (prof_active && !updated && ptr != NULL) {
-        assert(usize == isalloc(tsd_tsdn(tsd), extent, ptr));
+        assert(usize == isalloc(tsd_tsdn(tsd), ptr));
         if (prof_sample_accum_update(tsd, usize, true, NULL)) {
             /*
              * Don't sample. The usize passed to prof_alloc_prep()
@@ -229,7 +229,7 @@ prof_free(tsd_t *tsd, const extent_t *extent, const void *ptr, size_t usize) {
     prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), extent, ptr);
 
     cassert(config_prof);
-    assert(usize == isalloc(tsd_tsdn(tsd), extent, ptr));
+    assert(usize == isalloc(tsd_tsdn(tsd), ptr));
 
     if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) {
         prof_free_sampled_object(tsd, usize, tctx);

View File

@@ -1029,7 +1029,7 @@ arena_reset(tsd_t *tsd, arena_t *arena) {
         malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
 
         if (config_stats || (config_prof && opt_prof)) {
-            usize = isalloc(tsd_tsdn(tsd), extent, ptr);
+            usize = isalloc(tsd_tsdn(tsd), ptr);
         }
         /* Remove large allocation from prof sample set. */
         if (config_prof && opt_prof) {
@@ -1465,7 +1465,7 @@ arena_prof_promote(tsdn_t *tsdn, extent_t *extent, const void *ptr,
     cassert(config_prof);
     assert(ptr != NULL);
-    assert(isalloc(tsdn, extent, ptr) == LARGE_MINCLASS);
+    assert(isalloc(tsdn, ptr) == LARGE_MINCLASS);
     assert(usize <= SMALL_MAXCLASS);
 
     szind_t szind = size2index(usize);
@@ -1477,7 +1477,7 @@ arena_prof_promote(tsdn_t *tsdn, extent_t *extent, const void *ptr,
     prof_accum_cancel(tsdn, &arena->prof_accum, usize);
 
-    assert(isalloc(tsdn, extent, ptr) == usize);
+    assert(isalloc(tsdn, ptr) == usize);
 }
 
 static size_t
@@ -1491,7 +1491,7 @@ arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) {
     rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
         NBINS, false);
 
-    assert(isalloc(tsdn, extent, ptr) == LARGE_MINCLASS);
+    assert(isalloc(tsdn, ptr) == LARGE_MINCLASS);
 
     return LARGE_MINCLASS;
 }

View File

@@ -1766,8 +1766,7 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts) {
         || ((uintptr_t)allocation & (dopts->alignment - 1)) == ZU(0));
 
     if (config_stats) {
-        assert(usize == isalloc(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd),
-            allocation), allocation));
+        assert(usize == isalloc(tsd_tsdn(tsd), allocation));
         *tsd_thread_allocatedp_get(tsd) += usize;
     }
 
@@ -2019,10 +2018,10 @@ ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
     extent = iealloc(tsd_tsdn(tsd), ptr);
     if (config_prof && opt_prof) {
-        usize = isalloc(tsd_tsdn(tsd), extent, ptr);
+        usize = isalloc(tsd_tsdn(tsd), ptr);
         prof_free(tsd, extent, ptr, usize);
     } else if (config_stats) {
-        usize = isalloc(tsd_tsdn(tsd), extent, ptr);
+        usize = isalloc(tsd_tsdn(tsd), ptr);
     }
 
     if (config_stats) {
         *tsd_thread_deallocatedp_get(tsd) += usize;
@@ -2089,7 +2088,7 @@ je_realloc(void *ptr, size_t size) {
         witness_assert_lockless(tsd_tsdn(tsd));
 
         extent = iealloc(tsd_tsdn(tsd), ptr);
-        old_usize = isalloc(tsd_tsdn(tsd), extent, ptr);
+        old_usize = isalloc(tsd_tsdn(tsd), ptr);
 
         if (config_prof && opt_prof) {
             usize = s2u(size);
             ret = unlikely(usize == 0 || usize > LARGE_MAXCLASS) ?
@@ -2119,7 +2118,7 @@ je_realloc(void *ptr, size_t size) {
     if (config_stats && likely(ret != NULL)) {
         tsd_t *tsd;
 
-        assert(usize == isalloc(tsdn, iealloc(tsdn, ret), ret));
+        assert(usize == isalloc(tsdn, ret));
         tsd = tsdn_tsd(tsdn);
         *tsd_thread_allocatedp_get(tsd) += usize;
         *tsd_thread_deallocatedp_get(tsd) += old_usize;
@@ -2374,7 +2373,7 @@ irallocx_prof(tsd_t *tsd, extent_t *old_extent, void *old_ptr, size_t old_usize,
          * reallocation. Therefore, query the actual value of usize.
          */
        extent = old_extent;
-       *usize = isalloc(tsd_tsdn(tsd), extent, p);
+       *usize = isalloc(tsd_tsdn(tsd), p);
     } else {
        extent = iealloc(tsd_tsdn(tsd), p);
     }
@@ -2425,7 +2424,7 @@ je_rallocx(void *ptr, size_t size, int flags) {
         tcache = tcache_get(tsd, true);
     }
 
-    old_usize = isalloc(tsd_tsdn(tsd), extent, ptr);
+    old_usize = isalloc(tsd_tsdn(tsd), ptr);
 
     if (config_prof && opt_prof) {
         usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
@@ -2444,8 +2443,7 @@ je_rallocx(void *ptr, size_t size, int flags) {
             goto label_oom;
         }
         if (config_stats) {
-            usize = isalloc(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd),
-                p), p);
+            usize = isalloc(tsd_tsdn(tsd), p);
         }
     }
     assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
@@ -2476,7 +2474,7 @@ ixallocx_helper(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t old_usize,
         zero)) {
         return old_usize;
     }
-    usize = isalloc(tsdn, extent, ptr);
+    usize = isalloc(tsdn, ptr);
 
     return usize;
 }
@@ -2561,7 +2559,7 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
     witness_assert_lockless(tsd_tsdn(tsd));
 
     extent = iealloc(tsd_tsdn(tsd), ptr);
-    old_usize = isalloc(tsd_tsdn(tsd), extent, ptr);
+    old_usize = isalloc(tsd_tsdn(tsd), ptr);
 
     /*
      * The API explicitly absolves itself of protecting against (size +
@@ -2615,7 +2613,7 @@ je_sallocx(const void *ptr, int flags) {
     if (config_ivsalloc) {
         usize = ivsalloc(tsdn, ptr);
     } else {
-        usize = isalloc(tsdn, iealloc(tsdn, ptr), ptr);
+        usize = isalloc(tsdn, ptr);
     }
 
     witness_assert_lockless(tsdn);
@@ -2678,7 +2676,7 @@ je_sdallocx(void *ptr, size_t size, int flags) {
     tsd = tsd_fetch();
     extent = iealloc(tsd_tsdn(tsd), ptr);
     usize = inallocx(tsd_tsdn(tsd), size, flags);
-    assert(usize == isalloc(tsd_tsdn(tsd), extent, ptr));
+    assert(usize == isalloc(tsd_tsdn(tsd), ptr));
 
     witness_assert_lockless(tsd_tsdn(tsd));
     if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
@@ -2798,8 +2796,7 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) {
     if (config_ivsalloc) {
        ret = ivsalloc(tsdn, ptr);
     } else {
-       ret = (ptr == NULL) ? 0 : isalloc(tsdn, iealloc(tsdn, ptr),
-           ptr);
+       ret = (ptr == NULL) ? 0 : isalloc(tsdn, ptr);
     }
 
     witness_assert_lockless(tsdn);
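The je_sallocx() and je_malloc_usable_size() hunks above now route through the one-argument isalloc(), with no change in observable behavior. A small standalone check against jemalloc's public API (not part of this commit; a hypothetical check.c, built with cc check.c -ljemalloc):

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void) {
    /* Allocate at least 100 bytes. */
    void *p = mallocx(100, 0);
    if (p == NULL) {
        return 1;
    }
    /* Both queries resolve the usable size via isalloc() internally,
     * so they must agree and be at least the requested size. */
    size_t a = sallocx(p, 0);
    size_t b = malloc_usable_size(p);
    printf("sallocx=%zu malloc_usable_size=%zu\n", a, b);
    dallocx(p, 0);
    return (a == b && a >= 100) ? 0 : 1;
}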

View File

@@ -28,7 +28,7 @@ static malloc_mutex_t tcaches_mtx;
 
 size_t
 tcache_salloc(tsdn_t *tsdn, const void *ptr) {
-    return arena_salloc(tsdn, iealloc(tsdn, ptr), ptr);
+    return arena_salloc(tsdn, ptr);
 }
 
 void