Remove extent arg from isalloc() and arena_salloc().

Jason Evans
2017-03-17 01:25:12 -07:00
parent 0ee0e0c155
commit 4f341412e5
6 changed files with 29 additions and 51 deletions
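
The hunks below are mechanical: every caller that previously resolved an extent_t * (usually via iealloc()) just to query an allocation's size now passes the pointer alone. A minimal before/after sketch of the caller-side change, assembled only from calls that appear in this diff:

    /* Before: the extent had to be looked up first. */
    extent_t *extent = iealloc(tsdn, ptr);
    size_t usize = isalloc(tsdn, extent, ptr);

    /* After: the pointer is sufficient; the size class is recovered
     * internally (see the rtree_szind_slab_update() call in the
     * arena_prof_demote() hunk for the ptr -> szind mapping that
     * makes this possible). */
    size_t usize = isalloc(tsdn, ptr);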


@@ -1029,7 +1029,7 @@ arena_reset(tsd_t *tsd, arena_t *arena) {
malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
if (config_stats || (config_prof && opt_prof)) {
-usize = isalloc(tsd_tsdn(tsd), extent, ptr);
+usize = isalloc(tsd_tsdn(tsd), ptr);
}
/* Remove large allocation from prof sample set. */
if (config_prof && opt_prof) {
@@ -1465,7 +1465,7 @@ arena_prof_promote(tsdn_t *tsdn, extent_t *extent, const void *ptr,
cassert(config_prof);
assert(ptr != NULL);
-assert(isalloc(tsdn, extent, ptr) == LARGE_MINCLASS);
+assert(isalloc(tsdn, ptr) == LARGE_MINCLASS);
assert(usize <= SMALL_MAXCLASS);
szind_t szind = size2index(usize);
@@ -1477,7 +1477,7 @@ arena_prof_promote(tsdn_t *tsdn, extent_t *extent, const void *ptr,
prof_accum_cancel(tsdn, &arena->prof_accum, usize);
-assert(isalloc(tsdn, extent, ptr) == usize);
+assert(isalloc(tsdn, ptr) == usize);
}
static size_t
@@ -1491,7 +1491,7 @@ arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) {
rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
NBINS, false);
-assert(isalloc(tsdn, extent, ptr) == LARGE_MINCLASS);
+assert(isalloc(tsdn, ptr) == LARGE_MINCLASS);
return LARGE_MINCLASS;
}


@@ -1766,8 +1766,7 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts) {
|| ((uintptr_t)allocation & (dopts->alignment - 1)) == ZU(0));
if (config_stats) {
-assert(usize == isalloc(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd),
-allocation), allocation));
+assert(usize == isalloc(tsd_tsdn(tsd), allocation));
*tsd_thread_allocatedp_get(tsd) += usize;
}
@@ -2019,10 +2018,10 @@ ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
extent = iealloc(tsd_tsdn(tsd), ptr);
if (config_prof && opt_prof) {
-usize = isalloc(tsd_tsdn(tsd), extent, ptr);
+usize = isalloc(tsd_tsdn(tsd), ptr);
prof_free(tsd, extent, ptr, usize);
} else if (config_stats) {
-usize = isalloc(tsd_tsdn(tsd), extent, ptr);
+usize = isalloc(tsd_tsdn(tsd), ptr);
}
if (config_stats) {
*tsd_thread_deallocatedp_get(tsd) += usize;
@@ -2089,7 +2088,7 @@ je_realloc(void *ptr, size_t size) {
witness_assert_lockless(tsd_tsdn(tsd));
extent = iealloc(tsd_tsdn(tsd), ptr);
-old_usize = isalloc(tsd_tsdn(tsd), extent, ptr);
+old_usize = isalloc(tsd_tsdn(tsd), ptr);
if (config_prof && opt_prof) {
usize = s2u(size);
ret = unlikely(usize == 0 || usize > LARGE_MAXCLASS) ?
@@ -2119,7 +2118,7 @@ je_realloc(void *ptr, size_t size) {
if (config_stats && likely(ret != NULL)) {
tsd_t *tsd;
-assert(usize == isalloc(tsdn, iealloc(tsdn, ret), ret));
+assert(usize == isalloc(tsdn, ret));
tsd = tsdn_tsd(tsdn);
*tsd_thread_allocatedp_get(tsd) += usize;
*tsd_thread_deallocatedp_get(tsd) += old_usize;
@@ -2374,7 +2373,7 @@ irallocx_prof(tsd_t *tsd, extent_t *old_extent, void *old_ptr, size_t old_usize,
* reallocation. Therefore, query the actual value of usize.
*/
extent = old_extent;
-*usize = isalloc(tsd_tsdn(tsd), extent, p);
+*usize = isalloc(tsd_tsdn(tsd), p);
} else {
extent = iealloc(tsd_tsdn(tsd), p);
}
@@ -2425,7 +2424,7 @@ je_rallocx(void *ptr, size_t size, int flags) {
tcache = tcache_get(tsd, true);
}
-old_usize = isalloc(tsd_tsdn(tsd), extent, ptr);
+old_usize = isalloc(tsd_tsdn(tsd), ptr);
if (config_prof && opt_prof) {
usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
@@ -2444,8 +2443,7 @@ je_rallocx(void *ptr, size_t size, int flags) {
goto label_oom;
}
if (config_stats) {
-usize = isalloc(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd),
-p), p);
+usize = isalloc(tsd_tsdn(tsd), p);
}
}
assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
@@ -2476,7 +2474,7 @@ ixallocx_helper(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t old_usize,
zero)) {
return old_usize;
}
-usize = isalloc(tsdn, extent, ptr);
+usize = isalloc(tsdn, ptr);
return usize;
}
@@ -2561,7 +2559,7 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
witness_assert_lockless(tsd_tsdn(tsd));
extent = iealloc(tsd_tsdn(tsd), ptr);
-old_usize = isalloc(tsd_tsdn(tsd), extent, ptr);
+old_usize = isalloc(tsd_tsdn(tsd), ptr);
/*
* The API explicitly absolves itself of protecting against (size +
@@ -2615,7 +2613,7 @@ je_sallocx(const void *ptr, int flags) {
if (config_ivsalloc) {
usize = ivsalloc(tsdn, ptr);
} else {
-usize = isalloc(tsdn, iealloc(tsdn, ptr), ptr);
+usize = isalloc(tsdn, ptr);
}
witness_assert_lockless(tsdn);
@@ -2678,7 +2676,7 @@ je_sdallocx(void *ptr, size_t size, int flags) {
tsd = tsd_fetch();
extent = iealloc(tsd_tsdn(tsd), ptr);
usize = inallocx(tsd_tsdn(tsd), size, flags);
-assert(usize == isalloc(tsd_tsdn(tsd), extent, ptr));
+assert(usize == isalloc(tsd_tsdn(tsd), ptr));
witness_assert_lockless(tsd_tsdn(tsd));
if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
@@ -2798,8 +2796,7 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) {
if (config_ivsalloc) {
ret = ivsalloc(tsdn, ptr);
} else {
-ret = (ptr == NULL) ? 0 : isalloc(tsdn, iealloc(tsdn, ptr),
-ptr);
+ret = (ptr == NULL) ? 0 : isalloc(tsdn, ptr);
}
witness_assert_lockless(tsdn);


@@ -28,7 +28,7 @@ static malloc_mutex_t tcaches_mtx;
size_t
tcache_salloc(tsdn_t *tsdn, const void *ptr) {
-return arena_salloc(tsdn, iealloc(tsdn, ptr), ptr);
+return arena_salloc(tsdn, ptr);
}
void
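
For context on why the extent argument could be dropped: the size class is now derived from the pointer itself via the global extents radix tree, the same structure updated by rtree_szind_slab_update() in the arena_prof_demote() hunk above. A rough sketch of that lookup path, not the literal jemalloc code; rtree_szind_read(), tsdn_rtree_ctx(), and index2size() are assumed here as read-side counterparts of the rtree_szind_slab_update() and size2index() calls visible in the diff:

    /* Sketch only; the helper names above are assumptions. */
    static size_t
    arena_salloc_sketch(tsdn_t *tsdn, const void *ptr) {
    	rtree_ctx_t rtree_ctx_fallback;
    	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
    	/* ptr -> size class, via the same extents_rtree the diff updates. */
    	szind_t szind = rtree_szind_read(tsdn, &extents_rtree, rtree_ctx,
    	    (uintptr_t)ptr, true);
    	return index2size(szind);
    }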