#ifndef JEMALLOC_INTERNAL_ARENA_INLINES_B_H
#define JEMALLOC_INTERNAL_ARENA_INLINES_B_H

#ifndef JEMALLOC_ENABLE_INLINE
szind_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
prof_tctx_t *arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr,
    alloc_ctx_t *ctx);
void arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
    alloc_ctx_t *ctx, prof_tctx_t *tctx);
void arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx);
void arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks);
void arena_decay_tick(tsdn_t *tsdn, arena_t *arena);
void *arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
    bool zero, tcache_t *tcache, bool slow_path);
arena_t *arena_aalloc(tsdn_t *tsdn, const void *ptr);
size_t arena_salloc(tsdn_t *tsdn, const void *ptr);
size_t arena_vsalloc(tsdn_t *tsdn, const void *ptr);
void arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr);
void arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
    alloc_ctx_t *alloc_ctx, bool slow_path);
void arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size);
void arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
    alloc_ctx_t *alloc_ctx, bool slow_path);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
JEMALLOC_INLINE szind_t
arena_bin_index(arena_t *arena, arena_bin_t *bin) {
	szind_t binind = (szind_t)(bin - arena->bins);
	assert(binind < NBINS);
	return binind;
}
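
/*
 * Illustrative sketch (not part of the original header; assumes a valid
 * arena with its full complement of bins): because binind is recovered by
 * pointer arithmetic on the bins array, the index/pointer round trip below
 * holds for every i < NBINS.
 */
#if 0
static inline void
example_bin_index_round_trip(arena_t *arena) {
	for (szind_t i = 0; i < NBINS; i++) {
		arena_bin_t *bin = &arena->bins[i];
		assert(arena_bin_index(arena, bin) == i);
	}
}
#endif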

JEMALLOC_ALWAYS_INLINE prof_tctx_t *
arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
	cassert(config_prof);
	assert(ptr != NULL);

	/* Static check. */
	if (alloc_ctx == NULL) {
		const extent_t *extent = iealloc(tsdn, ptr);
		if (unlikely(!extent_slab_get(extent))) {
			return large_prof_tctx_get(tsdn, extent);
		}
	} else {
		if (unlikely(!alloc_ctx->slab)) {
			return large_prof_tctx_get(tsdn, iealloc(tsdn, ptr));
		}
	}
	return (prof_tctx_t *)(uintptr_t)1U;
}
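
/*
 * Illustrative sketch (not part of the original header; helper name
 * hypothetical, assumes a profiling-enabled build): slab-backed (small)
 * allocations all report the sentinel tctx (uintptr_t)1U, while large
 * allocations carry a per-allocation tctx stored in their extent, so a
 * value above the sentinel generally indicates a sampled allocation.
 */
#if 0
static inline bool
example_ptr_has_real_tctx(tsdn_t *tsdn, const void *ptr) {
	prof_tctx_t *tctx = arena_prof_tctx_get(tsdn, ptr, NULL);
	return (uintptr_t)tctx > (uintptr_t)1U;
}
#endif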

JEMALLOC_ALWAYS_INLINE void
arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
    alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) {
	cassert(config_prof);
	assert(ptr != NULL);

	/* Static check. */
	if (alloc_ctx == NULL) {
		extent_t *extent = iealloc(tsdn, ptr);
		if (unlikely(!extent_slab_get(extent))) {
			large_prof_tctx_set(tsdn, extent, tctx);
		}
	} else {
		if (unlikely(!alloc_ctx->slab)) {
			large_prof_tctx_set(tsdn, iealloc(tsdn, ptr), tctx);
		}
	}
}

JEMALLOC_INLINE void
arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) {
	cassert(config_prof);
	assert(ptr != NULL);

	extent_t *extent = iealloc(tsdn, ptr);
	assert(!extent_slab_get(extent));

	large_prof_tctx_reset(tsdn, extent);
}

JEMALLOC_ALWAYS_INLINE void
arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks) {
	tsd_t *tsd;
	ticker_t *decay_ticker;

	if (unlikely(tsdn_null(tsdn))) {
		return;
	}
	tsd = tsdn_tsd(tsdn);
	decay_ticker = decay_ticker_get(tsd, arena_ind_get(arena));
	if (unlikely(decay_ticker == NULL)) {
		return;
	}
	if (unlikely(ticker_ticks(decay_ticker, nticks))) {
		arena_decay(tsdn, arena, false);
	}
}

JEMALLOC_ALWAYS_INLINE void
arena_decay_tick(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_assert_not_owner(tsdn, &arena->decay_dirty.mtx);
	malloc_mutex_assert_not_owner(tsdn, &arena->decay_muzzy.mtx);

	arena_decay_ticks(tsdn, arena, 1);
}
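
/*
 * Illustrative sketch (not part of the original header; helper name
 * hypothetical): allocation and deallocation paths report one tick per
 * operation via arena_decay_tick(); work that stands in for many operations
 * can report them in a single batch, and purging only runs once the ticker
 * expires.
 */
#if 0
static inline void
example_bulk_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nops) {
	arena_decay_ticks(tsdn, arena, nops);
}
#endif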

JEMALLOC_ALWAYS_INLINE void *
arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
    tcache_t *tcache, bool slow_path) {
	assert(!tsdn_null(tsdn) || tcache == NULL);
	assert(size != 0);

	if (likely(tcache != NULL)) {
		if (likely(size <= SMALL_MAXCLASS)) {
			return tcache_alloc_small(tsdn_tsd(tsdn), arena,
			    tcache, size, ind, zero, slow_path);
		}
		if (likely(size <= tcache_maxclass)) {
			return tcache_alloc_large(tsdn_tsd(tsdn), arena,
			    tcache, size, ind, zero, slow_path);
		}
		/* (size > tcache_maxclass) case falls through. */
		assert(size > tcache_maxclass);
	}

	return arena_malloc_hard(tsdn, arena, size, ind, zero);
}
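
/*
 * Illustrative sketch (not part of the original header; helper name
 * hypothetical): the caller supplies both the size and its size-class index,
 * and the two must agree; zero-initialized memory is requested through the
 * zero flag rather than with a separate memset().
 */
#if 0
static inline void *
example_arena_zalloc(tsdn_t *tsdn, arena_t *arena, size_t size,
    tcache_t *tcache) {
	return arena_malloc(tsdn, arena, size, size2index(size), true, tcache,
	    true);
}
#endif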

JEMALLOC_ALWAYS_INLINE arena_t *
arena_aalloc(tsdn_t *tsdn, const void *ptr) {
	return extent_arena_get(iealloc(tsdn, ptr));
}

JEMALLOC_ALWAYS_INLINE size_t
arena_salloc(tsdn_t *tsdn, const void *ptr) {
	assert(ptr != NULL);

	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	szind_t szind = rtree_szind_read(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)ptr, true);
	assert(szind != NSIZES);

	return index2size(szind);
}
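
/*
 * Illustrative sketch (not part of the original header; assumes ptr was
 * returned by an allocation request of `requested` bytes): the value
 * reported is the usable size of the backing size class, so it may round
 * the original request up.
 */
#if 0
static inline void
example_salloc_rounds_up(tsdn_t *tsdn, const void *ptr, size_t requested) {
	assert(arena_salloc(tsdn, ptr) >= requested);
}
#endif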

JEMALLOC_ALWAYS_INLINE size_t
arena_vsalloc(tsdn_t *tsdn, const void *ptr) {
	/*
	 * Return 0 if ptr is not within an extent managed by jemalloc.  This
	 * function has two extra costs relative to isalloc():
	 * - The rtree calls cannot claim to be dependent lookups, which
	 *   induces rtree lookup load dependencies.
	 * - The lookup may fail, so there is an extra branch to check for
	 *   failure.
	 */

	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	extent_t *extent;
	szind_t szind;
	if (rtree_extent_szind_read(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)ptr, false, &extent, &szind)) {
		return 0;
	}

	if (extent == NULL) {
		return 0;
	}
	assert(extent_state_get(extent) == extent_state_active);
	/* Only slab members should be looked up via interior pointers. */
	assert(extent_addr_get(extent) == ptr || extent_slab_get(extent));

	assert(szind != NSIZES);

	return index2size(szind);
}
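
/*
 * Illustrative sketch (not part of the original header; helper name
 * hypothetical): arena_salloc() requires a pointer that jemalloc owns,
 * whereas arena_vsalloc() tolerates arbitrary pointers and reports 0 for
 * unknown ones, which suits malloc_usable_size()-style probing.
 */
#if 0
static inline size_t
example_usable_size_or_zero(tsdn_t *tsdn, const void *ptr) {
	return (ptr == NULL) ? 0 : arena_vsalloc(tsdn, ptr);
}
#endif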

JEMALLOC_INLINE void
arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr) {
	assert(ptr != NULL);

	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	szind_t szind;
	bool slab;
	rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
	    true, &szind, &slab);

	if (config_debug) {
		extent_t *extent = rtree_extent_read(tsdn, &extents_rtree,
		    rtree_ctx, (uintptr_t)ptr, true);
		assert(szind == extent_szind_get(extent));
		assert(szind < NSIZES);
		assert(slab == extent_slab_get(extent));
	}

	if (likely(slab)) {
		/* Small allocation. */
		arena_dalloc_small(tsdn, ptr);
	} else {
		extent_t *extent = iealloc(tsdn, ptr);
		large_dalloc(tsdn, extent);
	}
}

JEMALLOC_ALWAYS_INLINE void
arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
    alloc_ctx_t *alloc_ctx, bool slow_path) {
	assert(!tsdn_null(tsdn) || tcache == NULL);
	assert(ptr != NULL);

	if (unlikely(tcache == NULL)) {
		arena_dalloc_no_tcache(tsdn, ptr);
		return;
	}

	szind_t szind;
	bool slab;
	rtree_ctx_t *rtree_ctx;
	if (alloc_ctx != NULL) {
		szind = alloc_ctx->szind;
		slab = alloc_ctx->slab;
		assert(szind != NSIZES);
	} else {
		rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
		rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
		    (uintptr_t)ptr, true, &szind, &slab);
	}

	if (config_debug) {
		rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
		extent_t *extent = rtree_extent_read(tsdn, &extents_rtree,
		    rtree_ctx, (uintptr_t)ptr, true);
		assert(szind == extent_szind_get(extent));
		assert(szind < NSIZES);
		assert(slab == extent_slab_get(extent));
	}

	if (likely(slab)) {
		/* Small allocation. */
		tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind,
		    slow_path);
	} else {
		if (szind < nhbins) {
			if (config_prof && unlikely(szind < NBINS)) {
				arena_dalloc_promoted(tsdn, ptr, tcache,
				    slow_path);
			} else {
				tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
				    szind, slow_path);
			}
		} else {
			extent_t *extent = iealloc(tsdn, ptr);
			large_dalloc(tsdn, extent);
		}
	}
}
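
/*
 * Illustrative sketch (not part of the original header; helper name
 * hypothetical): a caller that already looked up the extent metadata (e.g.
 * for a size query) can pass its cached alloc_ctx_t to skip a second rtree
 * read; passing NULL is always correct, just slower.
 */
#if 0
static inline void
example_dalloc_with_cached_ctx(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
    alloc_ctx_t *ctx) {
	/* ctx may be NULL, in which case arena_dalloc() re-reads szind/slab. */
	arena_dalloc(tsdn, ptr, tcache, ctx, true);
}
#endif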

JEMALLOC_INLINE void
arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) {
	assert(ptr != NULL);
	assert(size <= LARGE_MAXCLASS);

	szind_t szind;
	bool slab;
	if (!config_prof || !opt_prof) {
		/*
		 * There is no risk of being confused by a promoted sampled
		 * object, so base szind and slab on the given size.
		 */
		szind = size2index(size);
		slab = (szind < NBINS);
	}

	if ((config_prof && opt_prof) || config_debug) {
		rtree_ctx_t rtree_ctx_fallback;
		rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
		    &rtree_ctx_fallback);

		rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
		    (uintptr_t)ptr, true, &szind, &slab);

		assert(szind == size2index(size));
		assert((config_prof && opt_prof) || slab == (szind < NBINS));

		if (config_debug) {
			extent_t *extent = rtree_extent_read(tsdn,
			    &extents_rtree, rtree_ctx, (uintptr_t)ptr, true);
			assert(szind == extent_szind_get(extent));
			assert(slab == extent_slab_get(extent));
		}
	}

	if (likely(slab)) {
		/* Small allocation. */
		arena_dalloc_small(tsdn, ptr);
	} else {
		extent_t *extent = iealloc(tsdn, ptr);
		large_dalloc(tsdn, extent);
	}
}

JEMALLOC_ALWAYS_INLINE void
arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
    alloc_ctx_t *alloc_ctx, bool slow_path) {
	assert(!tsdn_null(tsdn) || tcache == NULL);
	assert(ptr != NULL);
	assert(size <= LARGE_MAXCLASS);

	if (unlikely(tcache == NULL)) {
		arena_sdalloc_no_tcache(tsdn, ptr, size);
		return;
	}

	szind_t szind;
	bool slab;
	UNUSED alloc_ctx_t local_ctx;
	if (config_prof && opt_prof) {
		if (alloc_ctx == NULL) {
			/* Uncommon case and should be a static check. */
			rtree_ctx_t rtree_ctx_fallback;
			rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
			    &rtree_ctx_fallback);
			rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
			    (uintptr_t)ptr, true, &local_ctx.szind,
			    &local_ctx.slab);
			assert(local_ctx.szind == size2index(size));
			alloc_ctx = &local_ctx;
		}
		slab = alloc_ctx->slab;
		szind = alloc_ctx->szind;
	} else {
		/*
		 * There is no risk of being confused by a promoted sampled
		 * object, so base szind and slab on the given size.
		 */
		szind = size2index(size);
		slab = (szind < NBINS);
	}

	if (config_debug) {
		rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
		rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
		    (uintptr_t)ptr, true, &szind, &slab);
		extent_t *extent = rtree_extent_read(tsdn,
		    &extents_rtree, rtree_ctx, (uintptr_t)ptr, true);
		assert(szind == extent_szind_get(extent));
		assert(slab == extent_slab_get(extent));
	}

	if (likely(slab)) {
		/* Small allocation. */
		tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind,
		    slow_path);
	} else {
		if (szind < nhbins) {
			if (config_prof && unlikely(szind < NBINS)) {
				arena_dalloc_promoted(tsdn, ptr, tcache,
				    slow_path);
			} else {
				tcache_dalloc_large(tsdn_tsd(tsdn),
				    tcache, ptr, szind, slow_path);
			}
		} else {
			extent_t *extent = iealloc(tsdn, ptr);
			large_dalloc(tsdn, extent);
		}
	}
}
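
/*
 * Illustrative sketch (not part of the original header; helper name
 * hypothetical): the sized path trusts the caller-supplied size to recover
 * szind/slab without an rtree lookup when profiling is disabled, so size
 * must correspond to the original request for ptr (or its usable size).
 */
#if 0
static inline void
example_sized_free(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache) {
	arena_sdalloc(tsdn, ptr, size, tcache, NULL, true);
}
#endif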

#endif /* (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_)) */

#endif /* JEMALLOC_INTERNAL_ARENA_INLINES_B_H */