#ifndef JEMALLOC_INTERNAL_ARENA_INLINES_B_H
#define JEMALLOC_INTERNAL_ARENA_INLINES_B_H

#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/ticker.h"
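
/*
 * Map an extent back to its owning arena: the extent records an arena index,
 * which is used to load the arena pointer out of the global arenas array.
 */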
static inline arena_t *
arena_get_from_edata(edata_t *edata) {
    return (arena_t *)atomic_load_p(&arenas[edata_arena_ind_get(edata)],
        ATOMIC_RELAXED);
}
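
/*
 * Choose the arena for an allocation of the given size, routing oversized
 * requests to the dedicated huge arena when automatic arena selection is in
 * effect (see the comment in the body for the exact conditions).
 */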
JEMALLOC_ALWAYS_INLINE arena_t *
arena_choose_maybe_huge(tsd_t *tsd, arena_t *arena, size_t size) {
    if (arena != NULL) {
        return arena;
    }

    /*
     * For huge allocations, use the dedicated huge arena if both are true:
     * 1) the allocation uses automatic arena selection (i.e. arena == NULL),
     * and 2) the thread is not assigned to a manual arena.
     */
    if (unlikely(size >= oversize_threshold)) {
        arena_t *tsd_arena = tsd_arena_get(tsd);
        if (tsd_arena == NULL || arena_is_auto(tsd_arena)) {
            return arena_choose_huge(tsd);
        }
    }

    return arena_choose(tsd, NULL);
}
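
/*
 * Fill *prof_info for ptr. Slab-backed (small) allocations are never sampled,
 * since sampled allocations get promoted to large size classes, so they get a
 * zeroed prof_info with the (prof_tctx_t *)1U "not sampled" sentinel; large
 * allocations read the real info from their extent.
 */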
JEMALLOC_ALWAYS_INLINE void
arena_prof_info_get(tsd_t *tsd, const void *ptr, alloc_ctx_t *alloc_ctx,
    prof_info_t *prof_info) {
    cassert(config_prof);
    assert(ptr != NULL);
    assert(prof_info != NULL);

    const edata_t *edata;
    bool is_slab;

    /* Static check. */
    if (alloc_ctx == NULL) {
        edata = iealloc(tsd_tsdn(tsd), ptr);
        is_slab = edata_slab_get(edata);
    } else if (!unlikely(is_slab = alloc_ctx->slab)) {
        edata = iealloc(tsd_tsdn(tsd), ptr);
    }

    if (unlikely(!is_slab)) {
        /* edata must have been initialized at this point. */
        large_prof_info_get(edata, prof_info);
    } else {
        memset(prof_info, 0, sizeof(prof_info_t));
        prof_info->alloc_tctx = (prof_tctx_t *)(uintptr_t)1U;
    }
}
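
/*
 * Reset the profiling tctx for ptr if it is a large allocation; slab-backed
 * allocations carry no per-object tctx, so they are left untouched.
 */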
JEMALLOC_ALWAYS_INLINE void
arena_prof_tctx_reset(tsd_t *tsd, const void *ptr, alloc_ctx_t *alloc_ctx) {
    cassert(config_prof);
    assert(ptr != NULL);

    /* Static check. */
    if (alloc_ctx == NULL) {
        edata_t *edata = iealloc(tsd_tsdn(tsd), ptr);
        if (unlikely(!edata_slab_get(edata))) {
            large_prof_tctx_reset(edata);
        }
    } else {
        if (unlikely(!alloc_ctx->slab)) {
            large_prof_tctx_reset(iealloc(tsd_tsdn(tsd), ptr));
        }
    }
}
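
/*
 * Variant of the above for pointers known to be sampled (and hence promoted
 * to a large, non-slab extent); asserts that invariant instead of branching.
 */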
JEMALLOC_ALWAYS_INLINE void
arena_prof_tctx_reset_sampled(tsd_t *tsd, const void *ptr) {
    cassert(config_prof);
    assert(ptr != NULL);

    edata_t *edata = iealloc(tsd_tsdn(tsd), ptr);
    assert(!edata_slab_get(edata));

    large_prof_tctx_reset(edata);
}
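
/* Associate a profiling tctx with a (necessarily large, non-slab) allocation. */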
JEMALLOC_ALWAYS_INLINE void
arena_prof_info_set(tsd_t *tsd, const void *ptr, prof_tctx_t *tctx) {
    cassert(config_prof);
    assert(ptr != NULL);

    edata_t *edata = iealloc(tsd_tsdn(tsd), ptr);
    assert(!edata_slab_get(edata));
    large_prof_info_set(edata, tctx);
}
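
/*
 * Advance this thread's decay ticker for the arena by nticks; when the
 * ticker fires, kick off a decay pass via arena_decay().
 */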
JEMALLOC_ALWAYS_INLINE void
arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks) {
    tsd_t *tsd;
    ticker_t *decay_ticker;

    if (unlikely(tsdn_null(tsdn))) {
        return;
    }
    tsd = tsdn_tsd(tsdn);
    decay_ticker = decay_ticker_get(tsd, arena_ind_get(arena));
    if (unlikely(decay_ticker == NULL)) {
        return;
    }
    if (unlikely(ticker_ticks(decay_ticker, nticks))) {
        arena_decay(tsdn, arena, false, false);
    }
}
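
/* Single-tick convenience wrapper around arena_decay_ticks(). */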
JEMALLOC_ALWAYS_INLINE void
arena_decay_tick(tsdn_t *tsdn, arena_t *arena) {
    malloc_mutex_assert_not_owner(tsdn, &arena->decay_dirty.mtx);
    malloc_mutex_assert_not_owner(tsdn, &arena->decay_muzzy.mtx);

    arena_decay_ticks(tsdn, arena, 1);
}

/* Purge a single extent to retained / unmapped directly. */
JEMALLOC_ALWAYS_INLINE void
arena_decay_extent(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
    edata_t *edata) {
    size_t extent_size = edata_size_get(edata);
    extent_dalloc_wrapper(tsdn, arena, ehooks, edata);
    if (config_stats) {
        /* Update stats accordingly. */
        arena_stats_lock(tsdn, &arena->stats);
        arena_stats_add_u64(tsdn, &arena->stats,
            &arena->decay_dirty.stats->nmadvise, 1);
        arena_stats_add_u64(tsdn, &arena->stats,
            &arena->decay_dirty.stats->purged, extent_size >> LG_PAGE);
        arena_stats_sub_zu(tsdn, &arena->stats, &arena->stats.mapped,
            extent_size);
        arena_stats_unlock(tsdn, &arena->stats);
    }
}
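
/*
 * Allocation fast path: serve requests from the thread cache when possible
 * (small bins first, then large bins up to tcache_maxclass), falling back to
 * arena_malloc_hard() otherwise. As a rough usage sketch (hypothetical
 * values; real callers, e.g. the iallocztm() path, derive ind, zero, and
 * slow_path from context):
 *
 *     szind_t ind = sz_size2index(size);
 *     void *p = arena_malloc(tsdn, NULL, size, ind, false, tcache, true);
 */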
JEMALLOC_ALWAYS_INLINE void *
arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
    tcache_t *tcache, bool slow_path) {
    assert(!tsdn_null(tsdn) || tcache == NULL);

    if (likely(tcache != NULL)) {
        if (likely(size <= SC_SMALL_MAXCLASS)) {
            return tcache_alloc_small(tsdn_tsd(tsdn), arena,
                tcache, size, ind, zero, slow_path);
        }
        if (likely(size <= tcache_maxclass)) {
            return tcache_alloc_large(tsdn_tsd(tsdn), arena,
                tcache, size, ind, zero, slow_path);
        }
        /* (size > tcache_maxclass) case falls through. */
        assert(size > tcache_maxclass);
    }

    return arena_malloc_hard(tsdn, arena, size, ind, zero);
}
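
/* Look up the arena that owns ptr via the extent it belongs to. */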
JEMALLOC_ALWAYS_INLINE arena_t *
arena_aalloc(tsdn_t *tsdn, const void *ptr) {
    return (arena_t *)atomic_load_p(&arenas[edata_arena_ind_get(
        iealloc(tsdn, ptr))], ATOMIC_RELAXED);
}
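
/*
 * Return the usable size of ptr. The size class index is read from the
 * extents rtree as a dependent lookup, so ptr must be managed by jemalloc.
 */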
JEMALLOC_ALWAYS_INLINE size_t
arena_salloc(tsdn_t *tsdn, const void *ptr) {
    assert(ptr != NULL);

    rtree_ctx_t rtree_ctx_fallback;
    rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

    szind_t szind = rtree_szind_read(tsdn, &extents_rtree, rtree_ctx,
        (uintptr_t)ptr, true);
    assert(szind != SC_NSIZES);

    return sz_index2size(szind);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_vsalloc(tsdn_t *tsdn, const void *ptr) {
    /*
     * Return 0 if ptr is not within an extent managed by jemalloc. This
     * function has two extra costs relative to isalloc():
     * - The rtree calls cannot claim to be dependent lookups, which induces
     *   rtree lookup load dependencies.
     * - The lookup may fail, so there is an extra branch to check for
     *   failure.
     */

    rtree_ctx_t rtree_ctx_fallback;
    rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

    edata_t *edata;
    szind_t szind;
    if (rtree_edata_szind_read(tsdn, &extents_rtree, rtree_ctx,
        (uintptr_t)ptr, false, &edata, &szind)) {
        return 0;
    }

    if (edata == NULL) {
        return 0;
    }
    assert(edata_state_get(edata) == extent_state_active);
    /* Only slab members should be looked up via interior pointers. */
    assert(edata_addr_get(edata) == ptr || edata_slab_get(edata));

    assert(szind != SC_NSIZES);

    return sz_index2size(szind);
}
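
/*
 * Free a large allocation without a tcache. szind < SC_NBINS implies a
 * sampled small allocation that profiling promoted to a large extent, which
 * arena_dalloc_promoted() handles.
 */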
static inline void
arena_dalloc_large_no_tcache(tsdn_t *tsdn, void *ptr, szind_t szind) {
    if (config_prof && unlikely(szind < SC_NBINS)) {
        arena_dalloc_promoted(tsdn, ptr, NULL, true);
    } else {
        edata_t *edata = iealloc(tsdn, ptr);
        large_dalloc(tsdn, edata);
    }
}
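
/*
 * Deallocation path for threads without a tcache: recover szind/slab from
 * the rtree, then dispatch to the small or large free path.
 */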
static inline void
arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr) {
    assert(ptr != NULL);

    rtree_ctx_t rtree_ctx_fallback;
    rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

    szind_t szind;
    bool slab;
    rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
        true, &szind, &slab);

    if (config_debug) {
        edata_t *edata = rtree_edata_read(tsdn, &extents_rtree,
            rtree_ctx, (uintptr_t)ptr, true);
        assert(szind == edata_szind_get(edata));
        assert(szind < SC_NSIZES);
        assert(slab == edata_slab_get(edata));
    }

    if (likely(slab)) {
        /* Small allocation. */
        arena_dalloc_small(tsdn, ptr);
    } else {
        arena_dalloc_large_no_tcache(tsdn, ptr, szind);
    }
}
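
/*
 * Free a large allocation through the tcache when its size class is cached
 * (szind < nhbins); otherwise free the extent directly.
 */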
JEMALLOC_ALWAYS_INLINE void
arena_dalloc_large(tsdn_t *tsdn, void *ptr, tcache_t *tcache, szind_t szind,
    bool slow_path) {
    if (szind < nhbins) {
        if (config_prof && unlikely(szind < SC_NBINS)) {
            arena_dalloc_promoted(tsdn, ptr, tcache, slow_path);
        } else {
            tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, szind,
                slow_path);
        }
    } else {
        edata_t *edata = iealloc(tsdn, ptr);
        large_dalloc(tsdn, edata);
    }
}
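
/*
 * Main deallocation entry point. A non-NULL alloc_ctx carries a szind/slab
 * pair recovered by an earlier rtree lookup so the walk need not be repeated
 * here. A minimal sketch of a call without one (hypothetical caller state):
 *
 *     arena_dalloc(tsdn, ptr, tcache, NULL, true);
 */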
JEMALLOC_ALWAYS_INLINE void
arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
    alloc_ctx_t *alloc_ctx, bool slow_path) {
    assert(!tsdn_null(tsdn) || tcache == NULL);
    assert(ptr != NULL);

    if (unlikely(tcache == NULL)) {
        arena_dalloc_no_tcache(tsdn, ptr);
        return;
    }

    szind_t szind;
    bool slab;
    rtree_ctx_t *rtree_ctx;
    if (alloc_ctx != NULL) {
        szind = alloc_ctx->szind;
        slab = alloc_ctx->slab;
        assert(szind != SC_NSIZES);
    } else {
        rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
        rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
            (uintptr_t)ptr, true, &szind, &slab);
    }

    if (config_debug) {
        rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
        edata_t *edata = rtree_edata_read(tsdn, &extents_rtree,
            rtree_ctx, (uintptr_t)ptr, true);
        assert(szind == edata_szind_get(edata));
        assert(szind < SC_NSIZES);
        assert(slab == edata_slab_get(edata));
    }

    if (likely(slab)) {
        /* Small allocation. */
        tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind,
            slow_path);
    } else {
        arena_dalloc_large(tsdn, ptr, tcache, szind, slow_path);
    }
}
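
/*
 * Sized deallocation without a tcache: derive szind/slab from the given size
 * when profiling cannot have promoted the object; otherwise (and, in debug
 * builds, as a cross-check) read them from the rtree.
 */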
static inline void
arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) {
    assert(ptr != NULL);
    assert(size <= SC_LARGE_MAXCLASS);

    szind_t szind;
    bool slab;
    if (!config_prof || !opt_prof) {
        /*
         * There is no risk of being confused by a promoted sampled
         * object, so base szind and slab on the given size.
         */
        szind = sz_size2index(size);
        slab = (szind < SC_NBINS);
    }

    if ((config_prof && opt_prof) || config_debug) {
        rtree_ctx_t rtree_ctx_fallback;
        rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
            &rtree_ctx_fallback);

        rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
            (uintptr_t)ptr, true, &szind, &slab);

        assert(szind == sz_size2index(size));
        assert((config_prof && opt_prof) || slab == (szind < SC_NBINS));

        if (config_debug) {
            edata_t *edata = rtree_edata_read(tsdn,
                &extents_rtree, rtree_ctx, (uintptr_t)ptr, true);
            assert(szind == edata_szind_get(edata));
            assert(slab == edata_slab_get(edata));
        }
    }

    if (likely(slab)) {
        /* Small allocation. */
        arena_dalloc_small(tsdn, ptr);
    } else {
        arena_dalloc_large_no_tcache(tsdn, ptr, szind);
    }
}
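
/*
 * Sized deallocation entry point; like arena_dalloc(), but the caller-known
 * size lets the metadata lookup be skipped entirely when profiling is off.
 */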
JEMALLOC_ALWAYS_INLINE void
arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
    alloc_ctx_t *alloc_ctx, bool slow_path) {
    assert(!tsdn_null(tsdn) || tcache == NULL);
    assert(ptr != NULL);
    assert(size <= SC_LARGE_MAXCLASS);

    if (unlikely(tcache == NULL)) {
        arena_sdalloc_no_tcache(tsdn, ptr, size);
        return;
    }

    szind_t szind;
    bool slab;
    alloc_ctx_t local_ctx;
    if (config_prof && opt_prof) {
        if (alloc_ctx == NULL) {
            /* Uncommon case and should be a static check. */
            rtree_ctx_t rtree_ctx_fallback;
            rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
                &rtree_ctx_fallback);
            rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
                (uintptr_t)ptr, true, &local_ctx.szind,
                &local_ctx.slab);
            assert(local_ctx.szind == sz_size2index(size));
            alloc_ctx = &local_ctx;
        }
        slab = alloc_ctx->slab;
        szind = alloc_ctx->szind;
    } else {
        /*
         * There is no risk of being confused by a promoted sampled
         * object, so base szind and slab on the given size.
         */
        szind = sz_size2index(size);
        slab = (szind < SC_NBINS);
    }

    if (config_debug) {
        rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
        rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
            (uintptr_t)ptr, true, &szind, &slab);
        edata_t *edata = rtree_edata_read(tsdn,
            &extents_rtree, rtree_ctx, (uintptr_t)ptr, true);
        assert(szind == edata_szind_get(edata));
        assert(slab == edata_slab_get(edata));
    }

    if (likely(slab)) {
        /* Small allocation. */
        tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind,
            slow_path);
    } else {
        arena_dalloc_large(tsdn, ptr, tcache, szind, slow_path);
    }
}

#endif /* JEMALLOC_INTERNAL_ARENA_INLINES_B_H */