2017-01-11 10:06:31 +08:00
|
|
|
#ifndef JEMALLOC_INTERNAL_ARENA_INLINES_B_H
|
|
|
|
#define JEMALLOC_INTERNAL_ARENA_INLINES_B_H
|
|
|
|
|
2020-02-06 06:50:34 +08:00
|
|
|
#include "jemalloc/internal/emap.h"
|
2017-04-18 06:52:44 +08:00
|
|
|
#include "jemalloc/internal/jemalloc_internal_types.h"
|
2017-05-24 03:28:19 +08:00
|
|
|
#include "jemalloc/internal/mutex.h"
|
2017-05-24 05:26:31 +08:00
|
|
|
#include "jemalloc/internal/rtree.h"
|
2017-12-15 04:46:39 +08:00
|
|
|
#include "jemalloc/internal/sc.h"
|
2017-05-31 01:45:37 +08:00
|
|
|
#include "jemalloc/internal/sz.h"
|
2017-04-20 04:39:33 +08:00
|
|
|
#include "jemalloc/internal/ticker.h"
|
2017-04-18 06:52:44 +08:00
|
|
|
|
2019-09-24 09:05:57 +08:00
|
|
|
static inline arena_t *
|
2019-12-10 06:36:45 +08:00
|
|
|
arena_get_from_edata(edata_t *edata) {
|
|
|
|
return (arena_t *)atomic_load_p(&arenas[edata_arena_ind_get(edata)],
|
2019-09-24 09:05:57 +08:00
|
|
|
ATOMIC_RELAXED);
|
|
|
|
}
|
|
|
|
|
2018-05-22 04:33:48 +08:00
|
|
|
JEMALLOC_ALWAYS_INLINE arena_t *
arena_choose_maybe_huge(tsd_t *tsd, arena_t *arena, size_t size) {
	/* An explicitly requested arena always wins. */
	if (arena != NULL) {
		return arena;
	}

	/*
	 * For huge allocations, use the dedicated huge arena if both are true:
	 * 1) is using auto arena selection (i.e. arena == NULL), and 2) the
	 * thread is not assigned to a manual arena.
	 */
	if (unlikely(size >= oversize_threshold)) {
		arena_t *cur_arena = tsd_arena_get(tsd);
		if (cur_arena == NULL || arena_is_auto(cur_arena)) {
			return arena_choose_huge(tsd);
		}
	}

	/* Fall back to normal automatic arena selection. */
	return arena_choose(tsd, NULL);
}
|
|
|
|
|
2019-11-20 08:24:57 +08:00
|
|
|
/*
 * Fill *prof_info with the profiling info for the allocation at ptr.
 * alloc_ctx may be NULL (callers pass a compile-time constant here, so the
 * NULL test is a static check); when it is NULL the slab bit is looked up
 * from the extent map.  Slab-backed (small, unsampled) allocations get the
 * sentinel tctx (uintptr_t)1U; large allocations get their real info.
 */
JEMALLOC_ALWAYS_INLINE void
arena_prof_info_get(tsd_t *tsd, const void *ptr, emap_alloc_ctx_t *alloc_ctx,
    prof_info_t *prof_info, bool reset_recent) {
	cassert(config_prof);
	assert(ptr != NULL);
	assert(prof_info != NULL);

	edata_t *edata = NULL;
	bool is_slab;

	/* Static check. */
	if (alloc_ctx == NULL) {
		/* No cached metadata: consult the extent map for the slab bit. */
		edata = emap_edata_lookup(tsd_tsdn(tsd), &emap_global, ptr);
		is_slab = edata_slab_get(edata);
	} else if (unlikely(!(is_slab = alloc_ctx->slab))) {
		/* Large allocation: the extent is needed below. */
		edata = emap_edata_lookup(tsd_tsdn(tsd), &emap_global, ptr);
	}

	if (unlikely(!is_slab)) {
		/* edata must have been initialized at this point. */
		assert(edata != NULL);
		large_prof_info_get(tsd, edata, prof_info, reset_recent);
	} else {
		prof_info->alloc_tctx = (prof_tctx_t *)(uintptr_t)1U;
		/*
		 * No need to set other fields in prof_info; they will never be
		 * accessed if (uintptr_t)alloc_tctx == (uintptr_t)1U.
		 */
	}
}
|
|
|
|
|
2017-04-12 09:13:10 +08:00
|
|
|
JEMALLOC_ALWAYS_INLINE void
|
2020-02-07 05:45:04 +08:00
|
|
|
arena_prof_tctx_reset(tsd_t *tsd, const void *ptr,
|
|
|
|
emap_alloc_ctx_t *alloc_ctx) {
|
2017-01-11 10:06:31 +08:00
|
|
|
cassert(config_prof);
|
|
|
|
assert(ptr != NULL);
|
|
|
|
|
2017-04-12 09:13:10 +08:00
|
|
|
/* Static check. */
|
|
|
|
if (alloc_ctx == NULL) {
|
2020-02-07 05:45:04 +08:00
|
|
|
edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd), &emap_global,
|
|
|
|
ptr);
|
2019-12-10 06:36:45 +08:00
|
|
|
if (unlikely(!edata_slab_get(edata))) {
|
|
|
|
large_prof_tctx_reset(edata);
|
2017-04-12 09:13:10 +08:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if (unlikely(!alloc_ctx->slab)) {
|
2020-02-07 05:45:04 +08:00
|
|
|
edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd),
|
2020-02-06 06:50:34 +08:00
|
|
|
&emap_global, ptr);
|
|
|
|
large_prof_tctx_reset(edata);
|
2017-04-12 09:13:10 +08:00
|
|
|
}
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
2017-01-11 10:06:31 +08:00
|
|
|
}
|
|
|
|
|
2019-12-14 08:48:03 +08:00
|
|
|
/*
 * Reset the profiling tctx for a known-sampled allocation.  Sampled
 * allocations are never slab-backed (the assert below checks this), so the
 * large-object reset can be called unconditionally.
 */
JEMALLOC_ALWAYS_INLINE void
arena_prof_tctx_reset_sampled(tsd_t *tsd, const void *ptr) {
	cassert(config_prof);
	assert(ptr != NULL);

	edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd), &emap_global, ptr);
	assert(!edata_slab_get(edata));

	large_prof_tctx_reset(edata);
}
|
|
|
|
|
2018-07-06 01:56:33 +08:00
|
|
|
/*
 * Attach profiling info (tctx) to a freshly created large allocation.
 * Only non-slab extents carry per-object prof info, hence the assert.
 */
JEMALLOC_ALWAYS_INLINE void
arena_prof_info_set(tsd_t *tsd, edata_t *edata, prof_tctx_t *tctx) {
	cassert(config_prof);

	assert(!edata_slab_get(edata));
	large_prof_info_set(edata, tctx);
}
|
|
|
|
|
2020-01-14 08:18:32 +08:00
|
|
|
JEMALLOC_ALWAYS_INLINE bool
|
|
|
|
arena_may_force_decay(arena_t *arena) {
|
|
|
|
return !(arena_dirty_decay_ms_get(arena) == -1
|
|
|
|
|| arena_muzzy_decay_ms_get(arena) == -1);
|
|
|
|
}
|
|
|
|
|
2017-01-11 10:06:31 +08:00
|
|
|
JEMALLOC_ALWAYS_INLINE void
|
2017-01-16 08:56:30 +08:00
|
|
|
arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks) {
|
2017-01-11 10:06:31 +08:00
|
|
|
tsd_t *tsd;
|
|
|
|
ticker_t *decay_ticker;
|
|
|
|
|
2017-01-16 08:56:30 +08:00
|
|
|
if (unlikely(tsdn_null(tsdn))) {
|
2017-01-11 10:06:31 +08:00
|
|
|
return;
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
2017-01-11 10:06:31 +08:00
|
|
|
tsd = tsdn_tsd(tsdn);
|
|
|
|
decay_ticker = decay_ticker_get(tsd, arena_ind_get(arena));
|
2017-01-16 08:56:30 +08:00
|
|
|
if (unlikely(decay_ticker == NULL)) {
|
2017-01-11 10:06:31 +08:00
|
|
|
return;
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
|
|
|
if (unlikely(ticker_ticks(decay_ticker, nticks))) {
|
2017-03-18 03:42:33 +08:00
|
|
|
arena_decay(tsdn, arena, false, false);
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
2017-01-11 10:06:31 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Advance the decay ticker for arena by a single tick.  Ticking may trigger
 * a purge, so the caller must not hold either decay mutex.
 */
JEMALLOC_ALWAYS_INLINE void
arena_decay_tick(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_assert_not_owner(tsdn, &arena->decay_dirty.mtx);
	malloc_mutex_assert_not_owner(tsdn, &arena->decay_muzzy.mtx);

	arena_decay_ticks(tsdn, arena, 1);
}
|
|
|
|
|
|
|
|
JEMALLOC_ALWAYS_INLINE void *
|
|
|
|
arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
|
2017-01-16 08:56:30 +08:00
|
|
|
tcache_t *tcache, bool slow_path) {
|
2017-01-11 10:06:31 +08:00
|
|
|
assert(!tsdn_null(tsdn) || tcache == NULL);
|
|
|
|
|
|
|
|
if (likely(tcache != NULL)) {
|
2018-07-12 07:05:58 +08:00
|
|
|
if (likely(size <= SC_SMALL_MAXCLASS)) {
|
2017-01-20 10:15:45 +08:00
|
|
|
return tcache_alloc_small(tsdn_tsd(tsdn), arena,
|
|
|
|
tcache, size, ind, zero, slow_path);
|
2017-01-11 10:06:31 +08:00
|
|
|
}
|
|
|
|
if (likely(size <= tcache_maxclass)) {
|
2017-01-20 10:15:45 +08:00
|
|
|
return tcache_alloc_large(tsdn_tsd(tsdn), arena,
|
|
|
|
tcache, size, ind, zero, slow_path);
|
2017-01-11 10:06:31 +08:00
|
|
|
}
|
|
|
|
/* (size > tcache_maxclass) case falls through. */
|
|
|
|
assert(size > tcache_maxclass);
|
|
|
|
}
|
|
|
|
|
2017-01-20 10:15:45 +08:00
|
|
|
return arena_malloc_hard(tsdn, arena, size, ind, zero);
|
2017-01-11 10:06:31 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Return the arena that owns the allocation at ptr. */
JEMALLOC_ALWAYS_INLINE arena_t *
arena_aalloc(tsdn_t *tsdn, const void *ptr) {
	/* Map ptr to its extent, then the extent's arena index to the arena. */
	edata_t *edata = emap_edata_lookup(tsdn, &emap_global, ptr);
	return (arena_t *)atomic_load_p(&arenas[edata_arena_ind_get(edata)],
	    ATOMIC_RELAXED);
}
|
|
|
|
|
|
|
|
JEMALLOC_ALWAYS_INLINE size_t
|
2017-03-17 16:25:12 +08:00
|
|
|
arena_salloc(tsdn_t *tsdn, const void *ptr) {
|
2017-01-11 10:06:31 +08:00
|
|
|
assert(ptr != NULL);
|
2020-02-07 05:45:04 +08:00
|
|
|
emap_alloc_ctx_t alloc_ctx;
|
|
|
|
emap_alloc_ctx_lookup(tsdn, &emap_global, ptr, &alloc_ctx);
|
2020-02-07 05:16:07 +08:00
|
|
|
assert(alloc_ctx.szind != SC_NSIZES);
|
2017-01-11 10:06:31 +08:00
|
|
|
|
2020-02-07 05:16:07 +08:00
|
|
|
return sz_index2size(alloc_ctx.szind);
|
2017-03-17 08:57:52 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
JEMALLOC_ALWAYS_INLINE size_t
|
|
|
|
arena_vsalloc(tsdn_t *tsdn, const void *ptr) {
|
|
|
|
/*
|
|
|
|
* Return 0 if ptr is not within an extent managed by jemalloc. This
|
|
|
|
* function has two extra costs relative to isalloc():
|
|
|
|
* - The rtree calls cannot claim to be dependent lookups, which induces
|
|
|
|
* rtree lookup load dependencies.
|
|
|
|
* - The lookup may fail, so there is an extra branch to check for
|
|
|
|
* failure.
|
|
|
|
*/
|
|
|
|
|
2020-02-07 05:16:07 +08:00
|
|
|
emap_full_alloc_ctx_t full_alloc_ctx;
|
2020-02-07 05:45:04 +08:00
|
|
|
bool missing = emap_full_alloc_ctx_try_lookup(tsdn, &emap_global, ptr,
|
2020-02-07 05:16:07 +08:00
|
|
|
&full_alloc_ctx);
|
|
|
|
if (missing) {
|
2017-03-17 08:57:52 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-02-07 05:16:07 +08:00
|
|
|
if (full_alloc_ctx.edata == NULL) {
|
2017-03-17 08:57:52 +08:00
|
|
|
return 0;
|
|
|
|
}
|
2020-02-07 05:16:07 +08:00
|
|
|
assert(edata_state_get(full_alloc_ctx.edata) == extent_state_active);
|
2017-03-17 08:57:52 +08:00
|
|
|
/* Only slab members should be looked up via interior pointers. */
|
2020-02-07 05:16:07 +08:00
|
|
|
assert(edata_addr_get(full_alloc_ctx.edata) == ptr
|
|
|
|
|| edata_slab_get(full_alloc_ctx.edata));
|
2017-03-17 08:57:52 +08:00
|
|
|
|
2020-02-07 05:16:07 +08:00
|
|
|
assert(full_alloc_ctx.szind != SC_NSIZES);
|
2017-03-17 08:57:52 +08:00
|
|
|
|
2020-02-07 05:16:07 +08:00
|
|
|
return sz_index2size(full_alloc_ctx.szind);
|
2017-01-11 10:06:31 +08:00
|
|
|
}
|
|
|
|
|
2019-07-25 07:12:06 +08:00
|
|
|
static inline void
|
|
|
|
arena_dalloc_large_no_tcache(tsdn_t *tsdn, void *ptr, szind_t szind) {
|
|
|
|
if (config_prof && unlikely(szind < SC_NBINS)) {
|
|
|
|
arena_dalloc_promoted(tsdn, ptr, NULL, true);
|
|
|
|
} else {
|
2020-02-07 05:45:04 +08:00
|
|
|
edata_t *edata = emap_edata_lookup(tsdn, &emap_global, ptr);
|
2019-12-10 06:36:45 +08:00
|
|
|
large_dalloc(tsdn, edata);
|
2019-07-25 07:12:06 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
static inline void
|
2017-03-23 02:00:40 +08:00
|
|
|
arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr) {
|
|
|
|
assert(ptr != NULL);
|
|
|
|
|
2020-02-07 05:45:04 +08:00
|
|
|
emap_alloc_ctx_t alloc_ctx;
|
|
|
|
emap_alloc_ctx_lookup(tsdn, &emap_global, ptr, &alloc_ctx);
|
2017-03-23 02:00:40 +08:00
|
|
|
|
|
|
|
if (config_debug) {
|
2020-02-07 05:45:04 +08:00
|
|
|
edata_t *edata = emap_edata_lookup(tsdn, &emap_global, ptr);
|
2020-02-07 05:16:07 +08:00
|
|
|
assert(alloc_ctx.szind == edata_szind_get(edata));
|
|
|
|
assert(alloc_ctx.szind < SC_NSIZES);
|
|
|
|
assert(alloc_ctx.slab == edata_slab_get(edata));
|
2017-03-23 02:00:40 +08:00
|
|
|
}
|
|
|
|
|
2020-02-07 05:16:07 +08:00
|
|
|
if (likely(alloc_ctx.slab)) {
|
2017-03-23 02:00:40 +08:00
|
|
|
/* Small allocation. */
|
|
|
|
arena_dalloc_small(tsdn, ptr);
|
|
|
|
} else {
|
2020-02-07 05:16:07 +08:00
|
|
|
arena_dalloc_large_no_tcache(tsdn, ptr, alloc_ctx.szind);
|
2017-03-23 02:00:40 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-07-25 07:27:30 +08:00
|
|
|
JEMALLOC_ALWAYS_INLINE void
|
|
|
|
arena_dalloc_large(tsdn_t *tsdn, void *ptr, tcache_t *tcache, szind_t szind,
|
|
|
|
bool slow_path) {
|
|
|
|
if (szind < nhbins) {
|
|
|
|
if (config_prof && unlikely(szind < SC_NBINS)) {
|
|
|
|
arena_dalloc_promoted(tsdn, ptr, tcache, slow_path);
|
|
|
|
} else {
|
|
|
|
tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, szind,
|
|
|
|
slow_path);
|
|
|
|
}
|
|
|
|
} else {
|
2020-02-07 05:45:04 +08:00
|
|
|
edata_t *edata = emap_edata_lookup(tsdn, &emap_global, ptr);
|
2019-12-10 06:36:45 +08:00
|
|
|
large_dalloc(tsdn, edata);
|
2019-07-25 07:27:30 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-01-11 10:06:31 +08:00
|
|
|
/*
 * Free the allocation at ptr, preferring tcache fast paths.
 * caller_alloc_ctx may carry already-looked-up metadata (szind/slab) to
 * avoid a redundant extent-map lookup; pass NULL to have it looked up here.
 */
JEMALLOC_ALWAYS_INLINE void
arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
    emap_alloc_ctx_t *caller_alloc_ctx, bool slow_path) {
	assert(!tsdn_null(tsdn) || tcache == NULL);
	assert(ptr != NULL);

	if (unlikely(tcache == NULL)) {
		arena_dalloc_no_tcache(tsdn, ptr);
		return;
	}

	emap_alloc_ctx_t alloc_ctx;
	if (caller_alloc_ctx != NULL) {
		alloc_ctx = *caller_alloc_ctx;
	} else {
		/* tcache != NULL implies tsdn is non-NULL (asserted above). */
		util_assume(!tsdn_null(tsdn));
		emap_alloc_ctx_lookup(tsdn, &emap_global, ptr, &alloc_ctx);
	}

	if (config_debug) {
		/* Cross-check the (possibly caller-supplied) metadata. */
		edata_t *edata = emap_edata_lookup(tsdn, &emap_global, ptr);
		assert(alloc_ctx.szind == edata_szind_get(edata));
		assert(alloc_ctx.szind < SC_NSIZES);
		assert(alloc_ctx.slab == edata_slab_get(edata));
	}

	if (likely(alloc_ctx.slab)) {
		/* Small allocation. */
		tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
		    alloc_ctx.szind, slow_path);
	} else {
		arena_dalloc_large(tsdn, ptr, tcache, alloc_ctx.szind,
		    slow_path);
	}
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
/*
 * Sized deallocation without a tcache.  When profiling is inactive, the
 * metadata (szind/slab) is derived purely from the caller-supplied size;
 * when profiling is active, it must instead be looked up, because a sampled
 * small object may have been promoted to a larger size class.
 */
static inline void
arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) {
	assert(ptr != NULL);
	assert(size <= SC_LARGE_MAXCLASS);

	emap_alloc_ctx_t alloc_ctx;
	if (!config_prof || !opt_prof) {
		/*
		 * There is no risk of being confused by a promoted sampled
		 * object, so base szind and slab on the given size.
		 */
		alloc_ctx.szind = sz_size2index(size);
		alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS);
	}

	if ((config_prof && opt_prof) || config_debug) {
		/* Authoritative lookup (overwrites the derived values). */
		emap_alloc_ctx_lookup(tsdn, &emap_global, ptr, &alloc_ctx);

		assert(alloc_ctx.szind == sz_size2index(size));
		assert((config_prof && opt_prof)
		    || alloc_ctx.slab == (alloc_ctx.szind < SC_NBINS));

		if (config_debug) {
			edata_t *edata = emap_edata_lookup(tsdn, &emap_global,
			    ptr);
			assert(alloc_ctx.szind == edata_szind_get(edata));
			assert(alloc_ctx.slab == edata_slab_get(edata));
		}
	}

	if (likely(alloc_ctx.slab)) {
		/* Small allocation. */
		arena_dalloc_small(tsdn, ptr);
	} else {
		arena_dalloc_large_no_tcache(tsdn, ptr, alloc_ctx.szind);
	}
}
|
|
|
|
|
|
|
|
/*
 * Sized deallocation, preferring tcache fast paths.  caller_alloc_ctx may
 * carry already-looked-up metadata; it is required under active profiling
 * (or looked up here) because a sampled small object may occupy a promoted
 * size class that the caller-supplied size alone cannot reveal.
 */
JEMALLOC_ALWAYS_INLINE void
arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
    emap_alloc_ctx_t *caller_alloc_ctx, bool slow_path) {
	assert(!tsdn_null(tsdn) || tcache == NULL);
	assert(ptr != NULL);
	assert(size <= SC_LARGE_MAXCLASS);

	if (unlikely(tcache == NULL)) {
		arena_sdalloc_no_tcache(tsdn, ptr, size);
		return;
	}

	emap_alloc_ctx_t alloc_ctx;
	if (config_prof && opt_prof) {
		if (caller_alloc_ctx == NULL) {
			/* Uncommon case and should be a static check. */
			emap_alloc_ctx_lookup(tsdn, &emap_global, ptr,
			    &alloc_ctx);
			assert(alloc_ctx.szind == sz_size2index(size));
		} else {
			alloc_ctx = *caller_alloc_ctx;
		}
	} else {
		/*
		 * There is no risk of being confused by a promoted sampled
		 * object, so base szind and slab on the given size.
		 */
		alloc_ctx.szind = sz_size2index(size);
		alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS);
	}

	if (config_debug) {
		/* Cross-check the metadata against the extent. */
		edata_t *edata = emap_edata_lookup(tsdn, &emap_global, ptr);
		assert(alloc_ctx.szind == edata_szind_get(edata));
		assert(alloc_ctx.slab == edata_slab_get(edata));
	}

	if (likely(alloc_ctx.slab)) {
		/* Small allocation. */
		tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
		    alloc_ctx.szind, slow_path);
	} else {
		arena_dalloc_large(tsdn, ptr, tcache, alloc_ctx.szind,
		    slow_path);
	}
}
|
|
|
|
|
2020-03-09 01:11:02 +08:00
|
|
|
/*
 * For cache-index-oblivious allocation: shift the extent's usable address
 * by a random, cacheline-aligned offset within the first page, so that
 * same-size allocations do not all collide on the same cache indices.
 * Only applies when the requested alignment is sub-page; page-or-larger
 * alignments leave the address untouched.
 */
static inline void
arena_cache_oblivious_randomize(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
    size_t alignment) {
	assert(edata_base_get(edata) == edata_addr_get(edata));

	if (alignment < PAGE) {
		/* Number of random bits: one per cacheline slot in a page. */
		unsigned lg_range = LG_PAGE -
		    lg_floor(CACHELINE_CEILING(alignment));
		size_t r;
		if (!tsdn_null(tsdn)) {
			tsd_t *tsd = tsdn_tsd(tsdn);
			r = (size_t)prng_lg_range_u64(
			    tsd_prng_statep_get(tsd), lg_range);
		} else {
			/*
			 * No thread state: seed a throwaway PRNG state from
			 * the (ASLR-influenced) stack address of r.
			 */
			uint64_t stack_value = (uint64_t)(uintptr_t)&r;
			r = (size_t)prng_lg_range_u64(&stack_value, lg_range);
		}
		/* Scale r up so the offset is a multiple of the alignment. */
		uintptr_t random_offset = ((uintptr_t)r) << (LG_PAGE -
		    lg_range);
		edata->e_addr = (void *)((uintptr_t)edata->e_addr +
		    random_offset);
		assert(ALIGNMENT_ADDR2BASE(edata->e_addr, alignment) ==
		    edata->e_addr);
	}
}
|
|
|
|
|
2017-01-11 10:06:31 +08:00
|
|
|
#endif /* JEMALLOC_INTERNAL_ARENA_INLINES_B_H */
|