#ifndef JEMALLOC_INTERNAL_ARENA_INLINES_B_H
#define JEMALLOC_INTERNAL_ARENA_INLINES_B_H

#include "jemalloc/internal/div.h"
#include "jemalloc/internal/emap.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/safety_check.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/ticker.h"

static inline arena_t *
arena_get_from_edata(edata_t *edata) {
	return (arena_t *)atomic_load_p(&arenas[edata_arena_ind_get(edata)],
	    ATOMIC_RELAXED);
}

JEMALLOC_ALWAYS_INLINE arena_t *
arena_choose_maybe_huge(tsd_t *tsd, arena_t *arena, size_t size) {
	if (arena != NULL) {
		return arena;
	}

	/*
	 * For huge allocations, use the dedicated huge arena if both are true:
	 * 1) the caller is using automatic arena selection (i.e. arena ==
	 * NULL), and 2) the thread is not assigned to a manual arena.
	 */
	if (unlikely(size >= oversize_threshold)) {
		arena_t *tsd_arena = tsd_arena_get(tsd);
		if (tsd_arena == NULL || arena_is_auto(tsd_arena)) {
			return arena_choose_huge(tsd);
		}
	}

	return arena_choose(tsd, NULL);
}

JEMALLOC_ALWAYS_INLINE void
arena_prof_info_get(tsd_t *tsd, const void *ptr, emap_alloc_ctx_t *alloc_ctx,
    prof_info_t *prof_info, bool reset_recent) {
	cassert(config_prof);
	assert(ptr != NULL);
	assert(prof_info != NULL);

	edata_t *edata = NULL;
	bool is_slab;

	/* Static check. */
	if (alloc_ctx == NULL) {
		edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global,
		    ptr);
		is_slab = edata_slab_get(edata);
	} else if (unlikely(!(is_slab = alloc_ctx->slab))) {
		edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global,
		    ptr);
	}

	if (unlikely(!is_slab)) {
		/* edata must have been initialized at this point. */
		assert(edata != NULL);
		large_prof_info_get(tsd, edata, prof_info, reset_recent);
	} else {
		prof_info->alloc_tctx = (prof_tctx_t *)(uintptr_t)1U;
		/*
		 * No need to set other fields in prof_info; they will never be
		 * accessed if (uintptr_t)alloc_tctx == (uintptr_t)1U.
		 */
	}
}

JEMALLOC_ALWAYS_INLINE void
arena_prof_tctx_reset(tsd_t *tsd, const void *ptr,
    emap_alloc_ctx_t *alloc_ctx) {
	cassert(config_prof);
	assert(ptr != NULL);

	/* Static check. */
	if (alloc_ctx == NULL) {
		edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd),
		    &arena_emap_global, ptr);
		if (unlikely(!edata_slab_get(edata))) {
			large_prof_tctx_reset(edata);
		}
	} else {
		if (unlikely(!alloc_ctx->slab)) {
			edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd),
			    &arena_emap_global, ptr);
			large_prof_tctx_reset(edata);
		}
	}
}

JEMALLOC_ALWAYS_INLINE void
arena_prof_tctx_reset_sampled(tsd_t *tsd, const void *ptr) {
	cassert(config_prof);
	assert(ptr != NULL);

	edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global,
	    ptr);
	assert(!edata_slab_get(edata));

	large_prof_tctx_reset(edata);
}

JEMALLOC_ALWAYS_INLINE void
arena_prof_info_set(tsd_t *tsd, edata_t *edata, prof_tctx_t *tctx,
    size_t size) {
	cassert(config_prof);

	assert(!edata_slab_get(edata));
	large_prof_info_set(edata, tctx, size);
}

JEMALLOC_ALWAYS_INLINE void
arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks) {
	tsd_t *tsd;
	ticker_t *decay_ticker;

	if (unlikely(tsdn_null(tsdn))) {
		return;
	}
	tsd = tsdn_tsd(tsdn);
	decay_ticker = decay_ticker_get(tsd, arena_ind_get(arena));
	if (unlikely(decay_ticker == NULL)) {
		return;
	}
	if (unlikely(ticker_ticks(decay_ticker, nticks))) {
		arena_decay(tsdn, arena, false, false);
	}
}

JEMALLOC_ALWAYS_INLINE void
arena_decay_tick(tsdn_t *tsdn, arena_t *arena) {
	arena_decay_ticks(tsdn, arena, 1);
}

JEMALLOC_ALWAYS_INLINE void *
arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
    tcache_t *tcache, bool slow_path) {
	assert(!tsdn_null(tsdn) || tcache == NULL);

	if (likely(tcache != NULL)) {
		if (likely(size <= SC_SMALL_MAXCLASS)) {
			return tcache_alloc_small(tsdn_tsd(tsdn), arena,
			    tcache, size, ind, zero, slow_path);
		}
		if (likely(size <= tcache_maxclass)) {
			return tcache_alloc_large(tsdn_tsd(tsdn), arena,
			    tcache, size, ind, zero, slow_path);
		}
		/* (size > tcache_maxclass) case falls through. */
		assert(size > tcache_maxclass);
	}

	return arena_malloc_hard(tsdn, arena, size, ind, zero);
}
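
/*
 * Routing summary: arena_malloc() serves small requests
 * (size <= SC_SMALL_MAXCLASS) and tcache-able large requests
 * (size <= tcache_maxclass) from the thread cache when one is available;
 * everything else, including all calls with tcache == NULL, falls through to
 * arena_malloc_hard().  With common configurations (4 KiB pages, default
 * tcache settings) the small/large boundary is roughly 14 KiB and
 * tcache_maxclass roughly 32 KiB, but both thresholds depend on how jemalloc
 * was built and tuned.
 */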

JEMALLOC_ALWAYS_INLINE arena_t *
arena_aalloc(tsdn_t *tsdn, const void *ptr) {
	edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
	unsigned arena_ind = edata_arena_ind_get(edata);
	return (arena_t *)atomic_load_p(&arenas[arena_ind], ATOMIC_RELAXED);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_salloc(tsdn_t *tsdn, const void *ptr) {
	assert(ptr != NULL);
	emap_alloc_ctx_t alloc_ctx;
	emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr, &alloc_ctx);
	assert(alloc_ctx.szind != SC_NSIZES);

	return sz_index2size(alloc_ctx.szind);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_vsalloc(tsdn_t *tsdn, const void *ptr) {
	/*
	 * Return 0 if ptr is not within an extent managed by jemalloc.  This
	 * function has two extra costs relative to isalloc():
	 * - The rtree calls cannot claim to be dependent lookups, which
	 *   induces rtree lookup load dependencies.
	 * - The lookup may fail, so there is an extra branch to check for
	 *   failure.
	 */

	emap_full_alloc_ctx_t full_alloc_ctx;
	bool missing = emap_full_alloc_ctx_try_lookup(tsdn, &arena_emap_global,
	    ptr, &full_alloc_ctx);
	if (missing) {
		return 0;
	}

	if (full_alloc_ctx.edata == NULL) {
		return 0;
	}
	assert(edata_state_get(full_alloc_ctx.edata) == extent_state_active);
	/* Only slab members should be looked up via interior pointers. */
	assert(edata_addr_get(full_alloc_ctx.edata) == ptr
	    || edata_slab_get(full_alloc_ctx.edata));

	assert(full_alloc_ctx.szind != SC_NSIZES);

	return sz_index2size(full_alloc_ctx.szind);
}
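
/*
 * Usage note: unlike arena_salloc(), arena_vsalloc() tolerates arbitrary
 * pointers.  A caller that may be handed memory jemalloc never allocated
 * (the caller shown here is illustrative, not a specific function in this
 * codebase) can do roughly:
 *
 *	size_t usize = arena_vsalloc(tsdn, ptr);
 *	if (usize == 0) {
 *		// ptr is not a live jemalloc allocation.
 *	}
 */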

JEMALLOC_ALWAYS_INLINE bool
large_dalloc_safety_checks(edata_t *edata, szind_t szind) {
	if (!config_opt_safety_checks) {
		return false;
	}

	/*
	 * Eagerly detect double free and sized dealloc bugs for large sizes.
	 * The cost is low enough (as edata will be accessed anyway) to be
	 * enabled all the time.
	 */
	if (unlikely(edata_state_get(edata) != extent_state_active)) {
		safety_check_fail("Invalid deallocation detected: "
		    "pages being freed (%p) not currently active, "
		    "possibly caused by double free bugs.",
		    (uintptr_t)edata_addr_get(edata));
		return true;
	}
	if (unlikely(sz_index2size(szind) != edata_usize_get(edata))) {
		safety_check_fail_sized_dealloc(/* current_dealloc */ true);
		return true;
	}

	return false;
}

static inline void
arena_dalloc_large_no_tcache(tsdn_t *tsdn, void *ptr, szind_t szind) {
	if (config_prof && unlikely(szind < SC_NBINS)) {
		arena_dalloc_promoted(tsdn, ptr, NULL, true);
	} else {
		edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
		    ptr);
		if (large_dalloc_safety_checks(edata, szind)) {
			/* See the comment in isfree. */
			return;
		}
		large_dalloc(tsdn, edata);
	}
}

static inline void
arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr) {
	assert(ptr != NULL);

	emap_alloc_ctx_t alloc_ctx;
	emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr, &alloc_ctx);

	if (config_debug) {
		edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
		    ptr);
		assert(alloc_ctx.szind == edata_szind_get(edata));
		assert(alloc_ctx.szind < SC_NSIZES);
		assert(alloc_ctx.slab == edata_slab_get(edata));
	}

	if (likely(alloc_ctx.slab)) {
		/* Small allocation. */
		arena_dalloc_small(tsdn, ptr);
	} else {
		arena_dalloc_large_no_tcache(tsdn, ptr, alloc_ctx.szind);
	}
}

JEMALLOC_ALWAYS_INLINE void
arena_dalloc_large(tsdn_t *tsdn, void *ptr, tcache_t *tcache, szind_t szind,
    bool slow_path) {
	if (szind < nhbins) {
		if (config_prof && unlikely(szind < SC_NBINS)) {
			arena_dalloc_promoted(tsdn, ptr, tcache, slow_path);
		} else {
			tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, szind,
			    slow_path);
		}
	} else {
		edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
		    ptr);
		if (large_dalloc_safety_checks(edata, szind)) {
			/* See the comment in isfree. */
			return;
		}
		large_dalloc(tsdn, edata);
	}
}

JEMALLOC_ALWAYS_INLINE void
arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
    emap_alloc_ctx_t *caller_alloc_ctx, bool slow_path) {
	assert(!tsdn_null(tsdn) || tcache == NULL);
	assert(ptr != NULL);

	if (unlikely(tcache == NULL)) {
		arena_dalloc_no_tcache(tsdn, ptr);
		return;
	}

	emap_alloc_ctx_t alloc_ctx;
	if (caller_alloc_ctx != NULL) {
		alloc_ctx = *caller_alloc_ctx;
	} else {
		util_assume(!tsdn_null(tsdn));
		emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr,
		    &alloc_ctx);
	}

	if (config_debug) {
		edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
		    ptr);
		assert(alloc_ctx.szind == edata_szind_get(edata));
		assert(alloc_ctx.szind < SC_NSIZES);
		assert(alloc_ctx.slab == edata_slab_get(edata));
	}

	if (likely(alloc_ctx.slab)) {
		/* Small allocation. */
		tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
		    alloc_ctx.szind, slow_path);
	} else {
		arena_dalloc_large(tsdn, ptr, tcache, alloc_ctx.szind,
		    slow_path);
	}
}
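
/*
 * Note on caller_alloc_ctx: a caller that has already looked up the size
 * class and slab bit for ptr (e.g. a deallocation fast path that just walked
 * the emap) can pass its emap_alloc_ctx_t here to avoid a second rtree
 * lookup; passing NULL makes arena_dalloc() perform the lookup itself.  A
 * sketch of the two call styles (caller-side names are illustrative):
 *
 *	emap_alloc_ctx_t ctx;
 *	emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr, &ctx);
 *	arena_dalloc(tsdn, ptr, tcache, &ctx, false);	// reuse the lookup
 *
 *	arena_dalloc(tsdn, ptr, tcache, NULL, true);	// let it look up
 */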

static inline void
arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) {
	assert(ptr != NULL);
	assert(size <= SC_LARGE_MAXCLASS);

	emap_alloc_ctx_t alloc_ctx;
	if (!config_prof || !opt_prof) {
		/*
		 * There is no risk of being confused by a promoted sampled
		 * object, so base szind and slab on the given size.
		 */
		alloc_ctx.szind = sz_size2index(size);
		alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS);
	}

	if ((config_prof && opt_prof) || config_debug) {
		emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr,
		    &alloc_ctx);

		assert(alloc_ctx.szind == sz_size2index(size));
		assert((config_prof && opt_prof)
		    || alloc_ctx.slab == (alloc_ctx.szind < SC_NBINS));

		if (config_debug) {
			edata_t *edata = emap_edata_lookup(tsdn,
			    &arena_emap_global, ptr);
			assert(alloc_ctx.szind == edata_szind_get(edata));
			assert(alloc_ctx.slab == edata_slab_get(edata));
		}
	}

	if (likely(alloc_ctx.slab)) {
		/* Small allocation. */
		arena_dalloc_small(tsdn, ptr);
	} else {
		arena_dalloc_large_no_tcache(tsdn, ptr, alloc_ctx.szind);
	}
}

JEMALLOC_ALWAYS_INLINE void
arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
    emap_alloc_ctx_t *caller_alloc_ctx, bool slow_path) {
	assert(!tsdn_null(tsdn) || tcache == NULL);
	assert(ptr != NULL);
	assert(size <= SC_LARGE_MAXCLASS);

	if (unlikely(tcache == NULL)) {
		arena_sdalloc_no_tcache(tsdn, ptr, size);
		return;
	}

	emap_alloc_ctx_t alloc_ctx;
	if (config_prof && opt_prof) {
		if (caller_alloc_ctx == NULL) {
			/* Uncommon case and should be a static check. */
			emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr,
			    &alloc_ctx);
			assert(alloc_ctx.szind == sz_size2index(size));
		} else {
			alloc_ctx = *caller_alloc_ctx;
		}
	} else {
		/*
		 * There is no risk of being confused by a promoted sampled
		 * object, so base szind and slab on the given size.
		 */
		alloc_ctx.szind = sz_size2index(size);
		alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS);
	}

	if (config_debug) {
		edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
		    ptr);
		assert(alloc_ctx.szind == edata_szind_get(edata));
		assert(alloc_ctx.slab == edata_slab_get(edata));
	}

	if (likely(alloc_ctx.slab)) {
		/* Small allocation. */
		tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
		    alloc_ctx.szind, slow_path);
	} else {
		arena_dalloc_large(tsdn, ptr, tcache, alloc_ctx.szind,
		    slow_path);
	}
}

static inline void
arena_cache_oblivious_randomize(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
    size_t alignment) {
	assert(edata_base_get(edata) == edata_addr_get(edata));

	if (alignment < PAGE) {
		unsigned lg_range = LG_PAGE -
		    lg_floor(CACHELINE_CEILING(alignment));
		size_t r;
		if (!tsdn_null(tsdn)) {
			tsd_t *tsd = tsdn_tsd(tsdn);
			r = (size_t)prng_lg_range_u64(
			    tsd_prng_statep_get(tsd), lg_range);
		} else {
			uint64_t stack_value = (uint64_t)(uintptr_t)&r;
			r = (size_t)prng_lg_range_u64(&stack_value, lg_range);
		}
		uintptr_t random_offset = ((uintptr_t)r) << (LG_PAGE -
		    lg_range);
		edata->e_addr = (void *)((uintptr_t)edata->e_addr +
		    random_offset);
		assert(ALIGNMENT_ADDR2BASE(edata->e_addr, alignment) ==
		    edata->e_addr);
	}
}
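
/*
 * Worked example of the offset arithmetic above, assuming 4 KiB pages
 * (LG_PAGE == 12) and 64-byte cache lines: for alignment == 64,
 * CACHELINE_CEILING(64) == 64 and lg_floor(64) == 6, so lg_range == 6 and r
 * is drawn from [0, 63].  random_offset == r << 6, i.e. a multiple of 64 in
 * [0, 4032], so the start address moves by a cache-line-aligned amount while
 * staying within the first page and preserving the requested alignment.
 */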

/*
 * The dalloc bin info contains just the information that the common paths need
 * during tcache flushes.  By force-inlining these paths, and using local copies
 * of data (so that the compiler knows it's constant), we avoid a whole bunch of
 * redundant loads and stores by leaving this information in registers.
 */
typedef struct arena_dalloc_bin_locked_info_s arena_dalloc_bin_locked_info_t;
struct arena_dalloc_bin_locked_info_s {
	div_info_t div_info;
	uint32_t nregs;
	uint64_t ndalloc;
};

JEMALLOC_ALWAYS_INLINE size_t
arena_slab_regind(arena_dalloc_bin_locked_info_t *info, szind_t binind,
    edata_t *slab, const void *ptr) {
	size_t diff, regind;

	/* Freeing a pointer outside the slab can cause assertion failure. */
	assert((uintptr_t)ptr >= (uintptr_t)edata_addr_get(slab));
	assert((uintptr_t)ptr < (uintptr_t)edata_past_get(slab));
	/* Freeing an interior pointer can cause assertion failure. */
	assert(((uintptr_t)ptr - (uintptr_t)edata_addr_get(slab)) %
	    (uintptr_t)bin_infos[binind].reg_size == 0);

	diff = (size_t)((uintptr_t)ptr - (uintptr_t)edata_addr_get(slab));

	/* Avoid doing division with a variable divisor. */
	regind = div_compute(&info->div_info, diff);

	assert(regind < bin_infos[binind].nregs);

	return regind;
}
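
/*
 * Example of the region-index computation, assuming a bin whose regions are
 * 48 bytes: a pointer at slab base + 144 gives diff == 144, and
 * div_compute(&info->div_info, 144) returns 144 / 48 == 3, i.e. the fourth
 * region.  info->div_info holds precomputed data for dividing by the bin's
 * region size (copied in arena_dalloc_bin_locked_begin()), so the hot path
 * never divides by a variable divisor.
 */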

JEMALLOC_ALWAYS_INLINE void
arena_dalloc_bin_locked_begin(arena_dalloc_bin_locked_info_t *info,
    szind_t binind) {
	info->div_info = arena_binind_div_info[binind];
	info->nregs = bin_infos[binind].nregs;
	info->ndalloc = 0;
}

/*
 * Does the deallocation work associated with freeing a single pointer (a
 * "step") in between an arena_dalloc_bin_locked_begin() and
 * arena_dalloc_bin_locked_finish() call.
 *
 * Returns true if arena_slab_dalloc must be called on slab.  Doesn't do
 * stats updates, which happen during finish (this lets running counts get
 * left in a register).
 */
JEMALLOC_ALWAYS_INLINE bool
arena_dalloc_bin_locked_step(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
    arena_dalloc_bin_locked_info_t *info, szind_t binind, edata_t *slab,
    void *ptr) {
	const bin_info_t *bin_info = &bin_infos[binind];
	size_t regind = arena_slab_regind(info, binind, slab, ptr);
	slab_data_t *slab_data = edata_slab_data_get(slab);

	assert(edata_nfree_get(slab) < bin_info->nregs);
	/* Freeing an unallocated pointer can cause assertion failure. */
	assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind));

	bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind);
	edata_nfree_inc(slab);

	if (config_stats) {
		info->ndalloc++;
	}

	unsigned nfree = edata_nfree_get(slab);
	if (nfree == bin_info->nregs) {
		arena_dalloc_bin_locked_handle_newly_empty(tsdn, arena, slab,
		    bin);
		return true;
	} else if (nfree == 1 && slab != bin->slabcur) {
		arena_dalloc_bin_locked_handle_newly_nonempty(tsdn, arena,
		    slab, bin);
	}
	return false;
}

JEMALLOC_ALWAYS_INLINE void
arena_dalloc_bin_locked_finish(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
    arena_dalloc_bin_locked_info_t *info) {
	if (config_stats) {
		bin->stats.ndalloc += info->ndalloc;
		assert(bin->stats.curregs >= (size_t)info->ndalloc);
		bin->stats.curregs -= (size_t)info->ndalloc;
	}
}
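
/*
 * Minimal sketch of the intended begin/step/finish call pattern, e.g. when
 * freeing a batch of pointers that all belong to one bin.  The caller holds
 * bin->lock across the loop; slabs that a step reports as fully empty are
 * only disposed of (arena_slab_dalloc) after the lock is dropped.  The loop
 * variables below are illustrative, not taken from a specific caller:
 *
 *	arena_dalloc_bin_locked_info_t info;
 *	arena_dalloc_bin_locked_begin(&info, binind);
 *	for (unsigned i = 0; i < nptrs; i++) {
 *		edata_t *slab = slabs[i];
 *		if (arena_dalloc_bin_locked_step(tsdn, arena, bin, &info,
 *		    binind, slab, ptrs[i])) {
 *			// Record slab; call arena_slab_dalloc() later,
 *			// outside the bin lock.
 *		}
 *	}
 *	arena_dalloc_bin_locked_finish(tsdn, arena, bin, &info);
 */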

#endif /* JEMALLOC_INTERNAL_ARENA_INLINES_B_H */