#ifndef JEMALLOC_INTERNAL_INLINES_A_H
#define JEMALLOC_INTERNAL_INLINES_A_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bit_util.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/ticker.h"

JEMALLOC_ALWAYS_INLINE malloc_cpuid_t
malloc_getcpu(void) {
	assert(have_percpu_arena);
#if defined(JEMALLOC_HAVE_SCHED_GETCPU)
	return (malloc_cpuid_t)sched_getcpu();
#else
	not_reached();
	return -1;
#endif
}

/* Return the chosen arena index based on the current cpu. */
JEMALLOC_ALWAYS_INLINE unsigned
percpu_arena_choose(void) {
	assert(have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena));

	malloc_cpuid_t cpuid = malloc_getcpu();
	assert(cpuid >= 0);

	unsigned arena_ind;
	if ((opt_percpu_arena == percpu_arena) || ((unsigned)cpuid < ncpus /
	    2)) {
		arena_ind = cpuid;
	} else {
		assert(opt_percpu_arena == per_phycpu_arena);
		/* Hyper threads on the same physical CPU share an arena. */
		arena_ind = cpuid - ncpus / 2;
	}

	return arena_ind;
}
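
/*
 * Illustrative example (hypothetical cpu counts, not taken from the code
 * above): on an 8-cpu machine with hyperthreading, "percpu" mode gives each
 * of cpuids 0..7 its own arena, while "phycpu" mode maps cpuids 0..3 to
 * arenas 0..3 and folds their hyperthread siblings 4..7 back onto arenas
 * 0..3 via cpuid - ncpus / 2.
 */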
/* Return the limit of percpu auto arena range, i.e. arenas[0...ind_limit). */
JEMALLOC_ALWAYS_INLINE unsigned
percpu_arena_ind_limit(percpu_arena_mode_t mode) {
	assert(have_percpu_arena && PERCPU_ARENA_ENABLED(mode));
	if (mode == per_phycpu_arena && ncpus > 1) {
		if (ncpus % 2) {
			/* An odd ncpus likely means a misconfiguration. */
			return ncpus / 2 + 1;
		}
		return ncpus / 2;
	} else {
		return ncpus;
	}
}
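
/*
 * Worked example (hypothetical cpu counts): in "phycpu" mode, ncpus == 8
 * yields a limit of 4; an odd ncpus == 7 yields 7 / 2 + 1 == 4, so the
 * unpaired cpu still maps inside the limit.  In "percpu" mode the limit is
 * simply ncpus.
 */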
static inline arena_tdata_t *
arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing) {
	arena_tdata_t *tdata;
	arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);

	if (unlikely(arenas_tdata == NULL)) {
		/* arenas_tdata hasn't been initialized yet. */
		return arena_tdata_get_hard(tsd, ind);
	}
	if (unlikely(ind >= tsd_narenas_tdata_get(tsd))) {
		/*
		 * ind is invalid, the cache is stale (too small), or the
		 * tdata has yet to be initialized.
		 */
		return (refresh_if_missing ? arena_tdata_get_hard(tsd, ind) :
		    NULL);
	}

	tdata = &arenas_tdata[ind];
	if (likely(tdata != NULL) || !refresh_if_missing) {
		return tdata;
	}
	return arena_tdata_get_hard(tsd, ind);
}

static inline arena_t *
arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing) {
	arena_t *ret;

	assert(ind < MALLOCX_ARENA_LIMIT);

	ret = (arena_t *)atomic_load_p(&arenas[ind], ATOMIC_ACQUIRE);
	if (unlikely(ret == NULL)) {
		if (init_if_missing) {
			ret = arena_init(tsdn, ind,
			    (extent_hooks_t *)&extent_hooks_default);
		}
	}
	return ret;
}
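
/*
 * Illustrative call (a sketch only, not a specific call site): arena 0 can
 * be looked up without triggering initialization via
 *
 *	arena_t *a0 = arena_get(tsdn, 0, false);
 */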
static inline ticker_t *
decay_ticker_get(tsd_t *tsd, unsigned ind) {
	arena_tdata_t *tdata;

	tdata = arena_tdata_get(tsd, ind, true);
	if (unlikely(tdata == NULL)) {
		return NULL;
	}
	return &tdata->decay_ticker;
}
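
/*
 * Sketch of the intended use (hypothetical call site; nticks is a
 * placeholder): allocation paths tick this ticker and trigger decay when it
 * fires, roughly:
 *
 *	ticker_t *decay_ticker = decay_ticker_get(tsd, arena_ind);
 *	if (decay_ticker != NULL && ticker_ticks(decay_ticker, nticks)) {
 *		... run arena decay ...
 *	}
 */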
JEMALLOC_ALWAYS_INLINE cache_bin_t *
tcache_small_bin_get(tcache_t *tcache, szind_t binind) {
	assert(binind < NBINS);
	return &tcache->bins_small[binind];
}
JEMALLOC_ALWAYS_INLINE cache_bin_t *
tcache_large_bin_get(tcache_t *tcache, szind_t binind) {
	assert(binind >= NBINS && binind < nhbins);
	return &tcache->bins_large[binind - NBINS];
}
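
/*
 * Note on indexing (actual values depend on the size-class configuration):
 * small size classes occupy bins_small[0 .. NBINS - 1]; the first large size
 * class has binind == NBINS and lands in bins_large[0], i.e. the large array
 * is simply offset by NBINS.
 */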
JEMALLOC_ALWAYS_INLINE bool
tcache_available(tsd_t *tsd) {
	/*
	 * The thread-specific auto tcache may be unavailable either because
	 * 1) tcache initialization is still in progress, or 2) it was
	 * disabled through the thread.tcache.enabled mallctl or config
	 * options.  This check covers both cases.
	 */
	if (likely(tsd_tcache_enabled_get(tsd))) {
		/* Associated arena == NULL implies tcache init in progress. */
		assert(tsd_tcachep_get(tsd)->arena == NULL ||
		    tcache_small_bin_get(tsd_tcachep_get(tsd), 0)->avail !=
		    NULL);
		return true;
	}

	return false;
}

JEMALLOC_ALWAYS_INLINE tcache_t *
tcache_get(tsd_t *tsd) {
	if (!tcache_available(tsd)) {
		return NULL;
	}

	return tsd_tcachep_get(tsd);
}
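
/*
 * Illustrative use (not a specific jemalloc call site): callers typically
 * take the fast path only when a tcache is available, e.g.
 *
 *	tcache_t *tcache = tcache_get(tsd);
 *	if (tcache != NULL) {
 *		... allocate/deallocate through the tcache ...
 *	} else {
 *		... fall back to the arena path ...
 *	}
 */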
static inline void
pre_reentrancy(tsd_t *tsd, arena_t *arena) {
	/* arena is the current context.  Reentry from a0 is not allowed. */
	assert(arena != arena_get(tsd_tsdn(tsd), 0, false));

	bool fast = tsd_fast(tsd);
	assert(tsd_reentrancy_level_get(tsd) < INT8_MAX);
	++*tsd_reentrancy_levelp_get(tsd);
	if (fast) {
		/* Prepare slow path for reentrancy. */
		tsd_slow_update(tsd);
		assert(tsd_state_get(tsd) == tsd_state_nominal_slow);
	}
}
static inline void
post_reentrancy(tsd_t *tsd) {
	int8_t *reentrancy_level = tsd_reentrancy_levelp_get(tsd);
	assert(*reentrancy_level > 0);
	if (--*reentrancy_level == 0) {
		tsd_slow_update(tsd);
	}
}
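
/*
 * Sketch of the intended pairing (the hook call below is a placeholder, not
 * a real jemalloc symbol): the reentrancy guards wrap code that may call
 * back into the allocator, e.g. user-supplied extent hooks:
 *
 *	pre_reentrancy(tsd, arena);
 *	err = hooks->some_callback(...);	// may re-enter malloc
 *	post_reentrancy(tsd);
 */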
#endif /* JEMALLOC_INTERNAL_INLINES_A_H */