2017-04-11 09:17:55 +08:00
|
|
|
#ifndef JEMALLOC_INTERNAL_INLINES_A_H
|
|
|
|
#define JEMALLOC_INTERNAL_INLINES_A_H
|
|
|
|
|
2017-04-11 10:04:40 +08:00
|
|
|
#include "jemalloc/internal/atomic.h"
|
2017-04-12 03:57:18 +08:00
|
|
|
#include "jemalloc/internal/bit_util.h"
|
2017-04-18 06:52:44 +08:00
|
|
|
#include "jemalloc/internal/jemalloc_internal_types.h"
|
2017-12-15 04:46:39 +08:00
|
|
|
#include "jemalloc/internal/sc.h"
|
2017-04-20 04:39:33 +08:00
|
|
|
#include "jemalloc/internal/ticker.h"
|
2017-04-11 10:04:40 +08:00
|
|
|
|
2017-04-11 09:17:55 +08:00
|
|
|
JEMALLOC_ALWAYS_INLINE malloc_cpuid_t
|
|
|
|
malloc_getcpu(void) {
|
|
|
|
assert(have_percpu_arena);
|
2018-10-31 19:03:42 +08:00
|
|
|
#if defined(_WIN32)
|
|
|
|
return GetCurrentProcessorNumber();
|
|
|
|
#elif defined(JEMALLOC_HAVE_SCHED_GETCPU)
|
2017-04-11 09:17:55 +08:00
|
|
|
return (malloc_cpuid_t)sched_getcpu();
|
2022-12-15 09:23:41 +08:00
|
|
|
#elif defined(JEMALLOC_HAVE_RDTSCP)
|
2022-05-21 03:14:33 +08:00
|
|
|
unsigned int ax, cx, dx;
|
|
|
|
asm volatile("rdtscp" : "=a"(ax), "=d"(dx), "=c"(cx) ::);
|
|
|
|
return (malloc_cpuid_t)(dx & 0xfff);
|
|
|
|
#elif defined(__aarch64__) && defined(__APPLE__)
|
|
|
|
/* Other oses most likely use tpidr_el0 instead */
|
|
|
|
uintptr_t c;
|
|
|
|
asm volatile("mrs %x0, tpidrro_el0" : "=r"(c) :: "memory");
|
|
|
|
return (malloc_cpuid_t)(c & (1 << 3) - 1);
|
2017-04-11 09:17:55 +08:00
|
|
|
#else
|
|
|
|
not_reached();
|
|
|
|
return -1;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Return the chosen arena index based on current cpu. */
|
|
|
|
JEMALLOC_ALWAYS_INLINE unsigned
|
|
|
|
percpu_arena_choose(void) {
|
2017-06-01 07:45:14 +08:00
|
|
|
assert(have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena));
|
2017-04-11 09:17:55 +08:00
|
|
|
|
|
|
|
malloc_cpuid_t cpuid = malloc_getcpu();
|
|
|
|
assert(cpuid >= 0);
|
2017-06-01 07:45:14 +08:00
|
|
|
|
|
|
|
unsigned arena_ind;
|
|
|
|
if ((opt_percpu_arena == percpu_arena) || ((unsigned)cpuid < ncpus /
|
|
|
|
2)) {
|
2017-04-11 09:17:55 +08:00
|
|
|
arena_ind = cpuid;
|
|
|
|
} else {
|
2017-06-01 07:45:14 +08:00
|
|
|
assert(opt_percpu_arena == per_phycpu_arena);
|
2017-04-11 09:17:55 +08:00
|
|
|
/* Hyper threads on the same physical CPU share arena. */
|
|
|
|
arena_ind = cpuid - ncpus / 2;
|
|
|
|
}
|
|
|
|
|
|
|
|
return arena_ind;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Return the limit of percpu auto arena range, i.e. arenas[0...ind_limit). */
|
|
|
|
JEMALLOC_ALWAYS_INLINE unsigned
|
2017-06-01 07:45:14 +08:00
|
|
|
percpu_arena_ind_limit(percpu_arena_mode_t mode) {
|
|
|
|
assert(have_percpu_arena && PERCPU_ARENA_ENABLED(mode));
|
|
|
|
if (mode == per_phycpu_arena && ncpus > 1) {
|
2017-04-11 09:17:55 +08:00
|
|
|
if (ncpus % 2) {
|
|
|
|
/* This likely means a misconfig. */
|
|
|
|
return ncpus / 2 + 1;
|
|
|
|
}
|
|
|
|
return ncpus / 2;
|
|
|
|
} else {
|
|
|
|
return ncpus;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
static inline arena_t *
|
2017-04-11 09:17:55 +08:00
|
|
|
arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing) {
|
|
|
|
arena_t *ret;
|
|
|
|
|
2017-05-14 06:20:48 +08:00
|
|
|
assert(ind < MALLOCX_ARENA_LIMIT);
|
2017-04-11 09:17:55 +08:00
|
|
|
|
|
|
|
ret = (arena_t *)atomic_load_p(&arenas[ind], ATOMIC_ACQUIRE);
|
|
|
|
if (unlikely(ret == NULL)) {
|
|
|
|
if (init_if_missing) {
|
2021-09-28 04:43:24 +08:00
|
|
|
ret = arena_init(tsdn, ind, &arena_config_default);
|
2017-04-11 09:17:55 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
JEMALLOC_ALWAYS_INLINE bool
|
|
|
|
tcache_available(tsd_t *tsd) {
|
|
|
|
/*
|
|
|
|
* Thread specific auto tcache might be unavailable if: 1) during tcache
|
|
|
|
* initialization, or 2) disabled through thread.tcache.enabled mallctl
|
|
|
|
* or config options. This check covers all cases.
|
|
|
|
*/
|
2017-04-21 08:21:37 +08:00
|
|
|
if (likely(tsd_tcache_enabled_get(tsd))) {
|
|
|
|
/* Associated arena == NULL implies tcache init in progress. */
|
2020-04-08 11:04:46 +08:00
|
|
|
if (config_debug && tsd_tcache_slowp_get(tsd)->arena != NULL) {
|
|
|
|
tcache_assert_initialized(tsd_tcachep_get(tsd));
|
|
|
|
}
|
2017-04-11 09:17:55 +08:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Return the thread's tcache, or NULL when no tcache is available. */
JEMALLOC_ALWAYS_INLINE tcache_t *
tcache_get(tsd_t *tsd) {
	return tcache_available(tsd) ? tsd_tcachep_get(tsd) : NULL;
}
|
2017-04-13 07:16:27 +08:00
|
|
|
|
2020-04-08 08:48:35 +08:00
|
|
|
/* Return the thread's tcache_slow_t state, or NULL when no tcache is available. */
JEMALLOC_ALWAYS_INLINE tcache_slow_t *
tcache_slow_get(tsd_t *tsd) {
	return tcache_available(tsd) ? tsd_tcache_slowp_get(tsd) : NULL;
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
/*
 * Mark the start of a section that may reenter the allocator.  arena is the
 * arena of the current context; the assert (debug builds only -- the
 * arena_get call compiles out with NDEBUG) forbids reentry from a0.
 */
static inline void
pre_reentrancy(tsd_t *tsd, arena_t *arena) {
	/* arena is the current context. Reentry from a0 is not allowed. */
	assert(arena != arena_get(tsd_tsdn(tsd), 0, false));
	tsd_pre_reentrancy_raw(tsd);
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
/* Mark the end of a section entered via pre_reentrancy(). */
static inline void
post_reentrancy(tsd_t *tsd) {
	tsd_post_reentrancy_raw(tsd);
}
|
|
|
|
|
2017-04-11 09:17:55 +08:00
|
|
|
#endif /* JEMALLOC_INTERNAL_INLINES_A_H */
|