#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_H
#define JEMALLOC_INTERNAL_ARENA_STRUCTS_H

#include "jemalloc/internal/arena_stats.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bin.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/counter.h"
#include "jemalloc/internal/ecache.h"
#include "jemalloc/internal/edata_cache.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/pa.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/ticker.h"

struct arena_s {
	/*
	 * Number of threads currently assigned to this arena.  Each thread has
	 * two distinct assignments, one for application-serving allocation, and
	 * the other for internal metadata allocation.  Internal metadata must
	 * not be allocated from arenas explicitly created via the arenas.create
	 * mallctl, because the arena.<i>.reset mallctl indiscriminately
	 * discards all allocations for the affected arena.
	 *
	 *   0: Application allocation.
	 *   1: Internal metadata allocation.
	 *
	 * Synchronization: atomic.
	 */
	atomic_u_t nthreads[2];
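	/*
	 * Illustrative sketch, not part of this header: both slots are
	 * maintained with the relaxed atomics from atomic.h, so readers and
	 * writers look roughly like this (internal is 0 or 1, selecting the
	 * assignment):
	 *
	 *	unsigned n = atomic_load_u(&arena->nthreads[internal],
	 *	    ATOMIC_RELAXED);
	 *	atomic_fetch_add_u(&arena->nthreads[internal], 1,
	 *	    ATOMIC_RELAXED);
	 */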

	/* Next bin shard for binding new threads.  Synchronization: atomic. */
	atomic_u_t binshard_next;
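	/*
	 * Illustrative sketch, not the literal binding code: spreading new
	 * threads across shards can be a relaxed fetch-add taken modulo the
	 * shard count (nshards here is assumed, standing in for the configured
	 * bin shard count):
	 *
	 *	unsigned next = atomic_fetch_add_u(&arena->binshard_next, 1,
	 *	    ATOMIC_RELAXED);
	 *	unsigned shard = next % nshards;
	 */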

	/*
	 * When percpu_arena is enabled, to amortize the cost of reading /
	 * updating the current CPU id, track the most recent thread accessing
	 * this arena, and only read CPU if there is a mismatch.
	 */
	tsdn_t *last_thd;
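	/*
	 * Illustrative sketch of the amortization above (hypothetical control
	 * flow; the real logic lives in the arena/tcache code): only consult
	 * the CPU id when a different thread shows up:
	 *
	 *	if (arena->last_thd != tsdn) {
	 *		... read the CPU id, and migrate the thread to the
	 *		    matching arena if it moved ...
	 *		arena->last_thd = tsdn;
	 *	}
	 */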

	/* Synchronization: internal. */
	arena_stats_t stats;

	/*
	 * Lists of tcaches and cache_bin_array_descriptors for extant threads
	 * associated with this arena.  Stats from these are merged
	 * incrementally, and at exit if opt_stats_print is enabled.
	 *
	 * Synchronization: tcache_ql_mtx.
	 */
	ql_head(tcache_slow_t) tcache_ql;
	ql_head(cache_bin_array_descriptor_t) cache_bin_array_descriptor_ql;
	malloc_mutex_t tcache_ql_mtx;
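	/*
	 * Illustrative sketch, not the actual merge code: walking the tcache
	 * list requires tcache_ql_mtx (the link field name is an assumption):
	 *
	 *	tcache_slow_t *tcache_slow;
	 *	malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
	 *	ql_foreach(tcache_slow, &arena->tcache_ql, link) {
	 *		... merge this thread's cache stats incrementally ...
	 *	}
	 *	malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
	 */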

	/*
	 * Represents a dss_prec_t, but atomically.
	 *
	 * Synchronization: atomic.
	 */
	atomic_u_t dss_prec;
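	/*
	 * Illustrative sketch: the enum is widened to unsigned for storage, so
	 * accessors cast at the boundary, along these lines:
	 *
	 *	dss_prec_t prec = (dss_prec_t)atomic_load_u(&arena->dss_prec,
	 *	    ATOMIC_ACQUIRE);
	 *	atomic_store_u(&arena->dss_prec, (unsigned)prec,
	 *	    ATOMIC_RELEASE);
	 */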

	/*
	 * Extant large allocations.
	 *
	 * Synchronization: large_mtx.
	 */
	edata_list_active_t large;
	/* Synchronizes all large allocation/update/deallocation. */
	malloc_mutex_t large_mtx;
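	/*
	 * Illustrative sketch (the list helper name is an assumption):
	 * recording a new large allocation is a locked append:
	 *
	 *	malloc_mutex_lock(tsdn, &arena->large_mtx);
	 *	edata_list_active_append(&arena->large, edata);
	 *	malloc_mutex_unlock(tsdn, &arena->large_mtx);
	 */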

	/* The page-level allocator shard this arena uses. */
	pa_shard_t pa_shard;

	/*
	 * A cached copy of base->ind.  This can get accessed on hot paths;
	 * looking it up in base requires an extra pointer hop / cache miss.
	 */
	unsigned ind;

	/*
	 * Base allocator, from which arena metadata are allocated.
	 *
	 * Synchronization: internal.
	 */
	base_t *base;
	/* Used to determine uptime.  Read-only after initialization. */
	nstime_t create_time;
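	/*
	 * Illustrative sketch (helper names from nstime.h): uptime is "now"
	 * minus create_time:
	 *
	 *	nstime_t uptime;
	 *	nstime_init_update(&uptime);
	 *	nstime_subtract(&uptime, &arena->create_time);
	 */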

	/* The name of the arena. */
	char name[ARENA_NAME_LEN];

	/*
	 * The arena is allocated alongside its bins; really this is a
	 * dynamically sized array determined by the binshard settings.
	 */
	bin_t bins[0];
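	/*
	 * Illustrative sketch, not the literal bootstrap code: with a trailing
	 * dynamically sized array, the arena and all of its bin shards come
	 * from a single base allocation (nbins_total is assumed, standing in
	 * for the total shard count across all size classes):
	 *
	 *	size_t size = sizeof(arena_t) + sizeof(bin_t) * nbins_total;
	 *	arena_t *arena = (arena_t *)base_alloc(tsdn, base, size,
	 *	    CACHELINE);
	 */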
};

#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_H */