#ifndef JEMALLOC_INTERNAL_ARENA_INLINES_A_H
#define JEMALLOC_INTERNAL_ARENA_INLINES_A_H

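/* Return this arena's index within the global arenas array. */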
static inline unsigned
arena_ind_get(const arena_t *arena) {
	return base_ind_get(arena->base);
}
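/*
 * stats.internal is a running total of the bytes of internal (metadata)
 * allocation attributed to this arena.  The counter is only read for
 * statistics reporting, never used for synchronization, so relaxed
 * atomics suffice.
 */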
static inline void
arena_internal_add(arena_t *arena, size_t size) {
	atomic_fetch_add_zu(&arena->stats.internal, size, ATOMIC_RELAXED);
}
static inline void
arena_internal_sub(arena_t *arena, size_t size) {
	atomic_fetch_sub_zu(&arena->stats.internal, size, ATOMIC_RELAXED);
}
static inline size_t
arena_internal_get(arena_t *arena) {
	return atomic_load_zu(&arena->stats.internal, ATOMIC_RELAXED);
}
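
/*
 * Illustrative pairing (hypothetical caller; not part of this header):
 * add on allocation, subtract on deallocation, so that the load reports
 * the arena's live internal footprint.
 *
 *	arena_internal_add(arena, sz);	// after allocating sz metadata bytes
 *	...
 *	arena_internal_sub(arena, sz);	// when that metadata is freed
 *	size_t live = arena_internal_get(arena);
 */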
/*
 * Part of the opt.percpu_arena feature: thread-arena association is
 * determined dynamically based on CPU id.  Three modes are supported:
 * "percpu", "phycpu", and disabled.  "percpu" uses the current core id
 * (with help from sched_getcpu()) directly as the arena index, while
 * "phycpu" assigns threads on the same physical CPU to the same arena.
 * In other words, "percpu" means # of arenas == # of CPUs, while
 * "phycpu" has # of arenas == 1/2 * (# of CPUs).  Note that no runtime
 * check on whether hyperthreading is enabled has been added yet.
 *
 * When enabled, threads are migrated between arenas when a CPU change
 * is detected.  To reduce the overhead of reading the CPU id, each
 * arena tracks the thread that accessed it most recently; when a new
 * thread comes in, the CPU id is read and the arena updated if
 * necessary.
 */
static inline void
percpu_arena_update(tsd_t *tsd, unsigned cpu) {
	assert(have_percpu_arena);
	arena_t *oldarena = tsd_arena_get(tsd);
	assert(oldarena != NULL);
	unsigned oldind = arena_ind_get(oldarena);

	if (oldind != cpu) {
		unsigned newind = cpu;
		arena_t *newarena = arena_get(tsd_tsdn(tsd), newind, true);
		assert(newarena != NULL);

		/* Set new arena/tcache associations. */
		arena_migrate(tsd, oldind, newind);
		tcache_t *tcache = tcache_get(tsd);
		if (tcache != NULL) {
			tcache_arena_reassociate(tsd_tsdn(tsd), tcache,
			    newarena);
		}
	}
}
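
/*
 * Sketch of a caller (hypothetical; the real trigger lives elsewhere in
 * jemalloc).  In "percpu" mode the CPU id maps directly to the arena
 * index; "phycpu" mode would first fold hyperthread siblings onto a
 * shared index:
 *
 *	#include <sched.h>
 *
 *	static void
 *	example_percpu_rebind(tsd_t *tsd) {
 *		int cpu = sched_getcpu();
 *		if (have_percpu_arena && cpu >= 0) {
 *			percpu_arena_update(tsd, (unsigned)cpu);
 *		}
 *	}
 */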
#endif /* JEMALLOC_INTERNAL_ARENA_INLINES_A_H */