Implement per-CPU arena.
The new feature, opt.percpu_arena, determines the thread-arena association dynamically based on CPU id. Three modes are supported: "percpu", "phycpu", and disabled. "percpu" uses the current CPU id (obtained via sched_getcpu()) directly as the arena index, while "phycpu" assigns threads running on the same physical CPU to the same arena. In other words, with "percpu" the number of arenas equals the number of CPUs, while with "phycpu" it is half the number of CPUs. Note that no runtime check for whether hyperthreading is actually enabled has been added yet. When the feature is enabled, threads are migrated between arenas whenever a CPU change is detected. To reduce the overhead of reading the CPU id, the current design has each arena track the thread that accessed it most recently; when a different thread comes in, the CPU id is read and the thread's arena is updated if necessary.
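For illustration only, a minimal sketch (not the committed code) of the CPU-id-to-arena-index mapping described above; percpu_arena_mode_t, percpu_arena_index(), and ncpus are made-up names standing in for whatever the implementation actually uses:

    /* Sketch only; names are illustrative, not jemalloc's. */
    #define _GNU_SOURCE
    #include <sched.h>	/* sched_getcpu() (Linux) */

    typedef enum {
    	percpu_arena_disabled,
    	percpu_arena_percpu,	/* one arena per CPU */
    	percpu_arena_phycpu	/* one arena per physical core */
    } percpu_arena_mode_t;

    /* Map the calling thread's current CPU id to an arena index. */
    static unsigned
    percpu_arena_index(percpu_arena_mode_t mode, unsigned ncpus) {
    	int cpu = sched_getcpu();

    	if (mode == percpu_arena_disabled || cpu < 0) {
    		return 0;	/* fall back to the default arena */
    	}
    	if (mode == percpu_arena_percpu) {
    		/* "percpu": # of arenas == # of CPUs. */
    		return (unsigned)cpu;
    	}
    	/*
    	 * "phycpu": # of arenas == # of CPUs / 2, assuming hyperthread
    	 * siblings are numbered cpu and cpu + ncpus / 2 (not verified at
    	 * runtime, per the note above).
    	 */
    	return (unsigned)cpu % (ncpus / 2);
    }

Selecting the mode would presumably follow jemalloc's usual name:value option syntax, e.g. MALLOC_CONF="percpu_arena:percpu".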
@@ -160,6 +160,7 @@ TEST_BEGIN(test_mallctl_opt) {
 	TEST_MALLCTL_OPT(bool, abort, always);
 	TEST_MALLCTL_OPT(const char *, dss, always);
 	TEST_MALLCTL_OPT(unsigned, narenas, always);
+	TEST_MALLCTL_OPT(const char *, percpu_arena, always);
 	TEST_MALLCTL_OPT(ssize_t, decay_time, always);
 	TEST_MALLCTL_OPT(bool, stats_print, always);
 	TEST_MALLCTL_OPT(const char *, junk, fill);
@@ -327,20 +328,38 @@ TEST_BEGIN(test_tcache) {
 TEST_END
 
 TEST_BEGIN(test_thread_arena) {
-	unsigned arena_old, arena_new, narenas;
-	size_t sz = sizeof(unsigned);
+	unsigned old_arena_ind, new_arena_ind, narenas;
+	const char *opt_percpu_arena;
+
+	size_t sz = sizeof(opt_percpu_arena);
+	assert_d_eq(mallctl("opt.percpu_arena", &opt_percpu_arena, &sz, NULL,
+	    0), 0, "Unexpected mallctl() failure");
 
+	sz = sizeof(unsigned);
 	assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
 	    0, "Unexpected mallctl() failure");
 	assert_u_eq(narenas, opt_narenas, "Number of arenas incorrect");
-	arena_new = narenas - 1;
-	assert_d_eq(mallctl("thread.arena", (void *)&arena_old, &sz,
-	    (void *)&arena_new, sizeof(unsigned)), 0,
-	    "Unexpected mallctl() failure");
-	arena_new = 0;
-	assert_d_eq(mallctl("thread.arena", (void *)&arena_old, &sz,
-	    (void *)&arena_new, sizeof(unsigned)), 0,
-	    "Unexpected mallctl() failure");
+
+	if (strcmp(opt_percpu_arena, "disabled") == 0) {
+		new_arena_ind = narenas - 1;
+		assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
+		    (void *)&new_arena_ind, sizeof(unsigned)), 0,
+		    "Unexpected mallctl() failure");
+		new_arena_ind = 0;
+		assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
+		    (void *)&new_arena_ind, sizeof(unsigned)), 0,
+		    "Unexpected mallctl() failure");
+	} else {
+		assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
+		    NULL, 0), 0, "Unexpected mallctl() failure");
+		new_arena_ind = percpu_arena_ind_limit() - 1;
+		if (old_arena_ind != new_arena_ind) {
+			assert_d_eq(mallctl("thread.arena",
+			    (void *)&old_arena_ind, &sz, (void *)&new_arena_ind,
+			    sizeof(unsigned)), EPERM, "thread.arena ctl "
+			    "should not be allowed with percpu arena");
+		}
+	}
 }
 TEST_END
 