2013-12-19 15:21:42 +08:00
|
|
|
#include "test/jemalloc_test.h"
|
|
|
|
|
2018-05-01 07:24:36 +08:00
|
|
|
#include "jemalloc/internal/hook.h"
|
2017-04-12 04:31:16 +08:00
|
|
|
#include "jemalloc/internal/util.h"
|
|
|
|
|
2017-01-16 08:56:30 +08:00
|
|
|
TEST_BEGIN(test_mallctl_errors) {
|
2013-12-19 15:21:42 +08:00
|
|
|
uint64_t epoch;
|
|
|
|
size_t sz;
|
|
|
|
|
|
|
|
assert_d_eq(mallctl("no_such_name", NULL, NULL, NULL, 0), ENOENT,
|
|
|
|
"mallctl() should return ENOENT for non-existent names");
|
|
|
|
|
|
|
|
assert_d_eq(mallctl("version", NULL, NULL, "0.0.0", strlen("0.0.0")),
|
|
|
|
EPERM, "mallctl() should return EPERM on attempt to write "
|
|
|
|
"read-only value");
|
|
|
|
|
2016-10-28 12:31:25 +08:00
|
|
|
assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
|
|
|
|
sizeof(epoch)-1), EINVAL,
|
|
|
|
"mallctl() should return EINVAL for input size mismatch");
|
|
|
|
assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
|
|
|
|
sizeof(epoch)+1), EINVAL,
|
|
|
|
"mallctl() should return EINVAL for input size mismatch");
|
2013-12-19 15:21:42 +08:00
|
|
|
|
|
|
|
sz = sizeof(epoch)-1;
|
2016-10-28 12:31:25 +08:00
|
|
|
assert_d_eq(mallctl("epoch", (void *)&epoch, &sz, NULL, 0), EINVAL,
|
2013-12-19 15:21:42 +08:00
|
|
|
"mallctl() should return EINVAL for output size mismatch");
|
|
|
|
sz = sizeof(epoch)+1;
|
2016-10-28 12:31:25 +08:00
|
|
|
assert_d_eq(mallctl("epoch", (void *)&epoch, &sz, NULL, 0), EINVAL,
|
2013-12-19 15:21:42 +08:00
|
|
|
"mallctl() should return EINVAL for output size mismatch");
|
|
|
|
}
|
|
|
|
TEST_END
|
|
|
|
|
2017-01-16 08:56:30 +08:00
|
|
|
TEST_BEGIN(test_mallctlnametomib_errors) {
|
2013-12-19 15:21:42 +08:00
|
|
|
size_t mib[1];
|
|
|
|
size_t miblen;
|
|
|
|
|
|
|
|
miblen = sizeof(mib)/sizeof(size_t);
|
|
|
|
assert_d_eq(mallctlnametomib("no_such_name", mib, &miblen), ENOENT,
|
|
|
|
"mallctlnametomib() should return ENOENT for non-existent names");
|
|
|
|
}
|
|
|
|
TEST_END
|
|
|
|
|
2017-01-16 08:56:30 +08:00
|
|
|
TEST_BEGIN(test_mallctlbymib_errors) {
|
2013-12-19 15:21:42 +08:00
|
|
|
uint64_t epoch;
|
|
|
|
size_t sz;
|
|
|
|
size_t mib[1];
|
|
|
|
size_t miblen;
|
|
|
|
|
|
|
|
miblen = sizeof(mib)/sizeof(size_t);
|
|
|
|
assert_d_eq(mallctlnametomib("version", mib, &miblen), 0,
|
|
|
|
"Unexpected mallctlnametomib() failure");
|
|
|
|
|
|
|
|
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, "0.0.0",
|
|
|
|
strlen("0.0.0")), EPERM, "mallctl() should return EPERM on "
|
|
|
|
"attempt to write read-only value");
|
|
|
|
|
|
|
|
miblen = sizeof(mib)/sizeof(size_t);
|
|
|
|
assert_d_eq(mallctlnametomib("epoch", mib, &miblen), 0,
|
|
|
|
"Unexpected mallctlnametomib() failure");
|
|
|
|
|
2016-10-28 12:31:25 +08:00
|
|
|
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&epoch,
|
2013-12-19 15:21:42 +08:00
|
|
|
sizeof(epoch)-1), EINVAL,
|
|
|
|
"mallctlbymib() should return EINVAL for input size mismatch");
|
2016-10-28 12:31:25 +08:00
|
|
|
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&epoch,
|
2013-12-19 15:21:42 +08:00
|
|
|
sizeof(epoch)+1), EINVAL,
|
|
|
|
"mallctlbymib() should return EINVAL for input size mismatch");
|
|
|
|
|
|
|
|
sz = sizeof(epoch)-1;
|
2016-10-28 12:31:25 +08:00
|
|
|
assert_d_eq(mallctlbymib(mib, miblen, (void *)&epoch, &sz, NULL, 0),
|
|
|
|
EINVAL,
|
2013-12-19 15:21:42 +08:00
|
|
|
"mallctlbymib() should return EINVAL for output size mismatch");
|
|
|
|
sz = sizeof(epoch)+1;
|
2016-10-28 12:31:25 +08:00
|
|
|
assert_d_eq(mallctlbymib(mib, miblen, (void *)&epoch, &sz, NULL, 0),
|
|
|
|
EINVAL,
|
2013-12-19 15:21:42 +08:00
|
|
|
"mallctlbymib() should return EINVAL for output size mismatch");
|
|
|
|
}
|
|
|
|
TEST_END
|
|
|
|
|
2017-01-16 08:56:30 +08:00
|
|
|
TEST_BEGIN(test_mallctl_read_write) {
|
2013-12-19 15:21:42 +08:00
|
|
|
uint64_t old_epoch, new_epoch;
|
|
|
|
size_t sz = sizeof(old_epoch);
|
|
|
|
|
|
|
|
/* Blind. */
|
|
|
|
assert_d_eq(mallctl("epoch", NULL, NULL, NULL, 0), 0,
|
|
|
|
"Unexpected mallctl() failure");
|
|
|
|
assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
|
|
|
|
|
|
|
|
/* Read. */
|
2016-10-28 12:31:25 +08:00
|
|
|
assert_d_eq(mallctl("epoch", (void *)&old_epoch, &sz, NULL, 0), 0,
|
2013-12-19 15:21:42 +08:00
|
|
|
"Unexpected mallctl() failure");
|
|
|
|
assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
|
|
|
|
|
|
|
|
/* Write. */
|
2016-10-28 12:31:25 +08:00
|
|
|
assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&new_epoch,
|
|
|
|
sizeof(new_epoch)), 0, "Unexpected mallctl() failure");
|
2013-12-19 15:21:42 +08:00
|
|
|
assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
|
|
|
|
|
|
|
|
/* Read+write. */
|
2016-10-28 12:31:25 +08:00
|
|
|
assert_d_eq(mallctl("epoch", (void *)&old_epoch, &sz,
|
|
|
|
(void *)&new_epoch, sizeof(new_epoch)), 0,
|
|
|
|
"Unexpected mallctl() failure");
|
2013-12-19 15:21:42 +08:00
|
|
|
assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
|
|
|
|
}
|
|
|
|
TEST_END
|
|
|
|
|
2017-01-16 08:56:30 +08:00
|
|
|
TEST_BEGIN(test_mallctlnametomib_short_mib) {
|
2013-12-19 15:21:42 +08:00
|
|
|
size_t mib[4];
|
|
|
|
size_t miblen;
|
|
|
|
|
|
|
|
miblen = 3;
|
|
|
|
mib[3] = 42;
|
|
|
|
assert_d_eq(mallctlnametomib("arenas.bin.0.nregs", mib, &miblen), 0,
|
|
|
|
"Unexpected mallctlnametomib() failure");
|
|
|
|
assert_zu_eq(miblen, 3, "Unexpected mib output length");
|
|
|
|
assert_zu_eq(mib[3], 42,
|
|
|
|
"mallctlnametomib() wrote past the end of the input mib");
|
|
|
|
}
|
|
|
|
TEST_END
|
|
|
|
|
2017-01-16 08:56:30 +08:00
|
|
|
TEST_BEGIN(test_mallctl_config) {
|
2017-01-20 13:41:41 +08:00
|
|
|
#define TEST_MALLCTL_CONFIG(config, t) do { \
|
2016-02-08 06:23:22 +08:00
|
|
|
t oldval; \
|
2013-12-19 15:21:42 +08:00
|
|
|
size_t sz = sizeof(oldval); \
|
2016-10-28 12:31:25 +08:00
|
|
|
assert_d_eq(mallctl("config."#config, (void *)&oldval, &sz, \
|
|
|
|
NULL, 0), 0, "Unexpected mallctl() failure"); \
|
2013-12-19 15:21:42 +08:00
|
|
|
assert_b_eq(oldval, config_##config, "Incorrect config value"); \
|
|
|
|
assert_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \
|
|
|
|
} while (0)
|
|
|
|
|
2016-02-08 06:23:22 +08:00
|
|
|
TEST_MALLCTL_CONFIG(cache_oblivious, bool);
|
|
|
|
TEST_MALLCTL_CONFIG(debug, bool);
|
|
|
|
TEST_MALLCTL_CONFIG(fill, bool);
|
|
|
|
TEST_MALLCTL_CONFIG(lazy_lock, bool);
|
|
|
|
TEST_MALLCTL_CONFIG(malloc_conf, const char *);
|
|
|
|
TEST_MALLCTL_CONFIG(prof, bool);
|
|
|
|
TEST_MALLCTL_CONFIG(prof_libgcc, bool);
|
|
|
|
TEST_MALLCTL_CONFIG(prof_libunwind, bool);
|
|
|
|
TEST_MALLCTL_CONFIG(stats, bool);
|
|
|
|
TEST_MALLCTL_CONFIG(utrace, bool);
|
|
|
|
TEST_MALLCTL_CONFIG(xmalloc, bool);
|
2013-12-19 15:21:42 +08:00
|
|
|
|
|
|
|
#undef TEST_MALLCTL_CONFIG
|
|
|
|
}
|
|
|
|
TEST_END
|
|
|
|
|
2017-01-16 08:56:30 +08:00
|
|
|
TEST_BEGIN(test_mallctl_opt) {
|
2013-12-19 15:21:42 +08:00
|
|
|
bool config_always = true;
|
|
|
|
|
2017-01-20 13:41:41 +08:00
|
|
|
#define TEST_MALLCTL_OPT(t, opt, config) do { \
|
2013-12-19 15:21:42 +08:00
|
|
|
t oldval; \
|
|
|
|
size_t sz = sizeof(oldval); \
|
|
|
|
int expected = config_##config ? 0 : ENOENT; \
|
2016-10-28 12:31:25 +08:00
|
|
|
int result = mallctl("opt."#opt, (void *)&oldval, &sz, NULL, \
|
|
|
|
0); \
|
2013-12-19 15:21:42 +08:00
|
|
|
assert_d_eq(result, expected, \
|
|
|
|
"Unexpected mallctl() result for opt."#opt); \
|
|
|
|
assert_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \
|
|
|
|
} while (0)
|
|
|
|
|
|
|
|
TEST_MALLCTL_OPT(bool, abort, always);
|
2017-08-12 07:06:51 +08:00
|
|
|
TEST_MALLCTL_OPT(bool, abort_conf, always);
|
2017-08-25 05:29:28 +08:00
|
|
|
TEST_MALLCTL_OPT(const char *, metadata_thp, always);
|
2017-04-27 07:26:12 +08:00
|
|
|
TEST_MALLCTL_OPT(bool, retain, always);
|
2013-12-19 15:21:42 +08:00
|
|
|
TEST_MALLCTL_OPT(const char *, dss, always);
|
2016-02-25 03:03:40 +08:00
|
|
|
TEST_MALLCTL_OPT(unsigned, narenas, always);
|
Implement per-CPU arena.
The new feature, opt.percpu_arena, determines thread-arena association
dynamically based CPU id. Three modes are supported: "percpu", "phycpu"
and disabled.
"percpu" uses the current core id (with help from sched_getcpu())
directly as the arena index, while "phycpu" will assign threads on the
same physical CPU to the same arena. In other words, "percpu" means # of
arenas == # of CPUs, while "phycpu" has # of arenas == 1/2 * (# of
CPUs). Note that no runtime check on whether hyper threading is enabled
is added yet.
When enabled, threads will be migrated between arenas when a CPU change
is detected. In the current design, to reduce overhead from reading CPU
id, each arena tracks the thread accessed most recently. When a new
thread comes in, we will read CPU id and update arena if necessary.
2017-02-03 09:02:05 +08:00
|
|
|
TEST_MALLCTL_OPT(const char *, percpu_arena, always);
|
2019-01-25 08:15:04 +08:00
|
|
|
TEST_MALLCTL_OPT(size_t, oversize_threshold, always);
|
2017-05-16 08:44:13 +08:00
|
|
|
TEST_MALLCTL_OPT(bool, background_thread, always);
|
2017-05-18 01:47:00 +08:00
|
|
|
TEST_MALLCTL_OPT(ssize_t, dirty_decay_ms, always);
|
|
|
|
TEST_MALLCTL_OPT(ssize_t, muzzy_decay_ms, always);
|
2013-12-19 15:21:42 +08:00
|
|
|
TEST_MALLCTL_OPT(bool, stats_print, always);
|
2014-12-09 05:12:41 +08:00
|
|
|
TEST_MALLCTL_OPT(const char *, junk, fill);
|
2013-12-19 15:21:42 +08:00
|
|
|
TEST_MALLCTL_OPT(bool, zero, fill);
|
|
|
|
TEST_MALLCTL_OPT(bool, utrace, utrace);
|
|
|
|
TEST_MALLCTL_OPT(bool, xmalloc, xmalloc);
|
2017-04-21 08:21:37 +08:00
|
|
|
TEST_MALLCTL_OPT(bool, tcache, always);
|
2017-11-10 05:51:39 +08:00
|
|
|
TEST_MALLCTL_OPT(size_t, lg_extent_max_active_fit, always);
|
2017-04-21 08:21:37 +08:00
|
|
|
TEST_MALLCTL_OPT(size_t, lg_tcache_max, always);
|
2018-02-17 06:19:19 +08:00
|
|
|
TEST_MALLCTL_OPT(const char *, thp, always);
|
2013-12-19 15:21:42 +08:00
|
|
|
TEST_MALLCTL_OPT(bool, prof, prof);
|
|
|
|
TEST_MALLCTL_OPT(const char *, prof_prefix, prof);
|
|
|
|
TEST_MALLCTL_OPT(bool, prof_active, prof);
|
|
|
|
TEST_MALLCTL_OPT(ssize_t, lg_prof_sample, prof);
|
|
|
|
TEST_MALLCTL_OPT(bool, prof_accum, prof);
|
|
|
|
TEST_MALLCTL_OPT(ssize_t, lg_prof_interval, prof);
|
|
|
|
TEST_MALLCTL_OPT(bool, prof_gdump, prof);
|
|
|
|
TEST_MALLCTL_OPT(bool, prof_final, prof);
|
|
|
|
TEST_MALLCTL_OPT(bool, prof_leak, prof);
|
|
|
|
|
|
|
|
#undef TEST_MALLCTL_OPT
|
|
|
|
}
|
|
|
|
TEST_END
|
|
|
|
|
2017-01-16 08:56:30 +08:00
|
|
|
TEST_BEGIN(test_manpage_example) {
|
2013-12-19 15:21:42 +08:00
|
|
|
unsigned nbins, i;
|
|
|
|
size_t mib[4];
|
|
|
|
size_t len, miblen;
|
|
|
|
|
|
|
|
len = sizeof(nbins);
|
2016-10-28 12:31:25 +08:00
|
|
|
assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &len, NULL, 0), 0,
|
2013-12-19 15:21:42 +08:00
|
|
|
"Unexpected mallctl() failure");
|
|
|
|
|
|
|
|
miblen = 4;
|
|
|
|
assert_d_eq(mallctlnametomib("arenas.bin.0.size", mib, &miblen), 0,
|
|
|
|
"Unexpected mallctlnametomib() failure");
|
|
|
|
for (i = 0; i < nbins; i++) {
|
|
|
|
size_t bin_size;
|
|
|
|
|
|
|
|
mib[2] = i;
|
|
|
|
len = sizeof(bin_size);
|
2016-10-28 12:31:25 +08:00
|
|
|
assert_d_eq(mallctlbymib(mib, miblen, (void *)&bin_size, &len,
|
|
|
|
NULL, 0), 0, "Unexpected mallctlbymib() failure");
|
2013-12-19 15:21:42 +08:00
|
|
|
/* Do something with bin_size... */
|
|
|
|
}
|
|
|
|
}
|
|
|
|
TEST_END
|
|
|
|
|
2017-01-16 08:56:30 +08:00
|
|
|
TEST_BEGIN(test_tcache_none) {
|
2017-06-01 12:34:26 +08:00
|
|
|
test_skip_if(!opt_tcache);
|
2015-01-30 07:30:47 +08:00
|
|
|
|
|
|
|
/* Allocate p and q. */
|
2017-06-01 12:34:26 +08:00
|
|
|
void *p0 = mallocx(42, 0);
|
2015-01-30 07:30:47 +08:00
|
|
|
assert_ptr_not_null(p0, "Unexpected mallocx() failure");
|
2017-06-01 12:34:26 +08:00
|
|
|
void *q = mallocx(42, 0);
|
2015-01-30 07:30:47 +08:00
|
|
|
assert_ptr_not_null(q, "Unexpected mallocx() failure");
|
|
|
|
|
|
|
|
/* Deallocate p and q, but bypass the tcache for q. */
|
|
|
|
dallocx(p0, 0);
|
|
|
|
dallocx(q, MALLOCX_TCACHE_NONE);
|
|
|
|
|
|
|
|
/* Make sure that tcache-based allocation returns p, not q. */
|
2017-06-01 12:34:26 +08:00
|
|
|
void *p1 = mallocx(42, 0);
|
2015-01-30 07:30:47 +08:00
|
|
|
assert_ptr_not_null(p1, "Unexpected mallocx() failure");
|
|
|
|
assert_ptr_eq(p0, p1, "Expected tcache to allocate cached region");
|
|
|
|
|
|
|
|
/* Clean up. */
|
|
|
|
dallocx(p1, MALLOCX_TCACHE_NONE);
|
|
|
|
}
|
|
|
|
TEST_END
|
|
|
|
|
2017-01-16 08:56:30 +08:00
|
|
|
TEST_BEGIN(test_tcache) {
|
2017-01-20 13:41:41 +08:00
|
|
|
#define NTCACHES 10
|
2015-01-30 07:30:47 +08:00
|
|
|
unsigned tis[NTCACHES];
|
|
|
|
void *ps[NTCACHES];
|
|
|
|
void *qs[NTCACHES];
|
|
|
|
unsigned i;
|
|
|
|
size_t sz, psz, qsz;
|
|
|
|
|
|
|
|
psz = 42;
|
|
|
|
qsz = nallocx(psz, 0) + 1;
|
|
|
|
|
|
|
|
/* Create tcaches. */
|
|
|
|
for (i = 0; i < NTCACHES; i++) {
|
|
|
|
sz = sizeof(unsigned);
|
2016-10-28 12:31:25 +08:00
|
|
|
assert_d_eq(mallctl("tcache.create", (void *)&tis[i], &sz, NULL,
|
|
|
|
0), 0, "Unexpected mallctl() failure, i=%u", i);
|
2015-01-30 07:30:47 +08:00
|
|
|
}
|
|
|
|
|
2015-02-11 01:03:48 +08:00
|
|
|
/* Exercise tcache ID recycling. */
|
|
|
|
for (i = 0; i < NTCACHES; i++) {
|
2016-10-28 12:31:25 +08:00
|
|
|
assert_d_eq(mallctl("tcache.destroy", NULL, NULL,
|
|
|
|
(void *)&tis[i], sizeof(unsigned)), 0,
|
|
|
|
"Unexpected mallctl() failure, i=%u", i);
|
2015-02-11 01:03:48 +08:00
|
|
|
}
|
|
|
|
for (i = 0; i < NTCACHES; i++) {
|
|
|
|
sz = sizeof(unsigned);
|
2016-10-28 12:31:25 +08:00
|
|
|
assert_d_eq(mallctl("tcache.create", (void *)&tis[i], &sz, NULL,
|
|
|
|
0), 0, "Unexpected mallctl() failure, i=%u", i);
|
2015-02-11 01:03:48 +08:00
|
|
|
}
|
|
|
|
|
2015-01-30 07:30:47 +08:00
|
|
|
/* Flush empty tcaches. */
|
|
|
|
for (i = 0; i < NTCACHES; i++) {
|
2016-10-28 12:31:25 +08:00
|
|
|
assert_d_eq(mallctl("tcache.flush", NULL, NULL, (void *)&tis[i],
|
2015-01-30 07:30:47 +08:00
|
|
|
sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u",
|
|
|
|
i);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Cache some allocations. */
|
|
|
|
for (i = 0; i < NTCACHES; i++) {
|
|
|
|
ps[i] = mallocx(psz, MALLOCX_TCACHE(tis[i]));
|
|
|
|
assert_ptr_not_null(ps[i], "Unexpected mallocx() failure, i=%u",
|
|
|
|
i);
|
|
|
|
dallocx(ps[i], MALLOCX_TCACHE(tis[i]));
|
|
|
|
|
|
|
|
qs[i] = mallocx(qsz, MALLOCX_TCACHE(tis[i]));
|
|
|
|
assert_ptr_not_null(qs[i], "Unexpected mallocx() failure, i=%u",
|
|
|
|
i);
|
|
|
|
dallocx(qs[i], MALLOCX_TCACHE(tis[i]));
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Verify that tcaches allocate cached regions. */
|
|
|
|
for (i = 0; i < NTCACHES; i++) {
|
|
|
|
void *p0 = ps[i];
|
|
|
|
ps[i] = mallocx(psz, MALLOCX_TCACHE(tis[i]));
|
|
|
|
assert_ptr_not_null(ps[i], "Unexpected mallocx() failure, i=%u",
|
|
|
|
i);
|
|
|
|
assert_ptr_eq(ps[i], p0,
|
|
|
|
"Expected mallocx() to allocate cached region, i=%u", i);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Verify that reallocation uses cached regions. */
|
|
|
|
for (i = 0; i < NTCACHES; i++) {
|
|
|
|
void *q0 = qs[i];
|
|
|
|
qs[i] = rallocx(ps[i], qsz, MALLOCX_TCACHE(tis[i]));
|
|
|
|
assert_ptr_not_null(qs[i], "Unexpected rallocx() failure, i=%u",
|
|
|
|
i);
|
|
|
|
assert_ptr_eq(qs[i], q0,
|
|
|
|
"Expected rallocx() to allocate cached region, i=%u", i);
|
|
|
|
/* Avoid undefined behavior in case of test failure. */
|
2017-01-16 08:56:30 +08:00
|
|
|
if (qs[i] == NULL) {
|
2015-01-30 07:30:47 +08:00
|
|
|
qs[i] = ps[i];
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
2015-01-30 07:30:47 +08:00
|
|
|
}
|
2017-01-16 08:56:30 +08:00
|
|
|
for (i = 0; i < NTCACHES; i++) {
|
2015-01-30 07:30:47 +08:00
|
|
|
dallocx(qs[i], MALLOCX_TCACHE(tis[i]));
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
2015-01-30 07:30:47 +08:00
|
|
|
|
|
|
|
/* Flush some non-empty tcaches. */
|
|
|
|
for (i = 0; i < NTCACHES/2; i++) {
|
2016-10-28 12:31:25 +08:00
|
|
|
assert_d_eq(mallctl("tcache.flush", NULL, NULL, (void *)&tis[i],
|
2015-01-30 07:30:47 +08:00
|
|
|
sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u",
|
|
|
|
i);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Destroy tcaches. */
|
|
|
|
for (i = 0; i < NTCACHES; i++) {
|
2016-10-28 12:31:25 +08:00
|
|
|
assert_d_eq(mallctl("tcache.destroy", NULL, NULL,
|
|
|
|
(void *)&tis[i], sizeof(unsigned)), 0,
|
|
|
|
"Unexpected mallctl() failure, i=%u", i);
|
2015-01-30 07:30:47 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
TEST_END
|
|
|
|
|
2017-01-16 08:56:30 +08:00
|
|
|
TEST_BEGIN(test_thread_arena) {
|
Implement per-CPU arena.
The new feature, opt.percpu_arena, determines thread-arena association
dynamically based CPU id. Three modes are supported: "percpu", "phycpu"
and disabled.
"percpu" uses the current core id (with help from sched_getcpu())
directly as the arena index, while "phycpu" will assign threads on the
same physical CPU to the same arena. In other words, "percpu" means # of
arenas == # of CPUs, while "phycpu" has # of arenas == 1/2 * (# of
CPUs). Note that no runtime check on whether hyper threading is enabled
is added yet.
When enabled, threads will be migrated between arenas when a CPU change
is detected. In the current design, to reduce overhead from reading CPU
id, each arena tracks the thread accessed most recently. When a new
thread comes in, we will read CPU id and update arena if necessary.
2017-02-03 09:02:05 +08:00
|
|
|
unsigned old_arena_ind, new_arena_ind, narenas;
|
2013-12-19 15:21:42 +08:00
|
|
|
|
2017-06-01 07:45:14 +08:00
|
|
|
const char *opa;
|
|
|
|
size_t sz = sizeof(opa);
|
2018-04-07 02:50:17 +08:00
|
|
|
assert_d_eq(mallctl("opt.percpu_arena", (void *)&opa, &sz, NULL, 0), 0,
|
2017-06-01 07:45:14 +08:00
|
|
|
"Unexpected mallctl() failure");
|
Implement per-CPU arena.
The new feature, opt.percpu_arena, determines thread-arena association
dynamically based CPU id. Three modes are supported: "percpu", "phycpu"
and disabled.
"percpu" uses the current core id (with help from sched_getcpu())
directly as the arena index, while "phycpu" will assign threads on the
same physical CPU to the same arena. In other words, "percpu" means # of
arenas == # of CPUs, while "phycpu" has # of arenas == 1/2 * (# of
CPUs). Note that no runtime check on whether hyper threading is enabled
is added yet.
When enabled, threads will be migrated between arenas when a CPU change
is detected. In the current design, to reduce overhead from reading CPU
id, each arena tracks the thread accessed most recently. When a new
thread comes in, we will read CPU id and update arena if necessary.
2017-02-03 09:02:05 +08:00
|
|
|
|
|
|
|
sz = sizeof(unsigned);
|
2016-10-28 12:31:25 +08:00
|
|
|
assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
|
|
|
|
0, "Unexpected mallctl() failure");
|
2019-01-25 08:15:04 +08:00
|
|
|
if (opt_oversize_threshold != 0) {
|
2018-05-22 04:33:48 +08:00
|
|
|
narenas--;
|
|
|
|
}
|
2013-12-19 15:21:42 +08:00
|
|
|
assert_u_eq(narenas, opt_narenas, "Number of arenas incorrect");
|
Implement per-CPU arena.
The new feature, opt.percpu_arena, determines thread-arena association
dynamically based CPU id. Three modes are supported: "percpu", "phycpu"
and disabled.
"percpu" uses the current core id (with help from sched_getcpu())
directly as the arena index, while "phycpu" will assign threads on the
same physical CPU to the same arena. In other words, "percpu" means # of
arenas == # of CPUs, while "phycpu" has # of arenas == 1/2 * (# of
CPUs). Note that no runtime check on whether hyper threading is enabled
is added yet.
When enabled, threads will be migrated between arenas when a CPU change
is detected. In the current design, to reduce overhead from reading CPU
id, each arena tracks the thread accessed most recently. When a new
thread comes in, we will read CPU id and update arena if necessary.
2017-02-03 09:02:05 +08:00
|
|
|
|
2017-06-01 07:45:14 +08:00
|
|
|
if (strcmp(opa, "disabled") == 0) {
|
Implement per-CPU arena.
The new feature, opt.percpu_arena, determines thread-arena association
dynamically based CPU id. Three modes are supported: "percpu", "phycpu"
and disabled.
"percpu" uses the current core id (with help from sched_getcpu())
directly as the arena index, while "phycpu" will assign threads on the
same physical CPU to the same arena. In other words, "percpu" means # of
arenas == # of CPUs, while "phycpu" has # of arenas == 1/2 * (# of
CPUs). Note that no runtime check on whether hyper threading is enabled
is added yet.
When enabled, threads will be migrated between arenas when a CPU change
is detected. In the current design, to reduce overhead from reading CPU
id, each arena tracks the thread accessed most recently. When a new
thread comes in, we will read CPU id and update arena if necessary.
2017-02-03 09:02:05 +08:00
|
|
|
new_arena_ind = narenas - 1;
|
|
|
|
assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
|
|
|
|
(void *)&new_arena_ind, sizeof(unsigned)), 0,
|
|
|
|
"Unexpected mallctl() failure");
|
|
|
|
new_arena_ind = 0;
|
|
|
|
assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
|
|
|
|
(void *)&new_arena_ind, sizeof(unsigned)), 0,
|
|
|
|
"Unexpected mallctl() failure");
|
|
|
|
} else {
|
|
|
|
assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
|
|
|
|
NULL, 0), 0, "Unexpected mallctl() failure");
|
2017-06-01 07:45:14 +08:00
|
|
|
new_arena_ind = percpu_arena_ind_limit(opt_percpu_arena) - 1;
|
Implement per-CPU arena.
The new feature, opt.percpu_arena, determines thread-arena association
dynamically based CPU id. Three modes are supported: "percpu", "phycpu"
and disabled.
"percpu" uses the current core id (with help from sched_getcpu())
directly as the arena index, while "phycpu" will assign threads on the
same physical CPU to the same arena. In other words, "percpu" means # of
arenas == # of CPUs, while "phycpu" has # of arenas == 1/2 * (# of
CPUs). Note that no runtime check on whether hyper threading is enabled
is added yet.
When enabled, threads will be migrated between arenas when a CPU change
is detected. In the current design, to reduce overhead from reading CPU
id, each arena tracks the thread accessed most recently. When a new
thread comes in, we will read CPU id and update arena if necessary.
2017-02-03 09:02:05 +08:00
|
|
|
if (old_arena_ind != new_arena_ind) {
|
|
|
|
assert_d_eq(mallctl("thread.arena",
|
|
|
|
(void *)&old_arena_ind, &sz, (void *)&new_arena_ind,
|
|
|
|
sizeof(unsigned)), EPERM, "thread.arena ctl "
|
|
|
|
"should not be allowed with percpu arena");
|
|
|
|
}
|
|
|
|
}
|
2013-12-19 15:21:42 +08:00
|
|
|
}
|
|
|
|
TEST_END
|
|
|
|
|
2017-01-16 08:56:30 +08:00
|
|
|
TEST_BEGIN(test_arena_i_initialized) {
|
2017-01-05 02:21:53 +08:00
|
|
|
unsigned narenas, i;
|
|
|
|
size_t sz;
|
|
|
|
size_t mib[3];
|
|
|
|
size_t miblen = sizeof(mib) / sizeof(size_t);
|
|
|
|
bool initialized;
|
|
|
|
|
|
|
|
sz = sizeof(narenas);
|
|
|
|
assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
|
|
|
|
0, "Unexpected mallctl() failure");
|
|
|
|
|
|
|
|
assert_d_eq(mallctlnametomib("arena.0.initialized", mib, &miblen), 0,
|
|
|
|
"Unexpected mallctlnametomib() failure");
|
|
|
|
for (i = 0; i < narenas; i++) {
|
|
|
|
mib[1] = i;
|
|
|
|
sz = sizeof(initialized);
|
|
|
|
assert_d_eq(mallctlbymib(mib, miblen, &initialized, &sz, NULL,
|
|
|
|
0), 0, "Unexpected mallctl() failure");
|
|
|
|
}
|
|
|
|
|
|
|
|
mib[1] = MALLCTL_ARENAS_ALL;
|
|
|
|
sz = sizeof(initialized);
|
|
|
|
assert_d_eq(mallctlbymib(mib, miblen, &initialized, &sz, NULL, 0), 0,
|
|
|
|
"Unexpected mallctl() failure");
|
|
|
|
assert_true(initialized,
|
|
|
|
"Merged arena statistics should always be initialized");
|
2017-01-04 09:21:59 +08:00
|
|
|
|
|
|
|
/* Equivalent to the above but using mallctl() directly. */
|
|
|
|
sz = sizeof(initialized);
|
|
|
|
assert_d_eq(mallctl(
|
|
|
|
"arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".initialized",
|
|
|
|
(void *)&initialized, &sz, NULL, 0), 0,
|
|
|
|
"Unexpected mallctl() failure");
|
|
|
|
assert_true(initialized,
|
|
|
|
"Merged arena statistics should always be initialized");
|
2017-01-05 02:21:53 +08:00
|
|
|
}
|
|
|
|
TEST_END
|
|
|
|
|
2017-05-18 01:47:00 +08:00
|
|
|
TEST_BEGIN(test_arena_i_dirty_decay_ms) {
|
|
|
|
ssize_t dirty_decay_ms, orig_dirty_decay_ms, prev_dirty_decay_ms;
|
2016-02-20 12:09:31 +08:00
|
|
|
size_t sz = sizeof(ssize_t);
|
|
|
|
|
2017-05-18 01:47:00 +08:00
|
|
|
assert_d_eq(mallctl("arena.0.dirty_decay_ms",
|
|
|
|
(void *)&orig_dirty_decay_ms, &sz, NULL, 0), 0,
|
Implement two-phase decay-based purging.
Split decay-based purging into two phases, the first of which uses lazy
purging to convert dirty pages to "muzzy", and the second of which uses
forced purging, decommit, or unmapping to convert pages to clean or
destroy them altogether. Not all operating systems support lazy
purging, yet the application may provide extent hooks that implement
lazy purging, so care must be taken to dynamically omit the first phase
when necessary.
The mallctl interfaces change as follows:
- opt.decay_time --> opt.{dirty,muzzy}_decay_time
- arena.<i>.decay_time --> arena.<i>.{dirty,muzzy}_decay_time
- arenas.decay_time --> arenas.{dirty,muzzy}_decay_time
- stats.arenas.<i>.pdirty --> stats.arenas.<i>.p{dirty,muzzy}
- stats.arenas.<i>.{npurge,nmadvise,purged} -->
stats.arenas.<i>.{dirty,muzzy}_{npurge,nmadvise,purged}
This resolves #521.
2017-03-09 14:42:57 +08:00
|
|
|
"Unexpected mallctl() failure");
|
2016-02-20 12:09:31 +08:00
|
|
|
|
2017-05-18 01:47:00 +08:00
|
|
|
dirty_decay_ms = -2;
|
|
|
|
assert_d_eq(mallctl("arena.0.dirty_decay_ms", NULL, NULL,
|
|
|
|
(void *)&dirty_decay_ms, sizeof(ssize_t)), EFAULT,
|
2016-02-20 12:09:31 +08:00
|
|
|
"Unexpected mallctl() success");
|
|
|
|
|
2017-05-18 01:47:00 +08:00
|
|
|
dirty_decay_ms = 0x7fffffff;
|
|
|
|
assert_d_eq(mallctl("arena.0.dirty_decay_ms", NULL, NULL,
|
|
|
|
(void *)&dirty_decay_ms, sizeof(ssize_t)), 0,
|
2016-02-20 12:09:31 +08:00
|
|
|
"Unexpected mallctl() failure");
|
|
|
|
|
2017-05-18 01:47:00 +08:00
|
|
|
for (prev_dirty_decay_ms = dirty_decay_ms, dirty_decay_ms = -1;
|
|
|
|
dirty_decay_ms < 20; prev_dirty_decay_ms = dirty_decay_ms,
|
|
|
|
dirty_decay_ms++) {
|
|
|
|
ssize_t old_dirty_decay_ms;
|
2016-02-20 12:09:31 +08:00
|
|
|
|
2017-05-18 01:47:00 +08:00
|
|
|
assert_d_eq(mallctl("arena.0.dirty_decay_ms",
|
|
|
|
(void *)&old_dirty_decay_ms, &sz, (void *)&dirty_decay_ms,
|
|
|
|
sizeof(ssize_t)), 0, "Unexpected mallctl() failure");
|
|
|
|
assert_zd_eq(old_dirty_decay_ms, prev_dirty_decay_ms,
|
|
|
|
"Unexpected old arena.0.dirty_decay_ms");
|
Implement two-phase decay-based purging.
Split decay-based purging into two phases, the first of which uses lazy
purging to convert dirty pages to "muzzy", and the second of which uses
forced purging, decommit, or unmapping to convert pages to clean or
destroy them altogether. Not all operating systems support lazy
purging, yet the application may provide extent hooks that implement
lazy purging, so care must be taken to dynamically omit the first phase
when necessary.
The mallctl interfaces change as follows:
- opt.decay_time --> opt.{dirty,muzzy}_decay_time
- arena.<i>.decay_time --> arena.<i>.{dirty,muzzy}_decay_time
- arenas.decay_time --> arenas.{dirty,muzzy}_decay_time
- stats.arenas.<i>.pdirty --> stats.arenas.<i>.p{dirty,muzzy}
- stats.arenas.<i>.{npurge,nmadvise,purged} -->
stats.arenas.<i>.{dirty,muzzy}_{npurge,nmadvise,purged}
This resolves #521.
2017-03-09 14:42:57 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
TEST_END
|
|
|
|
|
2017-05-18 01:47:00 +08:00
|
|
|
TEST_BEGIN(test_arena_i_muzzy_decay_ms) {
|
|
|
|
ssize_t muzzy_decay_ms, orig_muzzy_decay_ms, prev_muzzy_decay_ms;
|
Implement two-phase decay-based purging.
Split decay-based purging into two phases, the first of which uses lazy
purging to convert dirty pages to "muzzy", and the second of which uses
forced purging, decommit, or unmapping to convert pages to clean or
destroy them altogether. Not all operating systems support lazy
purging, yet the application may provide extent hooks that implement
lazy purging, so care must be taken to dynamically omit the first phase
when necessary.
The mallctl interfaces change as follows:
- opt.decay_time --> opt.{dirty,muzzy}_decay_time
- arena.<i>.decay_time --> arena.<i>.{dirty,muzzy}_decay_time
- arenas.decay_time --> arenas.{dirty,muzzy}_decay_time
- stats.arenas.<i>.pdirty --> stats.arenas.<i>.p{dirty,muzzy}
- stats.arenas.<i>.{npurge,nmadvise,purged} -->
stats.arenas.<i>.{dirty,muzzy}_{npurge,nmadvise,purged}
This resolves #521.
2017-03-09 14:42:57 +08:00
|
|
|
size_t sz = sizeof(ssize_t);
|
|
|
|
|
2017-05-18 01:47:00 +08:00
|
|
|
assert_d_eq(mallctl("arena.0.muzzy_decay_ms",
|
|
|
|
(void *)&orig_muzzy_decay_ms, &sz, NULL, 0), 0,
|
Implement two-phase decay-based purging.
Split decay-based purging into two phases, the first of which uses lazy
purging to convert dirty pages to "muzzy", and the second of which uses
forced purging, decommit, or unmapping to convert pages to clean or
destroy them altogether. Not all operating systems support lazy
purging, yet the application may provide extent hooks that implement
lazy purging, so care must be taken to dynamically omit the first phase
when necessary.
The mallctl interfaces change as follows:
- opt.decay_time --> opt.{dirty,muzzy}_decay_time
- arena.<i>.decay_time --> arena.<i>.{dirty,muzzy}_decay_time
- arenas.decay_time --> arenas.{dirty,muzzy}_decay_time
- stats.arenas.<i>.pdirty --> stats.arenas.<i>.p{dirty,muzzy}
- stats.arenas.<i>.{npurge,nmadvise,purged} -->
stats.arenas.<i>.{dirty,muzzy}_{npurge,nmadvise,purged}
This resolves #521.
2017-03-09 14:42:57 +08:00
|
|
|
"Unexpected mallctl() failure");
|
|
|
|
|
2017-05-18 01:47:00 +08:00
|
|
|
muzzy_decay_ms = -2;
|
|
|
|
assert_d_eq(mallctl("arena.0.muzzy_decay_ms", NULL, NULL,
|
|
|
|
(void *)&muzzy_decay_ms, sizeof(ssize_t)), EFAULT,
|
Implement two-phase decay-based purging.
Split decay-based purging into two phases, the first of which uses lazy
purging to convert dirty pages to "muzzy", and the second of which uses
forced purging, decommit, or unmapping to convert pages to clean or
destroy them altogether. Not all operating systems support lazy
purging, yet the application may provide extent hooks that implement
lazy purging, so care must be taken to dynamically omit the first phase
when necessary.
The mallctl interfaces change as follows:
- opt.decay_time --> opt.{dirty,muzzy}_decay_time
- arena.<i>.decay_time --> arena.<i>.{dirty,muzzy}_decay_time
- arenas.decay_time --> arenas.{dirty,muzzy}_decay_time
- stats.arenas.<i>.pdirty --> stats.arenas.<i>.p{dirty,muzzy}
- stats.arenas.<i>.{npurge,nmadvise,purged} -->
stats.arenas.<i>.{dirty,muzzy}_{npurge,nmadvise,purged}
This resolves #521.
2017-03-09 14:42:57 +08:00
|
|
|
"Unexpected mallctl() success");
|
|
|
|
|
2017-05-18 01:47:00 +08:00
|
|
|
muzzy_decay_ms = 0x7fffffff;
|
|
|
|
assert_d_eq(mallctl("arena.0.muzzy_decay_ms", NULL, NULL,
|
|
|
|
(void *)&muzzy_decay_ms, sizeof(ssize_t)), 0,
|
Implement two-phase decay-based purging.
Split decay-based purging into two phases, the first of which uses lazy
purging to convert dirty pages to "muzzy", and the second of which uses
forced purging, decommit, or unmapping to convert pages to clean or
destroy them altogether. Not all operating systems support lazy
purging, yet the application may provide extent hooks that implement
lazy purging, so care must be taken to dynamically omit the first phase
when necessary.
The mallctl interfaces change as follows:
- opt.decay_time --> opt.{dirty,muzzy}_decay_time
- arena.<i>.decay_time --> arena.<i>.{dirty,muzzy}_decay_time
- arenas.decay_time --> arenas.{dirty,muzzy}_decay_time
- stats.arenas.<i>.pdirty --> stats.arenas.<i>.p{dirty,muzzy}
- stats.arenas.<i>.{npurge,nmadvise,purged} -->
stats.arenas.<i>.{dirty,muzzy}_{npurge,nmadvise,purged}
This resolves #521.
2017-03-09 14:42:57 +08:00
|
|
|
"Unexpected mallctl() failure");
|
|
|
|
|
2017-05-18 01:47:00 +08:00
|
|
|
for (prev_muzzy_decay_ms = muzzy_decay_ms, muzzy_decay_ms = -1;
|
|
|
|
muzzy_decay_ms < 20; prev_muzzy_decay_ms = muzzy_decay_ms,
|
|
|
|
muzzy_decay_ms++) {
|
|
|
|
ssize_t old_muzzy_decay_ms;
|
Implement two-phase decay-based purging.
Split decay-based purging into two phases, the first of which uses lazy
purging to convert dirty pages to "muzzy", and the second of which uses
forced purging, decommit, or unmapping to convert pages to clean or
destroy them altogether. Not all operating systems support lazy
purging, yet the application may provide extent hooks that implement
lazy purging, so care must be taken to dynamically omit the first phase
when necessary.
The mallctl interfaces change as follows:
- opt.decay_time --> opt.{dirty,muzzy}_decay_time
- arena.<i>.decay_time --> arena.<i>.{dirty,muzzy}_decay_time
- arenas.decay_time --> arenas.{dirty,muzzy}_decay_time
- stats.arenas.<i>.pdirty --> stats.arenas.<i>.p{dirty,muzzy}
- stats.arenas.<i>.{npurge,nmadvise,purged} -->
stats.arenas.<i>.{dirty,muzzy}_{npurge,nmadvise,purged}
This resolves #521.
2017-03-09 14:42:57 +08:00
|
|
|
|
2017-05-18 01:47:00 +08:00
|
|
|
assert_d_eq(mallctl("arena.0.muzzy_decay_ms",
|
|
|
|
(void *)&old_muzzy_decay_ms, &sz, (void *)&muzzy_decay_ms,
|
|
|
|
sizeof(ssize_t)), 0, "Unexpected mallctl() failure");
|
|
|
|
assert_zd_eq(old_muzzy_decay_ms, prev_muzzy_decay_ms,
|
|
|
|
"Unexpected old arena.0.muzzy_decay_ms");
|
2016-02-20 12:09:31 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
TEST_END
|
|
|
|
|
2017-01-16 08:56:30 +08:00
|
|
|
TEST_BEGIN(test_arena_i_purge) {
|
2013-12-19 15:21:42 +08:00
|
|
|
unsigned narenas;
|
|
|
|
size_t sz = sizeof(unsigned);
|
|
|
|
size_t mib[3];
|
|
|
|
size_t miblen = 3;
|
|
|
|
|
|
|
|
assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
|
|
|
|
"Unexpected mallctl() failure");
|
|
|
|
|
2016-10-28 12:31:25 +08:00
|
|
|
assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
|
|
|
|
0, "Unexpected mallctl() failure");
|
2013-12-19 15:21:42 +08:00
|
|
|
assert_d_eq(mallctlnametomib("arena.0.purge", mib, &miblen), 0,
|
|
|
|
"Unexpected mallctlnametomib() failure");
|
|
|
|
mib[1] = narenas;
|
|
|
|
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
|
|
|
|
"Unexpected mallctlbymib() failure");
|
2017-01-03 23:27:42 +08:00
|
|
|
|
|
|
|
mib[1] = MALLCTL_ARENAS_ALL;
|
|
|
|
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
|
|
|
|
"Unexpected mallctlbymib() failure");
|
2013-12-19 15:21:42 +08:00
|
|
|
}
|
|
|
|
TEST_END
|
|
|
|
|
2017-01-16 08:56:30 +08:00
|
|
|
TEST_BEGIN(test_arena_i_decay) {
|
2016-02-20 12:09:31 +08:00
|
|
|
unsigned narenas;
|
|
|
|
size_t sz = sizeof(unsigned);
|
|
|
|
size_t mib[3];
|
|
|
|
size_t miblen = 3;
|
|
|
|
|
|
|
|
assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
|
|
|
|
"Unexpected mallctl() failure");
|
|
|
|
|
2016-10-28 12:31:25 +08:00
|
|
|
assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
|
|
|
|
0, "Unexpected mallctl() failure");
|
2016-02-20 12:09:31 +08:00
|
|
|
assert_d_eq(mallctlnametomib("arena.0.decay", mib, &miblen), 0,
|
|
|
|
"Unexpected mallctlnametomib() failure");
|
|
|
|
mib[1] = narenas;
|
|
|
|
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
|
|
|
|
"Unexpected mallctlbymib() failure");
|
2017-01-03 23:27:42 +08:00
|
|
|
|
|
|
|
mib[1] = MALLCTL_ARENAS_ALL;
|
|
|
|
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
|
|
|
|
"Unexpected mallctlbymib() failure");
|
2016-02-20 12:09:31 +08:00
|
|
|
}
|
|
|
|
TEST_END
|
|
|
|
|
2017-01-16 08:56:30 +08:00
|
|
|
TEST_BEGIN(test_arena_i_dss) {
|
2013-12-19 15:21:42 +08:00
|
|
|
const char *dss_prec_old, *dss_prec_new;
|
|
|
|
size_t sz = sizeof(dss_prec_old);
|
2014-04-16 03:09:48 +08:00
|
|
|
size_t mib[3];
|
|
|
|
size_t miblen;
|
|
|
|
|
|
|
|
miblen = sizeof(mib)/sizeof(size_t);
|
|
|
|
assert_d_eq(mallctlnametomib("arena.0.dss", mib, &miblen), 0,
|
|
|
|
"Unexpected mallctlnametomib() error");
|
2013-12-19 15:21:42 +08:00
|
|
|
|
2014-04-16 03:09:48 +08:00
|
|
|
dss_prec_new = "disabled";
|
2016-10-28 12:31:25 +08:00
|
|
|
assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz,
|
|
|
|
(void *)&dss_prec_new, sizeof(dss_prec_new)), 0,
|
|
|
|
"Unexpected mallctl() failure");
|
2013-12-19 15:21:42 +08:00
|
|
|
assert_str_ne(dss_prec_old, "primary",
|
|
|
|
"Unexpected default for dss precedence");
|
|
|
|
|
2016-10-28 12:31:25 +08:00
|
|
|
assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_new, &sz,
|
|
|
|
(void *)&dss_prec_old, sizeof(dss_prec_old)), 0,
|
2014-08-16 03:20:20 +08:00
|
|
|
"Unexpected mallctl() failure");
|
2016-10-28 12:31:25 +08:00
|
|
|
|
|
|
|
assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, NULL,
|
|
|
|
0), 0, "Unexpected mallctl() failure");
|
2014-08-16 03:20:20 +08:00
|
|
|
assert_str_ne(dss_prec_old, "primary",
|
|
|
|
"Unexpected value for dss precedence");
|
|
|
|
|
2014-04-16 03:09:48 +08:00
|
|
|
mib[1] = narenas_total_get();
|
|
|
|
dss_prec_new = "disabled";
|
2016-10-28 12:31:25 +08:00
|
|
|
assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz,
|
|
|
|
(void *)&dss_prec_new, sizeof(dss_prec_new)), 0,
|
|
|
|
"Unexpected mallctl() failure");
|
2014-04-16 03:09:48 +08:00
|
|
|
assert_str_ne(dss_prec_old, "primary",
|
|
|
|
"Unexpected default for dss precedence");
|
2014-08-16 03:20:20 +08:00
|
|
|
|
2016-10-28 12:31:25 +08:00
|
|
|
assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_new, &sz,
|
|
|
|
(void *)&dss_prec_old, sizeof(dss_prec_new)), 0,
|
2014-08-16 03:20:20 +08:00
|
|
|
"Unexpected mallctl() failure");
|
2016-10-28 12:31:25 +08:00
|
|
|
|
|
|
|
assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, NULL,
|
|
|
|
0), 0, "Unexpected mallctl() failure");
|
2014-08-16 03:20:20 +08:00
|
|
|
assert_str_ne(dss_prec_old, "primary",
|
|
|
|
"Unexpected value for dss precedence");
|
2013-12-19 15:21:42 +08:00
|
|
|
}
|
|
|
|
TEST_END
|
|
|
|
|
2017-11-03 08:48:39 +08:00
|
|
|
TEST_BEGIN(test_arena_i_retain_grow_limit) {
|
|
|
|
size_t old_limit, new_limit, default_limit;
|
|
|
|
size_t mib[3];
|
|
|
|
size_t miblen;
|
|
|
|
|
|
|
|
bool retain_enabled;
|
|
|
|
size_t sz = sizeof(retain_enabled);
|
|
|
|
assert_d_eq(mallctl("opt.retain", &retain_enabled, &sz, NULL, 0),
|
|
|
|
0, "Unexpected mallctl() failure");
|
|
|
|
test_skip_if(!retain_enabled);
|
|
|
|
|
|
|
|
sz = sizeof(default_limit);
|
|
|
|
miblen = sizeof(mib)/sizeof(size_t);
|
|
|
|
assert_d_eq(mallctlnametomib("arena.0.retain_grow_limit", mib, &miblen),
|
|
|
|
0, "Unexpected mallctlnametomib() error");
|
|
|
|
|
|
|
|
assert_d_eq(mallctlbymib(mib, miblen, &default_limit, &sz, NULL, 0), 0,
|
|
|
|
"Unexpected mallctl() failure");
|
2018-07-20 08:08:10 +08:00
|
|
|
assert_zu_eq(default_limit, SC_LARGE_MAXCLASS,
|
2017-11-03 08:48:39 +08:00
|
|
|
"Unexpected default for retain_grow_limit");
|
|
|
|
|
|
|
|
new_limit = PAGE - 1;
|
|
|
|
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &new_limit,
|
|
|
|
sizeof(new_limit)), EFAULT, "Unexpected mallctl() success");
|
|
|
|
|
|
|
|
new_limit = PAGE + 1;
|
|
|
|
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &new_limit,
|
|
|
|
sizeof(new_limit)), 0, "Unexpected mallctl() failure");
|
|
|
|
assert_d_eq(mallctlbymib(mib, miblen, &old_limit, &sz, NULL, 0), 0,
|
|
|
|
"Unexpected mallctl() failure");
|
|
|
|
assert_zu_eq(old_limit, PAGE,
|
|
|
|
"Unexpected value for retain_grow_limit");
|
|
|
|
|
|
|
|
/* Expect grow less than psize class 10. */
|
|
|
|
new_limit = sz_pind2sz(10) - 1;
|
|
|
|
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &new_limit,
|
|
|
|
sizeof(new_limit)), 0, "Unexpected mallctl() failure");
|
|
|
|
assert_d_eq(mallctlbymib(mib, miblen, &old_limit, &sz, NULL, 0), 0,
|
|
|
|
"Unexpected mallctl() failure");
|
|
|
|
assert_zu_eq(old_limit, sz_pind2sz(9),
|
|
|
|
"Unexpected value for retain_grow_limit");
|
|
|
|
|
|
|
|
/* Restore to default. */
|
|
|
|
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &default_limit,
|
|
|
|
sizeof(default_limit)), 0, "Unexpected mallctl() failure");
|
|
|
|
}
|
|
|
|
TEST_END
|
|
|
|
|
2017-05-18 01:47:00 +08:00
|
|
|
TEST_BEGIN(test_arenas_dirty_decay_ms) {
|
|
|
|
ssize_t dirty_decay_ms, orig_dirty_decay_ms, prev_dirty_decay_ms;
|
2016-02-20 12:09:31 +08:00
|
|
|
size_t sz = sizeof(ssize_t);
|
|
|
|
|
2017-05-18 01:47:00 +08:00
|
|
|
assert_d_eq(mallctl("arenas.dirty_decay_ms",
|
|
|
|
(void *)&orig_dirty_decay_ms, &sz, NULL, 0), 0,
|
Implement two-phase decay-based purging.
Split decay-based purging into two phases, the first of which uses lazy
purging to convert dirty pages to "muzzy", and the second of which uses
forced purging, decommit, or unmapping to convert pages to clean or
destroy them altogether. Not all operating systems support lazy
purging, yet the application may provide extent hooks that implement
lazy purging, so care must be taken to dynamically omit the first phase
when necessary.
The mallctl interfaces change as follows:
- opt.decay_time --> opt.{dirty,muzzy}_decay_time
- arena.<i>.decay_time --> arena.<i>.{dirty,muzzy}_decay_time
- arenas.decay_time --> arenas.{dirty,muzzy}_decay_time
- stats.arenas.<i>.pdirty --> stats.arenas.<i>.p{dirty,muzzy}
- stats.arenas.<i>.{npurge,nmadvise,purged} -->
stats.arenas.<i>.{dirty,muzzy}_{npurge,nmadvise,purged}
This resolves #521.
2017-03-09 14:42:57 +08:00
|
|
|
"Unexpected mallctl() failure");
|
|
|
|
|
2017-05-18 01:47:00 +08:00
|
|
|
dirty_decay_ms = -2;
|
|
|
|
assert_d_eq(mallctl("arenas.dirty_decay_ms", NULL, NULL,
|
|
|
|
(void *)&dirty_decay_ms, sizeof(ssize_t)), EFAULT,
|
Implement two-phase decay-based purging.
Split decay-based purging into two phases, the first of which uses lazy
purging to convert dirty pages to "muzzy", and the second of which uses
forced purging, decommit, or unmapping to convert pages to clean or
destroy them altogether. Not all operating systems support lazy
purging, yet the application may provide extent hooks that implement
lazy purging, so care must be taken to dynamically omit the first phase
when necessary.
The mallctl interfaces change as follows:
- opt.decay_time --> opt.{dirty,muzzy}_decay_time
- arena.<i>.decay_time --> arena.<i>.{dirty,muzzy}_decay_time
- arenas.decay_time --> arenas.{dirty,muzzy}_decay_time
- stats.arenas.<i>.pdirty --> stats.arenas.<i>.p{dirty,muzzy}
- stats.arenas.<i>.{npurge,nmadvise,purged} -->
stats.arenas.<i>.{dirty,muzzy}_{npurge,nmadvise,purged}
This resolves #521.
2017-03-09 14:42:57 +08:00
|
|
|
"Unexpected mallctl() success");
|
|
|
|
|
2017-05-18 01:47:00 +08:00
|
|
|
dirty_decay_ms = 0x7fffffff;
|
|
|
|
assert_d_eq(mallctl("arenas.dirty_decay_ms", NULL, NULL,
|
|
|
|
(void *)&dirty_decay_ms, sizeof(ssize_t)), 0,
|
Implement two-phase decay-based purging.
Split decay-based purging into two phases, the first of which uses lazy
purging to convert dirty pages to "muzzy", and the second of which uses
forced purging, decommit, or unmapping to convert pages to clean or
destroy them altogether. Not all operating systems support lazy
purging, yet the application may provide extent hooks that implement
lazy purging, so care must be taken to dynamically omit the first phase
when necessary.
The mallctl interfaces change as follows:
- opt.decay_time --> opt.{dirty,muzzy}_decay_time
- arena.<i>.decay_time --> arena.<i>.{dirty,muzzy}_decay_time
- arenas.decay_time --> arenas.{dirty,muzzy}_decay_time
- stats.arenas.<i>.pdirty --> stats.arenas.<i>.p{dirty,muzzy}
- stats.arenas.<i>.{npurge,nmadvise,purged} -->
stats.arenas.<i>.{dirty,muzzy}_{npurge,nmadvise,purged}
This resolves #521.
2017-03-09 14:42:57 +08:00
|
|
|
"Expected mallctl() failure");
|
|
|
|
|
2017-05-18 01:47:00 +08:00
|
|
|
for (prev_dirty_decay_ms = dirty_decay_ms, dirty_decay_ms = -1;
|
|
|
|
dirty_decay_ms < 20; prev_dirty_decay_ms = dirty_decay_ms,
|
|
|
|
dirty_decay_ms++) {
|
|
|
|
ssize_t old_dirty_decay_ms;
|
Implement two-phase decay-based purging.
Split decay-based purging into two phases, the first of which uses lazy
purging to convert dirty pages to "muzzy", and the second of which uses
forced purging, decommit, or unmapping to convert pages to clean or
destroy them altogether. Not all operating systems support lazy
purging, yet the application may provide extent hooks that implement
lazy purging, so care must be taken to dynamically omit the first phase
when necessary.
The mallctl interfaces change as follows:
- opt.decay_time --> opt.{dirty,muzzy}_decay_time
- arena.<i>.decay_time --> arena.<i>.{dirty,muzzy}_decay_time
- arenas.decay_time --> arenas.{dirty,muzzy}_decay_time
- stats.arenas.<i>.pdirty --> stats.arenas.<i>.p{dirty,muzzy}
- stats.arenas.<i>.{npurge,nmadvise,purged} -->
stats.arenas.<i>.{dirty,muzzy}_{npurge,nmadvise,purged}
This resolves #521.
2017-03-09 14:42:57 +08:00
|
|
|
|
2017-05-18 01:47:00 +08:00
|
|
|
assert_d_eq(mallctl("arenas.dirty_decay_ms",
|
|
|
|
(void *)&old_dirty_decay_ms, &sz, (void *)&dirty_decay_ms,
|
|
|
|
sizeof(ssize_t)), 0, "Unexpected mallctl() failure");
|
|
|
|
assert_zd_eq(old_dirty_decay_ms, prev_dirty_decay_ms,
|
|
|
|
"Unexpected old arenas.dirty_decay_ms");
|
Implement two-phase decay-based purging.
Split decay-based purging into two phases, the first of which uses lazy
purging to convert dirty pages to "muzzy", and the second of which uses
forced purging, decommit, or unmapping to convert pages to clean or
destroy them altogether. Not all operating systems support lazy
purging, yet the application may provide extent hooks that implement
lazy purging, so care must be taken to dynamically omit the first phase
when necessary.
The mallctl interfaces change as follows:
- opt.decay_time --> opt.{dirty,muzzy}_decay_time
- arena.<i>.decay_time --> arena.<i>.{dirty,muzzy}_decay_time
- arenas.decay_time --> arenas.{dirty,muzzy}_decay_time
- stats.arenas.<i>.pdirty --> stats.arenas.<i>.p{dirty,muzzy}
- stats.arenas.<i>.{npurge,nmadvise,purged} -->
stats.arenas.<i>.{dirty,muzzy}_{npurge,nmadvise,purged}
This resolves #521.
2017-03-09 14:42:57 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
TEST_END
|
|
|
|
|
2017-05-18 01:47:00 +08:00
|
|
|
TEST_BEGIN(test_arenas_muzzy_decay_ms) {
|
|
|
|
ssize_t muzzy_decay_ms, orig_muzzy_decay_ms, prev_muzzy_decay_ms;
|
Implement two-phase decay-based purging.
Split decay-based purging into two phases, the first of which uses lazy
purging to convert dirty pages to "muzzy", and the second of which uses
forced purging, decommit, or unmapping to convert pages to clean or
destroy them altogether. Not all operating systems support lazy
purging, yet the application may provide extent hooks that implement
lazy purging, so care must be taken to dynamically omit the first phase
when necessary.
The mallctl interfaces change as follows:
- opt.decay_time --> opt.{dirty,muzzy}_decay_time
- arena.<i>.decay_time --> arena.<i>.{dirty,muzzy}_decay_time
- arenas.decay_time --> arenas.{dirty,muzzy}_decay_time
- stats.arenas.<i>.pdirty --> stats.arenas.<i>.p{dirty,muzzy}
- stats.arenas.<i>.{npurge,nmadvise,purged} -->
stats.arenas.<i>.{dirty,muzzy}_{npurge,nmadvise,purged}
This resolves #521.
2017-03-09 14:42:57 +08:00
|
|
|
size_t sz = sizeof(ssize_t);
|
|
|
|
|
2017-05-18 01:47:00 +08:00
|
|
|
assert_d_eq(mallctl("arenas.muzzy_decay_ms",
|
|
|
|
(void *)&orig_muzzy_decay_ms, &sz, NULL, 0), 0,
|
Implement two-phase decay-based purging.
Split decay-based purging into two phases, the first of which uses lazy
purging to convert dirty pages to "muzzy", and the second of which uses
forced purging, decommit, or unmapping to convert pages to clean or
destroy them altogether. Not all operating systems support lazy
purging, yet the application may provide extent hooks that implement
lazy purging, so care must be taken to dynamically omit the first phase
when necessary.
The mallctl interfaces change as follows:
- opt.decay_time --> opt.{dirty,muzzy}_decay_time
- arena.<i>.decay_time --> arena.<i>.{dirty,muzzy}_decay_time
- arenas.decay_time --> arenas.{dirty,muzzy}_decay_time
- stats.arenas.<i>.pdirty --> stats.arenas.<i>.p{dirty,muzzy}
- stats.arenas.<i>.{npurge,nmadvise,purged} -->
stats.arenas.<i>.{dirty,muzzy}_{npurge,nmadvise,purged}
This resolves #521.
2017-03-09 14:42:57 +08:00
|
|
|
"Unexpected mallctl() failure");
|
2016-02-20 12:09:31 +08:00
|
|
|
|
2017-05-18 01:47:00 +08:00
|
|
|
muzzy_decay_ms = -2;
|
|
|
|
assert_d_eq(mallctl("arenas.muzzy_decay_ms", NULL, NULL,
|
|
|
|
(void *)&muzzy_decay_ms, sizeof(ssize_t)), EFAULT,
|
2016-02-20 12:09:31 +08:00
|
|
|
"Unexpected mallctl() success");
|
|
|
|
|
2017-05-18 01:47:00 +08:00
|
|
|
muzzy_decay_ms = 0x7fffffff;
|
|
|
|
assert_d_eq(mallctl("arenas.muzzy_decay_ms", NULL, NULL,
|
|
|
|
(void *)&muzzy_decay_ms, sizeof(ssize_t)), 0,
|
2016-02-20 12:09:31 +08:00
|
|
|
"Expected mallctl() failure");
|
|
|
|
|
2017-05-18 01:47:00 +08:00
|
|
|
for (prev_muzzy_decay_ms = muzzy_decay_ms, muzzy_decay_ms = -1;
|
|
|
|
muzzy_decay_ms < 20; prev_muzzy_decay_ms = muzzy_decay_ms,
|
|
|
|
muzzy_decay_ms++) {
|
|
|
|
ssize_t old_muzzy_decay_ms;
|
2016-02-20 12:09:31 +08:00
|
|
|
|
2017-05-18 01:47:00 +08:00
|
|
|
assert_d_eq(mallctl("arenas.muzzy_decay_ms",
|
|
|
|
(void *)&old_muzzy_decay_ms, &sz, (void *)&muzzy_decay_ms,
|
|
|
|
sizeof(ssize_t)), 0, "Unexpected mallctl() failure");
|
|
|
|
assert_zd_eq(old_muzzy_decay_ms, prev_muzzy_decay_ms,
|
|
|
|
"Unexpected old arenas.muzzy_decay_ms");
|
2016-02-20 12:09:31 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
TEST_END
|
|
|
|
|
2017-01-16 08:56:30 +08:00
|
|
|
TEST_BEGIN(test_arenas_constants) {
|
2017-01-20 13:41:41 +08:00
|
|
|
#define TEST_ARENAS_CONSTANT(t, name, expected) do { \
|
2013-12-19 15:21:42 +08:00
|
|
|
t name; \
|
|
|
|
size_t sz = sizeof(t); \
|
2016-10-28 12:31:25 +08:00
|
|
|
assert_d_eq(mallctl("arenas."#name, (void *)&name, &sz, NULL, \
|
|
|
|
0), 0, "Unexpected mallctl() failure"); \
|
2013-12-19 15:21:42 +08:00
|
|
|
assert_zu_eq(name, expected, "Incorrect "#name" size"); \
|
|
|
|
} while (0)
|
|
|
|
|
|
|
|
TEST_ARENAS_CONSTANT(size_t, quantum, QUANTUM);
|
|
|
|
TEST_ARENAS_CONSTANT(size_t, page, PAGE);
|
2017-12-15 04:46:39 +08:00
|
|
|
TEST_ARENAS_CONSTANT(unsigned, nbins, SC_NBINS);
|
|
|
|
TEST_ARENAS_CONSTANT(unsigned, nlextents, SC_NSIZES - SC_NBINS);
|
2013-12-19 15:21:42 +08:00
|
|
|
|
|
|
|
#undef TEST_ARENAS_CONSTANT
|
|
|
|
}
|
|
|
|
TEST_END
|
|
|
|
|
2017-01-16 08:56:30 +08:00
|
|
|
TEST_BEGIN(test_arenas_bin_constants) {
|
2017-01-20 13:41:41 +08:00
|
|
|
#define TEST_ARENAS_BIN_CONSTANT(t, name, expected) do { \
|
2013-12-19 15:21:42 +08:00
|
|
|
t name; \
|
|
|
|
size_t sz = sizeof(t); \
|
2016-10-28 12:31:25 +08:00
|
|
|
assert_d_eq(mallctl("arenas.bin.0."#name, (void *)&name, &sz, \
|
|
|
|
NULL, 0), 0, "Unexpected mallctl() failure"); \
|
2013-12-19 15:21:42 +08:00
|
|
|
assert_zu_eq(name, expected, "Incorrect "#name" size"); \
|
|
|
|
} while (0)
|
|
|
|
|
2017-10-02 08:22:06 +08:00
|
|
|
TEST_ARENAS_BIN_CONSTANT(size_t, size, bin_infos[0].reg_size);
|
|
|
|
TEST_ARENAS_BIN_CONSTANT(uint32_t, nregs, bin_infos[0].nregs);
|
2016-05-30 09:34:50 +08:00
|
|
|
TEST_ARENAS_BIN_CONSTANT(size_t, slab_size,
|
2017-10-02 08:22:06 +08:00
|
|
|
bin_infos[0].slab_size);
|
2018-11-22 03:17:31 +08:00
|
|
|
TEST_ARENAS_BIN_CONSTANT(uint32_t, nshards, bin_infos[0].n_shards);
|
2013-12-19 15:21:42 +08:00
|
|
|
|
|
|
|
#undef TEST_ARENAS_BIN_CONSTANT
|
|
|
|
}
|
|
|
|
TEST_END
|
|
|
|
|
2017-01-16 08:56:30 +08:00
|
|
|
TEST_BEGIN(test_arenas_lextent_constants) {
|
2017-01-20 13:41:41 +08:00
|
|
|
#define TEST_ARENAS_LEXTENT_CONSTANT(t, name, expected) do { \
|
2014-10-13 13:53:59 +08:00
|
|
|
t name; \
|
|
|
|
size_t sz = sizeof(t); \
|
2016-10-28 12:31:25 +08:00
|
|
|
assert_d_eq(mallctl("arenas.lextent.0."#name, (void *)&name, \
|
|
|
|
&sz, NULL, 0), 0, "Unexpected mallctl() failure"); \
|
2014-10-13 13:53:59 +08:00
|
|
|
assert_zu_eq(name, expected, "Incorrect "#name" size"); \
|
|
|
|
} while (0)
|
|
|
|
|
2017-12-15 04:46:39 +08:00
|
|
|
TEST_ARENAS_LEXTENT_CONSTANT(size_t, size,
|
2018-07-12 07:05:58 +08:00
|
|
|
SC_LARGE_MINCLASS);
|
2014-10-13 13:53:59 +08:00
|
|
|
|
2016-06-02 04:53:56 +08:00
|
|
|
#undef TEST_ARENAS_LEXTENT_CONSTANT
|
2014-10-13 13:53:59 +08:00
|
|
|
}
|
|
|
|
TEST_END
|
|
|
|
|
2017-01-16 08:56:30 +08:00
|
|
|
TEST_BEGIN(test_arenas_create) {
|
2013-12-19 15:21:42 +08:00
|
|
|
unsigned narenas_before, arena, narenas_after;
|
|
|
|
size_t sz = sizeof(unsigned);
|
|
|
|
|
2016-10-28 12:31:25 +08:00
|
|
|
assert_d_eq(mallctl("arenas.narenas", (void *)&narenas_before, &sz,
|
|
|
|
NULL, 0), 0, "Unexpected mallctl() failure");
|
2017-01-04 00:21:29 +08:00
|
|
|
assert_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0,
|
2013-12-19 15:21:42 +08:00
|
|
|
"Unexpected mallctl() failure");
|
2016-10-28 12:31:25 +08:00
|
|
|
assert_d_eq(mallctl("arenas.narenas", (void *)&narenas_after, &sz, NULL,
|
|
|
|
0), 0, "Unexpected mallctl() failure");
|
2013-12-19 15:21:42 +08:00
|
|
|
|
|
|
|
assert_u_eq(narenas_before+1, narenas_after,
|
|
|
|
"Unexpected number of arenas before versus after extension");
|
|
|
|
assert_u_eq(arena, narenas_after-1, "Unexpected arena index");
|
|
|
|
}
|
|
|
|
TEST_END
|
|
|
|
|
2018-05-02 02:31:09 +08:00
|
|
|
TEST_BEGIN(test_arenas_lookup) {
|
|
|
|
unsigned arena, arena1;
|
|
|
|
void *ptr;
|
|
|
|
size_t sz = sizeof(unsigned);
|
|
|
|
|
|
|
|
assert_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0,
|
|
|
|
"Unexpected mallctl() failure");
|
|
|
|
ptr = mallocx(42, MALLOCX_ARENA(arena) | MALLOCX_TCACHE_NONE);
|
|
|
|
assert_ptr_not_null(ptr, "Unexpected mallocx() failure");
|
|
|
|
assert_d_eq(mallctl("arenas.lookup", &arena1, &sz, &ptr, sizeof(ptr)),
|
|
|
|
0, "Unexpected mallctl() failure");
|
|
|
|
assert_u_eq(arena, arena1, "Unexpected arena index");
|
|
|
|
dallocx(ptr, 0);
|
|
|
|
}
|
|
|
|
TEST_END
|
|
|
|
|
2017-01-16 08:56:30 +08:00
|
|
|
TEST_BEGIN(test_stats_arenas) {
|
2017-01-20 13:41:41 +08:00
|
|
|
#define TEST_STATS_ARENAS(t, name) do { \
|
2013-12-19 15:21:42 +08:00
|
|
|
t name; \
|
|
|
|
size_t sz = sizeof(t); \
|
2016-10-28 12:31:25 +08:00
|
|
|
assert_d_eq(mallctl("stats.arenas.0."#name, (void *)&name, &sz, \
|
|
|
|
NULL, 0), 0, "Unexpected mallctl() failure"); \
|
2013-12-19 15:21:42 +08:00
|
|
|
} while (0)
|
|
|
|
|
|
|
|
TEST_STATS_ARENAS(unsigned, nthreads);
|
2016-02-28 12:40:13 +08:00
|
|
|
TEST_STATS_ARENAS(const char *, dss);
|
2017-05-18 01:47:00 +08:00
|
|
|
TEST_STATS_ARENAS(ssize_t, dirty_decay_ms);
|
|
|
|
TEST_STATS_ARENAS(ssize_t, muzzy_decay_ms);
|
2013-12-19 15:21:42 +08:00
|
|
|
TEST_STATS_ARENAS(size_t, pactive);
|
|
|
|
TEST_STATS_ARENAS(size_t, pdirty);
|
|
|
|
|
|
|
|
#undef TEST_STATS_ARENAS
|
|
|
|
}
|
|
|
|
TEST_END
|
|
|
|
|
2018-04-25 05:45:41 +08:00
|
|
|
static void
|
|
|
|
alloc_hook(void *extra, UNUSED hook_alloc_t type, UNUSED void *result,
|
|
|
|
UNUSED uintptr_t result_raw, UNUSED uintptr_t args_raw[3]) {
|
|
|
|
*(bool *)extra = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
dalloc_hook(void *extra, UNUSED hook_dalloc_t type,
|
|
|
|
UNUSED void *address, UNUSED uintptr_t args_raw[3]) {
|
|
|
|
*(bool *)extra = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_BEGIN(test_hooks) {
|
|
|
|
bool hook_called = false;
|
|
|
|
hooks_t hooks = {&alloc_hook, &dalloc_hook, NULL, &hook_called};
|
|
|
|
void *handle = NULL;
|
|
|
|
size_t sz = sizeof(handle);
|
|
|
|
int err = mallctl("experimental.hooks.install", &handle, &sz, &hooks,
|
|
|
|
sizeof(hooks));
|
|
|
|
assert_d_eq(err, 0, "Hook installation failed");
|
|
|
|
assert_ptr_ne(handle, NULL, "Hook installation gave null handle");
|
|
|
|
void *ptr = mallocx(1, 0);
|
|
|
|
assert_true(hook_called, "Alloc hook not called");
|
|
|
|
hook_called = false;
|
|
|
|
free(ptr);
|
|
|
|
assert_true(hook_called, "Free hook not called");
|
|
|
|
|
|
|
|
err = mallctl("experimental.hooks.remove", NULL, NULL, &handle,
|
|
|
|
sizeof(handle));
|
|
|
|
assert_d_eq(err, 0, "Hook removal failed");
|
|
|
|
hook_called = false;
|
|
|
|
ptr = mallocx(1, 0);
|
|
|
|
free(ptr);
|
|
|
|
assert_false(hook_called, "Hook called after removal");
|
|
|
|
}
|
|
|
|
TEST_END
|
|
|
|
|
2018-05-01 07:24:36 +08:00
|
|
|
TEST_BEGIN(test_hooks_exhaustion) {
|
|
|
|
bool hook_called = false;
|
|
|
|
hooks_t hooks = {&alloc_hook, &dalloc_hook, NULL, &hook_called};
|
|
|
|
|
|
|
|
void *handle;
|
|
|
|
void *handles[HOOK_MAX];
|
|
|
|
size_t sz = sizeof(handle);
|
|
|
|
int err;
|
|
|
|
for (int i = 0; i < HOOK_MAX; i++) {
|
|
|
|
handle = NULL;
|
|
|
|
err = mallctl("experimental.hooks.install", &handle, &sz,
|
|
|
|
&hooks, sizeof(hooks));
|
|
|
|
assert_d_eq(err, 0, "Error installation hooks");
|
|
|
|
assert_ptr_ne(handle, NULL, "Got NULL handle");
|
|
|
|
handles[i] = handle;
|
|
|
|
}
|
|
|
|
err = mallctl("experimental.hooks.install", &handle, &sz, &hooks,
|
|
|
|
sizeof(hooks));
|
|
|
|
assert_d_eq(err, EAGAIN, "Should have failed hook installation");
|
|
|
|
for (int i = 0; i < HOOK_MAX; i++) {
|
|
|
|
err = mallctl("experimental.hooks.remove", NULL, NULL,
|
|
|
|
&handles[i], sizeof(handles[i]));
|
|
|
|
assert_d_eq(err, 0, "Hook removal failed");
|
|
|
|
}
|
|
|
|
/* Insertion failed, but then we removed some; it should work now. */
|
|
|
|
handle = NULL;
|
|
|
|
err = mallctl("experimental.hooks.install", &handle, &sz, &hooks,
|
|
|
|
sizeof(hooks));
|
|
|
|
assert_d_eq(err, 0, "Hook insertion failed");
|
|
|
|
assert_ptr_ne(handle, NULL, "Got NULL handle");
|
|
|
|
err = mallctl("experimental.hooks.remove", NULL, NULL, &handle,
|
|
|
|
sizeof(handle));
|
|
|
|
assert_d_eq(err, 0, "Hook removal failed");
|
|
|
|
}
|
|
|
|
TEST_END
|
|
|
|
|
2019-03-16 02:01:45 +08:00
|
|
|
/*
 * Assert that an experimental.utilization.<node> call with the given
 * (invalid) arguments fails with EINVAL, and that neither the output buffer
 * nor its reported size was modified.  Expansion sites must have out,
 * out_ref, out_sz, and out_sz_ref in scope.
 */
#define TEST_UTIL_EINVAL(node, a, b, c, d, why_inval) do {		\
	assert_d_eq(mallctl("experimental.utilization." node,		\
	    a, b, c, d), EINVAL, "Should fail when " why_inval);	\
	assert_zu_eq(out_sz, out_sz_ref,				\
	    "Output size touched when given invalid arguments");	\
	assert_d_eq(memcmp(out, out_ref, out_sz_ref), 0,		\
	    "Output content touched when given invalid arguments");	\
} while (0)
|
|
|
|
|
|
|
|
/*
 * Assert that an experimental.utilization.<node> call with valid arguments
 * succeeds, reports the expected output size, and actually wrote something
 * (output differs from the reference snapshot).  Expansion sites must have
 * out, out_ref, out_sz, out_sz_ref, in, and in_sz in scope.
 */
#define TEST_UTIL_VALID(node) do {					\
	assert_d_eq(mallctl("experimental.utilization." node,		\
	    out, &out_sz, in, in_sz), 0,				\
	    "Should return 0 on correct arguments");			\
	assert_zu_eq(out_sz, out_sz_ref, "incorrect output size");	\
	assert_d_ne(memcmp(out, out_ref, out_sz_ref), 0,		\
	    "Output content should be changed");			\
} while (0)
|
|
|
|
|
|
|
|
TEST_BEGIN(test_utilization_query) {
|
|
|
|
void *p = mallocx(1, 0);
|
|
|
|
void **in = &p;
|
|
|
|
size_t in_sz = sizeof(const void *);
|
|
|
|
size_t out_sz = sizeof(void *) + sizeof(size_t) * 5;
|
|
|
|
void *out = mallocx(out_sz, 0);
|
|
|
|
void *out_ref = mallocx(out_sz, 0);
|
|
|
|
size_t out_sz_ref = out_sz;
|
|
|
|
|
|
|
|
assert_ptr_not_null(p, "test pointer allocation failed");
|
|
|
|
assert_ptr_not_null(out, "test output allocation failed");
|
|
|
|
assert_ptr_not_null(out_ref, "test reference output allocation failed");
|
|
|
|
|
|
|
|
#define SLABCUR_READ(out) (*(void **)out)
|
|
|
|
#define COUNTS(out) ((size_t *)((void **)out + 1))
|
|
|
|
#define NFREE_READ(out) COUNTS(out)[0]
|
|
|
|
#define NREGS_READ(out) COUNTS(out)[1]
|
|
|
|
#define SIZE_READ(out) COUNTS(out)[2]
|
|
|
|
#define BIN_NFREE_READ(out) COUNTS(out)[3]
|
|
|
|
#define BIN_NREGS_READ(out) COUNTS(out)[4]
|
|
|
|
|
|
|
|
SLABCUR_READ(out) = NULL;
|
|
|
|
NFREE_READ(out) = NREGS_READ(out) = SIZE_READ(out) = -1;
|
|
|
|
BIN_NFREE_READ(out) = BIN_NREGS_READ(out) = -1;
|
|
|
|
memcpy(out_ref, out, out_sz);
|
|
|
|
|
|
|
|
/* Test invalid argument(s) errors */
|
|
|
|
#define TEST_UTIL_QUERY_EINVAL(a, b, c, d, why_inval) \
|
|
|
|
TEST_UTIL_EINVAL("query", a, b, c, d, why_inval)
|
|
|
|
|
|
|
|
TEST_UTIL_QUERY_EINVAL(NULL, &out_sz, in, in_sz, "old is NULL");
|
|
|
|
TEST_UTIL_QUERY_EINVAL(out, NULL, in, in_sz, "oldlenp is NULL");
|
|
|
|
TEST_UTIL_QUERY_EINVAL(out, &out_sz, NULL, in_sz, "newp is NULL");
|
|
|
|
TEST_UTIL_QUERY_EINVAL(out, &out_sz, in, 0, "newlen is zero");
|
|
|
|
in_sz -= 1;
|
|
|
|
TEST_UTIL_QUERY_EINVAL(out, &out_sz, in, in_sz, "invalid newlen");
|
|
|
|
in_sz += 1;
|
|
|
|
out_sz_ref = out_sz -= 2 * sizeof(size_t);
|
|
|
|
TEST_UTIL_QUERY_EINVAL(out, &out_sz, in, in_sz, "invalid *oldlenp");
|
|
|
|
out_sz_ref = out_sz += 2 * sizeof(size_t);
|
|
|
|
|
|
|
|
#undef TEST_UTIL_QUERY_EINVAL
|
|
|
|
|
|
|
|
/* Examine output for valid call */
|
|
|
|
TEST_UTIL_VALID("query");
|
|
|
|
assert_zu_le(NFREE_READ(out), NREGS_READ(out),
|
|
|
|
"Extent free count exceeded region count");
|
|
|
|
assert_zu_le(NREGS_READ(out), SIZE_READ(out),
|
|
|
|
"Extent region count exceeded size");
|
|
|
|
assert_zu_ne(NREGS_READ(out), 0,
|
|
|
|
"Extent region count must be positive");
|
|
|
|
assert_zu_ne(SIZE_READ(out), 0, "Extent size must be positive");
|
|
|
|
if (config_stats) {
|
|
|
|
assert_zu_le(BIN_NFREE_READ(out), BIN_NREGS_READ(out),
|
|
|
|
"Bin free count exceeded region count");
|
|
|
|
assert_zu_ne(BIN_NREGS_READ(out), 0,
|
|
|
|
"Bin region count must be positive");
|
|
|
|
assert_zu_le(NFREE_READ(out), BIN_NFREE_READ(out),
|
|
|
|
"Extent free count exceeded bin free count");
|
|
|
|
assert_zu_le(NREGS_READ(out), BIN_NREGS_READ(out),
|
|
|
|
"Extent region count exceeded bin region count");
|
|
|
|
assert_zu_eq(BIN_NREGS_READ(out) % NREGS_READ(out), 0,
|
|
|
|
"Bin region count isn't a multiple of extent region count");
|
|
|
|
assert_zu_le(NREGS_READ(out) - NFREE_READ(out),
|
|
|
|
BIN_NREGS_READ(out) - BIN_NFREE_READ(out),
|
|
|
|
"Extent utilized count exceeded bin utilized count");
|
|
|
|
} else {
|
|
|
|
assert_zu_eq(BIN_NFREE_READ(out), 0,
|
|
|
|
"Bin free count should be zero when stats are disabled");
|
|
|
|
assert_zu_eq(BIN_NREGS_READ(out), 0,
|
|
|
|
"Bin region count should be zero when stats are disabled");
|
|
|
|
}
|
|
|
|
assert_ptr_not_null(SLABCUR_READ(out), "Current slab is null");
|
|
|
|
assert_true(NFREE_READ(out) == 0 || SLABCUR_READ(out) <= p,
|
|
|
|
"Allocation should follow first fit principle");
|
|
|
|
|
|
|
|
#undef BIN_NREGS_READ
|
|
|
|
#undef BIN_NFREE_READ
|
|
|
|
#undef SIZE_READ
|
|
|
|
#undef NREGS_READ
|
|
|
|
#undef NFREE_READ
|
|
|
|
#undef COUNTS
|
|
|
|
#undef SLABCUR_READ
|
|
|
|
|
|
|
|
free(out_ref);
|
|
|
|
free(out);
|
|
|
|
free(p);
|
|
|
|
}
|
|
|
|
TEST_END
|
|
|
|
|
|
|
|
TEST_BEGIN(test_utilization_batch_query) {
|
|
|
|
void *p = mallocx(1, 0);
|
|
|
|
void *q = mallocx(1, 0);
|
|
|
|
void *in[] = {p, q};
|
|
|
|
size_t in_sz = sizeof(const void *) * 2;
|
|
|
|
size_t out[] = {-1, -1, -1, -1, -1, -1};
|
|
|
|
size_t out_sz = sizeof(size_t) * 6;
|
|
|
|
size_t out_ref[] = {-1, -1, -1, -1, -1, -1};
|
|
|
|
size_t out_sz_ref = out_sz;
|
|
|
|
|
|
|
|
assert_ptr_not_null(p, "test pointer allocation failed");
|
|
|
|
assert_ptr_not_null(q, "test pointer allocation failed");
|
|
|
|
|
|
|
|
/* Test invalid argument(s) errors */
|
|
|
|
#define TEST_UTIL_BATCH_EINVAL(a, b, c, d, why_inval) \
|
|
|
|
TEST_UTIL_EINVAL("batch_query", a, b, c, d, why_inval)
|
|
|
|
|
|
|
|
TEST_UTIL_BATCH_EINVAL(NULL, &out_sz, in, in_sz, "old is NULL");
|
|
|
|
TEST_UTIL_BATCH_EINVAL(out, NULL, in, in_sz, "oldlenp is NULL");
|
|
|
|
TEST_UTIL_BATCH_EINVAL(out, &out_sz, NULL, in_sz, "newp is NULL");
|
|
|
|
TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, 0, "newlen is zero");
|
|
|
|
in_sz -= 1;
|
|
|
|
TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, in_sz,
|
|
|
|
"newlen is not an exact multiple");
|
|
|
|
in_sz += 1;
|
|
|
|
out_sz_ref = out_sz -= 2 * sizeof(size_t);
|
|
|
|
TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, in_sz,
|
|
|
|
"*oldlenp is not an exact multiple");
|
|
|
|
out_sz_ref = out_sz += 2 * sizeof(size_t);
|
|
|
|
in_sz -= sizeof(const void *);
|
|
|
|
TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, in_sz,
|
|
|
|
"*oldlenp and newlen do not match");
|
|
|
|
in_sz += sizeof(const void *);
|
|
|
|
|
|
|
|
#undef TEST_UTIL_BATCH_EINVAL
|
|
|
|
|
|
|
|
/* Examine output for valid calls */
|
|
|
|
#define TEST_UTIL_BATCH_VALID TEST_UTIL_VALID("batch_query")
|
|
|
|
#define TEST_EQUAL_REF(i, message) \
|
|
|
|
assert_d_eq(memcmp(out + (i) * 3, out_ref + (i) * 3, 3), 0, message)
|
|
|
|
|
|
|
|
#define NFREE_READ(out, i) out[(i) * 3]
|
|
|
|
#define NREGS_READ(out, i) out[(i) * 3 + 1]
|
|
|
|
#define SIZE_READ(out, i) out[(i) * 3 + 2]
|
|
|
|
|
|
|
|
out_sz_ref = out_sz /= 2;
|
|
|
|
in_sz /= 2;
|
|
|
|
TEST_UTIL_BATCH_VALID;
|
|
|
|
assert_zu_le(NFREE_READ(out, 0), NREGS_READ(out, 0),
|
|
|
|
"Extent free count exceeded region count");
|
|
|
|
assert_zu_le(NREGS_READ(out, 0), SIZE_READ(out, 0),
|
|
|
|
"Extent region count exceeded size");
|
|
|
|
assert_zu_ne(NREGS_READ(out, 0), 0,
|
|
|
|
"Extent region count must be positive");
|
|
|
|
assert_zu_ne(SIZE_READ(out, 0), 0, "Extent size must be positive");
|
|
|
|
TEST_EQUAL_REF(1, "Should not overwrite content beyond what's needed");
|
|
|
|
in_sz *= 2;
|
|
|
|
out_sz_ref = out_sz *= 2;
|
|
|
|
|
|
|
|
memcpy(out_ref, out, 3 * sizeof(size_t));
|
|
|
|
TEST_UTIL_BATCH_VALID;
|
|
|
|
TEST_EQUAL_REF(0, "Statistics should be stable across calls");
|
|
|
|
assert_zu_le(NFREE_READ(out, 1), NREGS_READ(out, 1),
|
|
|
|
"Extent free count exceeded region count");
|
|
|
|
assert_zu_eq(NREGS_READ(out, 0), NREGS_READ(out, 1),
|
|
|
|
"Extent region count should be same for same region size");
|
|
|
|
assert_zu_eq(SIZE_READ(out, 0), SIZE_READ(out, 1),
|
|
|
|
"Extent size should be same for same region size");
|
|
|
|
|
|
|
|
#undef SIZE_READ
|
|
|
|
#undef NREGS_READ
|
|
|
|
#undef NFREE_READ
|
|
|
|
|
|
|
|
#undef TEST_EQUAL_REF
|
|
|
|
#undef TEST_UTIL_BATCH_VALID
|
|
|
|
|
|
|
|
free(q);
|
|
|
|
free(p);
|
|
|
|
}
|
|
|
|
TEST_END
|
|
|
|
|
|
|
|
#undef TEST_UTIL_VALID
|
|
|
|
#undef TEST_UTIL_EINVAL
|
|
|
|
|
2013-12-19 15:21:42 +08:00
|
|
|
int
|
2017-01-16 08:56:30 +08:00
|
|
|
main(void) {
|
2017-01-20 10:15:45 +08:00
|
|
|
return test(
|
2013-12-19 15:21:42 +08:00
|
|
|
test_mallctl_errors,
|
|
|
|
test_mallctlnametomib_errors,
|
|
|
|
test_mallctlbymib_errors,
|
|
|
|
test_mallctl_read_write,
|
|
|
|
test_mallctlnametomib_short_mib,
|
|
|
|
test_mallctl_config,
|
|
|
|
test_mallctl_opt,
|
|
|
|
test_manpage_example,
|
2015-01-30 07:30:47 +08:00
|
|
|
test_tcache_none,
|
|
|
|
test_tcache,
|
2013-12-19 15:21:42 +08:00
|
|
|
test_thread_arena,
|
2017-01-05 02:21:53 +08:00
|
|
|
test_arena_i_initialized,
|
2017-05-18 01:47:00 +08:00
|
|
|
test_arena_i_dirty_decay_ms,
|
|
|
|
test_arena_i_muzzy_decay_ms,
|
2013-12-19 15:21:42 +08:00
|
|
|
test_arena_i_purge,
|
2016-02-20 12:09:31 +08:00
|
|
|
test_arena_i_decay,
|
2013-12-19 15:21:42 +08:00
|
|
|
test_arena_i_dss,
|
2017-11-03 08:48:39 +08:00
|
|
|
test_arena_i_retain_grow_limit,
|
2017-05-18 01:47:00 +08:00
|
|
|
test_arenas_dirty_decay_ms,
|
|
|
|
test_arenas_muzzy_decay_ms,
|
2013-12-19 15:21:42 +08:00
|
|
|
test_arenas_constants,
|
|
|
|
test_arenas_bin_constants,
|
2016-06-01 05:50:21 +08:00
|
|
|
test_arenas_lextent_constants,
|
2017-01-04 00:21:29 +08:00
|
|
|
test_arenas_create,
|
2018-05-02 02:31:09 +08:00
|
|
|
test_arenas_lookup,
|
2018-04-25 05:45:41 +08:00
|
|
|
test_stats_arenas,
|
2018-05-01 07:24:36 +08:00
|
|
|
test_hooks,
|
2019-03-16 02:01:45 +08:00
|
|
|
test_hooks_exhaustion,
|
|
|
|
test_utilization_query,
|
|
|
|
test_utilization_batch_query);
|
2013-12-19 15:21:42 +08:00
|
|
|
}
|