#include "test/jemalloc_test.h"

#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/hook.h"
#include "jemalloc/internal/util.h"

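/*
 * mallctl() follows a sysctl(3)-style contract:
 *
 *   int mallctl(const char *name, void *oldp, size_t *oldlenp,
 *       void *newp, size_t newlen);
 *
 * A read passes oldp/oldlenp; a write passes newp/newlen.  The test below
 * exercises the documented error returns: ENOENT for an unknown name, EPERM
 * for a write to a read-only node, and EINVAL when the caller's buffer size
 * does not match the node's value size, in either direction.
 */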
TEST_BEGIN(test_mallctl_errors) {
	uint64_t epoch;
	size_t sz;

	expect_d_eq(mallctl("no_such_name", NULL, NULL, NULL, 0), ENOENT,
	    "mallctl() should return ENOENT for non-existent names");

	expect_d_eq(mallctl("version", NULL, NULL, "0.0.0", strlen("0.0.0")),
	    EPERM, "mallctl() should return EPERM on attempt to write "
	    "read-only value");

	expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
	    sizeof(epoch)-1), EINVAL,
	    "mallctl() should return EINVAL for input size mismatch");
	expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
	    sizeof(epoch)+1), EINVAL,
	    "mallctl() should return EINVAL for input size mismatch");

	sz = sizeof(epoch)-1;
	expect_d_eq(mallctl("epoch", (void *)&epoch, &sz, NULL, 0), EINVAL,
	    "mallctl() should return EINVAL for output size mismatch");
	sz = sizeof(epoch)+1;
	expect_d_eq(mallctl("epoch", (void *)&epoch, &sz, NULL, 0), EINVAL,
	    "mallctl() should return EINVAL for output size mismatch");
}
TEST_END

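/*
 * The MIB (Management Information Base) interface splits the name lookup
 * from the access:
 *
 *   int mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp);
 *   int mallctlbymib(const size_t *mib, size_t miblen, void *oldp,
 *       size_t *oldlenp, void *newp, size_t newlen);
 *
 * Translating an unknown name must fail with ENOENT, just as mallctl() does.
 */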
TEST_BEGIN(test_mallctlnametomib_errors) {
	size_t mib[1];
	size_t miblen;

	miblen = sizeof(mib)/sizeof(size_t);
	expect_d_eq(mallctlnametomib("no_such_name", mib, &miblen), ENOENT,
	    "mallctlnametomib() should return ENOENT for non-existent names");
}
TEST_END

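/*
 * The MIB-based interface must report the same errors as the name-based one:
 * EPERM for a write to a read-only node, and EINVAL for a size mismatch in
 * either direction.
 */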
TEST_BEGIN(test_mallctlbymib_errors) {
	uint64_t epoch;
	size_t sz;
	size_t mib[1];
	size_t miblen;

	miblen = sizeof(mib)/sizeof(size_t);
	expect_d_eq(mallctlnametomib("version", mib, &miblen), 0,
	    "Unexpected mallctlnametomib() failure");

	expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, "0.0.0",
	    strlen("0.0.0")), EPERM, "mallctlbymib() should return EPERM on "
	    "attempt to write read-only value");

	miblen = sizeof(mib)/sizeof(size_t);
	expect_d_eq(mallctlnametomib("epoch", mib, &miblen), 0,
	    "Unexpected mallctlnametomib() failure");

	expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&epoch,
	    sizeof(epoch)-1), EINVAL,
	    "mallctlbymib() should return EINVAL for input size mismatch");
	expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&epoch,
	    sizeof(epoch)+1), EINVAL,
	    "mallctlbymib() should return EINVAL for input size mismatch");

	sz = sizeof(epoch)-1;
	expect_d_eq(mallctlbymib(mib, miblen, (void *)&epoch, &sz, NULL, 0),
	    EINVAL,
	    "mallctlbymib() should return EINVAL for output size mismatch");
	sz = sizeof(epoch)+1;
	expect_d_eq(mallctlbymib(mib, miblen, (void *)&epoch, &sz, NULL, 0),
	    EINVAL,
	    "mallctlbymib() should return EINVAL for output size mismatch");
}
TEST_END

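/*
 * "epoch" is a read-write uint64_t node, which makes it convenient for
 * exercising all four access modes: blind (neither oldp nor newp), read-only,
 * write-only, and simultaneous read+write.
 */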
TEST_BEGIN(test_mallctl_read_write) {
	uint64_t old_epoch, new_epoch;
	size_t sz = sizeof(old_epoch);

	/* Blind. */
	expect_d_eq(mallctl("epoch", NULL, NULL, NULL, 0), 0,
	    "Unexpected mallctl() failure");
	expect_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");

	/* Read. */
	expect_d_eq(mallctl("epoch", (void *)&old_epoch, &sz, NULL, 0), 0,
	    "Unexpected mallctl() failure");
	expect_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");

	/* Write. */
	expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&new_epoch,
	    sizeof(new_epoch)), 0, "Unexpected mallctl() failure");
	expect_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");

	/* Read+write. */
	expect_d_eq(mallctl("epoch", (void *)&old_epoch, &sz,
	    (void *)&new_epoch, sizeof(new_epoch)), 0,
	    "Unexpected mallctl() failure");
	expect_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
}
TEST_END

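/*
 * When the provided mib array is shorter than the full path,
 * mallctlnametomib() must truncate the output to *miblenp components and
 * must not write past the end of the caller's array; the sentinel value 42
 * below detects such an overwrite.
 */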
TEST_BEGIN(test_mallctlnametomib_short_mib) {
	size_t mib[4];
	size_t miblen;

	miblen = 3;
	mib[3] = 42;
	expect_d_eq(mallctlnametomib("arenas.bin.0.nregs", mib, &miblen), 0,
	    "Unexpected mallctlnametomib() failure");
	expect_zu_eq(miblen, 3, "Unexpected mib output length");
	expect_zu_eq(mib[3], 42,
	    "mallctlnametomib() wrote past the end of the input mib");
}
TEST_END

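/*
 * Conversely, when the name is shorter than the mib capacity ("arenas.bin.0"
 * has only three components), *miblenp must shrink to the actual path length
 * and trailing elements must be left untouched.
 */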
TEST_BEGIN(test_mallctlnametomib_short_name) {
	size_t mib[4];
	size_t miblen;

	miblen = 4;
	mib[3] = 42;
	expect_d_eq(mallctlnametomib("arenas.bin.0", mib, &miblen), 0,
	    "Unexpected mallctlnametomib() failure");
	expect_zu_eq(miblen, 3, "Unexpected mib output length");
	expect_zu_eq(mib[3], 42,
	    "mallctlnametomib() wrote past the end of the input mib");
}
TEST_END

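/*
 * ctl_mibnametomib() is an internal helper (declared in
 * jemalloc/internal/ctl.h) that, as exercised here, extends an existing MIB
 * prefix of a given length by resolving one further name component, avoiding
 * a full name lookup each time.  A partial path is a valid MIB prefix but
 * not a complete node, so mallctlbymib() on it must fail with ENOENT.
 */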
TEST_BEGIN(test_mallctlmibnametomib) {
	size_t mib[4];
	size_t miblen = 4;
	uint32_t result, result_ref;
	size_t len_result = sizeof(uint32_t);

	tsd_t *tsd = tsd_fetch();

	/* Error cases. */
	assert_d_eq(ctl_mibnametomib(tsd, mib, 0, "bob", &miblen), ENOENT, "");
	assert_zu_eq(miblen, 4, "");
	assert_d_eq(ctl_mibnametomib(tsd, mib, 0, "9999", &miblen), ENOENT, "");
	assert_zu_eq(miblen, 4, "");

	/* Valid case. */
	assert_d_eq(ctl_mibnametomib(tsd, mib, 0, "arenas", &miblen), 0, "");
	assert_zu_eq(miblen, 1, "");
	miblen = 4;
	assert_d_eq(ctl_mibnametomib(tsd, mib, 1, "bin", &miblen), 0, "");
	assert_zu_eq(miblen, 2, "");
	expect_d_eq(mallctlbymib(mib, miblen, &result, &len_result, NULL, 0),
	    ENOENT, "mallctlbymib() should fail on partial path");

	/* Error cases. */
	miblen = 4;
	assert_d_eq(ctl_mibnametomib(tsd, mib, 2, "bob", &miblen), ENOENT, "");
	assert_zu_eq(miblen, 4, "");
	assert_d_eq(ctl_mibnametomib(tsd, mib, 2, "9999", &miblen), ENOENT, "");
	assert_zu_eq(miblen, 4, "");

	/* Valid case. */
	assert_d_eq(ctl_mibnametomib(tsd, mib, 2, "0", &miblen), 0, "");
	assert_zu_eq(miblen, 3, "");
	expect_d_eq(mallctlbymib(mib, miblen, &result, &len_result, NULL, 0),
	    ENOENT, "mallctlbymib() should fail on partial path");

	/* Error cases. */
	miblen = 4;
	assert_d_eq(ctl_mibnametomib(tsd, mib, 3, "bob", &miblen), ENOENT, "");
	assert_zu_eq(miblen, 4, "");
	assert_d_eq(ctl_mibnametomib(tsd, mib, 3, "9999", &miblen), ENOENT, "");
	assert_zu_eq(miblen, 4, "");

	/* Valid case. */
	assert_d_eq(ctl_mibnametomib(tsd, mib, 3, "nregs", &miblen), 0, "");
	assert_zu_eq(miblen, 4, "");
	assert_d_eq(mallctlbymib(mib, miblen, &result, &len_result, NULL, 0),
	    0, "Unexpected mallctlbymib() failure");
	assert_d_eq(mallctl("arenas.bin.0.nregs", &result_ref, &len_result,
	    NULL, 0), 0, "Unexpected mallctl() failure");
	expect_zu_eq(result, result_ref,
	    "mallctlbymib() and mallctl() returned different results");
}
TEST_END

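/*
 * ctl_bymibname() combines both steps: it resolves the remaining name
 * components relative to a MIB prefix and performs the read/write in one
 * call.  Each valid case below names the same node, arenas.bin.0.nregs,
 * starting from a different prefix depth, so every call must agree with the
 * plain mallctl() reference value.
 */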
TEST_BEGIN(test_mallctlbymibname) {
	size_t mib[4];
	size_t miblen = 4;
	uint32_t result, result_ref;
	size_t len_result = sizeof(uint32_t);

	tsd_t *tsd = tsd_fetch();

	/* Error cases. */

	assert_d_eq(mallctlnametomib("arenas", mib, &miblen), 0,
	    "Unexpected mallctlnametomib() failure");
	assert_zu_eq(miblen, 1, "");

	miblen = 4;
	assert_d_eq(ctl_bymibname(tsd, mib, 1, "bin.0", &miblen,
	    &result, &len_result, NULL, 0), ENOENT, "");
	miblen = 4;
	assert_d_eq(ctl_bymibname(tsd, mib, 1, "bin.0.bob", &miblen,
	    &result, &len_result, NULL, 0), ENOENT, "");
	assert_zu_eq(miblen, 4, "");

	/* Valid cases. */

	assert_d_eq(mallctl("arenas.bin.0.nregs", &result_ref, &len_result,
	    NULL, 0), 0, "Unexpected mallctl() failure");
	miblen = 4;

	assert_d_eq(ctl_bymibname(tsd, mib, 0, "arenas.bin.0.nregs", &miblen,
	    &result, &len_result, NULL, 0), 0, "");
	assert_zu_eq(miblen, 4, "");
	expect_zu_eq(result, result_ref, "Unexpected result");

	assert_d_eq(ctl_bymibname(tsd, mib, 1, "bin.0.nregs", &miblen, &result,
	    &len_result, NULL, 0), 0, "");
	assert_zu_eq(miblen, 4, "");
	expect_zu_eq(result, result_ref, "Unexpected result");

	assert_d_eq(ctl_bymibname(tsd, mib, 2, "0.nregs", &miblen, &result,
	    &len_result, NULL, 0), 0, "");
	assert_zu_eq(miblen, 4, "");
	expect_zu_eq(result, result_ref, "Unexpected result");

	assert_d_eq(ctl_bymibname(tsd, mib, 3, "nregs", &miblen, &result,
	    &len_result, NULL, 0), 0, "");
	assert_zu_eq(miblen, 4, "");
	expect_zu_eq(result, result_ref, "Unexpected result");
}
TEST_END

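/*
 * Every config.* node reflects a compile-time setting and must match the
 * corresponding config_* constant baked into the library.
 */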
TEST_BEGIN(test_mallctl_config) {
#define TEST_MALLCTL_CONFIG(config, t) do {				\
	t oldval;							\
	size_t sz = sizeof(oldval);					\
	expect_d_eq(mallctl("config."#config, (void *)&oldval, &sz,	\
	    NULL, 0), 0, "Unexpected mallctl() failure");		\
	expect_b_eq(oldval, config_##config, "Incorrect config value");	\
	expect_zu_eq(sz, sizeof(oldval), "Unexpected output size");	\
} while (0)

	TEST_MALLCTL_CONFIG(cache_oblivious, bool);
	TEST_MALLCTL_CONFIG(debug, bool);
	TEST_MALLCTL_CONFIG(fill, bool);
	TEST_MALLCTL_CONFIG(lazy_lock, bool);
	TEST_MALLCTL_CONFIG(malloc_conf, const char *);
	TEST_MALLCTL_CONFIG(prof, bool);
	TEST_MALLCTL_CONFIG(prof_libgcc, bool);
	TEST_MALLCTL_CONFIG(prof_libunwind, bool);
	TEST_MALLCTL_CONFIG(stats, bool);
	TEST_MALLCTL_CONFIG(utrace, bool);
	TEST_MALLCTL_CONFIG(xmalloc, bool);

#undef TEST_MALLCTL_CONFIG
}
TEST_END

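/*
 * opt.* nodes exist only when the corresponding feature was configured in;
 * reading an option of a disabled feature must fail with ENOENT.  The third
 * macro argument names the config_* gate ("always" selects the local
 * config_always, which is unconditionally true).
 */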
TEST_BEGIN(test_mallctl_opt) {
	bool config_always = true;

#define TEST_MALLCTL_OPT(t, opt, config) do {				\
	t oldval;							\
	size_t sz = sizeof(oldval);					\
	int expected = config_##config ? 0 : ENOENT;			\
	int result = mallctl("opt."#opt, (void *)&oldval, &sz, NULL,	\
	    0);								\
	expect_d_eq(result, expected,					\
	    "Unexpected mallctl() result for opt."#opt);		\
	expect_zu_eq(sz, sizeof(oldval), "Unexpected output size");	\
} while (0)

	TEST_MALLCTL_OPT(bool, abort, always);
	TEST_MALLCTL_OPT(bool, abort_conf, always);
	TEST_MALLCTL_OPT(bool, cache_oblivious, always);
	TEST_MALLCTL_OPT(bool, trust_madvise, always);
	TEST_MALLCTL_OPT(bool, confirm_conf, always);
	TEST_MALLCTL_OPT(const char *, metadata_thp, always);
	TEST_MALLCTL_OPT(bool, retain, always);
	TEST_MALLCTL_OPT(const char *, dss, always);
	TEST_MALLCTL_OPT(bool, hpa, always);
	TEST_MALLCTL_OPT(size_t, hpa_slab_max_alloc, always);
	TEST_MALLCTL_OPT(size_t, hpa_sec_nshards, always);
	TEST_MALLCTL_OPT(size_t, hpa_sec_max_alloc, always);
	TEST_MALLCTL_OPT(size_t, hpa_sec_max_bytes, always);
	TEST_MALLCTL_OPT(size_t, hpa_sec_bytes_after_flush, always);
	TEST_MALLCTL_OPT(size_t, hpa_sec_batch_fill_extra, always);
	TEST_MALLCTL_OPT(unsigned, narenas, always);
	TEST_MALLCTL_OPT(const char *, percpu_arena, always);
	TEST_MALLCTL_OPT(size_t, oversize_threshold, always);
	TEST_MALLCTL_OPT(bool, background_thread, always);
	TEST_MALLCTL_OPT(ssize_t, dirty_decay_ms, always);
	TEST_MALLCTL_OPT(ssize_t, muzzy_decay_ms, always);
	TEST_MALLCTL_OPT(bool, stats_print, always);
	TEST_MALLCTL_OPT(const char *, stats_print_opts, always);
	TEST_MALLCTL_OPT(int64_t, stats_interval, always);
	TEST_MALLCTL_OPT(const char *, stats_interval_opts, always);
	TEST_MALLCTL_OPT(const char *, junk, fill);
	TEST_MALLCTL_OPT(bool, zero, fill);
	TEST_MALLCTL_OPT(bool, utrace, utrace);
	TEST_MALLCTL_OPT(bool, xmalloc, xmalloc);
	TEST_MALLCTL_OPT(bool, tcache, always);
	TEST_MALLCTL_OPT(size_t, lg_extent_max_active_fit, always);
	TEST_MALLCTL_OPT(size_t, tcache_max, always);
	TEST_MALLCTL_OPT(const char *, thp, always);
	TEST_MALLCTL_OPT(const char *, zero_realloc, always);
	TEST_MALLCTL_OPT(bool, prof, prof);
	TEST_MALLCTL_OPT(const char *, prof_prefix, prof);
	TEST_MALLCTL_OPT(bool, prof_active, prof);
	TEST_MALLCTL_OPT(unsigned, prof_bt_max, prof);
	TEST_MALLCTL_OPT(ssize_t, lg_prof_sample, prof);
	TEST_MALLCTL_OPT(bool, prof_accum, prof);
	TEST_MALLCTL_OPT(ssize_t, lg_prof_interval, prof);
	TEST_MALLCTL_OPT(bool, prof_gdump, prof);
	TEST_MALLCTL_OPT(bool, prof_final, prof);
	TEST_MALLCTL_OPT(bool, prof_leak, prof);
	TEST_MALLCTL_OPT(bool, prof_leak_error, prof);
	TEST_MALLCTL_OPT(ssize_t, prof_recent_alloc_max, prof);
	TEST_MALLCTL_OPT(bool, prof_stats, prof);
	TEST_MALLCTL_OPT(bool, prof_sys_thread_name, prof);
	TEST_MALLCTL_OPT(ssize_t, lg_san_uaf_align, uaf_detection);
	TEST_MALLCTL_OPT(unsigned, debug_double_free_max_scan, always);

#undef TEST_MALLCTL_OPT
}
TEST_END

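/*
 * This mirrors the usage example from the jemalloc manual page: read
 * arenas.nbins, translate arenas.bin.0.size into a MIB once, then patch
 * mib[2] with each bin index instead of re-resolving the name per iteration.
 */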
TEST_BEGIN(test_manpage_example) {
	unsigned nbins, i;
	size_t mib[4];
	size_t len, miblen;

	len = sizeof(nbins);
	expect_d_eq(mallctl("arenas.nbins", (void *)&nbins, &len, NULL, 0), 0,
	    "Unexpected mallctl() failure");

	miblen = 4;
	expect_d_eq(mallctlnametomib("arenas.bin.0.size", mib, &miblen), 0,
	    "Unexpected mallctlnametomib() failure");
	for (i = 0; i < nbins; i++) {
		size_t bin_size;

		mib[2] = i;
		len = sizeof(bin_size);
		expect_d_eq(mallctlbymib(mib, miblen, (void *)&bin_size, &len,
		    NULL, 0), 0, "Unexpected mallctlbymib() failure");
		/* Do something with bin_size... */
	}
}
TEST_END

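/*
 * MALLOCX_TCACHE_NONE bypasses the thread cache on a per-call basis, e.g.
 * mallocx(size, MALLOCX_TCACHE_NONE).  If p0 is freed through the tcache and
 * q is not, the next tcache-backed allocation of the same size should hand
 * back p0's region (unless profiling or UAF detection perturbs the cache,
 * hence the guards below).
 */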
TEST_BEGIN(test_tcache_none) {
	test_skip_if(!opt_tcache);

	/* Allocate p and q. */
	void *p0 = mallocx(42, 0);
	expect_ptr_not_null(p0, "Unexpected mallocx() failure");
	void *q = mallocx(42, 0);
	expect_ptr_not_null(q, "Unexpected mallocx() failure");

	/* Deallocate p and q, but bypass the tcache for q. */
	dallocx(p0, 0);
	dallocx(q, MALLOCX_TCACHE_NONE);

	/* Make sure that tcache-based allocation returns p, not q. */
	void *p1 = mallocx(42, 0);
	expect_ptr_not_null(p1, "Unexpected mallocx() failure");
	if (!opt_prof && !san_uaf_detection_enabled()) {
		expect_ptr_eq(p0, p1,
		    "Expected tcache to allocate cached region");
	}

	/* Clean up. */
	dallocx(p1, MALLOCX_TCACHE_NONE);
}
TEST_END

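/*
 * Explicit tcaches are created and destroyed via the tcache.create /
 * tcache.destroy mallctls and selected per call with MALLOCX_TCACHE(tc).
 * The test walks the full lifecycle: create, destroy/recreate (ID
 * recycling), flush while empty, populate, verify cache hits, flush while
 * non-empty, and destroy.
 */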
TEST_BEGIN(test_tcache) {
#define NTCACHES 10
	unsigned tis[NTCACHES];
	void *ps[NTCACHES];
	void *qs[NTCACHES];
	unsigned i;
	size_t sz, psz, qsz;

	psz = 42;
	qsz = nallocx(psz, 0) + 1;

	/* Create tcaches. */
	for (i = 0; i < NTCACHES; i++) {
		sz = sizeof(unsigned);
		expect_d_eq(mallctl("tcache.create", (void *)&tis[i], &sz, NULL,
		    0), 0, "Unexpected mallctl() failure, i=%u", i);
	}

	/* Exercise tcache ID recycling. */
	for (i = 0; i < NTCACHES; i++) {
		expect_d_eq(mallctl("tcache.destroy", NULL, NULL,
		    (void *)&tis[i], sizeof(unsigned)), 0,
		    "Unexpected mallctl() failure, i=%u", i);
	}
	for (i = 0; i < NTCACHES; i++) {
		sz = sizeof(unsigned);
		expect_d_eq(mallctl("tcache.create", (void *)&tis[i], &sz, NULL,
		    0), 0, "Unexpected mallctl() failure, i=%u", i);
	}

	/* Flush empty tcaches. */
	for (i = 0; i < NTCACHES; i++) {
		expect_d_eq(mallctl("tcache.flush", NULL, NULL, (void *)&tis[i],
		    sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u",
		    i);
	}

	/* Cache some allocations. */
	for (i = 0; i < NTCACHES; i++) {
		ps[i] = mallocx(psz, MALLOCX_TCACHE(tis[i]));
		expect_ptr_not_null(ps[i], "Unexpected mallocx() failure, i=%u",
		    i);
		dallocx(ps[i], MALLOCX_TCACHE(tis[i]));

		qs[i] = mallocx(qsz, MALLOCX_TCACHE(tis[i]));
		expect_ptr_not_null(qs[i], "Unexpected mallocx() failure, i=%u",
		    i);
		dallocx(qs[i], MALLOCX_TCACHE(tis[i]));
	}

	/* Verify that tcaches allocate cached regions. */
	for (i = 0; i < NTCACHES; i++) {
		void *p0 = ps[i];
		ps[i] = mallocx(psz, MALLOCX_TCACHE(tis[i]));
		expect_ptr_not_null(ps[i], "Unexpected mallocx() failure, i=%u",
		    i);
		if (!san_uaf_detection_enabled()) {
			expect_ptr_eq(ps[i], p0, "Expected mallocx() to "
			    "allocate cached region, i=%u", i);
		}
	}

	/* Verify that reallocation uses cached regions. */
	for (i = 0; i < NTCACHES; i++) {
		void *q0 = qs[i];
		qs[i] = rallocx(ps[i], qsz, MALLOCX_TCACHE(tis[i]));
		expect_ptr_not_null(qs[i], "Unexpected rallocx() failure, i=%u",
		    i);
		if (!san_uaf_detection_enabled()) {
			expect_ptr_eq(qs[i], q0, "Expected rallocx() to "
			    "allocate cached region, i=%u", i);
		}
		/* Avoid undefined behavior in case of test failure. */
		if (qs[i] == NULL) {
			qs[i] = ps[i];
		}
	}
	for (i = 0; i < NTCACHES; i++) {
		dallocx(qs[i], MALLOCX_TCACHE(tis[i]));
	}

	/* Flush some non-empty tcaches. */
	for (i = 0; i < NTCACHES/2; i++) {
		expect_d_eq(mallctl("tcache.flush", NULL, NULL, (void *)&tis[i],
		    sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u",
		    i);
	}

	/* Destroy tcaches. */
	for (i = 0; i < NTCACHES; i++) {
		expect_d_eq(mallctl("tcache.destroy", NULL, NULL,
		    (void *)&tis[i], sizeof(unsigned)), 0,
		    "Unexpected mallctl() failure, i=%u", i);
	}
}
TEST_END

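/*
 * thread.arena reads (and, when arenas are manually assigned, writes) the
 * calling thread's arena index.  With opt.percpu_arena enabled the binding
 * is CPU-driven, so rebinding to a different arena must be rejected with
 * EPERM.  When oversize_threshold is in effect, one extra arena serves as
 * the dedicated huge arena and is excluded from the opt.narenas comparison.
 */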
TEST_BEGIN(test_thread_arena) {
	unsigned old_arena_ind, new_arena_ind, narenas;

	const char *opa;
	size_t sz = sizeof(opa);
	expect_d_eq(mallctl("opt.percpu_arena", (void *)&opa, &sz, NULL, 0), 0,
	    "Unexpected mallctl() failure");

	sz = sizeof(unsigned);
	expect_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
	    0, "Unexpected mallctl() failure");
	if (opt_oversize_threshold != 0) {
		narenas--;
	}
	expect_u_eq(narenas, opt_narenas, "Number of arenas incorrect");

	if (strcmp(opa, "disabled") == 0) {
		new_arena_ind = narenas - 1;
		expect_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
		    (void *)&new_arena_ind, sizeof(unsigned)), 0,
		    "Unexpected mallctl() failure");
		new_arena_ind = 0;
		expect_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
		    (void *)&new_arena_ind, sizeof(unsigned)), 0,
		    "Unexpected mallctl() failure");
	} else {
		expect_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
		    NULL, 0), 0, "Unexpected mallctl() failure");
		new_arena_ind = percpu_arena_ind_limit(opt_percpu_arena) - 1;
		if (old_arena_ind != new_arena_ind) {
			expect_d_eq(mallctl("thread.arena",
			    (void *)&old_arena_ind, &sz, (void *)&new_arena_ind,
			    sizeof(unsigned)), EPERM, "thread.arena ctl "
			    "should not be allowed with percpu arena");
		}
	}
}
TEST_END

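/*
 * arena.<i>.initialized reports whether arena i has been touched;
 * MALLCTL_ARENAS_ALL substitutes for <i> to address the merged view, which
 * must always read as initialized.
 */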
TEST_BEGIN(test_arena_i_initialized) {
	unsigned narenas, i;
	size_t sz;
	size_t mib[3];
	size_t miblen = sizeof(mib) / sizeof(size_t);
	bool initialized;

	sz = sizeof(narenas);
	expect_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
	    0, "Unexpected mallctl() failure");

	expect_d_eq(mallctlnametomib("arena.0.initialized", mib, &miblen), 0,
	    "Unexpected mallctlnametomib() failure");
	for (i = 0; i < narenas; i++) {
		mib[1] = i;
		sz = sizeof(initialized);
		expect_d_eq(mallctlbymib(mib, miblen, &initialized, &sz, NULL,
		    0), 0, "Unexpected mallctl() failure");
	}

	mib[1] = MALLCTL_ARENAS_ALL;
	sz = sizeof(initialized);
	expect_d_eq(mallctlbymib(mib, miblen, &initialized, &sz, NULL, 0), 0,
	    "Unexpected mallctl() failure");
	expect_true(initialized,
	    "Merged arena statistics should always be initialized");

	/* Equivalent to the above but using mallctl() directly. */
	sz = sizeof(initialized);
	expect_d_eq(mallctl(
	    "arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".initialized",
	    (void *)&initialized, &sz, NULL, 0), 0,
	    "Unexpected mallctl() failure");
	expect_true(initialized,
	    "Merged arena statistics should always be initialized");
}
TEST_END

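/*
 * arena.<i>.dirty_decay_ms accepts any value >= -1 (-1 disables decay);
 * values below -1 must be rejected, which jemalloc reports as EFAULT here.
 * Each write should return the previously stored value through oldp.
 */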
TEST_BEGIN(test_arena_i_dirty_decay_ms) {
	ssize_t dirty_decay_ms, orig_dirty_decay_ms, prev_dirty_decay_ms;
	size_t sz = sizeof(ssize_t);

	expect_d_eq(mallctl("arena.0.dirty_decay_ms",
	    (void *)&orig_dirty_decay_ms, &sz, NULL, 0), 0,
	    "Unexpected mallctl() failure");

	dirty_decay_ms = -2;
	expect_d_eq(mallctl("arena.0.dirty_decay_ms", NULL, NULL,
	    (void *)&dirty_decay_ms, sizeof(ssize_t)), EFAULT,
	    "Unexpected mallctl() success");

	dirty_decay_ms = 0x7fffffff;
	expect_d_eq(mallctl("arena.0.dirty_decay_ms", NULL, NULL,
	    (void *)&dirty_decay_ms, sizeof(ssize_t)), 0,
	    "Unexpected mallctl() failure");

	for (prev_dirty_decay_ms = dirty_decay_ms, dirty_decay_ms = -1;
	    dirty_decay_ms < 20; prev_dirty_decay_ms = dirty_decay_ms,
	    dirty_decay_ms++) {
		ssize_t old_dirty_decay_ms;

		expect_d_eq(mallctl("arena.0.dirty_decay_ms",
		    (void *)&old_dirty_decay_ms, &sz, (void *)&dirty_decay_ms,
		    sizeof(ssize_t)), 0, "Unexpected mallctl() failure");
		expect_zd_eq(old_dirty_decay_ms, prev_dirty_decay_ms,
		    "Unexpected old arena.0.dirty_decay_ms");
	}
}
TEST_END

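/* Same contract as above, for the muzzy (lazily purged) page decay time. */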
TEST_BEGIN(test_arena_i_muzzy_decay_ms) {
	ssize_t muzzy_decay_ms, orig_muzzy_decay_ms, prev_muzzy_decay_ms;
	size_t sz = sizeof(ssize_t);

	expect_d_eq(mallctl("arena.0.muzzy_decay_ms",
	    (void *)&orig_muzzy_decay_ms, &sz, NULL, 0), 0,
	    "Unexpected mallctl() failure");

	muzzy_decay_ms = -2;
	expect_d_eq(mallctl("arena.0.muzzy_decay_ms", NULL, NULL,
	    (void *)&muzzy_decay_ms, sizeof(ssize_t)), EFAULT,
	    "Unexpected mallctl() success");

	muzzy_decay_ms = 0x7fffffff;
	expect_d_eq(mallctl("arena.0.muzzy_decay_ms", NULL, NULL,
	    (void *)&muzzy_decay_ms, sizeof(ssize_t)), 0,
	    "Unexpected mallctl() failure");

	for (prev_muzzy_decay_ms = muzzy_decay_ms, muzzy_decay_ms = -1;
	    muzzy_decay_ms < 20; prev_muzzy_decay_ms = muzzy_decay_ms,
	    muzzy_decay_ms++) {
		ssize_t old_muzzy_decay_ms;

		expect_d_eq(mallctl("arena.0.muzzy_decay_ms",
		    (void *)&old_muzzy_decay_ms, &sz, (void *)&muzzy_decay_ms,
		    sizeof(ssize_t)), 0, "Unexpected mallctl() failure");
		expect_zd_eq(old_muzzy_decay_ms, prev_muzzy_decay_ms,
		    "Unexpected old arena.0.muzzy_decay_ms");
	}
}
TEST_END

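/*
 * arena.<i>.purge is a write-only trigger: it forces purging of arena i's
 * unused dirty pages.  Both the boundary index narenas and the
 * MALLCTL_ARENAS_ALL pseudo-index must be accepted.
 */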
TEST_BEGIN(test_arena_i_purge) {
	unsigned narenas;
	size_t sz = sizeof(unsigned);
	size_t mib[3];
	size_t miblen = 3;

	expect_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
	    "Unexpected mallctl() failure");

	expect_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
	    0, "Unexpected mallctl() failure");
	expect_d_eq(mallctlnametomib("arena.0.purge", mib, &miblen), 0,
	    "Unexpected mallctlnametomib() failure");
	mib[1] = narenas;
	expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
	    "Unexpected mallctlbymib() failure");

	mib[1] = MALLCTL_ARENAS_ALL;
	expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
	    "Unexpected mallctlbymib() failure");
}
TEST_END

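/* arena.<i>.decay triggers decay-driven purging; same indexing rules. */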
TEST_BEGIN(test_arena_i_decay) {
	unsigned narenas;
	size_t sz = sizeof(unsigned);
	size_t mib[3];
	size_t miblen = 3;

	expect_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
	    "Unexpected mallctl() failure");

	expect_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
	    0, "Unexpected mallctl() failure");
	expect_d_eq(mallctlnametomib("arena.0.decay", mib, &miblen), 0,
	    "Unexpected mallctlnametomib() failure");
	mib[1] = narenas;
	expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
	    "Unexpected mallctlbymib() failure");

	mib[1] = MALLCTL_ARENAS_ALL;
	expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
	    "Unexpected mallctlbymib() failure");
}
TEST_END

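/*
 * arena.<i>.dss sets the precedence of sbrk(2)-based allocation for the
 * arena ("primary", "secondary", or "disabled").  Writes return the previous
 * setting through oldp, and the default is never expected to be "primary".
 */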
TEST_BEGIN(test_arena_i_dss) {
	const char *dss_prec_old, *dss_prec_new;
	size_t sz = sizeof(dss_prec_old);
	size_t mib[3];
	size_t miblen;

	miblen = sizeof(mib)/sizeof(size_t);
	expect_d_eq(mallctlnametomib("arena.0.dss", mib, &miblen), 0,
	    "Unexpected mallctlnametomib() error");

	dss_prec_new = "disabled";
	expect_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz,
	    (void *)&dss_prec_new, sizeof(dss_prec_new)), 0,
	    "Unexpected mallctl() failure");
	expect_str_ne(dss_prec_old, "primary",
	    "Unexpected default for dss precedence");

	expect_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_new, &sz,
	    (void *)&dss_prec_old, sizeof(dss_prec_old)), 0,
	    "Unexpected mallctl() failure");

	expect_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, NULL,
	    0), 0, "Unexpected mallctl() failure");
	expect_str_ne(dss_prec_old, "primary",
	    "Unexpected value for dss precedence");

	mib[1] = narenas_total_get();
	dss_prec_new = "disabled";
	expect_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz,
	    (void *)&dss_prec_new, sizeof(dss_prec_new)), 0,
	    "Unexpected mallctl() failure");
	expect_str_ne(dss_prec_old, "primary",
	    "Unexpected default for dss precedence");

	expect_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_new, &sz,
	    (void *)&dss_prec_old, sizeof(dss_prec_new)), 0,
	    "Unexpected mallctl() failure");

	expect_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, NULL,
	    0), 0, "Unexpected mallctl() failure");
	expect_str_ne(dss_prec_old, "primary",
	    "Unexpected value for dss precedence");
}
TEST_END

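/*
 * arena.<i>.name labels an arena for stats output.  A manually created arena
 * defaults to "manual_<ind>", and a name longer than ARENA_NAME_LEN must be
 * stored truncated rather than overflowing the buffer.
 */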
TEST_BEGIN(test_arena_i_name) {
	unsigned arena_ind;
	size_t ind_sz = sizeof(arena_ind);
	size_t mib[3];
	size_t miblen;
	char name_old[ARENA_NAME_LEN];
	char *name_oldp = name_old;
	size_t sz = sizeof(name_oldp);
	char default_name[ARENA_NAME_LEN];
	const char *name_new = "test name";
	const char *super_long_name = "A name longer than ARENA_NAME_LEN";
	size_t super_long_name_len = strlen(super_long_name);
	assert(super_long_name_len > ARENA_NAME_LEN);

	miblen = sizeof(mib)/sizeof(size_t);
	expect_d_eq(mallctlnametomib("arena.0.name", mib, &miblen), 0,
	    "Unexpected mallctlnametomib() error");

	expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &ind_sz, NULL,
	    0), 0, "Unexpected mallctl() failure");
	mib[1] = arena_ind;

	malloc_snprintf(default_name, sizeof(default_name), "manual_%u",
	    arena_ind);
	expect_d_eq(mallctlbymib(mib, miblen, (void *)&name_oldp, &sz,
	    (void *)&name_new, sizeof(name_new)), 0,
	    "Unexpected mallctl() failure");
	expect_str_eq(name_old, default_name,
	    "Unexpected default value for arena name");

	expect_d_eq(mallctlbymib(mib, miblen, (void *)&name_oldp, &sz,
	    (void *)&super_long_name, sizeof(super_long_name)), 0,
	    "Unexpected mallctl() failure");
	expect_str_eq(name_old, name_new, "Unexpected value for arena name");

	expect_d_eq(mallctlbymib(mib, miblen, (void *)&name_oldp, &sz,
	    NULL, 0), 0, "Unexpected mallctl() failure");
	int cmp = strncmp(name_old, super_long_name, ARENA_NAME_LEN - 1);
	expect_true(cmp == 0, "Unexpected value for long arena name");
}
TEST_END

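/*
 * arena.<i>.retain_grow_limit caps extent growth when retain is enabled; it
 * defaults to SC_LARGE_MAXCLASS, rejects sub-page values (reported as
 * EFAULT), and rounds a stored value down to a page size class, e.g.
 * PAGE + 1 reads back as PAGE.
 */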
TEST_BEGIN(test_arena_i_retain_grow_limit) {
|
|
|
|
size_t old_limit, new_limit, default_limit;
|
|
|
|
size_t mib[3];
|
|
|
|
size_t miblen;
|
|
|
|
|
|
|
|
bool retain_enabled;
|
|
|
|
size_t sz = sizeof(retain_enabled);
|
2020-02-19 06:39:06 +08:00
|
|
|
expect_d_eq(mallctl("opt.retain", &retain_enabled, &sz, NULL, 0),
|
2017-11-03 08:48:39 +08:00
|
|
|
0, "Unexpected mallctl() failure");
|
|
|
|
test_skip_if(!retain_enabled);
|
|
|
|
|
|
|
|
sz = sizeof(default_limit);
|
|
|
|
miblen = sizeof(mib)/sizeof(size_t);
|
2020-02-19 06:39:06 +08:00
|
|
|
expect_d_eq(mallctlnametomib("arena.0.retain_grow_limit", mib, &miblen),
|
2017-11-03 08:48:39 +08:00
|
|
|
0, "Unexpected mallctlnametomib() error");
|
|
|
|
|
2020-02-19 06:39:06 +08:00
|
|
|
expect_d_eq(mallctlbymib(mib, miblen, &default_limit, &sz, NULL, 0), 0,
|
2017-11-03 08:48:39 +08:00
|
|
|
"Unexpected mallctl() failure");
|
2020-02-19 06:39:06 +08:00
|
|
|
expect_zu_eq(default_limit, SC_LARGE_MAXCLASS,
|
2017-11-03 08:48:39 +08:00
|
|
|
"Unexpected default for retain_grow_limit");
|
|
|
|
|
|
|
|
new_limit = PAGE - 1;
|
2020-02-19 06:39:06 +08:00
|
|
|
expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &new_limit,
|
2017-11-03 08:48:39 +08:00
|
|
|
sizeof(new_limit)), EFAULT, "Unexpected mallctl() success");
|
|
|
|
|
|
|
|
new_limit = PAGE + 1;
|
2020-02-19 06:39:06 +08:00
|
|
|
expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &new_limit,
|
2017-11-03 08:48:39 +08:00
|
|
|
sizeof(new_limit)), 0, "Unexpected mallctl() failure");
|
2020-02-19 06:39:06 +08:00
|
|
|
expect_d_eq(mallctlbymib(mib, miblen, &old_limit, &sz, NULL, 0), 0,
|
2017-11-03 08:48:39 +08:00
|
|
|
"Unexpected mallctl() failure");
|
2020-02-19 06:39:06 +08:00
|
|
|
expect_zu_eq(old_limit, PAGE,
|
2017-11-03 08:48:39 +08:00
|
|
|
"Unexpected value for retain_grow_limit");
|
|
|
|
|
|
|
|
/* Expect grow less than psize class 10. */
|
|
|
|
new_limit = sz_pind2sz(10) - 1;
|
2020-02-19 06:39:06 +08:00
|
|
|
expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &new_limit,
|
2017-11-03 08:48:39 +08:00
|
|
|
sizeof(new_limit)), 0, "Unexpected mallctl() failure");
|
2020-02-19 06:39:06 +08:00
|
|
|
expect_d_eq(mallctlbymib(mib, miblen, &old_limit, &sz, NULL, 0), 0,
|
2017-11-03 08:48:39 +08:00
|
|
|
"Unexpected mallctl() failure");
|
2020-02-19 06:39:06 +08:00
|
|
|
expect_zu_eq(old_limit, sz_pind2sz(9),
|
2017-11-03 08:48:39 +08:00
|
|
|
"Unexpected value for retain_grow_limit");
|
|
|
|
|
|
|
|
/* Restore to default. */
|
2020-02-19 06:39:06 +08:00
|
|
|
expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &default_limit,
|
2017-11-03 08:48:39 +08:00
|
|
|
sizeof(default_limit)), 0, "Unexpected mallctl() failure");
|
|
|
|
}
|
|
|
|
TEST_END
|
|
|
|
|
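/*
 * arenas.dirty_decay_ms is the dirty decay time (in milliseconds)
 * inherited by newly created arenas: -1 disables decay, values below -1
 * are rejected with EFAULT, and large positive values are accepted. A
 * minimal read-modify-write, sketched with an arbitrary 1000ms value:
 *
 *	ssize_t old, new = 1000; size_t sz = sizeof(old);
 *	mallctl("arenas.dirty_decay_ms", &old, &sz, &new, sizeof(new));
 */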
TEST_BEGIN(test_arenas_dirty_decay_ms) {
	ssize_t dirty_decay_ms, orig_dirty_decay_ms, prev_dirty_decay_ms;
	size_t sz = sizeof(ssize_t);

	expect_d_eq(mallctl("arenas.dirty_decay_ms",
	    (void *)&orig_dirty_decay_ms, &sz, NULL, 0), 0,
	    "Unexpected mallctl() failure");

	dirty_decay_ms = -2;
	expect_d_eq(mallctl("arenas.dirty_decay_ms", NULL, NULL,
	    (void *)&dirty_decay_ms, sizeof(ssize_t)), EFAULT,
	    "Unexpected mallctl() success");

	dirty_decay_ms = 0x7fffffff;
	expect_d_eq(mallctl("arenas.dirty_decay_ms", NULL, NULL,
	    (void *)&dirty_decay_ms, sizeof(ssize_t)), 0,
	    "Unexpected mallctl() failure");

	for (prev_dirty_decay_ms = dirty_decay_ms, dirty_decay_ms = -1;
	    dirty_decay_ms < 20; prev_dirty_decay_ms = dirty_decay_ms,
	    dirty_decay_ms++) {
		ssize_t old_dirty_decay_ms;

		expect_d_eq(mallctl("arenas.dirty_decay_ms",
		    (void *)&old_dirty_decay_ms, &sz, (void *)&dirty_decay_ms,
		    sizeof(ssize_t)), 0, "Unexpected mallctl() failure");
		expect_zd_eq(old_dirty_decay_ms, prev_dirty_decay_ms,
		    "Unexpected old arenas.dirty_decay_ms");
	}
}
TEST_END

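/* arenas.muzzy_decay_ms mirrors arenas.dirty_decay_ms, for muzzy pages. */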
TEST_BEGIN(test_arenas_muzzy_decay_ms) {
	ssize_t muzzy_decay_ms, orig_muzzy_decay_ms, prev_muzzy_decay_ms;
	size_t sz = sizeof(ssize_t);

	expect_d_eq(mallctl("arenas.muzzy_decay_ms",
	    (void *)&orig_muzzy_decay_ms, &sz, NULL, 0), 0,
	    "Unexpected mallctl() failure");

	muzzy_decay_ms = -2;
	expect_d_eq(mallctl("arenas.muzzy_decay_ms", NULL, NULL,
	    (void *)&muzzy_decay_ms, sizeof(ssize_t)), EFAULT,
	    "Unexpected mallctl() success");

	muzzy_decay_ms = 0x7fffffff;
	expect_d_eq(mallctl("arenas.muzzy_decay_ms", NULL, NULL,
	    (void *)&muzzy_decay_ms, sizeof(ssize_t)), 0,
	    "Unexpected mallctl() failure");

	for (prev_muzzy_decay_ms = muzzy_decay_ms, muzzy_decay_ms = -1;
	    muzzy_decay_ms < 20; prev_muzzy_decay_ms = muzzy_decay_ms,
	    muzzy_decay_ms++) {
		ssize_t old_muzzy_decay_ms;

		expect_d_eq(mallctl("arenas.muzzy_decay_ms",
		    (void *)&old_muzzy_decay_ms, &sz, (void *)&muzzy_decay_ms,
		    sizeof(ssize_t)), 0, "Unexpected mallctl() failure");
		expect_zd_eq(old_muzzy_decay_ms, prev_muzzy_decay_ms,
		    "Unexpected old arenas.muzzy_decay_ms");
	}
}
TEST_END

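/*
 * Read-only arenas.* constants should agree with their compile-time
 * definitions; the macro below reads each via mallctl() and compares.
 */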
TEST_BEGIN(test_arenas_constants) {
#define TEST_ARENAS_CONSTANT(t, name, expected) do {			\
	t name;								\
	size_t sz = sizeof(t);						\
	expect_d_eq(mallctl("arenas."#name, (void *)&name, &sz, NULL,	\
	    0), 0, "Unexpected mallctl() failure");			\
	expect_zu_eq(name, expected, "Incorrect "#name" size");		\
} while (0)

	TEST_ARENAS_CONSTANT(size_t, quantum, QUANTUM);
	TEST_ARENAS_CONSTANT(size_t, page, PAGE);
	TEST_ARENAS_CONSTANT(unsigned, nbins, SC_NBINS);
	TEST_ARENAS_CONSTANT(unsigned, nlextents, SC_NSIZES - SC_NBINS);

#undef TEST_ARENAS_CONSTANT
}
TEST_END

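/*
 * arenas.bin.<i>.* should mirror the in-process bin_infos[i] descriptor;
 * bin 0 is spot-checked below.
 */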
TEST_BEGIN(test_arenas_bin_constants) {
#define TEST_ARENAS_BIN_CONSTANT(t, name, expected) do {		\
	t name;								\
	size_t sz = sizeof(t);						\
	expect_d_eq(mallctl("arenas.bin.0."#name, (void *)&name, &sz,	\
	    NULL, 0), 0, "Unexpected mallctl() failure");		\
	expect_zu_eq(name, expected, "Incorrect "#name" size");		\
} while (0)

	TEST_ARENAS_BIN_CONSTANT(size_t, size, bin_infos[0].reg_size);
	TEST_ARENAS_BIN_CONSTANT(uint32_t, nregs, bin_infos[0].nregs);
	TEST_ARENAS_BIN_CONSTANT(size_t, slab_size,
	    bin_infos[0].slab_size);
	TEST_ARENAS_BIN_CONSTANT(uint32_t, nshards, bin_infos[0].n_shards);

#undef TEST_ARENAS_BIN_CONSTANT
}
TEST_END

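/* arenas.lextent.0.size is the smallest large size class. */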
TEST_BEGIN(test_arenas_lextent_constants) {
#define TEST_ARENAS_LEXTENT_CONSTANT(t, name, expected) do {		\
	t name;								\
	size_t sz = sizeof(t);						\
	expect_d_eq(mallctl("arenas.lextent.0."#name, (void *)&name,	\
	    &sz, NULL, 0), 0, "Unexpected mallctl() failure");		\
	expect_zu_eq(name, expected, "Incorrect "#name" size");	\
} while (0)

	TEST_ARENAS_LEXTENT_CONSTANT(size_t, size,
	    SC_LARGE_MINCLASS);

#undef TEST_ARENAS_LEXTENT_CONSTANT
}
TEST_END

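/*
 * arenas.create appends a new arena, so arenas.narenas should grow by
 * exactly one and the returned index should be the new maximum.
 */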
TEST_BEGIN(test_arenas_create) {
	unsigned narenas_before, arena, narenas_after;
	size_t sz = sizeof(unsigned);

	expect_d_eq(mallctl("arenas.narenas", (void *)&narenas_before, &sz,
	    NULL, 0), 0, "Unexpected mallctl() failure");
	expect_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0,
	    "Unexpected mallctl() failure");
	expect_d_eq(mallctl("arenas.narenas", (void *)&narenas_after, &sz, NULL,
	    0), 0, "Unexpected mallctl() failure");

	expect_u_eq(narenas_before+1, narenas_after,
	    "Unexpected number of arenas before versus after extension");
	expect_u_eq(arena, narenas_after-1, "Unexpected arena index");
}
TEST_END

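/*
 * arenas.lookup maps an allocation back to the arena that owns it; the
 * tcache is bypassed below so the allocation verifiably comes from the
 * newly created arena.
 */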
TEST_BEGIN(test_arenas_lookup) {
	unsigned arena, arena1;
	void *ptr;
	size_t sz = sizeof(unsigned);

	expect_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0,
	    "Unexpected mallctl() failure");
	ptr = mallocx(42, MALLOCX_ARENA(arena) | MALLOCX_TCACHE_NONE);
	expect_ptr_not_null(ptr, "Unexpected mallocx() failure");
	expect_d_eq(mallctl("arenas.lookup", &arena1, &sz, &ptr, sizeof(ptr)),
	    0, "Unexpected mallctl() failure");
	expect_u_eq(arena, arena1, "Unexpected arena index");
	dallocx(ptr, 0);
}
TEST_END

TEST_BEGIN(test_prof_active) {
	/*
	 * If config_prof is off, then the test for prof_active in
	 * test_mallctl_opt was already enough.
	 */
	test_skip_if(!config_prof);
	test_skip_if(opt_prof);

	bool active, old;
	size_t len = sizeof(bool);

	active = true;
	expect_d_eq(mallctl("prof.active", NULL, NULL, &active, len), ENOENT,
	    "Setting prof_active to true should fail when opt_prof is off");
	old = true;
	expect_d_eq(mallctl("prof.active", &old, &len, &active, len), ENOENT,
	    "Setting prof_active to true should fail when opt_prof is off");
	expect_true(old, "old value should not be touched when mallctl fails");
	active = false;
	expect_d_eq(mallctl("prof.active", NULL, NULL, &active, len), 0,
	    "Setting prof_active to false should succeed when opt_prof is off");
	expect_d_eq(mallctl("prof.active", &old, &len, &active, len), 0,
	    "Setting prof_active to false should succeed when opt_prof is off");
	expect_false(old, "prof_active should be false when opt_prof is off");
}
TEST_END

TEST_BEGIN(test_stats_arenas) {
#define TEST_STATS_ARENAS(t, name) do {					\
	t name;								\
	size_t sz = sizeof(t);						\
	expect_d_eq(mallctl("stats.arenas.0."#name, (void *)&name, &sz,	\
	    NULL, 0), 0, "Unexpected mallctl() failure");		\
} while (0)

	TEST_STATS_ARENAS(unsigned, nthreads);
	TEST_STATS_ARENAS(const char *, dss);
	TEST_STATS_ARENAS(ssize_t, dirty_decay_ms);
	TEST_STATS_ARENAS(ssize_t, muzzy_decay_ms);
	TEST_STATS_ARENAS(size_t, pactive);
	TEST_STATS_ARENAS(size_t, pdirty);

#undef TEST_STATS_ARENAS
}
TEST_END

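/*
 * experimental.hooks.install takes a hooks_t (alloc, dalloc, and expand
 * hooks plus an extra pointer threaded through to each callback) and
 * returns an opaque handle; experimental.hooks.remove takes that handle
 * back. The helpers below just record that they ran.
 */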
static void
alloc_hook(void *extra, UNUSED hook_alloc_t type, UNUSED void *result,
    UNUSED uintptr_t result_raw, UNUSED uintptr_t args_raw[3]) {
	*(bool *)extra = true;
}

static void
dalloc_hook(void *extra, UNUSED hook_dalloc_t type,
    UNUSED void *address, UNUSED uintptr_t args_raw[3]) {
	*(bool *)extra = true;
}

TEST_BEGIN(test_hooks) {
	bool hook_called = false;
	hooks_t hooks = {&alloc_hook, &dalloc_hook, NULL, &hook_called};
	void *handle = NULL;
	size_t sz = sizeof(handle);
	int err = mallctl("experimental.hooks.install", &handle, &sz, &hooks,
	    sizeof(hooks));
	expect_d_eq(err, 0, "Hook installation failed");
	expect_ptr_ne(handle, NULL, "Hook installation gave null handle");
	void *ptr = mallocx(1, 0);
	expect_true(hook_called, "Alloc hook not called");
	hook_called = false;
	free(ptr);
	expect_true(hook_called, "Free hook not called");

	err = mallctl("experimental.hooks.remove", NULL, NULL, &handle,
	    sizeof(handle));
	expect_d_eq(err, 0, "Hook removal failed");
	hook_called = false;
	ptr = mallocx(1, 0);
	free(ptr);
	expect_false(hook_called, "Hook called after removal");
}
TEST_END

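/*
 * At most HOOK_MAX hook sets can be installed at once; one more install
 * should fail with EAGAIN until a slot is freed.
 */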
TEST_BEGIN(test_hooks_exhaustion) {
	bool hook_called = false;
	hooks_t hooks = {&alloc_hook, &dalloc_hook, NULL, &hook_called};

	void *handle;
	void *handles[HOOK_MAX];
	size_t sz = sizeof(handle);
	int err;
	for (int i = 0; i < HOOK_MAX; i++) {
		handle = NULL;
		err = mallctl("experimental.hooks.install", &handle, &sz,
		    &hooks, sizeof(hooks));
		expect_d_eq(err, 0, "Error installing hooks");
		expect_ptr_ne(handle, NULL, "Got NULL handle");
		handles[i] = handle;
	}
	err = mallctl("experimental.hooks.install", &handle, &sz, &hooks,
	    sizeof(hooks));
	expect_d_eq(err, EAGAIN, "Should have failed hook installation");
	for (int i = 0; i < HOOK_MAX; i++) {
		err = mallctl("experimental.hooks.remove", NULL, NULL,
		    &handles[i], sizeof(handles[i]));
		expect_d_eq(err, 0, "Hook removal failed");
	}
	/* Insertion failed, but then we removed some; it should work now. */
	handle = NULL;
	err = mallctl("experimental.hooks.install", &handle, &sz, &hooks,
	    sizeof(hooks));
	expect_d_eq(err, 0, "Hook insertion failed");
	expect_ptr_ne(handle, NULL, "Got NULL handle");
	err = mallctl("experimental.hooks.remove", NULL, NULL, &handle,
	    sizeof(handle));
	expect_d_eq(err, 0, "Hook removal failed");
}
TEST_END

TEST_BEGIN(test_thread_idle) {
	/*
	 * We're cheating a little bit in this test, and inferring things about
	 * implementation internals (like tcache details). We have to;
	 * thread.idle has no guaranteed effects. We need stats to make these
	 * inferences.
	 */
	test_skip_if(!config_stats);

	int err;
	size_t sz;
	size_t miblen;

	bool tcache_enabled = false;
	sz = sizeof(tcache_enabled);
	err = mallctl("thread.tcache.enabled", &tcache_enabled, &sz, NULL, 0);
	expect_d_eq(err, 0, "");
	test_skip_if(!tcache_enabled);

	size_t tcache_max;
	sz = sizeof(tcache_max);
	err = mallctl("arenas.tcache_max", &tcache_max, &sz, NULL, 0);
	expect_d_eq(err, 0, "");
	test_skip_if(tcache_max == 0);

	unsigned arena_ind;
	sz = sizeof(arena_ind);
	err = mallctl("thread.arena", &arena_ind, &sz, NULL, 0);
	expect_d_eq(err, 0, "");

	/* We're going to do an allocation of size 1, which we know is small. */
	size_t mib[5];
	miblen = sizeof(mib)/sizeof(mib[0]);
	err = mallctlnametomib("stats.arenas.0.small.ndalloc", mib, &miblen);
	expect_d_eq(err, 0, "");
	mib[2] = arena_ind;

	/*
	 * This alloc and dalloc should leave something in the tcache, in a
	 * small size's cache bin.
	 */
	void *ptr = mallocx(1, 0);
	dallocx(ptr, 0);

	uint64_t epoch;
	err = mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));
	expect_d_eq(err, 0, "");

	uint64_t small_dalloc_pre_idle;
	sz = sizeof(small_dalloc_pre_idle);
	err = mallctlbymib(mib, miblen, &small_dalloc_pre_idle, &sz, NULL, 0);
	expect_d_eq(err, 0, "");

	err = mallctl("thread.idle", NULL, NULL, NULL, 0);
	expect_d_eq(err, 0, "");

	err = mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));
	expect_d_eq(err, 0, "");

	uint64_t small_dalloc_post_idle;
	sz = sizeof(small_dalloc_post_idle);
	err = mallctlbymib(mib, miblen, &small_dalloc_post_idle, &sz, NULL, 0);
	expect_d_eq(err, 0, "");

	expect_u64_lt(small_dalloc_pre_idle, small_dalloc_post_idle,
	    "Idle didn't flush the tcache");
}
TEST_END

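/*
 * thread.peak.read reports the high-water mark of this thread's net
 * allocated bytes since the last thread.peak.reset. Tracking is only
 * approximate, so the small-allocation checks below allow 10% slack.
 */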
TEST_BEGIN(test_thread_peak) {
	test_skip_if(!config_stats);

	/*
	 * We don't commit to any stable amount of accuracy for peak tracking
	 * (in practice, when this test was written, we made sure to be within
	 * 100k). But 10MB is big for more or less any definition of big.
	 */
	size_t big_size = 10 * 1024 * 1024;
	size_t small_size = 256;

	void *ptr;
	int err;
	size_t sz;
	uint64_t peak;
	sz = sizeof(uint64_t);

	err = mallctl("thread.peak.reset", NULL, NULL, NULL, 0);
	expect_d_eq(err, 0, "");
	ptr = mallocx(SC_SMALL_MAXCLASS, 0);
	err = mallctl("thread.peak.read", &peak, &sz, NULL, 0);
	expect_d_eq(err, 0, "");
	expect_u64_eq(peak, SC_SMALL_MAXCLASS, "Missed an update");
	free(ptr);
	err = mallctl("thread.peak.read", &peak, &sz, NULL, 0);
	expect_d_eq(err, 0, "");
	expect_u64_eq(peak, SC_SMALL_MAXCLASS, "Freeing changed peak");
	ptr = mallocx(big_size, 0);
	free(ptr);
	/*
	 * The peak should have hit big_size in the last two lines, even though
	 * the net allocated bytes has since dropped back down to zero. We
	 * should have noticed the peak change without having done any mallctl
	 * calls while net allocated bytes was high.
	 */
	err = mallctl("thread.peak.read", &peak, &sz, NULL, 0);
	expect_d_eq(err, 0, "");
	expect_u64_ge(peak, big_size, "Missed a peak change.");

	/* Allocate big_size, but using small allocations. */
	size_t nallocs = big_size / small_size;
	void **ptrs = calloc(nallocs, sizeof(void *));
	err = mallctl("thread.peak.reset", NULL, NULL, NULL, 0);
	expect_d_eq(err, 0, "");
	err = mallctl("thread.peak.read", &peak, &sz, NULL, 0);
	expect_d_eq(err, 0, "");
	expect_u64_eq(0, peak, "Missed a reset.");
	for (size_t i = 0; i < nallocs; i++) {
		ptrs[i] = mallocx(small_size, 0);
	}
	for (size_t i = 0; i < nallocs; i++) {
		free(ptrs[i]);
	}
	err = mallctl("thread.peak.read", &peak, &sz, NULL, 0);
	expect_d_eq(err, 0, "");
	/*
	 * We don't guarantee exactness; make sure we're within 10% of the
	 * peak, though.
	 */
	expect_u64_ge(peak, nallocx(small_size, 0) * nallocs * 9 / 10,
	    "Missed some peak changes.");
	expect_u64_le(peak, nallocx(small_size, 0) * nallocs * 11 / 10,
	    "Overcounted peak changes.");
	free(ptrs);
}
TEST_END

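/*
 * experimental.thread.activity_callback installs a thunk invoked with
 * this thread's cumulative allocated/deallocated byte counts; writing a
 * NULL callback disables the tracking again, as verified below.
 */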
typedef struct activity_test_data_s activity_test_data_t;
struct activity_test_data_s {
	uint64_t obtained_alloc;
	uint64_t obtained_dalloc;
};

static void
activity_test_callback(void *uctx, uint64_t alloc, uint64_t dalloc) {
	activity_test_data_t *test_data = (activity_test_data_t *)uctx;
	test_data->obtained_alloc = alloc;
	test_data->obtained_dalloc = dalloc;
}

TEST_BEGIN(test_thread_activity_callback) {
	test_skip_if(!config_stats);

	const size_t big_size = 10 * 1024 * 1024;
	void *ptr;
	int err;
	size_t sz;

	uint64_t *allocatedp;
	uint64_t *deallocatedp;
	sz = sizeof(allocatedp);
	err = mallctl("thread.allocatedp", &allocatedp, &sz, NULL, 0);
	assert_d_eq(0, err, "");
	err = mallctl("thread.deallocatedp", &deallocatedp, &sz, NULL, 0);
	assert_d_eq(0, err, "");

	activity_callback_thunk_t old_thunk = {(activity_callback_t)111,
	    (void *)222};

	activity_test_data_t test_data = {333, 444};
	activity_callback_thunk_t new_thunk =
	    {&activity_test_callback, &test_data};

	sz = sizeof(old_thunk);
	err = mallctl("experimental.thread.activity_callback", &old_thunk, &sz,
	    &new_thunk, sizeof(new_thunk));
	assert_d_eq(0, err, "");

	expect_true(old_thunk.callback == NULL, "Callback already installed");
	expect_true(old_thunk.uctx == NULL, "Callback data already installed");

	ptr = mallocx(big_size, 0);
	expect_u64_eq(test_data.obtained_alloc, *allocatedp, "");
	expect_u64_eq(test_data.obtained_dalloc, *deallocatedp, "");

	free(ptr);
	expect_u64_eq(test_data.obtained_alloc, *allocatedp, "");
	expect_u64_eq(test_data.obtained_dalloc, *deallocatedp, "");

	sz = sizeof(old_thunk);
	new_thunk = (activity_callback_thunk_t){ NULL, NULL };
	err = mallctl("experimental.thread.activity_callback", &old_thunk, &sz,
	    &new_thunk, sizeof(new_thunk));
	assert_d_eq(0, err, "");

	expect_true(old_thunk.callback == &activity_test_callback, "");
	expect_true(old_thunk.uctx == &test_data, "");

	/* Inserting NULL should have turned off tracking. */
	test_data.obtained_alloc = 333;
	test_data.obtained_dalloc = 444;
	ptr = mallocx(big_size, 0);
	free(ptr);
	expect_u64_eq(333, test_data.obtained_alloc, "");
	expect_u64_eq(444, test_data.obtained_dalloc, "");
}
TEST_END

int
main(void) {
	return test(
	    test_mallctl_errors,
	    test_mallctlnametomib_errors,
	    test_mallctlbymib_errors,
	    test_mallctl_read_write,
	    test_mallctlnametomib_short_mib,
	    test_mallctlnametomib_short_name,
	    test_mallctlmibnametomib,
	    test_mallctlbymibname,
	    test_mallctl_config,
	    test_mallctl_opt,
	    test_manpage_example,
	    test_tcache_none,
	    test_tcache,
	    test_thread_arena,
	    test_arena_i_initialized,
	    test_arena_i_dirty_decay_ms,
	    test_arena_i_muzzy_decay_ms,
	    test_arena_i_purge,
	    test_arena_i_decay,
	    test_arena_i_dss,
	    test_arena_i_name,
	    test_arena_i_retain_grow_limit,
	    test_arenas_dirty_decay_ms,
	    test_arenas_muzzy_decay_ms,
	    test_arenas_constants,
	    test_arenas_bin_constants,
	    test_arenas_lextent_constants,
	    test_arenas_create,
	    test_arenas_lookup,
	    test_prof_active,
	    test_stats_arenas,
	    test_hooks,
	    test_hooks_exhaustion,
	    test_thread_idle,
	    test_thread_peak,
	    test_thread_activity_callback);
}