Change assert_* to expect_* in tests

```sh
grep -Irl assert_ test/ | xargs sed -i \
    's/witness_assert/witness_do_not_replace/g';
grep -Irl assert_ test/ | xargs sed -i \
    's/malloc_mutex_assert_owner/malloc_mutex_do_not_replace_owner/g';

grep -Ir assert_ test/ | grep -o "[_a-zA-Z]*assert_[_a-zA-Z]*" | \
    grep -v "^assert_"; # confirm no output
grep -Irl assert_ test/ | xargs sed -i 's/assert_/expect_/g';

grep -Irl witness_do_not_replace test/ | xargs sed -i \
    's/witness_do_not_replace/witness_assert/g';
grep -Irl malloc_mutex_do_not_replace_owner test/ | xargs sed -i \
    's/malloc_mutex_do_not_replace_owner/malloc_mutex_assert_owner/g';
```
Yinan Zhang 2020-02-18 14:39:06 -08:00
parent 162c2bcf31
commit 21dfa4300d
85 changed files with 1854 additions and 1854 deletions
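
The script shields the two library-level identifiers that legitimately contain `assert_` (`witness_assert_*` and `malloc_mutex_assert_owner`) by giving them throwaway names, confirms that no other prefixed `assert_` identifiers remain in `test/`, applies the blanket `assert_` to `expect_` rename, and then restores the shielded names. The net effect on a test call site is sketched below; the test itself is hypothetical, but the macros and allocation calls are the ones appearing in the hunks that follow, and `test/jemalloc_test.h` is assumed to be the suite's usual header.

```c
/* Hypothetical illustration of the rename; not a file from this commit. */
#include "test/jemalloc_test.h"

TEST_BEGIN(test_rename_example) {
	void *p = mallocx(1, 0);
	/*
	 * Before this commit the check read:
	 *   assert_ptr_not_null(p, "Unexpected mallocx() failure");
	 * Only the test-framework prefix changes; shielded library helpers
	 * such as malloc_mutex_assert_owner() keep their assert_ names.
	 */
	expect_ptr_not_null(p, "Unexpected mallocx() failure");
	dallocx(p, 0);
}
TEST_END
```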


@ -25,6 +25,6 @@ btalloc_##n(size_t size, unsigned bits) { \
} \
} \
/* Intentionally sabotage tail call optimization. */ \
assert_ptr_not_null(p, "Unexpected mallocx() failure"); \
expect_ptr_not_null(p, "Unexpected mallocx() failure"); \
return p; \
}


@ -86,9 +86,9 @@ extent_alloc_hook(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
"*zero=%s, *commit=%s, arena_ind=%u)\n", __func__, extent_hooks,
new_addr, size, alignment, *zero ? "true" : "false", *commit ?
"true" : "false", arena_ind);
assert_ptr_eq(extent_hooks, &hooks,
expect_ptr_eq(extent_hooks, &hooks,
"extent_hooks should be same as pointer used to set hooks");
assert_ptr_eq(extent_hooks->alloc, extent_alloc_hook,
expect_ptr_eq(extent_hooks->alloc, extent_alloc_hook,
"Wrong hook function");
called_alloc = true;
if (!try_alloc) {
@ -108,9 +108,9 @@ extent_dalloc_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, committed=%s, "
"arena_ind=%u)\n", __func__, extent_hooks, addr, size, committed ?
"true" : "false", arena_ind);
assert_ptr_eq(extent_hooks, &hooks,
expect_ptr_eq(extent_hooks, &hooks,
"extent_hooks should be same as pointer used to set hooks");
assert_ptr_eq(extent_hooks->dalloc, extent_dalloc_hook,
expect_ptr_eq(extent_hooks->dalloc, extent_dalloc_hook,
"Wrong hook function");
called_dalloc = true;
if (!try_dalloc) {
@ -127,9 +127,9 @@ extent_destroy_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, committed=%s, "
"arena_ind=%u)\n", __func__, extent_hooks, addr, size, committed ?
"true" : "false", arena_ind);
assert_ptr_eq(extent_hooks, &hooks,
expect_ptr_eq(extent_hooks, &hooks,
"extent_hooks should be same as pointer used to set hooks");
assert_ptr_eq(extent_hooks->destroy, extent_destroy_hook,
expect_ptr_eq(extent_hooks->destroy, extent_destroy_hook,
"Wrong hook function");
called_destroy = true;
if (!try_destroy) {
@ -147,9 +147,9 @@ extent_commit_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, "
"length=%zu, arena_ind=%u)\n", __func__, extent_hooks, addr, size,
offset, length, arena_ind);
assert_ptr_eq(extent_hooks, &hooks,
expect_ptr_eq(extent_hooks, &hooks,
"extent_hooks should be same as pointer used to set hooks");
assert_ptr_eq(extent_hooks->commit, extent_commit_hook,
expect_ptr_eq(extent_hooks->commit, extent_commit_hook,
"Wrong hook function");
called_commit = true;
if (!try_commit) {
@ -169,9 +169,9 @@ extent_decommit_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, "
"length=%zu, arena_ind=%u)\n", __func__, extent_hooks, addr, size,
offset, length, arena_ind);
assert_ptr_eq(extent_hooks, &hooks,
expect_ptr_eq(extent_hooks, &hooks,
"extent_hooks should be same as pointer used to set hooks");
assert_ptr_eq(extent_hooks->decommit, extent_decommit_hook,
expect_ptr_eq(extent_hooks->decommit, extent_decommit_hook,
"Wrong hook function");
called_decommit = true;
if (!try_decommit) {
@ -191,9 +191,9 @@ extent_purge_lazy_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, "
"length=%zu arena_ind=%u)\n", __func__, extent_hooks, addr, size,
offset, length, arena_ind);
assert_ptr_eq(extent_hooks, &hooks,
expect_ptr_eq(extent_hooks, &hooks,
"extent_hooks should be same as pointer used to set hooks");
assert_ptr_eq(extent_hooks->purge_lazy, extent_purge_lazy_hook,
expect_ptr_eq(extent_hooks->purge_lazy, extent_purge_lazy_hook,
"Wrong hook function");
called_purge_lazy = true;
if (!try_purge_lazy) {
@ -214,9 +214,9 @@ extent_purge_forced_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, "
"length=%zu arena_ind=%u)\n", __func__, extent_hooks, addr, size,
offset, length, arena_ind);
assert_ptr_eq(extent_hooks, &hooks,
expect_ptr_eq(extent_hooks, &hooks,
"extent_hooks should be same as pointer used to set hooks");
assert_ptr_eq(extent_hooks->purge_forced, extent_purge_forced_hook,
expect_ptr_eq(extent_hooks->purge_forced, extent_purge_forced_hook,
"Wrong hook function");
called_purge_forced = true;
if (!try_purge_forced) {
@ -238,9 +238,9 @@ extent_split_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
"size_b=%zu, committed=%s, arena_ind=%u)\n", __func__, extent_hooks,
addr, size, size_a, size_b, committed ? "true" : "false",
arena_ind);
assert_ptr_eq(extent_hooks, &hooks,
expect_ptr_eq(extent_hooks, &hooks,
"extent_hooks should be same as pointer used to set hooks");
assert_ptr_eq(extent_hooks->split, extent_split_hook,
expect_ptr_eq(extent_hooks->split, extent_split_hook,
"Wrong hook function");
called_split = true;
if (!try_split) {
@ -262,11 +262,11 @@ extent_merge_hook(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
"size_b=%zu, committed=%s, arena_ind=%u)\n", __func__, extent_hooks,
addr_a, size_a, addr_b, size_b, committed ? "true" : "false",
arena_ind);
assert_ptr_eq(extent_hooks, &hooks,
expect_ptr_eq(extent_hooks, &hooks,
"extent_hooks should be same as pointer used to set hooks");
assert_ptr_eq(extent_hooks->merge, extent_merge_hook,
expect_ptr_eq(extent_hooks->merge, extent_merge_hook,
"Wrong hook function");
assert_ptr_eq((void *)((uintptr_t)addr_a + size_a), addr_b,
expect_ptr_eq((void *)((uintptr_t)addr_a + size_a), addr_b,
"Extents not mergeable");
called_merge = true;
if (!try_merge) {
@ -284,6 +284,6 @@ extent_hooks_prep(void) {
size_t sz;
sz = sizeof(default_hooks);
assert_d_eq(mallctl("arena.0.extent_hooks", (void *)&default_hooks, &sz,
expect_d_eq(mallctl("arena.0.extent_hooks", (void *)&default_hooks, &sz,
NULL, 0), 0, "Unexpected mallctl() error");
}


@ -139,7 +139,7 @@ static const bool config_debug =
#undef assert
#undef not_reached
#undef not_implemented
#undef assert_not_implemented
#undef expect_not_implemented
#define assert(e) do { \
if (!(e)) { \
@ -163,7 +163,7 @@ static const bool config_debug =
abort(); \
} while (0)
#define assert_not_implemented(e) do { \
#define expect_not_implemented(e) do { \
if (!(e)) { \
not_implemented(); \
} \


@ -1,6 +1,6 @@
#define ASSERT_BUFSIZE 256
#define assert_cmp(t, a, b, cmp, neg_cmp, pri, ...) do { \
#define expect_cmp(t, a, b, cmp, neg_cmp, pri, ...) do { \
const t a_ = (a); \
const t b_ = (b); \
if (!(a_ cmp b_)) { \
@ -17,200 +17,200 @@
} \
} while (0)
#define assert_ptr_eq(a, b, ...) assert_cmp(void *, a, b, ==, \
#define expect_ptr_eq(a, b, ...) expect_cmp(void *, a, b, ==, \
!=, "p", __VA_ARGS__)
#define assert_ptr_ne(a, b, ...) assert_cmp(void *, a, b, !=, \
#define expect_ptr_ne(a, b, ...) expect_cmp(void *, a, b, !=, \
==, "p", __VA_ARGS__)
#define assert_ptr_null(a, ...) assert_cmp(void *, a, NULL, ==, \
#define expect_ptr_null(a, ...) expect_cmp(void *, a, NULL, ==, \
!=, "p", __VA_ARGS__)
#define assert_ptr_not_null(a, ...) assert_cmp(void *, a, NULL, !=, \
#define expect_ptr_not_null(a, ...) expect_cmp(void *, a, NULL, !=, \
==, "p", __VA_ARGS__)
#define assert_c_eq(a, b, ...) assert_cmp(char, a, b, ==, !=, "c", __VA_ARGS__)
#define assert_c_ne(a, b, ...) assert_cmp(char, a, b, !=, ==, "c", __VA_ARGS__)
#define assert_c_lt(a, b, ...) assert_cmp(char, a, b, <, >=, "c", __VA_ARGS__)
#define assert_c_le(a, b, ...) assert_cmp(char, a, b, <=, >, "c", __VA_ARGS__)
#define assert_c_ge(a, b, ...) assert_cmp(char, a, b, >=, <, "c", __VA_ARGS__)
#define assert_c_gt(a, b, ...) assert_cmp(char, a, b, >, <=, "c", __VA_ARGS__)
#define expect_c_eq(a, b, ...) expect_cmp(char, a, b, ==, !=, "c", __VA_ARGS__)
#define expect_c_ne(a, b, ...) expect_cmp(char, a, b, !=, ==, "c", __VA_ARGS__)
#define expect_c_lt(a, b, ...) expect_cmp(char, a, b, <, >=, "c", __VA_ARGS__)
#define expect_c_le(a, b, ...) expect_cmp(char, a, b, <=, >, "c", __VA_ARGS__)
#define expect_c_ge(a, b, ...) expect_cmp(char, a, b, >=, <, "c", __VA_ARGS__)
#define expect_c_gt(a, b, ...) expect_cmp(char, a, b, >, <=, "c", __VA_ARGS__)
#define assert_x_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "#x", __VA_ARGS__)
#define assert_x_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "#x", __VA_ARGS__)
#define assert_x_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "#x", __VA_ARGS__)
#define assert_x_le(a, b, ...) assert_cmp(int, a, b, <=, >, "#x", __VA_ARGS__)
#define assert_x_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "#x", __VA_ARGS__)
#define assert_x_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "#x", __VA_ARGS__)
#define expect_x_eq(a, b, ...) expect_cmp(int, a, b, ==, !=, "#x", __VA_ARGS__)
#define expect_x_ne(a, b, ...) expect_cmp(int, a, b, !=, ==, "#x", __VA_ARGS__)
#define expect_x_lt(a, b, ...) expect_cmp(int, a, b, <, >=, "#x", __VA_ARGS__)
#define expect_x_le(a, b, ...) expect_cmp(int, a, b, <=, >, "#x", __VA_ARGS__)
#define expect_x_ge(a, b, ...) expect_cmp(int, a, b, >=, <, "#x", __VA_ARGS__)
#define expect_x_gt(a, b, ...) expect_cmp(int, a, b, >, <=, "#x", __VA_ARGS__)
#define assert_d_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "d", __VA_ARGS__)
#define assert_d_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "d", __VA_ARGS__)
#define assert_d_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "d", __VA_ARGS__)
#define assert_d_le(a, b, ...) assert_cmp(int, a, b, <=, >, "d", __VA_ARGS__)
#define assert_d_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "d", __VA_ARGS__)
#define assert_d_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "d", __VA_ARGS__)
#define expect_d_eq(a, b, ...) expect_cmp(int, a, b, ==, !=, "d", __VA_ARGS__)
#define expect_d_ne(a, b, ...) expect_cmp(int, a, b, !=, ==, "d", __VA_ARGS__)
#define expect_d_lt(a, b, ...) expect_cmp(int, a, b, <, >=, "d", __VA_ARGS__)
#define expect_d_le(a, b, ...) expect_cmp(int, a, b, <=, >, "d", __VA_ARGS__)
#define expect_d_ge(a, b, ...) expect_cmp(int, a, b, >=, <, "d", __VA_ARGS__)
#define expect_d_gt(a, b, ...) expect_cmp(int, a, b, >, <=, "d", __VA_ARGS__)
#define assert_u_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "u", __VA_ARGS__)
#define assert_u_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "u", __VA_ARGS__)
#define assert_u_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "u", __VA_ARGS__)
#define assert_u_le(a, b, ...) assert_cmp(int, a, b, <=, >, "u", __VA_ARGS__)
#define assert_u_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "u", __VA_ARGS__)
#define assert_u_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "u", __VA_ARGS__)
#define expect_u_eq(a, b, ...) expect_cmp(int, a, b, ==, !=, "u", __VA_ARGS__)
#define expect_u_ne(a, b, ...) expect_cmp(int, a, b, !=, ==, "u", __VA_ARGS__)
#define expect_u_lt(a, b, ...) expect_cmp(int, a, b, <, >=, "u", __VA_ARGS__)
#define expect_u_le(a, b, ...) expect_cmp(int, a, b, <=, >, "u", __VA_ARGS__)
#define expect_u_ge(a, b, ...) expect_cmp(int, a, b, >=, <, "u", __VA_ARGS__)
#define expect_u_gt(a, b, ...) expect_cmp(int, a, b, >, <=, "u", __VA_ARGS__)
#define assert_ld_eq(a, b, ...) assert_cmp(long, a, b, ==, \
#define expect_ld_eq(a, b, ...) expect_cmp(long, a, b, ==, \
!=, "ld", __VA_ARGS__)
#define assert_ld_ne(a, b, ...) assert_cmp(long, a, b, !=, \
#define expect_ld_ne(a, b, ...) expect_cmp(long, a, b, !=, \
==, "ld", __VA_ARGS__)
#define assert_ld_lt(a, b, ...) assert_cmp(long, a, b, <, \
#define expect_ld_lt(a, b, ...) expect_cmp(long, a, b, <, \
>=, "ld", __VA_ARGS__)
#define assert_ld_le(a, b, ...) assert_cmp(long, a, b, <=, \
#define expect_ld_le(a, b, ...) expect_cmp(long, a, b, <=, \
>, "ld", __VA_ARGS__)
#define assert_ld_ge(a, b, ...) assert_cmp(long, a, b, >=, \
#define expect_ld_ge(a, b, ...) expect_cmp(long, a, b, >=, \
<, "ld", __VA_ARGS__)
#define assert_ld_gt(a, b, ...) assert_cmp(long, a, b, >, \
#define expect_ld_gt(a, b, ...) expect_cmp(long, a, b, >, \
<=, "ld", __VA_ARGS__)
#define assert_lu_eq(a, b, ...) assert_cmp(unsigned long, \
#define expect_lu_eq(a, b, ...) expect_cmp(unsigned long, \
a, b, ==, !=, "lu", __VA_ARGS__)
#define assert_lu_ne(a, b, ...) assert_cmp(unsigned long, \
#define expect_lu_ne(a, b, ...) expect_cmp(unsigned long, \
a, b, !=, ==, "lu", __VA_ARGS__)
#define assert_lu_lt(a, b, ...) assert_cmp(unsigned long, \
#define expect_lu_lt(a, b, ...) expect_cmp(unsigned long, \
a, b, <, >=, "lu", __VA_ARGS__)
#define assert_lu_le(a, b, ...) assert_cmp(unsigned long, \
#define expect_lu_le(a, b, ...) expect_cmp(unsigned long, \
a, b, <=, >, "lu", __VA_ARGS__)
#define assert_lu_ge(a, b, ...) assert_cmp(unsigned long, \
#define expect_lu_ge(a, b, ...) expect_cmp(unsigned long, \
a, b, >=, <, "lu", __VA_ARGS__)
#define assert_lu_gt(a, b, ...) assert_cmp(unsigned long, \
#define expect_lu_gt(a, b, ...) expect_cmp(unsigned long, \
a, b, >, <=, "lu", __VA_ARGS__)
#define assert_qd_eq(a, b, ...) assert_cmp(long long, a, b, ==, \
#define expect_qd_eq(a, b, ...) expect_cmp(long long, a, b, ==, \
!=, "qd", __VA_ARGS__)
#define assert_qd_ne(a, b, ...) assert_cmp(long long, a, b, !=, \
#define expect_qd_ne(a, b, ...) expect_cmp(long long, a, b, !=, \
==, "qd", __VA_ARGS__)
#define assert_qd_lt(a, b, ...) assert_cmp(long long, a, b, <, \
#define expect_qd_lt(a, b, ...) expect_cmp(long long, a, b, <, \
>=, "qd", __VA_ARGS__)
#define assert_qd_le(a, b, ...) assert_cmp(long long, a, b, <=, \
#define expect_qd_le(a, b, ...) expect_cmp(long long, a, b, <=, \
>, "qd", __VA_ARGS__)
#define assert_qd_ge(a, b, ...) assert_cmp(long long, a, b, >=, \
#define expect_qd_ge(a, b, ...) expect_cmp(long long, a, b, >=, \
<, "qd", __VA_ARGS__)
#define assert_qd_gt(a, b, ...) assert_cmp(long long, a, b, >, \
#define expect_qd_gt(a, b, ...) expect_cmp(long long, a, b, >, \
<=, "qd", __VA_ARGS__)
#define assert_qu_eq(a, b, ...) assert_cmp(unsigned long long, \
#define expect_qu_eq(a, b, ...) expect_cmp(unsigned long long, \
a, b, ==, !=, "qu", __VA_ARGS__)
#define assert_qu_ne(a, b, ...) assert_cmp(unsigned long long, \
#define expect_qu_ne(a, b, ...) expect_cmp(unsigned long long, \
a, b, !=, ==, "qu", __VA_ARGS__)
#define assert_qu_lt(a, b, ...) assert_cmp(unsigned long long, \
#define expect_qu_lt(a, b, ...) expect_cmp(unsigned long long, \
a, b, <, >=, "qu", __VA_ARGS__)
#define assert_qu_le(a, b, ...) assert_cmp(unsigned long long, \
#define expect_qu_le(a, b, ...) expect_cmp(unsigned long long, \
a, b, <=, >, "qu", __VA_ARGS__)
#define assert_qu_ge(a, b, ...) assert_cmp(unsigned long long, \
#define expect_qu_ge(a, b, ...) expect_cmp(unsigned long long, \
a, b, >=, <, "qu", __VA_ARGS__)
#define assert_qu_gt(a, b, ...) assert_cmp(unsigned long long, \
#define expect_qu_gt(a, b, ...) expect_cmp(unsigned long long, \
a, b, >, <=, "qu", __VA_ARGS__)
#define assert_jd_eq(a, b, ...) assert_cmp(intmax_t, a, b, ==, \
#define expect_jd_eq(a, b, ...) expect_cmp(intmax_t, a, b, ==, \
!=, "jd", __VA_ARGS__)
#define assert_jd_ne(a, b, ...) assert_cmp(intmax_t, a, b, !=, \
#define expect_jd_ne(a, b, ...) expect_cmp(intmax_t, a, b, !=, \
==, "jd", __VA_ARGS__)
#define assert_jd_lt(a, b, ...) assert_cmp(intmax_t, a, b, <, \
#define expect_jd_lt(a, b, ...) expect_cmp(intmax_t, a, b, <, \
>=, "jd", __VA_ARGS__)
#define assert_jd_le(a, b, ...) assert_cmp(intmax_t, a, b, <=, \
#define expect_jd_le(a, b, ...) expect_cmp(intmax_t, a, b, <=, \
>, "jd", __VA_ARGS__)
#define assert_jd_ge(a, b, ...) assert_cmp(intmax_t, a, b, >=, \
#define expect_jd_ge(a, b, ...) expect_cmp(intmax_t, a, b, >=, \
<, "jd", __VA_ARGS__)
#define assert_jd_gt(a, b, ...) assert_cmp(intmax_t, a, b, >, \
#define expect_jd_gt(a, b, ...) expect_cmp(intmax_t, a, b, >, \
<=, "jd", __VA_ARGS__)
#define assert_ju_eq(a, b, ...) assert_cmp(uintmax_t, a, b, ==, \
#define expect_ju_eq(a, b, ...) expect_cmp(uintmax_t, a, b, ==, \
!=, "ju", __VA_ARGS__)
#define assert_ju_ne(a, b, ...) assert_cmp(uintmax_t, a, b, !=, \
#define expect_ju_ne(a, b, ...) expect_cmp(uintmax_t, a, b, !=, \
==, "ju", __VA_ARGS__)
#define assert_ju_lt(a, b, ...) assert_cmp(uintmax_t, a, b, <, \
#define expect_ju_lt(a, b, ...) expect_cmp(uintmax_t, a, b, <, \
>=, "ju", __VA_ARGS__)
#define assert_ju_le(a, b, ...) assert_cmp(uintmax_t, a, b, <=, \
#define expect_ju_le(a, b, ...) expect_cmp(uintmax_t, a, b, <=, \
>, "ju", __VA_ARGS__)
#define assert_ju_ge(a, b, ...) assert_cmp(uintmax_t, a, b, >=, \
#define expect_ju_ge(a, b, ...) expect_cmp(uintmax_t, a, b, >=, \
<, "ju", __VA_ARGS__)
#define assert_ju_gt(a, b, ...) assert_cmp(uintmax_t, a, b, >, \
#define expect_ju_gt(a, b, ...) expect_cmp(uintmax_t, a, b, >, \
<=, "ju", __VA_ARGS__)
#define assert_zd_eq(a, b, ...) assert_cmp(ssize_t, a, b, ==, \
#define expect_zd_eq(a, b, ...) expect_cmp(ssize_t, a, b, ==, \
!=, "zd", __VA_ARGS__)
#define assert_zd_ne(a, b, ...) assert_cmp(ssize_t, a, b, !=, \
#define expect_zd_ne(a, b, ...) expect_cmp(ssize_t, a, b, !=, \
==, "zd", __VA_ARGS__)
#define assert_zd_lt(a, b, ...) assert_cmp(ssize_t, a, b, <, \
#define expect_zd_lt(a, b, ...) expect_cmp(ssize_t, a, b, <, \
>=, "zd", __VA_ARGS__)
#define assert_zd_le(a, b, ...) assert_cmp(ssize_t, a, b, <=, \
#define expect_zd_le(a, b, ...) expect_cmp(ssize_t, a, b, <=, \
>, "zd", __VA_ARGS__)
#define assert_zd_ge(a, b, ...) assert_cmp(ssize_t, a, b, >=, \
#define expect_zd_ge(a, b, ...) expect_cmp(ssize_t, a, b, >=, \
<, "zd", __VA_ARGS__)
#define assert_zd_gt(a, b, ...) assert_cmp(ssize_t, a, b, >, \
#define expect_zd_gt(a, b, ...) expect_cmp(ssize_t, a, b, >, \
<=, "zd", __VA_ARGS__)
#define assert_zu_eq(a, b, ...) assert_cmp(size_t, a, b, ==, \
#define expect_zu_eq(a, b, ...) expect_cmp(size_t, a, b, ==, \
!=, "zu", __VA_ARGS__)
#define assert_zu_ne(a, b, ...) assert_cmp(size_t, a, b, !=, \
#define expect_zu_ne(a, b, ...) expect_cmp(size_t, a, b, !=, \
==, "zu", __VA_ARGS__)
#define assert_zu_lt(a, b, ...) assert_cmp(size_t, a, b, <, \
#define expect_zu_lt(a, b, ...) expect_cmp(size_t, a, b, <, \
>=, "zu", __VA_ARGS__)
#define assert_zu_le(a, b, ...) assert_cmp(size_t, a, b, <=, \
#define expect_zu_le(a, b, ...) expect_cmp(size_t, a, b, <=, \
>, "zu", __VA_ARGS__)
#define assert_zu_ge(a, b, ...) assert_cmp(size_t, a, b, >=, \
#define expect_zu_ge(a, b, ...) expect_cmp(size_t, a, b, >=, \
<, "zu", __VA_ARGS__)
#define assert_zu_gt(a, b, ...) assert_cmp(size_t, a, b, >, \
#define expect_zu_gt(a, b, ...) expect_cmp(size_t, a, b, >, \
<=, "zu", __VA_ARGS__)
#define assert_d32_eq(a, b, ...) assert_cmp(int32_t, a, b, ==, \
#define expect_d32_eq(a, b, ...) expect_cmp(int32_t, a, b, ==, \
!=, FMTd32, __VA_ARGS__)
#define assert_d32_ne(a, b, ...) assert_cmp(int32_t, a, b, !=, \
#define expect_d32_ne(a, b, ...) expect_cmp(int32_t, a, b, !=, \
==, FMTd32, __VA_ARGS__)
#define assert_d32_lt(a, b, ...) assert_cmp(int32_t, a, b, <, \
#define expect_d32_lt(a, b, ...) expect_cmp(int32_t, a, b, <, \
>=, FMTd32, __VA_ARGS__)
#define assert_d32_le(a, b, ...) assert_cmp(int32_t, a, b, <=, \
#define expect_d32_le(a, b, ...) expect_cmp(int32_t, a, b, <=, \
>, FMTd32, __VA_ARGS__)
#define assert_d32_ge(a, b, ...) assert_cmp(int32_t, a, b, >=, \
#define expect_d32_ge(a, b, ...) expect_cmp(int32_t, a, b, >=, \
<, FMTd32, __VA_ARGS__)
#define assert_d32_gt(a, b, ...) assert_cmp(int32_t, a, b, >, \
#define expect_d32_gt(a, b, ...) expect_cmp(int32_t, a, b, >, \
<=, FMTd32, __VA_ARGS__)
#define assert_u32_eq(a, b, ...) assert_cmp(uint32_t, a, b, ==, \
#define expect_u32_eq(a, b, ...) expect_cmp(uint32_t, a, b, ==, \
!=, FMTu32, __VA_ARGS__)
#define assert_u32_ne(a, b, ...) assert_cmp(uint32_t, a, b, !=, \
#define expect_u32_ne(a, b, ...) expect_cmp(uint32_t, a, b, !=, \
==, FMTu32, __VA_ARGS__)
#define assert_u32_lt(a, b, ...) assert_cmp(uint32_t, a, b, <, \
#define expect_u32_lt(a, b, ...) expect_cmp(uint32_t, a, b, <, \
>=, FMTu32, __VA_ARGS__)
#define assert_u32_le(a, b, ...) assert_cmp(uint32_t, a, b, <=, \
#define expect_u32_le(a, b, ...) expect_cmp(uint32_t, a, b, <=, \
>, FMTu32, __VA_ARGS__)
#define assert_u32_ge(a, b, ...) assert_cmp(uint32_t, a, b, >=, \
#define expect_u32_ge(a, b, ...) expect_cmp(uint32_t, a, b, >=, \
<, FMTu32, __VA_ARGS__)
#define assert_u32_gt(a, b, ...) assert_cmp(uint32_t, a, b, >, \
#define expect_u32_gt(a, b, ...) expect_cmp(uint32_t, a, b, >, \
<=, FMTu32, __VA_ARGS__)
#define assert_d64_eq(a, b, ...) assert_cmp(int64_t, a, b, ==, \
#define expect_d64_eq(a, b, ...) expect_cmp(int64_t, a, b, ==, \
!=, FMTd64, __VA_ARGS__)
#define assert_d64_ne(a, b, ...) assert_cmp(int64_t, a, b, !=, \
#define expect_d64_ne(a, b, ...) expect_cmp(int64_t, a, b, !=, \
==, FMTd64, __VA_ARGS__)
#define assert_d64_lt(a, b, ...) assert_cmp(int64_t, a, b, <, \
#define expect_d64_lt(a, b, ...) expect_cmp(int64_t, a, b, <, \
>=, FMTd64, __VA_ARGS__)
#define assert_d64_le(a, b, ...) assert_cmp(int64_t, a, b, <=, \
#define expect_d64_le(a, b, ...) expect_cmp(int64_t, a, b, <=, \
>, FMTd64, __VA_ARGS__)
#define assert_d64_ge(a, b, ...) assert_cmp(int64_t, a, b, >=, \
#define expect_d64_ge(a, b, ...) expect_cmp(int64_t, a, b, >=, \
<, FMTd64, __VA_ARGS__)
#define assert_d64_gt(a, b, ...) assert_cmp(int64_t, a, b, >, \
#define expect_d64_gt(a, b, ...) expect_cmp(int64_t, a, b, >, \
<=, FMTd64, __VA_ARGS__)
#define assert_u64_eq(a, b, ...) assert_cmp(uint64_t, a, b, ==, \
#define expect_u64_eq(a, b, ...) expect_cmp(uint64_t, a, b, ==, \
!=, FMTu64, __VA_ARGS__)
#define assert_u64_ne(a, b, ...) assert_cmp(uint64_t, a, b, !=, \
#define expect_u64_ne(a, b, ...) expect_cmp(uint64_t, a, b, !=, \
==, FMTu64, __VA_ARGS__)
#define assert_u64_lt(a, b, ...) assert_cmp(uint64_t, a, b, <, \
#define expect_u64_lt(a, b, ...) expect_cmp(uint64_t, a, b, <, \
>=, FMTu64, __VA_ARGS__)
#define assert_u64_le(a, b, ...) assert_cmp(uint64_t, a, b, <=, \
#define expect_u64_le(a, b, ...) expect_cmp(uint64_t, a, b, <=, \
>, FMTu64, __VA_ARGS__)
#define assert_u64_ge(a, b, ...) assert_cmp(uint64_t, a, b, >=, \
#define expect_u64_ge(a, b, ...) expect_cmp(uint64_t, a, b, >=, \
<, FMTu64, __VA_ARGS__)
#define assert_u64_gt(a, b, ...) assert_cmp(uint64_t, a, b, >, \
#define expect_u64_gt(a, b, ...) expect_cmp(uint64_t, a, b, >, \
<=, FMTu64, __VA_ARGS__)
#define assert_b_eq(a, b, ...) do { \
#define expect_b_eq(a, b, ...) do { \
bool a_ = (a); \
bool b_ = (b); \
if (!(a_ == b_)) { \
@ -226,7 +226,7 @@
p_test_fail(prefix, message); \
} \
} while (0)
#define assert_b_ne(a, b, ...) do { \
#define expect_b_ne(a, b, ...) do { \
bool a_ = (a); \
bool b_ = (b); \
if (!(a_ != b_)) { \
@ -242,10 +242,10 @@
p_test_fail(prefix, message); \
} \
} while (0)
#define assert_true(a, ...) assert_b_eq(a, true, __VA_ARGS__)
#define assert_false(a, ...) assert_b_eq(a, false, __VA_ARGS__)
#define expect_true(a, ...) expect_b_eq(a, true, __VA_ARGS__)
#define expect_false(a, ...) expect_b_eq(a, false, __VA_ARGS__)
#define assert_str_eq(a, b, ...) do { \
#define expect_str_eq(a, b, ...) do { \
if (strcmp((a), (b))) { \
char prefix[ASSERT_BUFSIZE]; \
char message[ASSERT_BUFSIZE]; \
@ -258,7 +258,7 @@
p_test_fail(prefix, message); \
} \
} while (0)
#define assert_str_ne(a, b, ...) do { \
#define expect_str_ne(a, b, ...) do { \
if (!strcmp((a), (b))) { \
char prefix[ASSERT_BUFSIZE]; \
char message[ASSERT_BUFSIZE]; \
@ -272,7 +272,7 @@
} \
} while (0)
#define assert_not_reached(...) do { \
#define expect_not_reached(...) do { \
char prefix[ASSERT_BUFSIZE]; \
char message[ASSERT_BUFSIZE]; \
malloc_snprintf(prefix, sizeof(prefix), \
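
The renamed comparison, boolean, and string macros are drop-in replacements for the old assert_ forms, so test bodies use them exactly as before; on failure each one still formats a prefix/message pair and reports it through p_test_fail(). A minimal usage sketch (hypothetical test; it relies only on macros renamed above and on the mallocx/sallocx/mallctl calls exercised elsewhere in this diff):

```c
/* Hypothetical usage sketch, not part of this commit. */
#include "test/jemalloc_test.h"

TEST_BEGIN(test_expect_macro_usage) {
	size_t sz = 42;
	void *p = mallocx(sz, 0);
	expect_ptr_not_null(p, "Unexpected mallocx() failure");
	expect_zu_ge(sallocx(p, 0), sz, "Real size smaller than requested");
	expect_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
	    "Unexpected mallctl error");
	dallocx(p, 0);
}
TEST_END
```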


@ -18,7 +18,7 @@ thd_start(void *arg) {
size_t sz;
sz = sizeof(arena_ind);
assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
0, "Error in arenas.create");
if (thread_ind % 4 != 3) {
@ -29,16 +29,16 @@ thd_start(void *arg) {
(sizeof(dss_precs)/sizeof(char*));
const char *dss = dss_precs[prec_ind];
int expected_err = (have_dss || prec_ind == 0) ? 0 : EFAULT;
assert_d_eq(mallctlnametomib("arena.0.dss", mib, &miblen), 0,
expect_d_eq(mallctlnametomib("arena.0.dss", mib, &miblen), 0,
"Error in mallctlnametomib()");
mib[1] = arena_ind;
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&dss,
expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&dss,
sizeof(const char *)), expected_err,
"Error in mallctlbymib()");
}
p = mallocx(1, MALLOCX_ARENA(arena_ind));
assert_ptr_not_null(p, "Unexpected mallocx() error");
expect_ptr_not_null(p, "Unexpected mallocx() error");
dallocx(p, 0);
return NULL;


@ -9,7 +9,7 @@
*/
static void
purge(void) {
assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
expect_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl error");
}
@ -20,14 +20,14 @@ TEST_BEGIN(test_alignment_errors) {
alignment = 0;
set_errno(0);
p = aligned_alloc(alignment, 1);
assert_false(p != NULL || get_errno() != EINVAL,
expect_false(p != NULL || get_errno() != EINVAL,
"Expected error for invalid alignment %zu", alignment);
for (alignment = sizeof(size_t); alignment < MAXALIGN;
alignment <<= 1) {
set_errno(0);
p = aligned_alloc(alignment + 1, 1);
assert_false(p != NULL || get_errno() != EINVAL,
expect_false(p != NULL || get_errno() != EINVAL,
"Expected error for invalid alignment %zu",
alignment + 1);
}
@ -58,7 +58,7 @@ TEST_BEGIN(test_oom_errors) {
#endif
set_errno(0);
p = aligned_alloc(alignment, size);
assert_false(p != NULL || get_errno() != ENOMEM,
expect_false(p != NULL || get_errno() != ENOMEM,
"Expected error for aligned_alloc(%zu, %zu)",
alignment, size);
@ -71,7 +71,7 @@ TEST_BEGIN(test_oom_errors) {
#endif
set_errno(0);
p = aligned_alloc(alignment, size);
assert_false(p != NULL || get_errno() != ENOMEM,
expect_false(p != NULL || get_errno() != ENOMEM,
"Expected error for aligned_alloc(%zu, %zu)",
alignment, size);
@ -83,7 +83,7 @@ TEST_BEGIN(test_oom_errors) {
#endif
set_errno(0);
p = aligned_alloc(alignment, size);
assert_false(p != NULL || get_errno() != ENOMEM,
expect_false(p != NULL || get_errno() != ENOMEM,
"Expected error for aligned_alloc(&p, %zu, %zu)",
alignment, size);
}


@ -32,7 +32,7 @@ thd_start(void *arg) {
test_fail("%s(): Error in mallctl(): %s", __func__,
strerror(err));
}
assert_u64_eq(*ap0, a0,
expect_u64_eq(*ap0, a0,
"\"thread.allocatedp\" should provide a pointer to internal "
"storage");
@ -53,25 +53,25 @@ thd_start(void *arg) {
test_fail("%s(): Error in mallctl(): %s", __func__,
strerror(err));
}
assert_u64_eq(*dp0, d0,
expect_u64_eq(*dp0, d0,
"\"thread.deallocatedp\" should provide a pointer to internal "
"storage");
p = malloc(1);
assert_ptr_not_null(p, "Unexpected malloc() error");
expect_ptr_not_null(p, "Unexpected malloc() error");
sz = sizeof(a1);
mallctl("thread.allocated", (void *)&a1, &sz, NULL, 0);
sz = sizeof(ap1);
mallctl("thread.allocatedp", (void *)&ap1, &sz, NULL, 0);
assert_u64_eq(*ap1, a1,
expect_u64_eq(*ap1, a1,
"Dereferenced \"thread.allocatedp\" value should equal "
"\"thread.allocated\" value");
assert_ptr_eq(ap0, ap1,
expect_ptr_eq(ap0, ap1,
"Pointer returned by \"thread.allocatedp\" should not change");
usize = malloc_usable_size(p);
assert_u64_le(a0 + usize, a1,
expect_u64_le(a0 + usize, a1,
"Allocated memory counter should increase by at least the amount "
"explicitly allocated");
@ -81,19 +81,19 @@ thd_start(void *arg) {
mallctl("thread.deallocated", (void *)&d1, &sz, NULL, 0);
sz = sizeof(dp1);
mallctl("thread.deallocatedp", (void *)&dp1, &sz, NULL, 0);
assert_u64_eq(*dp1, d1,
expect_u64_eq(*dp1, d1,
"Dereferenced \"thread.deallocatedp\" value should equal "
"\"thread.deallocated\" value");
assert_ptr_eq(dp0, dp1,
expect_ptr_eq(dp0, dp1,
"Pointer returned by \"thread.deallocatedp\" should not change");
assert_u64_le(d0 + usize, d1,
expect_u64_le(d0 + usize, d1,
"Deallocated memory counter should increase by at least the amount "
"explicitly deallocated");
return NULL;
label_ENOENT:
assert_false(config_stats,
expect_false(config_stats,
"ENOENT should only be returned if stats are disabled");
test_skip("\"thread.allocated\" mallctl not available");
return NULL;


@ -3,14 +3,14 @@
TEST_BEGIN(test_basic) {
auto foo = new long(4);
assert_ptr_not_null(foo, "Unexpected new[] failure");
expect_ptr_not_null(foo, "Unexpected new[] failure");
delete foo;
// Test nullptr handling.
foo = nullptr;
delete foo;
auto bar = new long;
assert_ptr_not_null(bar, "Unexpected new failure");
expect_ptr_not_null(bar, "Unexpected new failure");
delete bar;
// Test nullptr handling.
bar = nullptr;


@ -10,7 +10,7 @@ check_background_thread_enabled(void) {
if (ret == ENOENT) {
return false;
}
assert_d_eq(ret, 0, "Unexpected mallctl error");
expect_d_eq(ret, 0, "Unexpected mallctl error");
return enabled;
}
@ -27,16 +27,16 @@ test_extent_body(unsigned arena_ind) {
/* Get large size classes. */
sz = sizeof(size_t);
assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL,
expect_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL,
0), 0, "Unexpected arenas.lextent.0.size failure");
assert_d_eq(mallctl("arenas.lextent.1.size", (void *)&large1, &sz, NULL,
expect_d_eq(mallctl("arenas.lextent.1.size", (void *)&large1, &sz, NULL,
0), 0, "Unexpected arenas.lextent.1.size failure");
assert_d_eq(mallctl("arenas.lextent.2.size", (void *)&large2, &sz, NULL,
expect_d_eq(mallctl("arenas.lextent.2.size", (void *)&large2, &sz, NULL,
0), 0, "Unexpected arenas.lextent.2.size failure");
/* Test dalloc/decommit/purge cascade. */
purge_miblen = sizeof(purge_mib)/sizeof(size_t);
assert_d_eq(mallctlnametomib("arena.0.purge", purge_mib, &purge_miblen),
expect_d_eq(mallctlnametomib("arena.0.purge", purge_mib, &purge_miblen),
0, "Unexpected mallctlnametomib() failure");
purge_mib[1] = (size_t)arena_ind;
called_alloc = false;
@ -44,22 +44,22 @@ test_extent_body(unsigned arena_ind) {
try_dalloc = false;
try_decommit = false;
p = mallocx(large0 * 2, flags);
assert_ptr_not_null(p, "Unexpected mallocx() error");
assert_true(called_alloc, "Expected alloc call");
expect_ptr_not_null(p, "Unexpected mallocx() error");
expect_true(called_alloc, "Expected alloc call");
called_dalloc = false;
called_decommit = false;
did_purge_lazy = false;
did_purge_forced = false;
called_split = false;
xallocx_success_a = (xallocx(p, large0, 0, flags) == large0);
assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0),
expect_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0),
0, "Unexpected arena.%u.purge error", arena_ind);
if (xallocx_success_a) {
assert_true(called_dalloc, "Expected dalloc call");
assert_true(called_decommit, "Expected decommit call");
assert_true(did_purge_lazy || did_purge_forced,
expect_true(called_dalloc, "Expected dalloc call");
expect_true(called_decommit, "Expected decommit call");
expect_true(did_purge_lazy || did_purge_forced,
"Expected purge");
assert_true(called_split, "Expected split call");
expect_true(called_split, "Expected split call");
}
dallocx(p, flags);
try_dalloc = true;
@ -68,25 +68,25 @@ test_extent_body(unsigned arena_ind) {
try_dalloc = false;
try_decommit = true;
p = mallocx(large0 * 2, flags);
assert_ptr_not_null(p, "Unexpected mallocx() error");
expect_ptr_not_null(p, "Unexpected mallocx() error");
did_decommit = false;
did_commit = false;
called_split = false;
did_split = false;
did_merge = false;
xallocx_success_b = (xallocx(p, large0, 0, flags) == large0);
assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0),
expect_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0),
0, "Unexpected arena.%u.purge error", arena_ind);
if (xallocx_success_b) {
assert_true(did_split, "Expected split");
expect_true(did_split, "Expected split");
}
xallocx_success_c = (xallocx(p, large0 * 2, 0, flags) == large0 * 2);
if (did_split) {
assert_b_eq(did_decommit, did_commit,
expect_b_eq(did_decommit, did_commit,
"Expected decommit/commit match");
}
if (xallocx_success_b && xallocx_success_c) {
assert_true(did_merge, "Expected merge");
expect_true(did_merge, "Expected merge");
}
dallocx(p, flags);
try_dalloc = true;
@ -94,7 +94,7 @@ test_extent_body(unsigned arena_ind) {
/* Make sure non-large allocation succeeds. */
p = mallocx(42, flags);
assert_ptr_not_null(p, "Unexpected mallocx() error");
expect_ptr_not_null(p, "Unexpected mallocx() error");
dallocx(p, flags);
}
@ -110,7 +110,7 @@ test_manual_hook_auto_arena(void) {
sz = sizeof(unsigned);
/* Get number of auto arenas. */
assert_d_eq(mallctl("opt.narenas", (void *)&narenas, &sz, NULL, 0),
expect_d_eq(mallctl("opt.narenas", (void *)&narenas, &sz, NULL, 0),
0, "Unexpected mallctl() failure");
if (narenas == 1) {
return;
@ -118,18 +118,18 @@ test_manual_hook_auto_arena(void) {
/* Install custom extent hooks on arena 1 (might not be initialized). */
hooks_miblen = sizeof(hooks_mib)/sizeof(size_t);
assert_d_eq(mallctlnametomib("arena.0.extent_hooks", hooks_mib,
expect_d_eq(mallctlnametomib("arena.0.extent_hooks", hooks_mib,
&hooks_miblen), 0, "Unexpected mallctlnametomib() failure");
hooks_mib[1] = 1;
old_size = sizeof(extent_hooks_t *);
new_hooks = &hooks;
new_size = sizeof(extent_hooks_t *);
assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks,
expect_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks,
&old_size, (void *)&new_hooks, new_size), 0,
"Unexpected extent_hooks error");
static bool auto_arena_created = false;
if (old_hooks != &hooks) {
assert_b_eq(auto_arena_created, false,
expect_b_eq(auto_arena_created, false,
"Expected auto arena 1 created only once.");
auto_arena_created = true;
}
@ -146,35 +146,35 @@ test_manual_hook_body(void) {
extent_hooks_prep();
sz = sizeof(unsigned);
assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
0, "Unexpected mallctl() failure");
/* Install custom extent hooks. */
hooks_miblen = sizeof(hooks_mib)/sizeof(size_t);
assert_d_eq(mallctlnametomib("arena.0.extent_hooks", hooks_mib,
expect_d_eq(mallctlnametomib("arena.0.extent_hooks", hooks_mib,
&hooks_miblen), 0, "Unexpected mallctlnametomib() failure");
hooks_mib[1] = (size_t)arena_ind;
old_size = sizeof(extent_hooks_t *);
new_hooks = &hooks;
new_size = sizeof(extent_hooks_t *);
assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks,
expect_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks,
&old_size, (void *)&new_hooks, new_size), 0,
"Unexpected extent_hooks error");
assert_ptr_ne(old_hooks->alloc, extent_alloc_hook,
expect_ptr_ne(old_hooks->alloc, extent_alloc_hook,
"Unexpected extent_hooks error");
assert_ptr_ne(old_hooks->dalloc, extent_dalloc_hook,
expect_ptr_ne(old_hooks->dalloc, extent_dalloc_hook,
"Unexpected extent_hooks error");
assert_ptr_ne(old_hooks->commit, extent_commit_hook,
expect_ptr_ne(old_hooks->commit, extent_commit_hook,
"Unexpected extent_hooks error");
assert_ptr_ne(old_hooks->decommit, extent_decommit_hook,
expect_ptr_ne(old_hooks->decommit, extent_decommit_hook,
"Unexpected extent_hooks error");
assert_ptr_ne(old_hooks->purge_lazy, extent_purge_lazy_hook,
expect_ptr_ne(old_hooks->purge_lazy, extent_purge_lazy_hook,
"Unexpected extent_hooks error");
assert_ptr_ne(old_hooks->purge_forced, extent_purge_forced_hook,
expect_ptr_ne(old_hooks->purge_forced, extent_purge_forced_hook,
"Unexpected extent_hooks error");
assert_ptr_ne(old_hooks->split, extent_split_hook,
expect_ptr_ne(old_hooks->split, extent_split_hook,
"Unexpected extent_hooks error");
assert_ptr_ne(old_hooks->merge, extent_merge_hook,
expect_ptr_ne(old_hooks->merge, extent_merge_hook,
"Unexpected extent_hooks error");
if (!check_background_thread_enabled()) {
@ -182,26 +182,26 @@ test_manual_hook_body(void) {
}
/* Restore extent hooks. */
assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, NULL, NULL,
expect_d_eq(mallctlbymib(hooks_mib, hooks_miblen, NULL, NULL,
(void *)&old_hooks, new_size), 0, "Unexpected extent_hooks error");
assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks,
expect_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks,
&old_size, NULL, 0), 0, "Unexpected extent_hooks error");
assert_ptr_eq(old_hooks, default_hooks, "Unexpected extent_hooks error");
assert_ptr_eq(old_hooks->alloc, default_hooks->alloc,
expect_ptr_eq(old_hooks, default_hooks, "Unexpected extent_hooks error");
expect_ptr_eq(old_hooks->alloc, default_hooks->alloc,
"Unexpected extent_hooks error");
assert_ptr_eq(old_hooks->dalloc, default_hooks->dalloc,
expect_ptr_eq(old_hooks->dalloc, default_hooks->dalloc,
"Unexpected extent_hooks error");
assert_ptr_eq(old_hooks->commit, default_hooks->commit,
expect_ptr_eq(old_hooks->commit, default_hooks->commit,
"Unexpected extent_hooks error");
assert_ptr_eq(old_hooks->decommit, default_hooks->decommit,
expect_ptr_eq(old_hooks->decommit, default_hooks->decommit,
"Unexpected extent_hooks error");
assert_ptr_eq(old_hooks->purge_lazy, default_hooks->purge_lazy,
expect_ptr_eq(old_hooks->purge_lazy, default_hooks->purge_lazy,
"Unexpected extent_hooks error");
assert_ptr_eq(old_hooks->purge_forced, default_hooks->purge_forced,
expect_ptr_eq(old_hooks->purge_forced, default_hooks->purge_forced,
"Unexpected extent_hooks error");
assert_ptr_eq(old_hooks->split, default_hooks->split,
expect_ptr_eq(old_hooks->split, default_hooks->split,
"Unexpected extent_hooks error");
assert_ptr_eq(old_hooks->merge, default_hooks->merge,
expect_ptr_eq(old_hooks->merge, default_hooks->merge,
"Unexpected extent_hooks error");
}
@ -232,7 +232,7 @@ TEST_BEGIN(test_extent_auto_hook) {
sz = sizeof(unsigned);
new_hooks = &hooks;
new_size = sizeof(extent_hooks_t *);
assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz,
expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz,
(void *)&new_hooks, new_size), 0, "Unexpected mallctl() failure");
test_skip_if(check_background_thread_enabled());


@ -6,7 +6,7 @@ get_nsizes_impl(const char *cmd) {
size_t z;
z = sizeof(unsigned);
assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
expect_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
"Unexpected mallctl(\"%s\", ...) failure", cmd);
return ret;
@ -25,11 +25,11 @@ get_size_impl(const char *cmd, size_t ind) {
size_t miblen = 4;
z = sizeof(size_t);
assert_d_eq(mallctlnametomib(cmd, mib, &miblen),
expect_d_eq(mallctlnametomib(cmd, mib, &miblen),
0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
mib[2] = ind;
z = sizeof(size_t);
assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
expect_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
return ret;
@ -47,7 +47,7 @@ get_large_size(size_t ind) {
*/
static void
purge(void) {
assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
expect_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl error");
}
@ -66,16 +66,16 @@ TEST_BEGIN(test_overflow) {
largemax = get_large_size(get_nlarge()-1);
assert_ptr_null(mallocx(largemax+1, 0),
expect_ptr_null(mallocx(largemax+1, 0),
"Expected OOM for mallocx(size=%#zx, 0)", largemax+1);
assert_ptr_null(mallocx(ZU(PTRDIFF_MAX)+1, 0),
expect_ptr_null(mallocx(ZU(PTRDIFF_MAX)+1, 0),
"Expected OOM for mallocx(size=%#zx, 0)", ZU(PTRDIFF_MAX)+1);
assert_ptr_null(mallocx(SIZE_T_MAX, 0),
expect_ptr_null(mallocx(SIZE_T_MAX, 0),
"Expected OOM for mallocx(size=%#zx, 0)", SIZE_T_MAX);
assert_ptr_null(mallocx(1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)),
expect_ptr_null(mallocx(1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)),
"Expected OOM for mallocx(size=1, MALLOCX_ALIGN(%#zx))",
ZU(PTRDIFF_MAX)+1);
}
@ -85,11 +85,11 @@ static void *
remote_alloc(void *arg) {
unsigned arena;
size_t sz = sizeof(unsigned);
assert_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0,
expect_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
size_t large_sz;
sz = sizeof(size_t);
assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&large_sz, &sz,
expect_d_eq(mallctl("arenas.lextent.0.size", (void *)&large_sz, &sz,
NULL, 0), 0, "Unexpected mallctl failure");
void *ptr = mallocx(large_sz, MALLOCX_ARENA(arena)
@ -105,7 +105,7 @@ TEST_BEGIN(test_remote_free) {
void *ret;
thd_create(&thd, remote_alloc, (void *)&ret);
thd_join(thd, NULL);
assert_ptr_not_null(ret, "Unexpected mallocx failure");
expect_ptr_not_null(ret, "Unexpected mallocx failure");
/* Avoid TCACHE_NONE to explicitly test tcache_flush(). */
dallocx(ret, 0);
@ -131,7 +131,7 @@ TEST_BEGIN(test_oom) {
oom = true;
}
}
assert_true(oom,
expect_true(oom,
"Expected OOM during series of calls to mallocx(size=%zu, 0)",
largemax);
for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) {
@ -142,14 +142,14 @@ TEST_BEGIN(test_oom) {
purge();
#if LG_SIZEOF_PTR == 3
assert_ptr_null(mallocx(0x8000000000000000ULL,
expect_ptr_null(mallocx(0x8000000000000000ULL,
MALLOCX_ALIGN(0x8000000000000000ULL)),
"Expected OOM for mallocx()");
assert_ptr_null(mallocx(0x8000000000000000ULL,
expect_ptr_null(mallocx(0x8000000000000000ULL,
MALLOCX_ALIGN(0x80000000)),
"Expected OOM for mallocx()");
#else
assert_ptr_null(mallocx(0x80000000UL, MALLOCX_ALIGN(0x80000000UL)),
expect_ptr_null(mallocx(0x80000000UL, MALLOCX_ALIGN(0x80000000UL)),
"Expected OOM for mallocx()");
#endif
}
@ -166,28 +166,28 @@ TEST_BEGIN(test_basic) {
size_t nsz, rsz;
void *p;
nsz = nallocx(sz, 0);
assert_zu_ne(nsz, 0, "Unexpected nallocx() error");
expect_zu_ne(nsz, 0, "Unexpected nallocx() error");
p = mallocx(sz, 0);
assert_ptr_not_null(p,
expect_ptr_not_null(p,
"Unexpected mallocx(size=%zx, flags=0) error", sz);
rsz = sallocx(p, 0);
assert_zu_ge(rsz, sz, "Real size smaller than expected");
assert_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch");
expect_zu_ge(rsz, sz, "Real size smaller than expected");
expect_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch");
dallocx(p, 0);
p = mallocx(sz, 0);
assert_ptr_not_null(p,
expect_ptr_not_null(p,
"Unexpected mallocx(size=%zx, flags=0) error", sz);
dallocx(p, 0);
nsz = nallocx(sz, MALLOCX_ZERO);
assert_zu_ne(nsz, 0, "Unexpected nallocx() error");
expect_zu_ne(nsz, 0, "Unexpected nallocx() error");
p = mallocx(sz, MALLOCX_ZERO);
assert_ptr_not_null(p,
expect_ptr_not_null(p,
"Unexpected mallocx(size=%zx, flags=MALLOCX_ZERO) error",
nsz);
rsz = sallocx(p, 0);
assert_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch");
expect_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch");
dallocx(p, 0);
purge();
}
@ -224,22 +224,22 @@ TEST_BEGIN(test_alignment_and_size) {
for (i = 0; i < NITER; i++) {
nsz = nallocx(sz, MALLOCX_ALIGN(alignment) |
MALLOCX_ZERO | MALLOCX_ARENA(0));
assert_zu_ne(nsz, 0,
expect_zu_ne(nsz, 0,
"nallocx() error for alignment=%zu, "
"size=%zu (%#zx)", alignment, sz, sz);
ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) |
MALLOCX_ZERO | MALLOCX_ARENA(0));
assert_ptr_not_null(ps[i],
expect_ptr_not_null(ps[i],
"mallocx() error for alignment=%zu, "
"size=%zu (%#zx)", alignment, sz, sz);
rsz = sallocx(ps[i], 0);
assert_zu_ge(rsz, sz,
expect_zu_ge(rsz, sz,
"Real size smaller than expected for "
"alignment=%zu, size=%zu", alignment, sz);
assert_zu_eq(nsz, rsz,
expect_zu_eq(nsz, rsz,
"nallocx()/sallocx() size mismatch for "
"alignment=%zu, size=%zu", alignment, sz);
assert_ptr_null(
expect_ptr_null(
(void *)((uintptr_t)ps[i] & (alignment-1)),
"%p inadequately aligned for"
" alignment=%zu, size=%zu", ps[i],


@ -17,33 +17,33 @@ TEST_BEGIN(test_overflow) {
void *p;
sz = sizeof(unsigned);
assert_d_eq(mallctl("arenas.nlextents", (void *)&nlextents, &sz, NULL,
expect_d_eq(mallctl("arenas.nlextents", (void *)&nlextents, &sz, NULL,
0), 0, "Unexpected mallctl() error");
miblen = sizeof(mib) / sizeof(size_t);
assert_d_eq(mallctlnametomib("arenas.lextent.0.size", mib, &miblen), 0,
expect_d_eq(mallctlnametomib("arenas.lextent.0.size", mib, &miblen), 0,
"Unexpected mallctlnametomib() error");
mib[2] = nlextents - 1;
sz = sizeof(size_t);
assert_d_eq(mallctlbymib(mib, miblen, (void *)&max_size_class, &sz,
expect_d_eq(mallctlbymib(mib, miblen, (void *)&max_size_class, &sz,
NULL, 0), 0, "Unexpected mallctlbymib() error");
assert_ptr_null(malloc(max_size_class + 1),
expect_ptr_null(malloc(max_size_class + 1),
"Expected OOM due to over-sized allocation request");
assert_ptr_null(malloc(SIZE_T_MAX),
expect_ptr_null(malloc(SIZE_T_MAX),
"Expected OOM due to over-sized allocation request");
assert_ptr_null(calloc(1, max_size_class + 1),
expect_ptr_null(calloc(1, max_size_class + 1),
"Expected OOM due to over-sized allocation request");
assert_ptr_null(calloc(1, SIZE_T_MAX),
expect_ptr_null(calloc(1, SIZE_T_MAX),
"Expected OOM due to over-sized allocation request");
p = malloc(1);
assert_ptr_not_null(p, "Unexpected malloc() OOM");
assert_ptr_null(realloc(p, max_size_class + 1),
expect_ptr_not_null(p, "Unexpected malloc() OOM");
expect_ptr_null(realloc(p, max_size_class + 1),
"Expected OOM due to over-sized allocation request");
assert_ptr_null(realloc(p, SIZE_T_MAX),
expect_ptr_null(realloc(p, SIZE_T_MAX),
"Expected OOM due to over-sized allocation request");
free(p);
}


@ -9,7 +9,7 @@
*/
static void
purge(void) {
assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
expect_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl error");
}
@ -18,14 +18,14 @@ TEST_BEGIN(test_alignment_errors) {
void *p;
for (alignment = 0; alignment < sizeof(void *); alignment++) {
assert_d_eq(posix_memalign(&p, alignment, 1), EINVAL,
expect_d_eq(posix_memalign(&p, alignment, 1), EINVAL,
"Expected error for invalid alignment %zu",
alignment);
}
for (alignment = sizeof(size_t); alignment < MAXALIGN;
alignment <<= 1) {
assert_d_ne(posix_memalign(&p, alignment + 1, 1), 0,
expect_d_ne(posix_memalign(&p, alignment + 1, 1), 0,
"Expected error for invalid alignment %zu",
alignment + 1);
}
@ -43,7 +43,7 @@ TEST_BEGIN(test_oom_errors) {
alignment = 0x80000000LU;
size = 0x80000000LU;
#endif
assert_d_ne(posix_memalign(&p, alignment, size), 0,
expect_d_ne(posix_memalign(&p, alignment, size), 0,
"Expected error for posix_memalign(&p, %zu, %zu)",
alignment, size);
@ -54,7 +54,7 @@ TEST_BEGIN(test_oom_errors) {
alignment = 0x40000000LU;
size = 0xc0000001LU;
#endif
assert_d_ne(posix_memalign(&p, alignment, size), 0,
expect_d_ne(posix_memalign(&p, alignment, size), 0,
"Expected error for posix_memalign(&p, %zu, %zu)",
alignment, size);
@ -64,7 +64,7 @@ TEST_BEGIN(test_oom_errors) {
#else
size = 0xfffffff0LU;
#endif
assert_d_ne(posix_memalign(&p, alignment, size), 0,
expect_d_ne(posix_memalign(&p, alignment, size), 0,
"Expected error for posix_memalign(&p, %zu, %zu)",
alignment, size);
}


@ -6,7 +6,7 @@ get_nsizes_impl(const char *cmd) {
size_t z;
z = sizeof(unsigned);
assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
expect_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
"Unexpected mallctl(\"%s\", ...) failure", cmd);
return ret;
@ -25,11 +25,11 @@ get_size_impl(const char *cmd, size_t ind) {
size_t miblen = 4;
z = sizeof(size_t);
assert_d_eq(mallctlnametomib(cmd, mib, &miblen),
expect_d_eq(mallctlnametomib(cmd, mib, &miblen),
0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
mib[2] = ind;
z = sizeof(size_t);
assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
expect_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
return ret;
@ -50,28 +50,28 @@ TEST_BEGIN(test_grow_and_shrink) {
#define MAXSZ ZU(12 * 1024 * 1024)
p = mallocx(1, 0);
assert_ptr_not_null(p, "Unexpected mallocx() error");
expect_ptr_not_null(p, "Unexpected mallocx() error");
szs[0] = sallocx(p, 0);
for (i = 0; i < NCYCLES; i++) {
for (j = 1; j < NSZS && szs[j-1] < MAXSZ; j++) {
q = rallocx(p, szs[j-1]+1, 0);
assert_ptr_not_null(q,
expect_ptr_not_null(q,
"Unexpected rallocx() error for size=%zu-->%zu",
szs[j-1], szs[j-1]+1);
szs[j] = sallocx(q, 0);
assert_zu_ne(szs[j], szs[j-1]+1,
expect_zu_ne(szs[j], szs[j-1]+1,
"Expected size to be at least: %zu", szs[j-1]+1);
p = q;
}
for (j--; j > 0; j--) {
q = rallocx(p, szs[j-1], 0);
assert_ptr_not_null(q,
expect_ptr_not_null(q,
"Unexpected rallocx() error for size=%zu-->%zu",
szs[j], szs[j-1]);
tsz = sallocx(q, 0);
assert_zu_eq(tsz, szs[j-1],
expect_zu_eq(tsz, szs[j-1],
"Expected size=%zu, got size=%zu", szs[j-1], tsz);
p = q;
}
@ -113,23 +113,23 @@ TEST_BEGIN(test_zero) {
for (i = 0; i < sizeof(start_sizes)/sizeof(size_t); i++) {
size_t start_size = start_sizes[i];
p = mallocx(start_size, MALLOCX_ZERO);
assert_ptr_not_null(p, "Unexpected mallocx() error");
expect_ptr_not_null(p, "Unexpected mallocx() error");
psz = sallocx(p, 0);
assert_false(validate_fill(p, 0, 0, psz),
expect_false(validate_fill(p, 0, 0, psz),
"Expected zeroed memory");
memset(p, FILL_BYTE, psz);
assert_false(validate_fill(p, FILL_BYTE, 0, psz),
expect_false(validate_fill(p, FILL_BYTE, 0, psz),
"Expected filled memory");
for (j = 1; j < RANGE; j++) {
q = rallocx(p, start_size+j, MALLOCX_ZERO);
assert_ptr_not_null(q, "Unexpected rallocx() error");
expect_ptr_not_null(q, "Unexpected rallocx() error");
qsz = sallocx(q, 0);
if (q != p || qsz != psz) {
assert_false(validate_fill(q, FILL_BYTE, 0,
expect_false(validate_fill(q, FILL_BYTE, 0,
psz), "Expected filled memory");
assert_false(validate_fill(q, 0, psz, qsz-psz),
expect_false(validate_fill(q, 0, psz, qsz-psz),
"Expected zeroed memory");
}
if (psz != qsz) {
@ -139,7 +139,7 @@ TEST_BEGIN(test_zero) {
}
p = q;
}
assert_false(validate_fill(p, FILL_BYTE, 0, psz),
expect_false(validate_fill(p, FILL_BYTE, 0, psz),
"Expected filled memory");
dallocx(p, 0);
}
@ -154,13 +154,13 @@ TEST_BEGIN(test_align) {
align = ZU(1);
p = mallocx(1, MALLOCX_ALIGN(align));
assert_ptr_not_null(p, "Unexpected mallocx() error");
expect_ptr_not_null(p, "Unexpected mallocx() error");
for (align <<= 1; align <= MAX_ALIGN; align <<= 1) {
q = rallocx(p, 1, MALLOCX_ALIGN(align));
assert_ptr_not_null(q,
expect_ptr_not_null(q,
"Unexpected rallocx() error for align=%zu", align);
assert_ptr_null(
expect_ptr_null(
(void *)((uintptr_t)q & (align-1)),
"%p inadequately aligned for align=%zu",
q, align);
@ -180,23 +180,23 @@ TEST_BEGIN(test_lg_align_and_zero) {
lg_align = 0;
p = mallocx(1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO);
assert_ptr_not_null(p, "Unexpected mallocx() error");
expect_ptr_not_null(p, "Unexpected mallocx() error");
for (lg_align++; lg_align <= MAX_LG_ALIGN; lg_align++) {
q = rallocx(p, 1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO);
assert_ptr_not_null(q,
expect_ptr_not_null(q,
"Unexpected rallocx() error for lg_align=%u", lg_align);
assert_ptr_null(
expect_ptr_null(
(void *)((uintptr_t)q & ((ZU(1) << lg_align)-1)),
"%p inadequately aligned for lg_align=%u", q, lg_align);
sz = sallocx(q, 0);
if ((sz << 1) <= MAX_VALIDATE) {
assert_false(validate_fill(q, 0, 0, sz),
expect_false(validate_fill(q, 0, 0, sz),
"Expected zeroed memory");
} else {
assert_false(validate_fill(q, 0, 0, MAX_VALIDATE),
expect_false(validate_fill(q, 0, 0, MAX_VALIDATE),
"Expected zeroed memory");
assert_false(validate_fill(
expect_false(validate_fill(
(void *)((uintptr_t)q+sz-MAX_VALIDATE),
0, 0, MAX_VALIDATE), "Expected zeroed memory");
}
@ -225,18 +225,18 @@ TEST_BEGIN(test_overflow) {
largemax = get_large_size(get_nlarge()-1);
p = mallocx(1, 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
expect_ptr_not_null(p, "Unexpected mallocx() failure");
assert_ptr_null(rallocx(p, largemax+1, 0),
expect_ptr_null(rallocx(p, largemax+1, 0),
"Expected OOM for rallocx(p, size=%#zx, 0)", largemax+1);
assert_ptr_null(rallocx(p, ZU(PTRDIFF_MAX)+1, 0),
expect_ptr_null(rallocx(p, ZU(PTRDIFF_MAX)+1, 0),
"Expected OOM for rallocx(p, size=%#zx, 0)", ZU(PTRDIFF_MAX)+1);
assert_ptr_null(rallocx(p, SIZE_T_MAX, 0),
expect_ptr_null(rallocx(p, SIZE_T_MAX, 0),
"Expected OOM for rallocx(p, size=%#zx, 0)", SIZE_T_MAX);
assert_ptr_null(rallocx(p, 1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)),
expect_ptr_null(rallocx(p, 1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)),
"Expected OOM for rallocx(p, size=1, MALLOCX_ALIGN(%#zx))",
ZU(PTRDIFF_MAX)+1);


@ -10,19 +10,19 @@ TEST_BEGIN(test_slab_sizes) {
size_t len;
len = sizeof(nbins);
assert_d_eq(mallctl("arenas.nbins", &nbins, &len, NULL, 0), 0,
expect_d_eq(mallctl("arenas.nbins", &nbins, &len, NULL, 0), 0,
"nbins mallctl failure");
len = sizeof(page);
assert_d_eq(mallctl("arenas.page", &page, &len, NULL, 0), 0,
expect_d_eq(mallctl("arenas.page", &page, &len, NULL, 0), 0,
"page mallctl failure");
len = 4;
assert_d_eq(mallctlnametomib("arenas.bin.0.size", sizemib, &len), 0,
expect_d_eq(mallctlnametomib("arenas.bin.0.size", sizemib, &len), 0,
"bin size mallctlnametomib failure");
len = 4;
assert_d_eq(mallctlnametomib("arenas.bin.0.slab_size", slabmib, &len),
expect_d_eq(mallctlnametomib("arenas.bin.0.slab_size", slabmib, &len),
0, "slab size mallctlnametomib failure");
size_t biggest_slab_seen = 0;
@ -33,11 +33,11 @@ TEST_BEGIN(test_slab_sizes) {
len = sizeof(size_t);
sizemib[2] = i;
slabmib[2] = i;
assert_d_eq(mallctlbymib(sizemib, 4, (void *)&bin_size, &len,
expect_d_eq(mallctlbymib(sizemib, 4, (void *)&bin_size, &len,
NULL, 0), 0, "bin size mallctlbymib failure");
len = sizeof(size_t);
assert_d_eq(mallctlbymib(slabmib, 4, (void *)&slab_size, &len,
expect_d_eq(mallctlbymib(slabmib, 4, (void *)&slab_size, &len,
NULL, 0), 0, "slab size mallctlbymib failure");
if (bin_size < 100) {
@ -48,19 +48,19 @@ TEST_BEGIN(test_slab_sizes) {
* should at least make sure that the number of pages
* goes up.
*/
assert_zu_ge(slab_size, biggest_slab_seen,
expect_zu_ge(slab_size, biggest_slab_seen,
"Slab sizes should go up");
biggest_slab_seen = slab_size;
} else if (
(100 <= bin_size && bin_size < 128)
|| (128 < bin_size && bin_size <= 200)) {
assert_zu_eq(slab_size, page,
expect_zu_eq(slab_size, page,
"Forced-small slabs should be small");
} else if (bin_size == 128) {
assert_zu_eq(slab_size, 2 * page,
expect_zu_eq(slab_size, 2 * page,
"Forced-2-page slab should be 2 pages");
} else if (200 < bin_size && bin_size <= 4096) {
assert_zu_ge(slab_size, biggest_slab_seen,
expect_zu_ge(slab_size, biggest_slab_seen,
"Slab sizes should go up");
biggest_slab_seen = slab_size;
}
@ -69,7 +69,7 @@ TEST_BEGIN(test_slab_sizes) {
* For any reasonable configuration, 17 pages should be a valid slab
* size for 4096-byte items.
*/
assert_zu_eq(biggest_slab_seen, 17 * page, "Didn't hit page target");
expect_zu_eq(biggest_slab_seen, 17 * page, "Didn't hit page target");
}
TEST_END


@ -26,7 +26,7 @@ get_nsizes_impl(const char *cmd) {
size_t z;
z = sizeof(unsigned);
assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
expect_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
"Unexpected mallctl(\"%s\", ...) failure", cmd);
return ret;
@ -45,11 +45,11 @@ get_size_impl(const char *cmd, size_t ind) {
size_t miblen = 4;
z = sizeof(size_t);
assert_d_eq(mallctlnametomib(cmd, mib, &miblen),
expect_d_eq(mallctlnametomib(cmd, mib, &miblen),
0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
mib[2] = ind;
z = sizeof(size_t);
assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
expect_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
return ret;
@ -67,7 +67,7 @@ get_large_size(size_t ind) {
*/
static void
purge(void) {
assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
expect_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl error");
}
@ -86,16 +86,16 @@ TEST_BEGIN(test_overflow) {
largemax = get_large_size(get_nlarge()-1);
assert_ptr_null(smallocx(largemax+1, 0).ptr,
expect_ptr_null(smallocx(largemax+1, 0).ptr,
"Expected OOM for smallocx(size=%#zx, 0)", largemax+1);
assert_ptr_null(smallocx(ZU(PTRDIFF_MAX)+1, 0).ptr,
expect_ptr_null(smallocx(ZU(PTRDIFF_MAX)+1, 0).ptr,
"Expected OOM for smallocx(size=%#zx, 0)", ZU(PTRDIFF_MAX)+1);
assert_ptr_null(smallocx(SIZE_T_MAX, 0).ptr,
expect_ptr_null(smallocx(SIZE_T_MAX, 0).ptr,
"Expected OOM for smallocx(size=%#zx, 0)", SIZE_T_MAX);
assert_ptr_null(smallocx(1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)).ptr,
expect_ptr_null(smallocx(1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)).ptr,
"Expected OOM for smallocx(size=1, MALLOCX_ALIGN(%#zx))",
ZU(PTRDIFF_MAX)+1);
}
@ -105,17 +105,17 @@ static void *
remote_alloc(void *arg) {
unsigned arena;
size_t sz = sizeof(unsigned);
assert_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0,
expect_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
size_t large_sz;
sz = sizeof(size_t);
assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&large_sz, &sz,
expect_d_eq(mallctl("arenas.lextent.0.size", (void *)&large_sz, &sz,
NULL, 0), 0, "Unexpected mallctl failure");
smallocx_return_t r
= smallocx(large_sz, MALLOCX_ARENA(arena) | MALLOCX_TCACHE_NONE);
void *ptr = r.ptr;
assert_zu_eq(r.size,
expect_zu_eq(r.size,
nallocx(large_sz, MALLOCX_ARENA(arena) | MALLOCX_TCACHE_NONE),
"Expected smalloc(size,flags).size == nallocx(size,flags)");
void **ret = (void **)arg;
@ -129,7 +129,7 @@ TEST_BEGIN(test_remote_free) {
void *ret;
thd_create(&thd, remote_alloc, (void *)&ret);
thd_join(thd, NULL);
assert_ptr_not_null(ret, "Unexpected smallocx failure");
expect_ptr_not_null(ret, "Unexpected smallocx failure");
/* Avoid TCACHE_NONE to explicitly test tcache_flush(). */
dallocx(ret, 0);
@ -155,7 +155,7 @@ TEST_BEGIN(test_oom) {
oom = true;
}
}
assert_true(oom,
expect_true(oom,
"Expected OOM during series of calls to smallocx(size=%zu, 0)",
largemax);
for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) {
@ -166,14 +166,14 @@ TEST_BEGIN(test_oom) {
purge();
#if LG_SIZEOF_PTR == 3
assert_ptr_null(smallocx(0x8000000000000000ULL,
expect_ptr_null(smallocx(0x8000000000000000ULL,
MALLOCX_ALIGN(0x8000000000000000ULL)).ptr,
"Expected OOM for smallocx()");
assert_ptr_null(smallocx(0x8000000000000000ULL,
expect_ptr_null(smallocx(0x8000000000000000ULL,
MALLOCX_ALIGN(0x80000000)).ptr,
"Expected OOM for smallocx()");
#else
assert_ptr_null(smallocx(0x80000000UL, MALLOCX_ALIGN(0x80000000UL)).ptr,
expect_ptr_null(smallocx(0x80000000UL, MALLOCX_ALIGN(0x80000000UL)).ptr,
"Expected OOM for smallocx()");
#endif
}
@ -191,36 +191,36 @@ TEST_BEGIN(test_basic) {
size_t nsz, rsz, smz;
void *p;
nsz = nallocx(sz, 0);
assert_zu_ne(nsz, 0, "Unexpected nallocx() error");
expect_zu_ne(nsz, 0, "Unexpected nallocx() error");
ret = smallocx(sz, 0);
p = ret.ptr;
smz = ret.size;
assert_ptr_not_null(p,
expect_ptr_not_null(p,
"Unexpected smallocx(size=%zx, flags=0) error", sz);
rsz = sallocx(p, 0);
assert_zu_ge(rsz, sz, "Real size smaller than expected");
assert_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch");
assert_zu_eq(nsz, smz, "nallocx()/smallocx() size mismatch");
expect_zu_ge(rsz, sz, "Real size smaller than expected");
expect_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch");
expect_zu_eq(nsz, smz, "nallocx()/smallocx() size mismatch");
dallocx(p, 0);
ret = smallocx(sz, 0);
p = ret.ptr;
smz = ret.size;
assert_ptr_not_null(p,
expect_ptr_not_null(p,
"Unexpected smallocx(size=%zx, flags=0) error", sz);
dallocx(p, 0);
nsz = nallocx(sz, MALLOCX_ZERO);
assert_zu_ne(nsz, 0, "Unexpected nallocx() error");
assert_zu_ne(smz, 0, "Unexpected smallocx() error");
expect_zu_ne(nsz, 0, "Unexpected nallocx() error");
expect_zu_ne(smz, 0, "Unexpected smallocx() error");
ret = smallocx(sz, MALLOCX_ZERO);
p = ret.ptr;
assert_ptr_not_null(p,
expect_ptr_not_null(p,
"Unexpected smallocx(size=%zx, flags=MALLOCX_ZERO) error",
nsz);
rsz = sallocx(p, 0);
assert_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch");
assert_zu_eq(nsz, smz, "nallocx()/smallocx() size mismatch");
expect_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch");
expect_zu_eq(nsz, smz, "nallocx()/smallocx() size mismatch");
dallocx(p, 0);
purge();
}
@ -257,27 +257,27 @@ TEST_BEGIN(test_alignment_and_size) {
for (i = 0; i < NITER; i++) {
nsz = nallocx(sz, MALLOCX_ALIGN(alignment) |
MALLOCX_ZERO);
assert_zu_ne(nsz, 0,
expect_zu_ne(nsz, 0,
"nallocx() error for alignment=%zu, "
"size=%zu (%#zx)", alignment, sz, sz);
smallocx_return_t ret
= smallocx(sz, MALLOCX_ALIGN(alignment) | MALLOCX_ZERO);
ps[i] = ret.ptr;
assert_ptr_not_null(ps[i],
expect_ptr_not_null(ps[i],
"smallocx() error for alignment=%zu, "
"size=%zu (%#zx)", alignment, sz, sz);
rsz = sallocx(ps[i], 0);
smz = ret.size;
assert_zu_ge(rsz, sz,
expect_zu_ge(rsz, sz,
"Real size smaller than expected for "
"alignment=%zu, size=%zu", alignment, sz);
assert_zu_eq(nsz, rsz,
expect_zu_eq(nsz, rsz,
"nallocx()/sallocx() size mismatch for "
"alignment=%zu, size=%zu", alignment, sz);
assert_zu_eq(nsz, smz,
expect_zu_eq(nsz, smz,
"nallocx()/smallocx() size mismatch for "
"alignment=%zu, size=%zu", alignment, sz);
assert_ptr_null(
expect_ptr_null(
(void *)((uintptr_t)ps[i] & (alignment-1)),
"%p inadequately aligned for"
" alignment=%zu, size=%zu", ps[i],


@ -11,7 +11,7 @@ thd_start(void *arg) {
int err;
p = malloc(1);
assert_ptr_not_null(p, "Error in malloc()");
expect_ptr_not_null(p, "Error in malloc()");
free(p);
size = sizeof(arena_ind);
@ -31,7 +31,7 @@ thd_start(void *arg) {
buferror(err, buf, sizeof(buf));
test_fail("Error in mallctl(): %s", buf);
}
assert_u_eq(arena_ind, main_arena_ind,
expect_u_eq(arena_ind, main_arena_ind,
"Arena index should be same as for main thread");
return NULL;
@ -52,11 +52,11 @@ TEST_BEGIN(test_thread_arena) {
unsigned i;
p = malloc(1);
assert_ptr_not_null(p, "Error in malloc()");
expect_ptr_not_null(p, "Error in malloc()");
unsigned arena_ind, old_arena_ind;
size_t sz = sizeof(unsigned);
assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
0, "Arena creation failure");
size_t size = sizeof(arena_ind);
@ -73,7 +73,7 @@ TEST_BEGIN(test_thread_arena) {
for (i = 0; i < NTHREADS; i++) {
intptr_t join_ret;
thd_join(thds[i], (void *)&join_ret);
assert_zd_eq(join_ret, 0, "Unexpected thread join error");
expect_zd_eq(join_ret, 0, "Unexpected thread join error");
}
free(p);
}


@ -4,59 +4,59 @@ void *
thd_start(void *arg) {
bool e0, e1;
size_t sz = sizeof(bool);
assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, NULL,
expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, NULL,
0), 0, "Unexpected mallctl failure");
if (e0) {
e1 = false;
assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
(void *)&e1, sz), 0, "Unexpected mallctl() error");
assert_true(e0, "tcache should be enabled");
expect_true(e0, "tcache should be enabled");
}
e1 = true;
assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
(void *)&e1, sz), 0, "Unexpected mallctl() error");
assert_false(e0, "tcache should be disabled");
expect_false(e0, "tcache should be disabled");
e1 = true;
assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
(void *)&e1, sz), 0, "Unexpected mallctl() error");
assert_true(e0, "tcache should be enabled");
expect_true(e0, "tcache should be enabled");
e1 = false;
assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
(void *)&e1, sz), 0, "Unexpected mallctl() error");
assert_true(e0, "tcache should be enabled");
expect_true(e0, "tcache should be enabled");
e1 = false;
assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
(void *)&e1, sz), 0, "Unexpected mallctl() error");
assert_false(e0, "tcache should be disabled");
expect_false(e0, "tcache should be disabled");
free(malloc(1));
e1 = true;
assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
(void *)&e1, sz), 0, "Unexpected mallctl() error");
assert_false(e0, "tcache should be disabled");
expect_false(e0, "tcache should be disabled");
free(malloc(1));
e1 = true;
assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
(void *)&e1, sz), 0, "Unexpected mallctl() error");
assert_true(e0, "tcache should be enabled");
expect_true(e0, "tcache should be enabled");
free(malloc(1));
e1 = false;
assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
(void *)&e1, sz), 0, "Unexpected mallctl() error");
assert_true(e0, "tcache should be enabled");
expect_true(e0, "tcache should be enabled");
free(malloc(1));
e1 = false;
assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
(void *)&e1, sz), 0, "Unexpected mallctl() error");
assert_false(e0, "tcache should be disabled");
expect_false(e0, "tcache should be disabled");
free(malloc(1));
return NULL;


@ -11,7 +11,7 @@ arena_ind(void) {
if (ind == 0) {
size_t sz = sizeof(ind);
assert_d_eq(mallctl("arenas.create", (void *)&ind, &sz, NULL,
expect_d_eq(mallctl("arenas.create", (void *)&ind, &sz, NULL,
0), 0, "Unexpected mallctl failure creating arena");
}
@ -23,11 +23,11 @@ TEST_BEGIN(test_same_size) {
size_t sz, tsz;
p = mallocx(42, 0);
assert_ptr_not_null(p, "Unexpected mallocx() error");
expect_ptr_not_null(p, "Unexpected mallocx() error");
sz = sallocx(p, 0);
tsz = xallocx(p, sz, 0, 0);
assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
expect_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
dallocx(p, 0);
}
@ -38,11 +38,11 @@ TEST_BEGIN(test_extra_no_move) {
size_t sz, tsz;
p = mallocx(42, 0);
assert_ptr_not_null(p, "Unexpected mallocx() error");
expect_ptr_not_null(p, "Unexpected mallocx() error");
sz = sallocx(p, 0);
tsz = xallocx(p, sz, sz-42, 0);
assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
expect_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
dallocx(p, 0);
}
@ -53,11 +53,11 @@ TEST_BEGIN(test_no_move_fail) {
size_t sz, tsz;
p = mallocx(42, 0);
assert_ptr_not_null(p, "Unexpected mallocx() error");
expect_ptr_not_null(p, "Unexpected mallocx() error");
sz = sallocx(p, 0);
tsz = xallocx(p, sz + 5, 0, 0);
assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
expect_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
dallocx(p, 0);
}
@ -69,7 +69,7 @@ get_nsizes_impl(const char *cmd) {
size_t z;
z = sizeof(unsigned);
assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
expect_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
"Unexpected mallctl(\"%s\", ...) failure", cmd);
return ret;
@ -93,11 +93,11 @@ get_size_impl(const char *cmd, size_t ind) {
size_t miblen = 4;
z = sizeof(size_t);
assert_d_eq(mallctlnametomib(cmd, mib, &miblen),
expect_d_eq(mallctlnametomib(cmd, mib, &miblen),
0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
mib[2] = ind;
z = sizeof(size_t);
assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
expect_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
return ret;
@ -122,20 +122,20 @@ TEST_BEGIN(test_size) {
largemax = get_large_size(get_nlarge()-1);
p = mallocx(small0, 0);
assert_ptr_not_null(p, "Unexpected mallocx() error");
expect_ptr_not_null(p, "Unexpected mallocx() error");
/* Test smallest supported size. */
assert_zu_eq(xallocx(p, 1, 0, 0), small0,
expect_zu_eq(xallocx(p, 1, 0, 0), small0,
"Unexpected xallocx() behavior");
/* Test largest supported size. */
assert_zu_le(xallocx(p, largemax, 0, 0), largemax,
expect_zu_le(xallocx(p, largemax, 0, 0), largemax,
"Unexpected xallocx() behavior");
/* Test size overflow. */
assert_zu_le(xallocx(p, largemax+1, 0, 0), largemax,
expect_zu_le(xallocx(p, largemax+1, 0, 0), largemax,
"Unexpected xallocx() behavior");
assert_zu_le(xallocx(p, SIZE_T_MAX, 0, 0), largemax,
expect_zu_le(xallocx(p, SIZE_T_MAX, 0, 0), largemax,
"Unexpected xallocx() behavior");
dallocx(p, 0);
@ -151,22 +151,22 @@ TEST_BEGIN(test_size_extra_overflow) {
largemax = get_large_size(get_nlarge()-1);
p = mallocx(small0, 0);
assert_ptr_not_null(p, "Unexpected mallocx() error");
expect_ptr_not_null(p, "Unexpected mallocx() error");
/* Test overflows that can be resolved by clamping extra. */
assert_zu_le(xallocx(p, largemax-1, 2, 0), largemax,
expect_zu_le(xallocx(p, largemax-1, 2, 0), largemax,
"Unexpected xallocx() behavior");
assert_zu_le(xallocx(p, largemax, 1, 0), largemax,
expect_zu_le(xallocx(p, largemax, 1, 0), largemax,
"Unexpected xallocx() behavior");
/* Test overflow such that largemax-size underflows. */
assert_zu_le(xallocx(p, largemax+1, 2, 0), largemax,
expect_zu_le(xallocx(p, largemax+1, 2, 0), largemax,
"Unexpected xallocx() behavior");
assert_zu_le(xallocx(p, largemax+2, 3, 0), largemax,
expect_zu_le(xallocx(p, largemax+2, 3, 0), largemax,
"Unexpected xallocx() behavior");
assert_zu_le(xallocx(p, SIZE_T_MAX-2, 2, 0), largemax,
expect_zu_le(xallocx(p, SIZE_T_MAX-2, 2, 0), largemax,
"Unexpected xallocx() behavior");
assert_zu_le(xallocx(p, SIZE_T_MAX-1, 1, 0), largemax,
expect_zu_le(xallocx(p, SIZE_T_MAX-1, 1, 0), largemax,
"Unexpected xallocx() behavior");
dallocx(p, 0);
@ -183,21 +183,21 @@ TEST_BEGIN(test_extra_small) {
largemax = get_large_size(get_nlarge()-1);
p = mallocx(small0, 0);
assert_ptr_not_null(p, "Unexpected mallocx() error");
expect_ptr_not_null(p, "Unexpected mallocx() error");
assert_zu_eq(xallocx(p, small1, 0, 0), small0,
expect_zu_eq(xallocx(p, small1, 0, 0), small0,
"Unexpected xallocx() behavior");
assert_zu_eq(xallocx(p, small1, 0, 0), small0,
expect_zu_eq(xallocx(p, small1, 0, 0), small0,
"Unexpected xallocx() behavior");
assert_zu_eq(xallocx(p, small0, small1 - small0, 0), small0,
expect_zu_eq(xallocx(p, small0, small1 - small0, 0), small0,
"Unexpected xallocx() behavior");
/* Test size+extra overflow. */
assert_zu_eq(xallocx(p, small0, largemax - small0 + 1, 0), small0,
expect_zu_eq(xallocx(p, small0, largemax - small0 + 1, 0), small0,
"Unexpected xallocx() behavior");
assert_zu_eq(xallocx(p, small0, SIZE_T_MAX - small0, 0), small0,
expect_zu_eq(xallocx(p, small0, SIZE_T_MAX - small0, 0), small0,
"Unexpected xallocx() behavior");
dallocx(p, 0);
@ -217,56 +217,56 @@ TEST_BEGIN(test_extra_large) {
largemax = get_large_size(get_nlarge()-1);
p = mallocx(large3, flags);
assert_ptr_not_null(p, "Unexpected mallocx() error");
expect_ptr_not_null(p, "Unexpected mallocx() error");
assert_zu_eq(xallocx(p, large3, 0, flags), large3,
expect_zu_eq(xallocx(p, large3, 0, flags), large3,
"Unexpected xallocx() behavior");
/* Test size decrease with zero extra. */
assert_zu_ge(xallocx(p, large1, 0, flags), large1,
expect_zu_ge(xallocx(p, large1, 0, flags), large1,
"Unexpected xallocx() behavior");
assert_zu_ge(xallocx(p, smallmax, 0, flags), large1,
expect_zu_ge(xallocx(p, smallmax, 0, flags), large1,
"Unexpected xallocx() behavior");
if (xallocx(p, large3, 0, flags) != large3) {
p = rallocx(p, large3, flags);
assert_ptr_not_null(p, "Unexpected rallocx() failure");
expect_ptr_not_null(p, "Unexpected rallocx() failure");
}
/* Test size decrease with non-zero extra. */
assert_zu_eq(xallocx(p, large1, large3 - large1, flags), large3,
expect_zu_eq(xallocx(p, large1, large3 - large1, flags), large3,
"Unexpected xallocx() behavior");
assert_zu_eq(xallocx(p, large2, large3 - large2, flags), large3,
expect_zu_eq(xallocx(p, large2, large3 - large2, flags), large3,
"Unexpected xallocx() behavior");
assert_zu_ge(xallocx(p, large1, large2 - large1, flags), large2,
expect_zu_ge(xallocx(p, large1, large2 - large1, flags), large2,
"Unexpected xallocx() behavior");
assert_zu_ge(xallocx(p, smallmax, large1 - smallmax, flags), large1,
expect_zu_ge(xallocx(p, smallmax, large1 - smallmax, flags), large1,
"Unexpected xallocx() behavior");
assert_zu_ge(xallocx(p, large1, 0, flags), large1,
expect_zu_ge(xallocx(p, large1, 0, flags), large1,
"Unexpected xallocx() behavior");
/* Test size increase with zero extra. */
assert_zu_le(xallocx(p, large3, 0, flags), large3,
expect_zu_le(xallocx(p, large3, 0, flags), large3,
"Unexpected xallocx() behavior");
assert_zu_le(xallocx(p, largemax+1, 0, flags), large3,
expect_zu_le(xallocx(p, largemax+1, 0, flags), large3,
"Unexpected xallocx() behavior");
assert_zu_ge(xallocx(p, large1, 0, flags), large1,
expect_zu_ge(xallocx(p, large1, 0, flags), large1,
"Unexpected xallocx() behavior");
/* Test size increase with non-zero extra. */
assert_zu_le(xallocx(p, large1, SIZE_T_MAX - large1, flags), largemax,
expect_zu_le(xallocx(p, large1, SIZE_T_MAX - large1, flags), largemax,
"Unexpected xallocx() behavior");
assert_zu_ge(xallocx(p, large1, 0, flags), large1,
expect_zu_ge(xallocx(p, large1, 0, flags), large1,
"Unexpected xallocx() behavior");
/* Test size increase with non-zero extra. */
assert_zu_le(xallocx(p, large1, large3 - large1, flags), large3,
expect_zu_le(xallocx(p, large1, large3 - large1, flags), large3,
"Unexpected xallocx() behavior");
if (xallocx(p, large3, 0, flags) != large3) {
p = rallocx(p, large3, flags);
assert_ptr_not_null(p, "Unexpected rallocx() failure");
expect_ptr_not_null(p, "Unexpected rallocx() failure");
}
/* Test size+extra overflow. */
assert_zu_le(xallocx(p, large3, largemax - large3 + 1, flags), largemax,
expect_zu_le(xallocx(p, large3, largemax - large3 + 1, flags), largemax,
"Unexpected xallocx() behavior");
dallocx(p, flags);
@ -320,8 +320,8 @@ test_zero(size_t szmin, size_t szmax) {
sz = szmax;
p = mallocx(sz, flags);
assert_ptr_not_null(p, "Unexpected mallocx() error");
assert_false(validate_fill(p, 0x00, 0, sz), "Memory not filled: sz=%zu",
expect_ptr_not_null(p, "Unexpected mallocx() error");
expect_false(validate_fill(p, 0x00, 0, sz), "Memory not filled: sz=%zu",
sz);
/*
@ -329,30 +329,30 @@ test_zero(size_t szmin, size_t szmax) {
* errors.
*/
memset(p, FILL_BYTE, sz);
assert_false(validate_fill(p, FILL_BYTE, 0, sz),
expect_false(validate_fill(p, FILL_BYTE, 0, sz),
"Memory not filled: sz=%zu", sz);
/* Shrink in place so that we can expect growing in place to succeed. */
sz = szmin;
if (xallocx(p, sz, 0, flags) != sz) {
p = rallocx(p, sz, flags);
assert_ptr_not_null(p, "Unexpected rallocx() failure");
expect_ptr_not_null(p, "Unexpected rallocx() failure");
}
assert_false(validate_fill(p, FILL_BYTE, 0, sz),
expect_false(validate_fill(p, FILL_BYTE, 0, sz),
"Memory not filled: sz=%zu", sz);
for (sz = szmin; sz < szmax; sz = nsz) {
nsz = nallocx(sz+1, flags);
if (xallocx(p, sz+1, 0, flags) != nsz) {
p = rallocx(p, sz+1, flags);
assert_ptr_not_null(p, "Unexpected rallocx() failure");
expect_ptr_not_null(p, "Unexpected rallocx() failure");
}
assert_false(validate_fill(p, FILL_BYTE, 0, sz),
expect_false(validate_fill(p, FILL_BYTE, 0, sz),
"Memory not filled: sz=%zu", sz);
assert_false(validate_fill(p, 0x00, sz, nsz-sz),
expect_false(validate_fill(p, 0x00, sz, nsz-sz),
"Memory not filled: sz=%zu, nsz-sz=%zu", sz, nsz-sz);
memset((void *)((uintptr_t)p + sz), FILL_BYTE, nsz-sz);
assert_false(validate_fill(p, FILL_BYTE, 0, nsz),
expect_false(validate_fill(p, FILL_BYTE, 0, nsz),
"Memory not filled: nsz=%zu", nsz);
}


@ -1456,7 +1456,7 @@ TEST_BEGIN(test_gen_rand_32) {
uint32_t r32;
sfmt_t *ctx;
assert_d_le(get_min_array_size32(), BLOCK_SIZE,
expect_d_le(get_min_array_size32(), BLOCK_SIZE,
"Array size too small");
ctx = init_gen_rand(1234);
fill_array32(ctx, array32, BLOCK_SIZE);
@ -1466,16 +1466,16 @@ TEST_BEGIN(test_gen_rand_32) {
ctx = init_gen_rand(1234);
for (i = 0; i < BLOCK_SIZE; i++) {
if (i < COUNT_1) {
assert_u32_eq(array32[i], init_gen_rand_32_expected[i],
expect_u32_eq(array32[i], init_gen_rand_32_expected[i],
"Output mismatch for i=%d", i);
}
r32 = gen_rand32(ctx);
assert_u32_eq(r32, array32[i],
expect_u32_eq(r32, array32[i],
"Mismatch at array32[%d]=%x, gen=%x", i, array32[i], r32);
}
for (i = 0; i < COUNT_2; i++) {
r32 = gen_rand32(ctx);
assert_u32_eq(r32, array32_2[i],
expect_u32_eq(r32, array32_2[i],
"Mismatch at array32_2[%d]=%x, gen=%x", i, array32_2[i],
r32);
}
@ -1491,7 +1491,7 @@ TEST_BEGIN(test_by_array_32) {
uint32_t r32;
sfmt_t *ctx;
assert_d_le(get_min_array_size32(), BLOCK_SIZE,
expect_d_le(get_min_array_size32(), BLOCK_SIZE,
"Array size too small");
ctx = init_by_array(ini, 4);
fill_array32(ctx, array32, BLOCK_SIZE);
@ -1501,16 +1501,16 @@ TEST_BEGIN(test_by_array_32) {
ctx = init_by_array(ini, 4);
for (i = 0; i < BLOCK_SIZE; i++) {
if (i < COUNT_1) {
assert_u32_eq(array32[i], init_by_array_32_expected[i],
expect_u32_eq(array32[i], init_by_array_32_expected[i],
"Output mismatch for i=%d", i);
}
r32 = gen_rand32(ctx);
assert_u32_eq(r32, array32[i],
expect_u32_eq(r32, array32[i],
"Mismatch at array32[%d]=%x, gen=%x", i, array32[i], r32);
}
for (i = 0; i < COUNT_2; i++) {
r32 = gen_rand32(ctx);
assert_u32_eq(r32, array32_2[i],
expect_u32_eq(r32, array32_2[i],
"Mismatch at array32_2[%d]=%x, gen=%x", i, array32_2[i],
r32);
}
@ -1525,7 +1525,7 @@ TEST_BEGIN(test_gen_rand_64) {
uint64_t r;
sfmt_t *ctx;
assert_d_le(get_min_array_size64(), BLOCK_SIZE64,
expect_d_le(get_min_array_size64(), BLOCK_SIZE64,
"Array size too small");
ctx = init_gen_rand(4321);
fill_array64(ctx, array64, BLOCK_SIZE64);
@ -1535,17 +1535,17 @@ TEST_BEGIN(test_gen_rand_64) {
ctx = init_gen_rand(4321);
for (i = 0; i < BLOCK_SIZE64; i++) {
if (i < COUNT_1) {
assert_u64_eq(array64[i], init_gen_rand_64_expected[i],
expect_u64_eq(array64[i], init_gen_rand_64_expected[i],
"Output mismatch for i=%d", i);
}
r = gen_rand64(ctx);
assert_u64_eq(r, array64[i],
expect_u64_eq(r, array64[i],
"Mismatch at array64[%d]=%"FMTx64", gen=%"FMTx64, i,
array64[i], r);
}
for (i = 0; i < COUNT_2; i++) {
r = gen_rand64(ctx);
assert_u64_eq(r, array64_2[i],
expect_u64_eq(r, array64_2[i],
"Mismatch at array64_2[%d]=%"FMTx64" gen=%"FMTx64"", i,
array64_2[i], r);
}
@ -1561,7 +1561,7 @@ TEST_BEGIN(test_by_array_64) {
uint32_t ini[] = {5, 4, 3, 2, 1};
sfmt_t *ctx;
assert_d_le(get_min_array_size64(), BLOCK_SIZE64,
expect_d_le(get_min_array_size64(), BLOCK_SIZE64,
"Array size too small");
ctx = init_by_array(ini, 5);
fill_array64(ctx, array64, BLOCK_SIZE64);
@ -1571,17 +1571,17 @@ TEST_BEGIN(test_by_array_64) {
ctx = init_by_array(ini, 5);
for (i = 0; i < BLOCK_SIZE64; i++) {
if (i < COUNT_1) {
assert_u64_eq(array64[i], init_by_array_64_expected[i],
expect_u64_eq(array64[i], init_by_array_64_expected[i],
"Output mismatch for i=%d", i);
}
r = gen_rand64(ctx);
assert_u64_eq(r, array64[i],
expect_u64_eq(r, array64[i],
"Mismatch at array64[%d]=%"FMTx64" gen=%"FMTx64, i,
array64[i], r);
}
for (i = 0; i < COUNT_2; i++) {
r = gen_rand64(ctx);
assert_u64_eq(r, array64_2[i],
expect_u64_eq(r, array64_2[i],
"Mismatch at array64_2[%d]=%"FMTx64" gen=%"FMTx64, i,
array64_2[i], r);
}


@ -4,7 +4,7 @@ TEST_BEGIN(test_a0) {
void *p;
p = a0malloc(1);
assert_ptr_not_null(p, "Unexpected a0malloc() error");
expect_ptr_not_null(p, "Unexpected a0malloc() error");
a0dalloc(p);
}
TEST_END


@ -13,7 +13,7 @@ get_nsizes_impl(const char *cmd) {
size_t z;
z = sizeof(unsigned);
assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
expect_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
"Unexpected mallctl(\"%s\", ...) failure", cmd);
return ret;
@ -37,11 +37,11 @@ get_size_impl(const char *cmd, size_t ind) {
size_t miblen = 4;
z = sizeof(size_t);
assert_d_eq(mallctlnametomib(cmd, mib, &miblen),
expect_d_eq(mallctlnametomib(cmd, mib, &miblen),
0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
mib[2] = ind;
z = sizeof(size_t);
assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
expect_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
return ret;
@ -85,7 +85,7 @@ static unsigned
do_arena_create(extent_hooks_t *h) {
unsigned arena_ind;
size_t sz = sizeof(unsigned);
assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz,
expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz,
(void *)(h != NULL ? &h : NULL), (h != NULL ? sizeof(h) : 0)), 0,
"Unexpected mallctl() failure");
return arena_ind;
@ -105,19 +105,19 @@ do_arena_reset_pre(unsigned arena_ind, void ***ptrs, unsigned *nptrs) {
nlarge = get_nlarge() > NLARGE ? NLARGE : get_nlarge();
*nptrs = nsmall + nlarge;
*ptrs = (void **)malloc(*nptrs * sizeof(void *));
assert_ptr_not_null(*ptrs, "Unexpected malloc() failure");
expect_ptr_not_null(*ptrs, "Unexpected malloc() failure");
/* Allocate objects with a wide range of sizes. */
for (i = 0; i < nsmall; i++) {
sz = get_small_size(i);
(*ptrs)[i] = mallocx(sz, flags);
assert_ptr_not_null((*ptrs)[i],
expect_ptr_not_null((*ptrs)[i],
"Unexpected mallocx(%zu, %#x) failure", sz, flags);
}
for (i = 0; i < nlarge; i++) {
sz = get_large_size(i);
(*ptrs)[nsmall + i] = mallocx(sz, flags);
assert_ptr_not_null((*ptrs)[i],
expect_ptr_not_null((*ptrs)[i],
"Unexpected mallocx(%zu, %#x) failure", sz, flags);
}
@ -125,7 +125,7 @@ do_arena_reset_pre(unsigned arena_ind, void ***ptrs, unsigned *nptrs) {
/* Verify allocations. */
for (i = 0; i < *nptrs; i++) {
assert_zu_gt(ivsalloc(tsdn, (*ptrs)[i]), 0,
expect_zu_gt(ivsalloc(tsdn, (*ptrs)[i]), 0,
"Allocation should have queryable size");
}
}
@ -143,7 +143,7 @@ do_arena_reset_post(void **ptrs, unsigned nptrs, unsigned arena_ind) {
}
/* Verify allocations no longer exist. */
for (i = 0; i < nptrs; i++) {
assert_zu_eq(vsalloc(tsdn, ptrs[i]), 0,
expect_zu_eq(vsalloc(tsdn, ptrs[i]), 0,
"Allocation should no longer exist");
}
if (have_background_thread) {
@ -160,10 +160,10 @@ do_arena_reset_destroy(const char *name, unsigned arena_ind) {
size_t miblen;
miblen = sizeof(mib)/sizeof(size_t);
assert_d_eq(mallctlnametomib(name, mib, &miblen), 0,
expect_d_eq(mallctlnametomib(name, mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
mib[1] = (size_t)arena_ind;
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
"Unexpected mallctlbymib() failure");
}
@ -197,23 +197,23 @@ arena_i_initialized(unsigned arena_ind, bool refresh) {
if (refresh) {
uint64_t epoch = 1;
assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
sizeof(epoch)), 0, "Unexpected mallctl() failure");
}
miblen = sizeof(mib)/sizeof(size_t);
assert_d_eq(mallctlnametomib("arena.0.initialized", mib, &miblen), 0,
expect_d_eq(mallctlnametomib("arena.0.initialized", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
mib[1] = (size_t)arena_ind;
sz = sizeof(initialized);
assert_d_eq(mallctlbymib(mib, miblen, (void *)&initialized, &sz, NULL,
expect_d_eq(mallctlbymib(mib, miblen, (void *)&initialized, &sz, NULL,
0), 0, "Unexpected mallctlbymib() failure");
return initialized;
}
TEST_BEGIN(test_arena_destroy_initial) {
assert_false(arena_i_initialized(MALLCTL_ARENAS_DESTROYED, false),
expect_false(arena_i_initialized(MALLCTL_ARENAS_DESTROYED, false),
"Destroyed arena stats should not be initialized");
}
TEST_END
@ -226,9 +226,9 @@ TEST_BEGIN(test_arena_destroy_hooks_default) {
arena_ind = do_arena_create(NULL);
do_arena_reset_pre(arena_ind, &ptrs, &nptrs);
assert_false(arena_i_initialized(arena_ind, false),
expect_false(arena_i_initialized(arena_ind, false),
"Arena stats should not be initialized");
assert_true(arena_i_initialized(arena_ind, true),
expect_true(arena_i_initialized(arena_ind, true),
"Arena stats should be initialized");
/*
@ -239,9 +239,9 @@ TEST_BEGIN(test_arena_destroy_hooks_default) {
do_arena_destroy(arena_ind);
assert_false(arena_i_initialized(arena_ind, true),
expect_false(arena_i_initialized(arena_ind, true),
"Arena stats should not be initialized");
assert_true(arena_i_initialized(MALLCTL_ARENAS_DESTROYED, false),
expect_true(arena_i_initialized(MALLCTL_ARENAS_DESTROYED, false),
"Destroyed arena stats should be initialized");
do_arena_reset_post(ptrs, nptrs, arena_ind);
@ -249,7 +249,7 @@ TEST_BEGIN(test_arena_destroy_hooks_default) {
arena_ind_prev = arena_ind;
arena_ind = do_arena_create(NULL);
do_arena_reset_pre(arena_ind, &ptrs, &nptrs);
assert_u_eq(arena_ind, arena_ind_prev,
expect_u_eq(arena_ind, arena_ind_prev,
"Arena index should have been recycled");
do_arena_destroy(arena_ind);
do_arena_reset_post(ptrs, nptrs, arena_ind);
@ -268,9 +268,9 @@ extent_dalloc_unmap(extent_hooks_t *extent_hooks, void *addr, size_t size,
TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, committed=%s, "
"arena_ind=%u)\n", __func__, extent_hooks, addr, size, committed ?
"true" : "false", arena_ind);
assert_ptr_eq(extent_hooks, &hooks,
expect_ptr_eq(extent_hooks, &hooks,
"extent_hooks should be same as pointer used to set hooks");
assert_ptr_eq(extent_hooks->dalloc, extent_dalloc_unmap,
expect_ptr_eq(extent_hooks->dalloc, extent_dalloc_unmap,
"Wrong hook function");
called_dalloc = true;
if (!try_dalloc) {
@ -314,20 +314,20 @@ TEST_BEGIN(test_arena_destroy_hooks_unmap) {
arena_ind = do_arena_create(&hooks);
do_arena_reset_pre(arena_ind, &ptrs, &nptrs);
assert_true(did_alloc, "Expected alloc");
expect_true(did_alloc, "Expected alloc");
assert_false(arena_i_initialized(arena_ind, false),
expect_false(arena_i_initialized(arena_ind, false),
"Arena stats should not be initialized");
assert_true(arena_i_initialized(arena_ind, true),
expect_true(arena_i_initialized(arena_ind, true),
"Arena stats should be initialized");
did_dalloc = false;
do_arena_destroy(arena_ind);
assert_true(did_dalloc, "Expected dalloc");
expect_true(did_dalloc, "Expected dalloc");
assert_false(arena_i_initialized(arena_ind, true),
expect_false(arena_i_initialized(arena_ind, true),
"Arena stats should not be initialized");
assert_true(arena_i_initialized(MALLCTL_ARENAS_DESTROYED, false),
expect_true(arena_i_initialized(MALLCTL_ARENAS_DESTROYED, false),
"Destroyed arena stats should be initialized");
do_arena_reset_post(ptrs, nptrs, arena_ind);


@ -6,7 +6,7 @@
* some places and "ptr" in others. In the long run it would be nice to unify
* these, but in the short run we'll use this shim.
*/
#define assert_p_eq assert_ptr_eq
#define expect_p_eq expect_ptr_eq
/*
* t: the non-atomic type, like "uint32_t".
@ -24,20 +24,20 @@
\
/* ATOMIC_INIT and load. */ \
val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \
assert_##ta##_eq(val1, val, "Load or init failed"); \
expect_##ta##_eq(val1, val, "Load or init failed"); \
\
/* Store. */ \
atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \
atomic_store_##ta(&atom, val2, ATOMIC_RELAXED); \
val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \
assert_##ta##_eq(val2, val, "Store failed"); \
expect_##ta##_eq(val2, val, "Store failed"); \
\
/* Exchange. */ \
atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \
val = atomic_exchange_##ta(&atom, val2, ATOMIC_RELAXED); \
assert_##ta##_eq(val1, val, "Exchange returned invalid value"); \
expect_##ta##_eq(val1, val, "Exchange returned invalid value"); \
val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \
assert_##ta##_eq(val2, val, "Exchange store invalid value"); \
expect_##ta##_eq(val2, val, "Exchange store invalid value"); \
\
/* \
* Weak CAS. Spurious failures are allowed, so we loop a few \
@ -49,17 +49,17 @@
expected = val2; \
success = atomic_compare_exchange_weak_##ta(&atom, \
&expected, val3, ATOMIC_RELAXED, ATOMIC_RELAXED); \
assert_##ta##_eq(val1, expected, \
expect_##ta##_eq(val1, expected, \
"CAS should update expected"); \
} \
assert_b_eq(val1 == val2, success, \
expect_b_eq(val1 == val2, success, \
"Weak CAS did the wrong state update"); \
val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \
if (success) { \
assert_##ta##_eq(val3, val, \
expect_##ta##_eq(val3, val, \
"Successful CAS should update atomic"); \
} else { \
assert_##ta##_eq(val1, val, \
expect_##ta##_eq(val1, val, \
"Unsuccessful CAS should not update atomic"); \
} \
\
@ -68,14 +68,14 @@
expected = val2; \
success = atomic_compare_exchange_strong_##ta(&atom, &expected, \
val3, ATOMIC_RELAXED, ATOMIC_RELAXED); \
assert_b_eq(val1 == val2, success, \
expect_b_eq(val1 == val2, success, \
"Strong CAS did the wrong state update"); \
val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \
if (success) { \
assert_##ta##_eq(val3, val, \
expect_##ta##_eq(val3, val, \
"Successful CAS should update atomic"); \
} else { \
assert_##ta##_eq(val1, val, \
expect_##ta##_eq(val1, val, \
"Unsuccessful CAS should not update atomic"); \
} \
\
@ -89,46 +89,46 @@
/* Fetch-add. */ \
atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \
val = atomic_fetch_add_##ta(&atom, val2, ATOMIC_RELAXED); \
assert_##ta##_eq(val1, val, \
expect_##ta##_eq(val1, val, \
"Fetch-add should return previous value"); \
val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \
assert_##ta##_eq(val1 + val2, val, \
expect_##ta##_eq(val1 + val2, val, \
"Fetch-add should update atomic"); \
\
/* Fetch-sub. */ \
atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \
val = atomic_fetch_sub_##ta(&atom, val2, ATOMIC_RELAXED); \
assert_##ta##_eq(val1, val, \
expect_##ta##_eq(val1, val, \
"Fetch-sub should return previous value"); \
val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \
assert_##ta##_eq(val1 - val2, val, \
expect_##ta##_eq(val1 - val2, val, \
"Fetch-sub should update atomic"); \
\
/* Fetch-and. */ \
atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \
val = atomic_fetch_and_##ta(&atom, val2, ATOMIC_RELAXED); \
assert_##ta##_eq(val1, val, \
expect_##ta##_eq(val1, val, \
"Fetch-and should return previous value"); \
val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \
assert_##ta##_eq(val1 & val2, val, \
expect_##ta##_eq(val1 & val2, val, \
"Fetch-and should update atomic"); \
\
/* Fetch-or. */ \
atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \
val = atomic_fetch_or_##ta(&atom, val2, ATOMIC_RELAXED); \
assert_##ta##_eq(val1, val, \
expect_##ta##_eq(val1, val, \
"Fetch-or should return previous value"); \
val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \
assert_##ta##_eq(val1 | val2, val, \
expect_##ta##_eq(val1 | val2, val, \
"Fetch-or should update atomic"); \
\
/* Fetch-xor. */ \
atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \
val = atomic_fetch_xor_##ta(&atom, val2, ATOMIC_RELAXED); \
assert_##ta##_eq(val1, val, \
expect_##ta##_eq(val1, val, \
"Fetch-xor should return previous value"); \
val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \
assert_##ta##_eq(val1 ^ val2, val, \
expect_##ta##_eq(val1 ^ val2, val, \
"Fetch-xor should update atomic"); \
} while (0)


@ -8,15 +8,15 @@ test_switch_background_thread_ctl(bool new_val) {
size_t sz = sizeof(bool);
e1 = new_val;
assert_d_eq(mallctl("background_thread", (void *)&e0, &sz,
expect_d_eq(mallctl("background_thread", (void *)&e0, &sz,
&e1, sz), 0, "Unexpected mallctl() failure");
assert_b_eq(e0, !e1,
expect_b_eq(e0, !e1,
"background_thread should be %d before.\n", !e1);
if (e1) {
assert_zu_gt(n_background_threads, 0,
expect_zu_gt(n_background_threads, 0,
"Number of background threads should be non zero.\n");
} else {
assert_zu_eq(n_background_threads, 0,
expect_zu_eq(n_background_threads, 0,
"Number of background threads should be zero.\n");
}
}
@ -27,15 +27,15 @@ test_repeat_background_thread_ctl(bool before) {
size_t sz = sizeof(bool);
e1 = before;
assert_d_eq(mallctl("background_thread", (void *)&e0, &sz,
expect_d_eq(mallctl("background_thread", (void *)&e0, &sz,
&e1, sz), 0, "Unexpected mallctl() failure");
assert_b_eq(e0, before,
expect_b_eq(e0, before,
"background_thread should be %d.\n", before);
if (e1) {
assert_zu_gt(n_background_threads, 0,
expect_zu_gt(n_background_threads, 0,
"Number of background threads should be non zero.\n");
} else {
assert_zu_eq(n_background_threads, 0,
expect_zu_eq(n_background_threads, 0,
"Number of background threads should be zero.\n");
}
}
@ -46,16 +46,16 @@ TEST_BEGIN(test_background_thread_ctl) {
bool e0, e1;
size_t sz = sizeof(bool);
assert_d_eq(mallctl("opt.background_thread", (void *)&e0, &sz,
expect_d_eq(mallctl("opt.background_thread", (void *)&e0, &sz,
NULL, 0), 0, "Unexpected mallctl() failure");
assert_d_eq(mallctl("background_thread", (void *)&e1, &sz,
expect_d_eq(mallctl("background_thread", (void *)&e1, &sz,
NULL, 0), 0, "Unexpected mallctl() failure");
assert_b_eq(e0, e1,
expect_b_eq(e0, e1,
"Default and opt.background_thread does not match.\n");
if (e0) {
test_switch_background_thread_ctl(false);
}
assert_zu_eq(n_background_threads, 0,
expect_zu_eq(n_background_threads, 0,
"Number of background threads should be 0.\n");
for (unsigned i = 0; i < 4; i++) {
@ -80,7 +80,7 @@ TEST_BEGIN(test_background_thread_running) {
test_repeat_background_thread_ctl(false);
test_switch_background_thread_ctl(true);
assert_b_eq(info->state, background_thread_started,
expect_b_eq(info->state, background_thread_started,
"Background_thread did not start.\n");
nstime_t start;
@ -100,7 +100,7 @@ TEST_BEGIN(test_background_thread_running) {
nstime_t now;
nstime_init_update(&now);
nstime_subtract(&now, &start);
assert_u64_lt(nstime_sec(&now), 1000,
expect_u64_lt(nstime_sec(&now), 1000,
"Background threads did not run for 1000 seconds.");
sleep(1);
}


@ -16,16 +16,16 @@ TEST_BEGIN(test_deferred) {
* approximation.
*/
for (unsigned i = 0; i < 10 * ncpus; i++) {
assert_d_eq(mallctl("arenas.create", &id, &sz_u, NULL, 0), 0,
expect_d_eq(mallctl("arenas.create", &id, &sz_u, NULL, 0), 0,
"Failed to create arena");
}
bool enable = true;
size_t sz_b = sizeof(bool);
assert_d_eq(mallctl("background_thread", NULL, NULL, &enable, sz_b), 0,
expect_d_eq(mallctl("background_thread", NULL, NULL, &enable, sz_b), 0,
"Failed to enable background threads");
enable = false;
assert_d_eq(mallctl("background_thread", NULL, NULL, &enable, sz_b), 0,
expect_d_eq(mallctl("background_thread", NULL, NULL, &enable, sz_b), 0,
"Failed to disable background threads");
}
TEST_END
@ -36,43 +36,43 @@ TEST_BEGIN(test_max_background_threads) {
size_t max_n_thds;
size_t opt_max_n_thds;
size_t sz_m = sizeof(max_n_thds);
assert_d_eq(mallctl("opt.max_background_threads",
expect_d_eq(mallctl("opt.max_background_threads",
&opt_max_n_thds, &sz_m, NULL, 0), 0,
"Failed to get opt.max_background_threads");
assert_d_eq(mallctl("max_background_threads", &max_n_thds, &sz_m, NULL,
expect_d_eq(mallctl("max_background_threads", &max_n_thds, &sz_m, NULL,
0), 0, "Failed to get max background threads");
assert_zu_eq(opt_max_n_thds, max_n_thds,
expect_zu_eq(opt_max_n_thds, max_n_thds,
"max_background_threads and "
"opt.max_background_threads should match");
assert_d_eq(mallctl("max_background_threads", NULL, NULL, &max_n_thds,
expect_d_eq(mallctl("max_background_threads", NULL, NULL, &max_n_thds,
sz_m), 0, "Failed to set max background threads");
unsigned id;
size_t sz_u = sizeof(unsigned);
for (unsigned i = 0; i < 10 * ncpus; i++) {
assert_d_eq(mallctl("arenas.create", &id, &sz_u, NULL, 0), 0,
expect_d_eq(mallctl("arenas.create", &id, &sz_u, NULL, 0), 0,
"Failed to create arena");
}
bool enable = true;
size_t sz_b = sizeof(bool);
assert_d_eq(mallctl("background_thread", NULL, NULL, &enable, sz_b), 0,
expect_d_eq(mallctl("background_thread", NULL, NULL, &enable, sz_b), 0,
"Failed to enable background threads");
assert_zu_eq(n_background_threads, max_n_thds,
expect_zu_eq(n_background_threads, max_n_thds,
"Number of background threads should not change.\n");
size_t new_max_thds = max_n_thds - 1;
if (new_max_thds > 0) {
assert_d_eq(mallctl("max_background_threads", NULL, NULL,
expect_d_eq(mallctl("max_background_threads", NULL, NULL,
&new_max_thds, sz_m), 0,
"Failed to set max background threads");
assert_zu_eq(n_background_threads, new_max_thds,
expect_zu_eq(n_background_threads, new_max_thds,
"Number of background threads should decrease by 1.\n");
}
new_max_thds = 1;
assert_d_eq(mallctl("max_background_threads", NULL, NULL, &new_max_thds,
expect_d_eq(mallctl("max_background_threads", NULL, NULL, &new_max_thds,
sz_m), 0, "Failed to set max background threads");
assert_zu_eq(n_background_threads, new_max_thds,
expect_zu_eq(n_background_threads, new_max_thds,
"Number of background threads should be 1.\n");
}
TEST_END


@ -37,21 +37,21 @@ TEST_BEGIN(test_base_hooks_default) {
if (config_stats) {
base_stats_get(tsdn, base, &allocated0, &resident, &mapped,
&n_thp);
assert_zu_ge(allocated0, sizeof(base_t),
expect_zu_ge(allocated0, sizeof(base_t),
"Base header should count as allocated");
if (opt_metadata_thp == metadata_thp_always) {
assert_zu_gt(n_thp, 0,
expect_zu_gt(n_thp, 0,
"Base should have 1 THP at least.");
}
}
assert_ptr_not_null(base_alloc(tsdn, base, 42, 1),
expect_ptr_not_null(base_alloc(tsdn, base, 42, 1),
"Unexpected base_alloc() failure");
if (config_stats) {
base_stats_get(tsdn, base, &allocated1, &resident, &mapped,
&n_thp);
assert_zu_ge(allocated1 - allocated0, 42,
expect_zu_ge(allocated1 - allocated0, 42,
"At least 42 bytes were allocated by base_alloc()");
}
@ -75,26 +75,26 @@ TEST_BEGIN(test_base_hooks_null) {
tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
base = base_new(tsdn, 0, &hooks);
assert_ptr_not_null(base, "Unexpected base_new() failure");
expect_ptr_not_null(base, "Unexpected base_new() failure");
if (config_stats) {
base_stats_get(tsdn, base, &allocated0, &resident, &mapped,
&n_thp);
assert_zu_ge(allocated0, sizeof(base_t),
expect_zu_ge(allocated0, sizeof(base_t),
"Base header should count as allocated");
if (opt_metadata_thp == metadata_thp_always) {
assert_zu_gt(n_thp, 0,
expect_zu_gt(n_thp, 0,
"Base should have 1 THP at least.");
}
}
assert_ptr_not_null(base_alloc(tsdn, base, 42, 1),
expect_ptr_not_null(base_alloc(tsdn, base, 42, 1),
"Unexpected base_alloc() failure");
if (config_stats) {
base_stats_get(tsdn, base, &allocated1, &resident, &mapped,
&n_thp);
assert_zu_ge(allocated1 - allocated0, 42,
expect_zu_ge(allocated1 - allocated0, 42,
"At least 42 bytes were allocated by base_alloc()");
}
@ -121,8 +121,8 @@ TEST_BEGIN(test_base_hooks_not_null) {
tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
did_alloc = false;
base = base_new(tsdn, 0, &hooks);
assert_ptr_not_null(base, "Unexpected base_new() failure");
assert_true(did_alloc, "Expected alloc");
expect_ptr_not_null(base, "Unexpected base_new() failure");
expect_true(did_alloc, "Expected alloc");
/*
* Check for tight packing at specified alignment under simple
@ -143,21 +143,21 @@ TEST_BEGIN(test_base_hooks_not_null) {
size_t align_ceil = ALIGNMENT_CEILING(alignment,
QUANTUM);
p = base_alloc(tsdn, base, 1, alignment);
assert_ptr_not_null(p,
expect_ptr_not_null(p,
"Unexpected base_alloc() failure");
assert_ptr_eq(p,
expect_ptr_eq(p,
(void *)(ALIGNMENT_CEILING((uintptr_t)p,
alignment)), "Expected quantum alignment");
q = base_alloc(tsdn, base, alignment, alignment);
assert_ptr_not_null(q,
expect_ptr_not_null(q,
"Unexpected base_alloc() failure");
assert_ptr_eq((void *)((uintptr_t)p + align_ceil), q,
expect_ptr_eq((void *)((uintptr_t)p + align_ceil), q,
"Minimal allocation should take up %zu bytes",
align_ceil);
r = base_alloc(tsdn, base, 1, alignment);
assert_ptr_not_null(r,
expect_ptr_not_null(r,
"Unexpected base_alloc() failure");
assert_ptr_eq((void *)((uintptr_t)q + align_ceil), r,
expect_ptr_eq((void *)((uintptr_t)q + align_ceil), r,
"Minimal allocation should take up %zu bytes",
align_ceil);
}
@ -168,23 +168,23 @@ TEST_BEGIN(test_base_hooks_not_null) {
* that the first block's remaining space is considered for subsequent
* allocation.
*/
assert_zu_ge(edata_bsize_get(&base->blocks->edata), QUANTUM,
expect_zu_ge(edata_bsize_get(&base->blocks->edata), QUANTUM,
"Remainder insufficient for test");
/* Use up all but one quantum of block. */
while (edata_bsize_get(&base->blocks->edata) > QUANTUM) {
p = base_alloc(tsdn, base, QUANTUM, QUANTUM);
assert_ptr_not_null(p, "Unexpected base_alloc() failure");
expect_ptr_not_null(p, "Unexpected base_alloc() failure");
}
r_exp = edata_addr_get(&base->blocks->edata);
assert_zu_eq(base->extent_sn_next, 1, "One extant block expected");
expect_zu_eq(base->extent_sn_next, 1, "One extant block expected");
q = base_alloc(tsdn, base, QUANTUM + 1, QUANTUM);
assert_ptr_not_null(q, "Unexpected base_alloc() failure");
assert_ptr_ne(q, r_exp, "Expected allocation from new block");
assert_zu_eq(base->extent_sn_next, 2, "Two extant blocks expected");
expect_ptr_not_null(q, "Unexpected base_alloc() failure");
expect_ptr_ne(q, r_exp, "Expected allocation from new block");
expect_zu_eq(base->extent_sn_next, 2, "Two extant blocks expected");
r = base_alloc(tsdn, base, QUANTUM, QUANTUM);
assert_ptr_not_null(r, "Unexpected base_alloc() failure");
assert_ptr_eq(r, r_exp, "Expected allocation from first block");
assert_zu_eq(base->extent_sn_next, 2, "Two extant blocks expected");
expect_ptr_not_null(r, "Unexpected base_alloc() failure");
expect_ptr_eq(r, r_exp, "Expected allocation from first block");
expect_zu_eq(base->extent_sn_next, 2, "Two extant blocks expected");
/*
* Check for proper alignment support when normal blocks are too small.
@ -199,9 +199,9 @@ TEST_BEGIN(test_base_hooks_not_null) {
for (i = 0; i < sizeof(alignments) / sizeof(size_t); i++) {
size_t alignment = alignments[i];
p = base_alloc(tsdn, base, QUANTUM, alignment);
assert_ptr_not_null(p,
expect_ptr_not_null(p,
"Unexpected base_alloc() failure");
assert_ptr_eq(p,
expect_ptr_eq(p,
(void *)(ALIGNMENT_CEILING((uintptr_t)p,
alignment)), "Expected %zu-byte alignment",
alignment);
@ -211,11 +211,11 @@ TEST_BEGIN(test_base_hooks_not_null) {
called_dalloc = called_destroy = called_decommit = called_purge_lazy =
called_purge_forced = false;
base_delete(tsdn, base);
assert_true(called_dalloc, "Expected dalloc call");
assert_true(!called_destroy, "Unexpected destroy call");
assert_true(called_decommit, "Expected decommit call");
assert_true(called_purge_lazy, "Expected purge_lazy call");
assert_true(called_purge_forced, "Expected purge_forced call");
expect_true(called_dalloc, "Expected dalloc call");
expect_true(!called_destroy, "Unexpected destroy call");
expect_true(called_decommit, "Expected decommit call");
expect_true(called_purge_lazy, "Expected purge_lazy call");
expect_true(called_purge_forced, "Expected purge_forced call");
try_dalloc = true;
try_destroy = true;


@ -13,7 +13,7 @@ thd_producer(void *varg) {
sz = sizeof(arena);
/* Remote arena. */
assert_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0,
expect_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
for (i = 0; i < REMOTE_NALLOC / 2; i++) {
mem[i] = mallocx(1, MALLOCX_TCACHE_NONE | MALLOCX_ARENA(arena));
@ -42,7 +42,7 @@ TEST_BEGIN(test_producer_consumer) {
/* Remote deallocation by the current thread. */
for (i = 0; i < NTHREADS; i++) {
for (unsigned j = 0; j < REMOTE_NALLOC; j++) {
assert_ptr_not_null(mem[i][j],
expect_ptr_not_null(mem[i][j],
"Unexpected remote allocation failure");
dallocx(mem[i][j], 0);
}
@ -65,12 +65,12 @@ thd_start(void *varg) {
edata = emap_edata_lookup(tsdn, &emap_global, ptr);
shard1 = edata_binshard_get(edata);
dallocx(ptr, 0);
assert_u_lt(shard1, 16, "Unexpected bin shard used");
expect_u_lt(shard1, 16, "Unexpected bin shard used");
edata = emap_edata_lookup(tsdn, &emap_global, ptr2);
shard2 = edata_binshard_get(edata);
dallocx(ptr2, 0);
assert_u_lt(shard2, 4, "Unexpected bin shard used");
expect_u_lt(shard2, 4, "Unexpected bin shard used");
if (shard1 > 0 || shard2 > 0) {
/* Triggered sharded bin usage. */
@ -98,7 +98,7 @@ TEST_BEGIN(test_bin_shard_mt) {
sharded = true;
}
}
assert_b_eq(sharded, true, "Did not find sharded bins");
expect_b_eq(sharded, true, "Did not find sharded bins");
}
TEST_END
@ -108,14 +108,14 @@ TEST_BEGIN(test_bin_shard) {
size_t miblen, miblen2, len;
len = sizeof(nbins);
assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &len, NULL, 0), 0,
expect_d_eq(mallctl("arenas.nbins", (void *)&nbins, &len, NULL, 0), 0,
"Unexpected mallctl() failure");
miblen = 4;
assert_d_eq(mallctlnametomib("arenas.bin.0.nshards", mib, &miblen), 0,
expect_d_eq(mallctlnametomib("arenas.bin.0.nshards", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
miblen2 = 4;
assert_d_eq(mallctlnametomib("arenas.bin.0.size", mib2, &miblen2), 0,
expect_d_eq(mallctlnametomib("arenas.bin.0.size", mib2, &miblen2), 0,
"Unexpected mallctlnametomib() failure");
for (i = 0; i < nbins; i++) {
@ -124,22 +124,22 @@ TEST_BEGIN(test_bin_shard) {
mib[2] = i;
sz1 = sizeof(nshards);
assert_d_eq(mallctlbymib(mib, miblen, (void *)&nshards, &sz1,
expect_d_eq(mallctlbymib(mib, miblen, (void *)&nshards, &sz1,
NULL, 0), 0, "Unexpected mallctlbymib() failure");
mib2[2] = i;
sz2 = sizeof(size);
assert_d_eq(mallctlbymib(mib2, miblen2, (void *)&size, &sz2,
expect_d_eq(mallctlbymib(mib2, miblen2, (void *)&size, &sz2,
NULL, 0), 0, "Unexpected mallctlbymib() failure");
if (size >= 1 && size <= 128) {
assert_u_eq(nshards, 16, "Unexpected nshards");
expect_u_eq(nshards, 16, "Unexpected nshards");
} else if (size == 256) {
assert_u_eq(nshards, 8, "Unexpected nshards");
expect_u_eq(nshards, 8, "Unexpected nshards");
} else if (size > 128 && size <= 512) {
assert_u_eq(nshards, 4, "Unexpected nshards");
expect_u_eq(nshards, 4, "Unexpected nshards");
} else {
assert_u_eq(nshards, 1, "Unexpected nshards");
expect_u_eq(nshards, 1, "Unexpected nshards");
}
}
}


@ -6,27 +6,27 @@
unsigned i, pow2; \
t x; \
\
assert_##suf##_eq(pow2_ceil_##suf(0), 0, "Unexpected result"); \
expect_##suf##_eq(pow2_ceil_##suf(0), 0, "Unexpected result"); \
\
for (i = 0; i < sizeof(t) * 8; i++) { \
assert_##suf##_eq(pow2_ceil_##suf(((t)1) << i), ((t)1) \
expect_##suf##_eq(pow2_ceil_##suf(((t)1) << i), ((t)1) \
<< i, "Unexpected result"); \
} \
\
for (i = 2; i < sizeof(t) * 8; i++) { \
assert_##suf##_eq(pow2_ceil_##suf((((t)1) << i) - 1), \
expect_##suf##_eq(pow2_ceil_##suf((((t)1) << i) - 1), \
((t)1) << i, "Unexpected result"); \
} \
\
for (i = 0; i < sizeof(t) * 8 - 1; i++) { \
assert_##suf##_eq(pow2_ceil_##suf((((t)1) << i) + 1), \
expect_##suf##_eq(pow2_ceil_##suf((((t)1) << i) + 1), \
((t)1) << (i+1), "Unexpected result"); \
} \
\
for (pow2 = 1; pow2 < 25; pow2++) { \
for (x = (((t)1) << (pow2-1)) + 1; x <= ((t)1) << pow2; \
x++) { \
assert_##suf##_eq(pow2_ceil_##suf(x), \
expect_##suf##_eq(pow2_ceil_##suf(x), \
((t)1) << pow2, \
"Unexpected result, x=%"pri, x); \
} \
@ -49,35 +49,35 @@ TEST_BEGIN(test_pow2_ceil_zu) {
TEST_END
void
assert_lg_ceil_range(size_t input, unsigned answer) {
expect_lg_ceil_range(size_t input, unsigned answer) {
if (input == 1) {
assert_u_eq(0, answer, "Got %u as lg_ceil of 1", answer);
expect_u_eq(0, answer, "Got %u as lg_ceil of 1", answer);
return;
}
assert_zu_le(input, (ZU(1) << answer),
expect_zu_le(input, (ZU(1) << answer),
"Got %u as lg_ceil of %zu", answer, input);
assert_zu_gt(input, (ZU(1) << (answer - 1)),
expect_zu_gt(input, (ZU(1) << (answer - 1)),
"Got %u as lg_ceil of %zu", answer, input);
}
void
assert_lg_floor_range(size_t input, unsigned answer) {
expect_lg_floor_range(size_t input, unsigned answer) {
if (input == 1) {
assert_u_eq(0, answer, "Got %u as lg_floor of 1", answer);
expect_u_eq(0, answer, "Got %u as lg_floor of 1", answer);
return;
}
assert_zu_ge(input, (ZU(1) << answer),
expect_zu_ge(input, (ZU(1) << answer),
"Got %u as lg_floor of %zu", answer, input);
assert_zu_lt(input, (ZU(1) << (answer + 1)),
expect_zu_lt(input, (ZU(1) << (answer + 1)),
"Got %u as lg_floor of %zu", answer, input);
}
TEST_BEGIN(test_lg_ceil_floor) {
for (size_t i = 1; i < 10 * 1000 * 1000; i++) {
assert_lg_ceil_range(i, lg_ceil(i));
assert_lg_ceil_range(i, LG_CEIL(i));
assert_lg_floor_range(i, lg_floor(i));
assert_lg_floor_range(i, LG_FLOOR(i));
expect_lg_ceil_range(i, lg_ceil(i));
expect_lg_ceil_range(i, LG_CEIL(i));
expect_lg_floor_range(i, lg_floor(i));
expect_lg_floor_range(i, LG_FLOOR(i));
}
for (int i = 10; i < 8 * (1 << LG_SIZEOF_PTR) - 5; i++) {
for (size_t j = 0; j < (1 << 4); j++) {
@ -85,17 +85,17 @@ TEST_BEGIN(test_lg_ceil_floor) {
- j * ((size_t)1 << (i - 4));
size_t num2 = ((size_t)1 << i)
+ j * ((size_t)1 << (i - 4));
assert_zu_ne(num1, 0, "Invalid lg argument");
assert_zu_ne(num2, 0, "Invalid lg argument");
assert_lg_ceil_range(num1, lg_ceil(num1));
assert_lg_ceil_range(num1, LG_CEIL(num1));
assert_lg_ceil_range(num2, lg_ceil(num2));
assert_lg_ceil_range(num2, LG_CEIL(num2));
expect_zu_ne(num1, 0, "Invalid lg argument");
expect_zu_ne(num2, 0, "Invalid lg argument");
expect_lg_ceil_range(num1, lg_ceil(num1));
expect_lg_ceil_range(num1, LG_CEIL(num1));
expect_lg_ceil_range(num2, lg_ceil(num2));
expect_lg_ceil_range(num2, LG_CEIL(num2));
assert_lg_floor_range(num1, lg_floor(num1));
assert_lg_floor_range(num1, LG_FLOOR(num1));
assert_lg_floor_range(num2, lg_floor(num2));
assert_lg_floor_range(num2, LG_FLOOR(num2));
expect_lg_floor_range(num1, lg_floor(num1));
expect_lg_floor_range(num1, LG_FLOOR(num1));
expect_lg_floor_range(num2, lg_floor(num2));
expect_lg_floor_range(num2, LG_FLOOR(num2));
}
}
}


@ -97,28 +97,28 @@ test_bitmap_initializer_body(const bitmap_info_t *binfo, size_t nbits) {
bitmap_info_t binfo_dyn;
bitmap_info_init(&binfo_dyn, nbits);
assert_zu_eq(bitmap_size(binfo), bitmap_size(&binfo_dyn),
expect_zu_eq(bitmap_size(binfo), bitmap_size(&binfo_dyn),
"Unexpected difference between static and dynamic initialization, "
"nbits=%zu", nbits);
assert_zu_eq(binfo->nbits, binfo_dyn.nbits,
expect_zu_eq(binfo->nbits, binfo_dyn.nbits,
"Unexpected difference between static and dynamic initialization, "
"nbits=%zu", nbits);
#ifdef BITMAP_USE_TREE
assert_u_eq(binfo->nlevels, binfo_dyn.nlevels,
expect_u_eq(binfo->nlevels, binfo_dyn.nlevels,
"Unexpected difference between static and dynamic initialization, "
"nbits=%zu", nbits);
{
unsigned i;
for (i = 0; i < binfo->nlevels; i++) {
assert_zu_eq(binfo->levels[i].group_offset,
expect_zu_eq(binfo->levels[i].group_offset,
binfo_dyn.levels[i].group_offset,
"Unexpected difference between static and dynamic "
"initialization, nbits=%zu, level=%u", nbits, i);
}
}
#else
assert_zu_eq(binfo->ngroups, binfo_dyn.ngroups,
expect_zu_eq(binfo->ngroups, binfo_dyn.ngroups,
"Unexpected difference between static and dynamic initialization");
#endif
}
@ -140,9 +140,9 @@ static size_t
test_bitmap_size_body(const bitmap_info_t *binfo, size_t nbits,
size_t prev_size) {
size_t size = bitmap_size(binfo);
assert_zu_ge(size, (nbits >> 3),
expect_zu_ge(size, (nbits >> 3),
"Bitmap size is smaller than expected");
assert_zu_ge(size, prev_size, "Bitmap size is smaller than expected");
expect_zu_ge(size, prev_size, "Bitmap size is smaller than expected");
return size;
}
@ -170,17 +170,17 @@ static void
test_bitmap_init_body(const bitmap_info_t *binfo, size_t nbits) {
size_t i;
bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo));
assert_ptr_not_null(bitmap, "Unexpected malloc() failure");
expect_ptr_not_null(bitmap, "Unexpected malloc() failure");
bitmap_init(bitmap, binfo, false);
for (i = 0; i < nbits; i++) {
assert_false(bitmap_get(bitmap, binfo, i),
expect_false(bitmap_get(bitmap, binfo, i),
"Bit should be unset");
}
bitmap_init(bitmap, binfo, true);
for (i = 0; i < nbits; i++) {
assert_true(bitmap_get(bitmap, binfo, i), "Bit should be set");
expect_true(bitmap_get(bitmap, binfo, i), "Bit should be set");
}
free(bitmap);
@ -207,13 +207,13 @@ static void
test_bitmap_set_body(const bitmap_info_t *binfo, size_t nbits) {
size_t i;
bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo));
assert_ptr_not_null(bitmap, "Unexpected malloc() failure");
expect_ptr_not_null(bitmap, "Unexpected malloc() failure");
bitmap_init(bitmap, binfo, false);
for (i = 0; i < nbits; i++) {
bitmap_set(bitmap, binfo, i);
}
assert_true(bitmap_full(bitmap, binfo), "All bits should be set");
expect_true(bitmap_full(bitmap, binfo), "All bits should be set");
free(bitmap);
}
@ -238,20 +238,20 @@ static void
test_bitmap_unset_body(const bitmap_info_t *binfo, size_t nbits) {
size_t i;
bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo));
assert_ptr_not_null(bitmap, "Unexpected malloc() failure");
expect_ptr_not_null(bitmap, "Unexpected malloc() failure");
bitmap_init(bitmap, binfo, false);
for (i = 0; i < nbits; i++) {
bitmap_set(bitmap, binfo, i);
}
assert_true(bitmap_full(bitmap, binfo), "All bits should be set");
expect_true(bitmap_full(bitmap, binfo), "All bits should be set");
for (i = 0; i < nbits; i++) {
bitmap_unset(bitmap, binfo, i);
}
for (i = 0; i < nbits; i++) {
bitmap_set(bitmap, binfo, i);
}
assert_true(bitmap_full(bitmap, binfo), "All bits should be set");
expect_true(bitmap_full(bitmap, binfo), "All bits should be set");
free(bitmap);
}
@ -275,25 +275,25 @@ TEST_END
static void
test_bitmap_xfu_body(const bitmap_info_t *binfo, size_t nbits) {
bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo));
assert_ptr_not_null(bitmap, "Unexpected malloc() failure");
expect_ptr_not_null(bitmap, "Unexpected malloc() failure");
bitmap_init(bitmap, binfo, false);
/* Iteratively set bits starting at the beginning. */
for (size_t i = 0; i < nbits; i++) {
assert_zu_eq(bitmap_ffu(bitmap, binfo, 0), i,
expect_zu_eq(bitmap_ffu(bitmap, binfo, 0), i,
"First unset bit should be just after previous first unset "
"bit");
assert_zu_eq(bitmap_ffu(bitmap, binfo, (i > 0) ? i-1 : i), i,
expect_zu_eq(bitmap_ffu(bitmap, binfo, (i > 0) ? i-1 : i), i,
"First unset bit should be just after previous first unset "
"bit");
assert_zu_eq(bitmap_ffu(bitmap, binfo, i), i,
expect_zu_eq(bitmap_ffu(bitmap, binfo, i), i,
"First unset bit should be just after previous first unset "
"bit");
assert_zu_eq(bitmap_sfu(bitmap, binfo), i,
expect_zu_eq(bitmap_sfu(bitmap, binfo), i,
"First unset bit should be just after previous first unset "
"bit");
}
assert_true(bitmap_full(bitmap, binfo), "All bits should be set");
expect_true(bitmap_full(bitmap, binfo), "All bits should be set");
/*
* Iteratively unset bits starting at the end, and verify that
@ -301,17 +301,17 @@ test_bitmap_xfu_body(const bitmap_info_t *binfo, size_t nbits) {
*/
for (size_t i = nbits - 1; i < nbits; i--) { /* (nbits..0] */
bitmap_unset(bitmap, binfo, i);
assert_zu_eq(bitmap_ffu(bitmap, binfo, 0), i,
expect_zu_eq(bitmap_ffu(bitmap, binfo, 0), i,
"First unset bit should the bit previously unset");
assert_zu_eq(bitmap_ffu(bitmap, binfo, (i > 0) ? i-1 : i), i,
expect_zu_eq(bitmap_ffu(bitmap, binfo, (i > 0) ? i-1 : i), i,
"First unset bit should the bit previously unset");
assert_zu_eq(bitmap_ffu(bitmap, binfo, i), i,
expect_zu_eq(bitmap_ffu(bitmap, binfo, i), i,
"First unset bit should the bit previously unset");
assert_zu_eq(bitmap_sfu(bitmap, binfo), i,
expect_zu_eq(bitmap_sfu(bitmap, binfo), i,
"First unset bit should the bit previously unset");
bitmap_unset(bitmap, binfo, i);
}
assert_false(bitmap_get(bitmap, binfo, 0), "Bit should be unset");
expect_false(bitmap_get(bitmap, binfo, 0), "Bit should be unset");
/*
* Iteratively set bits starting at the beginning, and verify that
@ -319,29 +319,29 @@ test_bitmap_xfu_body(const bitmap_info_t *binfo, size_t nbits) {
*/
for (size_t i = 1; i < nbits; i++) {
bitmap_set(bitmap, binfo, i - 1);
assert_zu_eq(bitmap_ffu(bitmap, binfo, 0), i,
expect_zu_eq(bitmap_ffu(bitmap, binfo, 0), i,
"First unset bit should be just after the bit previously "
"set");
assert_zu_eq(bitmap_ffu(bitmap, binfo, (i > 0) ? i-1 : i), i,
expect_zu_eq(bitmap_ffu(bitmap, binfo, (i > 0) ? i-1 : i), i,
"First unset bit should be just after the bit previously "
"set");
assert_zu_eq(bitmap_ffu(bitmap, binfo, i), i,
expect_zu_eq(bitmap_ffu(bitmap, binfo, i), i,
"First unset bit should be just after the bit previously "
"set");
assert_zu_eq(bitmap_sfu(bitmap, binfo), i,
expect_zu_eq(bitmap_sfu(bitmap, binfo), i,
"First unset bit should be just after the bit previously "
"set");
bitmap_unset(bitmap, binfo, i);
}
assert_zu_eq(bitmap_ffu(bitmap, binfo, 0), nbits - 1,
expect_zu_eq(bitmap_ffu(bitmap, binfo, 0), nbits - 1,
"First unset bit should be the last bit");
assert_zu_eq(bitmap_ffu(bitmap, binfo, (nbits > 1) ? nbits-2 : nbits-1),
expect_zu_eq(bitmap_ffu(bitmap, binfo, (nbits > 1) ? nbits-2 : nbits-1),
nbits - 1, "First unset bit should be the last bit");
assert_zu_eq(bitmap_ffu(bitmap, binfo, nbits - 1), nbits - 1,
expect_zu_eq(bitmap_ffu(bitmap, binfo, nbits - 1), nbits - 1,
"First unset bit should be the last bit");
assert_zu_eq(bitmap_sfu(bitmap, binfo), nbits - 1,
expect_zu_eq(bitmap_sfu(bitmap, binfo), nbits - 1,
"First unset bit should be the last bit");
assert_true(bitmap_full(bitmap, binfo), "All bits should be set");
expect_true(bitmap_full(bitmap, binfo), "All bits should be set");
/*
* Bubble a "usu" pattern through the bitmap and verify that
@ -352,22 +352,22 @@ test_bitmap_xfu_body(const bitmap_info_t *binfo, size_t nbits) {
bitmap_unset(bitmap, binfo, i);
bitmap_unset(bitmap, binfo, i+2);
if (i > 0) {
assert_zu_eq(bitmap_ffu(bitmap, binfo, i-1), i,
expect_zu_eq(bitmap_ffu(bitmap, binfo, i-1), i,
"Unexpected first unset bit");
}
assert_zu_eq(bitmap_ffu(bitmap, binfo, i), i,
expect_zu_eq(bitmap_ffu(bitmap, binfo, i), i,
"Unexpected first unset bit");
assert_zu_eq(bitmap_ffu(bitmap, binfo, i+1), i+2,
expect_zu_eq(bitmap_ffu(bitmap, binfo, i+1), i+2,
"Unexpected first unset bit");
assert_zu_eq(bitmap_ffu(bitmap, binfo, i+2), i+2,
expect_zu_eq(bitmap_ffu(bitmap, binfo, i+2), i+2,
"Unexpected first unset bit");
if (i + 3 < nbits) {
assert_zu_eq(bitmap_ffu(bitmap, binfo, i+3),
expect_zu_eq(bitmap_ffu(bitmap, binfo, i+3),
nbits, "Unexpected first unset bit");
}
assert_zu_eq(bitmap_sfu(bitmap, binfo), i,
expect_zu_eq(bitmap_sfu(bitmap, binfo), i,
"Unexpected first unset bit");
assert_zu_eq(bitmap_sfu(bitmap, binfo), i+2,
expect_zu_eq(bitmap_sfu(bitmap, binfo), i+2,
"Unexpected first unset bit");
}
}
@ -382,20 +382,20 @@ test_bitmap_xfu_body(const bitmap_info_t *binfo, size_t nbits) {
for (size_t i = 0; i < nbits-1; i++) {
bitmap_unset(bitmap, binfo, i);
if (i > 0) {
assert_zu_eq(bitmap_ffu(bitmap, binfo, i-1), i,
expect_zu_eq(bitmap_ffu(bitmap, binfo, i-1), i,
"Unexpected first unset bit");
}
assert_zu_eq(bitmap_ffu(bitmap, binfo, i), i,
expect_zu_eq(bitmap_ffu(bitmap, binfo, i), i,
"Unexpected first unset bit");
assert_zu_eq(bitmap_ffu(bitmap, binfo, i+1), nbits-1,
expect_zu_eq(bitmap_ffu(bitmap, binfo, i+1), nbits-1,
"Unexpected first unset bit");
assert_zu_eq(bitmap_ffu(bitmap, binfo, nbits-1),
expect_zu_eq(bitmap_ffu(bitmap, binfo, nbits-1),
nbits-1, "Unexpected first unset bit");
assert_zu_eq(bitmap_sfu(bitmap, binfo), i,
expect_zu_eq(bitmap_sfu(bitmap, binfo), i,
"Unexpected first unset bit");
}
assert_zu_eq(bitmap_sfu(bitmap, binfo), nbits-1,
expect_zu_eq(bitmap_sfu(bitmap, binfo), nbits-1,
"Unexpected first unset bit");
}

@ -14,7 +14,7 @@ static void test_write_cb(void *cbopaque, const char *s) {
size_t prev_test_write_len = test_write_len;
test_write_len += strlen(s); /* only increase the length */
arg_store = *(uint64_t *)cbopaque; /* only pass along the argument */
assert_zu_le(prev_test_write_len, test_write_len,
expect_zu_le(prev_test_write_len, test_write_len,
"Test write overflowed");
}
@ -22,7 +22,7 @@ static void test_buf_writer_body(tsdn_t *tsdn, buf_writer_t *buf_writer) {
char s[UNIT_MAX + 1];
size_t n_unit, remain, i;
ssize_t unit;
assert_ptr_not_null(buf_writer->buf, "Buffer is null");
expect_ptr_not_null(buf_writer->buf, "Buffer is null");
write_cb_t *write_cb = buf_writer_get_write_cb(buf_writer);
void *cbopaque = buf_writer_get_cbopaque(buf_writer);
@ -41,7 +41,7 @@ static void test_buf_writer_body(tsdn_t *tsdn, buf_writer_t *buf_writer) {
remain += unit;
if (remain > buf_writer->buf_size) {
/* Flushes should have happened. */
assert_u64_eq(arg_store, arg, "Call "
expect_u64_eq(arg_store, arg, "Call "
"back argument didn't get through");
remain %= buf_writer->buf_size;
if (remain == 0) {
@ -49,12 +49,12 @@ static void test_buf_writer_body(tsdn_t *tsdn, buf_writer_t *buf_writer) {
remain += buf_writer->buf_size;
}
}
assert_zu_eq(test_write_len + remain, i * unit,
expect_zu_eq(test_write_len + remain, i * unit,
"Incorrect length after writing %zu strings"
" of length %zu", i, unit);
}
buf_writer_flush(buf_writer);
assert_zu_eq(test_write_len, n_unit * unit,
expect_zu_eq(test_write_len, n_unit * unit,
"Incorrect length after flushing at the end of"
" writing %zu strings of length %zu", n_unit, unit);
}
@ -65,7 +65,7 @@ static void test_buf_writer_body(tsdn_t *tsdn, buf_writer_t *buf_writer) {
TEST_BEGIN(test_buf_write_static) {
buf_writer_t buf_writer;
tsdn_t *tsdn = tsdn_fetch();
assert_false(buf_writer_init(tsdn, &buf_writer, test_write_cb, &arg,
expect_false(buf_writer_init(tsdn, &buf_writer, test_write_cb, &arg,
test_buf, TEST_BUF_SIZE),
"buf_writer_init() should not encounter error on static buffer");
test_buf_writer_body(tsdn, &buf_writer);
@ -75,7 +75,7 @@ TEST_END
TEST_BEGIN(test_buf_write_dynamic) {
buf_writer_t buf_writer;
tsdn_t *tsdn = tsdn_fetch();
assert_false(buf_writer_init(tsdn, &buf_writer, test_write_cb, &arg,
expect_false(buf_writer_init(tsdn, &buf_writer, test_write_cb, &arg,
NULL, TEST_BUF_SIZE), "buf_writer_init() should not OOM");
test_buf_writer_body(tsdn, &buf_writer);
}
@ -84,13 +84,13 @@ TEST_END
TEST_BEGIN(test_buf_write_oom) {
buf_writer_t buf_writer;
tsdn_t *tsdn = tsdn_fetch();
assert_true(buf_writer_init(tsdn, &buf_writer, test_write_cb, &arg,
expect_true(buf_writer_init(tsdn, &buf_writer, test_write_cb, &arg,
NULL, SC_LARGE_MAXCLASS + 1), "buf_writer_init() should OOM");
assert_ptr_null(buf_writer.buf, "Buffer should be null");
expect_ptr_null(buf_writer.buf, "Buffer should be null");
write_cb_t *write_cb = buf_writer_get_write_cb(&buf_writer);
assert_ptr_eq(write_cb, test_write_cb, "Should use test_write_cb");
expect_ptr_eq(write_cb, test_write_cb, "Should use test_write_cb");
void *cbopaque = buf_writer_get_cbopaque(&buf_writer);
assert_ptr_eq(cbopaque, &arg, "Should use arg");
expect_ptr_eq(cbopaque, &arg, "Should use arg");
char s[UNIT_MAX + 1];
size_t n_unit, i;
@ -107,14 +107,14 @@ TEST_BEGIN(test_buf_write_oom) {
for (i = 1; i <= n_unit; ++i) {
arg = prng_lg_range_u64(&arg, 64);
write_cb(cbopaque, s);
assert_u64_eq(arg_store, arg,
expect_u64_eq(arg_store, arg,
"Call back argument didn't get through");
assert_zu_eq(test_write_len, i * unit,
expect_zu_eq(test_write_len, i * unit,
"Incorrect length after writing %zu strings"
" of length %zu", i, unit);
}
buf_writer_flush(&buf_writer);
assert_zu_eq(test_write_len, n_unit * unit,
expect_zu_eq(test_write_len, n_unit * unit,
"Incorrect length after flushing at the end of"
" writing %zu strings of length %zu", n_unit, unit);
}

@ -8,52 +8,52 @@ TEST_BEGIN(test_cache_bin) {
/* Page aligned to make sure lowbits not overflowable. */
void **stack = mallocx(PAGE, MALLOCX_TCACHE_NONE | MALLOCX_ALIGN(PAGE));
assert_ptr_not_null(stack, "Unexpected mallocx failure");
expect_ptr_not_null(stack, "Unexpected mallocx failure");
/* Initialize to empty; bin 0. */
cache_bin_sz_t ncached_max = cache_bin_ncached_max_get(0);
void **empty_position = stack + ncached_max;
bin->cur_ptr.ptr = empty_position;
bin->low_water_position = bin->cur_ptr.lowbits;
bin->full_position = (uint32_t)(uintptr_t)stack;
assert_ptr_eq(cache_bin_empty_position_get(bin, 0), empty_position,
expect_ptr_eq(cache_bin_empty_position_get(bin, 0), empty_position,
"Incorrect empty position");
/* Not using assert_zu etc on cache_bin_sz_t since it may change. */
assert_true(cache_bin_ncached_get(bin, 0) == 0, "Incorrect cache size");
/* Not using expect_zu etc on cache_bin_sz_t since it may change. */
expect_true(cache_bin_ncached_get(bin, 0) == 0, "Incorrect cache size");
bool success;
void *ret = cache_bin_alloc_easy(bin, &success, 0);
assert_false(success, "Empty cache bin should not alloc");
assert_true(cache_bin_low_water_get(bin, 0) == 0,
expect_false(success, "Empty cache bin should not alloc");
expect_true(cache_bin_low_water_get(bin, 0) == 0,
"Incorrect low water mark");
cache_bin_ncached_set(bin, 0, 0);
assert_ptr_eq(bin->cur_ptr.ptr, empty_position, "Bin should be empty");
expect_ptr_eq(bin->cur_ptr.ptr, empty_position, "Bin should be empty");
for (cache_bin_sz_t i = 1; i < ncached_max + 1; i++) {
success = cache_bin_dalloc_easy(bin, (void *)(uintptr_t)i);
assert_true(success && cache_bin_ncached_get(bin, 0) == i,
expect_true(success && cache_bin_ncached_get(bin, 0) == i,
"Bin dalloc failure");
}
success = cache_bin_dalloc_easy(bin, (void *)1);
assert_false(success, "Bin should be full");
assert_ptr_eq(bin->cur_ptr.ptr, stack, "Incorrect bin cur_ptr");
expect_false(success, "Bin should be full");
expect_ptr_eq(bin->cur_ptr.ptr, stack, "Incorrect bin cur_ptr");
cache_bin_ncached_set(bin, 0, ncached_max);
assert_ptr_eq(bin->cur_ptr.ptr, stack, "cur_ptr should not change");
expect_ptr_eq(bin->cur_ptr.ptr, stack, "cur_ptr should not change");
/* Emulate low water after refill. */
bin->low_water_position = bin->full_position;
for (cache_bin_sz_t i = ncached_max; i > 0; i--) {
ret = cache_bin_alloc_easy(bin, &success, 0);
cache_bin_sz_t ncached = cache_bin_ncached_get(bin, 0);
assert_true(success && ncached == i - 1,
expect_true(success && ncached == i - 1,
"Cache bin alloc failure");
assert_ptr_eq(ret, (void *)(uintptr_t)i, "Bin alloc failure");
assert_true(cache_bin_low_water_get(bin, 0) == ncached,
expect_ptr_eq(ret, (void *)(uintptr_t)i, "Bin alloc failure");
expect_true(cache_bin_low_water_get(bin, 0) == ncached,
"Incorrect low water mark");
}
ret = cache_bin_alloc_easy(bin, &success, 0);
assert_false(success, "Empty cache bin should not alloc.");
assert_ptr_eq(bin->cur_ptr.ptr, stack + ncached_max,
expect_false(success, "Empty cache bin should not alloc.");
expect_ptr_eq(bin->cur_ptr.ptr, stack + ncached_max,
"Bin should be empty");
}
TEST_END

@ -6,11 +6,11 @@ TEST_BEGIN(test_new_delete) {
tsd = tsd_fetch();
assert_false(ckh_new(tsd, &ckh, 2, ckh_string_hash,
expect_false(ckh_new(tsd, &ckh, 2, ckh_string_hash,
ckh_string_keycomp), "Unexpected ckh_new() error");
ckh_delete(tsd, &ckh);
assert_false(ckh_new(tsd, &ckh, 3, ckh_pointer_hash,
expect_false(ckh_new(tsd, &ckh, 3, ckh_pointer_hash,
ckh_pointer_keycomp), "Unexpected ckh_new() error");
ckh_delete(tsd, &ckh);
}
@ -30,16 +30,16 @@ TEST_BEGIN(test_count_insert_search_remove) {
tsd = tsd_fetch();
assert_false(ckh_new(tsd, &ckh, 2, ckh_string_hash,
expect_false(ckh_new(tsd, &ckh, 2, ckh_string_hash,
ckh_string_keycomp), "Unexpected ckh_new() error");
assert_zu_eq(ckh_count(&ckh), 0,
expect_zu_eq(ckh_count(&ckh), 0,
"ckh_count() should return %zu, but it returned %zu", ZU(0),
ckh_count(&ckh));
/* Insert. */
for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) {
ckh_insert(tsd, &ckh, strs[i], strs[i]);
assert_zu_eq(ckh_count(&ckh), i+1,
expect_zu_eq(ckh_count(&ckh), i+1,
"ckh_count() should return %zu, but it returned %zu", i+1,
ckh_count(&ckh));
}
@ -57,17 +57,17 @@ TEST_BEGIN(test_count_insert_search_remove) {
vp = (i & 2) ? &v.p : NULL;
k.p = NULL;
v.p = NULL;
assert_false(ckh_search(&ckh, strs[i], kp, vp),
expect_false(ckh_search(&ckh, strs[i], kp, vp),
"Unexpected ckh_search() error");
ks = (i & 1) ? strs[i] : (const char *)NULL;
vs = (i & 2) ? strs[i] : (const char *)NULL;
assert_ptr_eq((void *)ks, (void *)k.s, "Key mismatch, i=%zu",
expect_ptr_eq((void *)ks, (void *)k.s, "Key mismatch, i=%zu",
i);
assert_ptr_eq((void *)vs, (void *)v.s, "Value mismatch, i=%zu",
expect_ptr_eq((void *)vs, (void *)v.s, "Value mismatch, i=%zu",
i);
}
assert_true(ckh_search(&ckh, missing, NULL, NULL),
expect_true(ckh_search(&ckh, missing, NULL, NULL),
"Unexpected ckh_search() success");
/* Remove. */
@ -83,16 +83,16 @@ TEST_BEGIN(test_count_insert_search_remove) {
vp = (i & 2) ? &v.p : NULL;
k.p = NULL;
v.p = NULL;
assert_false(ckh_remove(tsd, &ckh, strs[i], kp, vp),
expect_false(ckh_remove(tsd, &ckh, strs[i], kp, vp),
"Unexpected ckh_remove() error");
ks = (i & 1) ? strs[i] : (const char *)NULL;
vs = (i & 2) ? strs[i] : (const char *)NULL;
assert_ptr_eq((void *)ks, (void *)k.s, "Key mismatch, i=%zu",
expect_ptr_eq((void *)ks, (void *)k.s, "Key mismatch, i=%zu",
i);
assert_ptr_eq((void *)vs, (void *)v.s, "Value mismatch, i=%zu",
expect_ptr_eq((void *)vs, (void *)v.s, "Value mismatch, i=%zu",
i);
assert_zu_eq(ckh_count(&ckh),
expect_zu_eq(ckh_count(&ckh),
sizeof(strs)/sizeof(const char *) - i - 1,
"ckh_count() should return %zu, but it returned %zu",
sizeof(strs)/sizeof(const char *) - i - 1,
@ -113,40 +113,40 @@ TEST_BEGIN(test_insert_iter_remove) {
tsd = tsd_fetch();
assert_false(ckh_new(tsd, &ckh, 2, ckh_pointer_hash,
expect_false(ckh_new(tsd, &ckh, 2, ckh_pointer_hash,
ckh_pointer_keycomp), "Unexpected ckh_new() error");
for (i = 0; i < NITEMS; i++) {
p[i] = mallocx(i+1, 0);
assert_ptr_not_null(p[i], "Unexpected mallocx() failure");
expect_ptr_not_null(p[i], "Unexpected mallocx() failure");
}
for (i = 0; i < NITEMS; i++) {
size_t j;
for (j = i; j < NITEMS; j++) {
assert_false(ckh_insert(tsd, &ckh, p[j], p[j]),
expect_false(ckh_insert(tsd, &ckh, p[j], p[j]),
"Unexpected ckh_insert() failure");
assert_false(ckh_search(&ckh, p[j], &q, &r),
expect_false(ckh_search(&ckh, p[j], &q, &r),
"Unexpected ckh_search() failure");
assert_ptr_eq(p[j], q, "Key pointer mismatch");
assert_ptr_eq(p[j], r, "Value pointer mismatch");
expect_ptr_eq(p[j], q, "Key pointer mismatch");
expect_ptr_eq(p[j], r, "Value pointer mismatch");
}
assert_zu_eq(ckh_count(&ckh), NITEMS,
expect_zu_eq(ckh_count(&ckh), NITEMS,
"ckh_count() should return %zu, but it returned %zu",
NITEMS, ckh_count(&ckh));
for (j = i + 1; j < NITEMS; j++) {
assert_false(ckh_search(&ckh, p[j], NULL, NULL),
expect_false(ckh_search(&ckh, p[j], NULL, NULL),
"Unexpected ckh_search() failure");
assert_false(ckh_remove(tsd, &ckh, p[j], &q, &r),
expect_false(ckh_remove(tsd, &ckh, p[j], &q, &r),
"Unexpected ckh_remove() failure");
assert_ptr_eq(p[j], q, "Key pointer mismatch");
assert_ptr_eq(p[j], r, "Value pointer mismatch");
assert_true(ckh_search(&ckh, p[j], NULL, NULL),
expect_ptr_eq(p[j], q, "Key pointer mismatch");
expect_ptr_eq(p[j], r, "Value pointer mismatch");
expect_true(ckh_search(&ckh, p[j], NULL, NULL),
"Unexpected ckh_search() success");
assert_true(ckh_remove(tsd, &ckh, p[j], &q, &r),
expect_true(ckh_remove(tsd, &ckh, p[j], &q, &r),
"Unexpected ckh_remove() success");
}
@ -159,11 +159,11 @@ TEST_BEGIN(test_insert_iter_remove) {
for (tabind = 0; !ckh_iter(&ckh, &tabind, &q, &r);) {
size_t k;
assert_ptr_eq(q, r, "Key and val not equal");
expect_ptr_eq(q, r, "Key and val not equal");
for (k = 0; k < NITEMS; k++) {
if (p[k] == q) {
assert_false(seen[k],
expect_false(seen[k],
"Item %zu already seen", k);
seen[k] = true;
break;
@ -172,29 +172,29 @@ TEST_BEGIN(test_insert_iter_remove) {
}
for (j = 0; j < i + 1; j++) {
assert_true(seen[j], "Item %zu not seen", j);
expect_true(seen[j], "Item %zu not seen", j);
}
for (; j < NITEMS; j++) {
assert_false(seen[j], "Item %zu seen", j);
expect_false(seen[j], "Item %zu seen", j);
}
}
}
for (i = 0; i < NITEMS; i++) {
assert_false(ckh_search(&ckh, p[i], NULL, NULL),
expect_false(ckh_search(&ckh, p[i], NULL, NULL),
"Unexpected ckh_search() failure");
assert_false(ckh_remove(tsd, &ckh, p[i], &q, &r),
expect_false(ckh_remove(tsd, &ckh, p[i], &q, &r),
"Unexpected ckh_remove() failure");
assert_ptr_eq(p[i], q, "Key pointer mismatch");
assert_ptr_eq(p[i], r, "Value pointer mismatch");
assert_true(ckh_search(&ckh, p[i], NULL, NULL),
expect_ptr_eq(p[i], q, "Key pointer mismatch");
expect_ptr_eq(p[i], r, "Value pointer mismatch");
expect_true(ckh_search(&ckh, p[i], NULL, NULL),
"Unexpected ckh_search() success");
assert_true(ckh_remove(tsd, &ckh, p[i], &q, &r),
expect_true(ckh_remove(tsd, &ckh, p[i], &q, &r),
"Unexpected ckh_remove() success");
dallocx(p[i], 0);
}
assert_zu_eq(ckh_count(&ckh), 0,
expect_zu_eq(ckh_count(&ckh), 0,
"ckh_count() should return %zu, but it returned %zu",
ZU(0), ckh_count(&ckh));
ckh_delete(tsd, &ckh);

@ -16,24 +16,24 @@ TEST_BEGIN(test_counter_accum) {
trigger = counter_accum(tsd_tsdn(tsd), &c, increment);
accum += increment;
if (accum < interval) {
assert_b_eq(trigger, false, "Should not trigger");
expect_b_eq(trigger, false, "Should not trigger");
} else {
assert_b_eq(trigger, true, "Should have triggered");
expect_b_eq(trigger, true, "Should have triggered");
}
}
assert_b_eq(trigger, true, "Should have triggered");
expect_b_eq(trigger, true, "Should have triggered");
}
TEST_END
void
assert_counter_value(counter_accum_t *c, uint64_t v) {
expect_counter_value(counter_accum_t *c, uint64_t v) {
uint64_t accum;
#ifdef JEMALLOC_ATOMIC_U64
accum = atomic_load_u64(&(c->accumbytes), ATOMIC_RELAXED);
#else
accum = c->accumbytes;
#endif
assert_u64_eq(accum, v, "Counter value mismatch");
expect_u64_eq(accum, v, "Counter value mismatch");
}
TEST_BEGIN(test_counter_rollback) {
@ -47,34 +47,34 @@ TEST_BEGIN(test_counter_rollback) {
bool trigger;
trigger = counter_accum(tsd_tsdn(tsd), &c, half_interval);
assert_b_eq(trigger, false, "Should not trigger");
expect_b_eq(trigger, false, "Should not trigger");
counter_rollback(tsd_tsdn(tsd), &c, half_interval + 1);
assert_counter_value(&c, 0);
expect_counter_value(&c, 0);
trigger = counter_accum(tsd_tsdn(tsd), &c, half_interval);
assert_b_eq(trigger, false, "Should not trigger");
expect_b_eq(trigger, false, "Should not trigger");
counter_rollback(tsd_tsdn(tsd), &c, half_interval - 1);
assert_counter_value(&c, 1);
expect_counter_value(&c, 1);
counter_rollback(tsd_tsdn(tsd), &c, 1);
assert_counter_value(&c, 0);
expect_counter_value(&c, 0);
trigger = counter_accum(tsd_tsdn(tsd), &c, half_interval);
assert_b_eq(trigger, false, "Should not trigger");
expect_b_eq(trigger, false, "Should not trigger");
counter_rollback(tsd_tsdn(tsd), &c, 1);
assert_counter_value(&c, half_interval - 1);
expect_counter_value(&c, half_interval - 1);
trigger = counter_accum(tsd_tsdn(tsd), &c, half_interval);
assert_b_eq(trigger, false, "Should not trigger");
assert_counter_value(&c, interval - 1);
expect_b_eq(trigger, false, "Should not trigger");
expect_counter_value(&c, interval - 1);
trigger = counter_accum(tsd_tsdn(tsd), &c, 1);
assert_b_eq(trigger, true, "Should have triggered");
assert_counter_value(&c, 0);
expect_b_eq(trigger, true, "Should have triggered");
expect_counter_value(&c, 0);
trigger = counter_accum(tsd_tsdn(tsd), &c, interval + 1);
assert_b_eq(trigger, true, "Should have triggered");
assert_counter_value(&c, 1);
expect_b_eq(trigger, true, "Should have triggered");
expect_counter_value(&c, 1);
}
TEST_END
@ -114,7 +114,7 @@ TEST_BEGIN(test_counter_mt) {
thd_join(thds[i], &ret);
sum += (uintptr_t)ret;
}
assert_u64_eq(sum, N_THDS * N_ITER_THD / (interval / ITER_INCREMENT),
expect_u64_eq(sum, N_THDS * N_ITER_THD / (interval / ITER_INCREMENT),
"Incorrect number of triggers");
}
TEST_END

@ -17,7 +17,7 @@ check_background_thread_enabled(void) {
if (ret == ENOENT) {
return false;
}
assert_d_eq(ret, 0, "Unexpected mallctl error");
expect_d_eq(ret, 0, "Unexpected mallctl error");
return enabled;
}
@ -39,22 +39,22 @@ static unsigned
do_arena_create(ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms) {
unsigned arena_ind;
size_t sz = sizeof(unsigned);
assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
0, "Unexpected mallctl() failure");
size_t mib[3];
size_t miblen = sizeof(mib)/sizeof(size_t);
assert_d_eq(mallctlnametomib("arena.0.dirty_decay_ms", mib, &miblen),
expect_d_eq(mallctlnametomib("arena.0.dirty_decay_ms", mib, &miblen),
0, "Unexpected mallctlnametomib() failure");
mib[1] = (size_t)arena_ind;
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL,
expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL,
(void *)&dirty_decay_ms, sizeof(dirty_decay_ms)), 0,
"Unexpected mallctlbymib() failure");
assert_d_eq(mallctlnametomib("arena.0.muzzy_decay_ms", mib, &miblen),
expect_d_eq(mallctlnametomib("arena.0.muzzy_decay_ms", mib, &miblen),
0, "Unexpected mallctlnametomib() failure");
mib[1] = (size_t)arena_ind;
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL,
expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL,
(void *)&muzzy_decay_ms, sizeof(muzzy_decay_ms)), 0,
"Unexpected mallctlbymib() failure");
@ -65,17 +65,17 @@ static void
do_arena_destroy(unsigned arena_ind) {
size_t mib[3];
size_t miblen = sizeof(mib)/sizeof(size_t);
assert_d_eq(mallctlnametomib("arena.0.destroy", mib, &miblen), 0,
expect_d_eq(mallctlnametomib("arena.0.destroy", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
mib[1] = (size_t)arena_ind;
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
"Unexpected mallctlbymib() failure");
}
void
do_epoch(void) {
uint64_t epoch = 1;
assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
0, "Unexpected mallctl() failure");
}
@ -83,10 +83,10 @@ void
do_purge(unsigned arena_ind) {
size_t mib[3];
size_t miblen = sizeof(mib)/sizeof(size_t);
assert_d_eq(mallctlnametomib("arena.0.purge", mib, &miblen), 0,
expect_d_eq(mallctlnametomib("arena.0.purge", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
mib[1] = (size_t)arena_ind;
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
"Unexpected mallctlbymib() failure");
}
@ -94,10 +94,10 @@ void
do_decay(unsigned arena_ind) {
size_t mib[3];
size_t miblen = sizeof(mib)/sizeof(size_t);
assert_d_eq(mallctlnametomib("arena.0.decay", mib, &miblen), 0,
expect_d_eq(mallctlnametomib("arena.0.decay", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
mib[1] = (size_t)arena_ind;
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
"Unexpected mallctlbymib() failure");
}
@ -105,12 +105,12 @@ static uint64_t
get_arena_npurge_impl(const char *mibname, unsigned arena_ind) {
size_t mib[4];
size_t miblen = sizeof(mib)/sizeof(size_t);
assert_d_eq(mallctlnametomib(mibname, mib, &miblen), 0,
expect_d_eq(mallctlnametomib(mibname, mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
mib[2] = (size_t)arena_ind;
uint64_t npurge = 0;
size_t sz = sizeof(npurge);
assert_d_eq(mallctlbymib(mib, miblen, (void *)&npurge, &sz, NULL, 0),
expect_d_eq(mallctlbymib(mib, miblen, (void *)&npurge, &sz, NULL, 0),
config_stats ? 0 : ENOENT, "Unexpected mallctlbymib() failure");
return npurge;
}
@ -145,12 +145,12 @@ get_arena_pdirty(unsigned arena_ind) {
do_epoch();
size_t mib[4];
size_t miblen = sizeof(mib)/sizeof(size_t);
assert_d_eq(mallctlnametomib("stats.arenas.0.pdirty", mib, &miblen), 0,
expect_d_eq(mallctlnametomib("stats.arenas.0.pdirty", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
mib[2] = (size_t)arena_ind;
size_t pdirty;
size_t sz = sizeof(pdirty);
assert_d_eq(mallctlbymib(mib, miblen, (void *)&pdirty, &sz, NULL, 0), 0,
expect_d_eq(mallctlbymib(mib, miblen, (void *)&pdirty, &sz, NULL, 0), 0,
"Unexpected mallctlbymib() failure");
return pdirty;
}
@ -160,12 +160,12 @@ get_arena_pmuzzy(unsigned arena_ind) {
do_epoch();
size_t mib[4];
size_t miblen = sizeof(mib)/sizeof(size_t);
assert_d_eq(mallctlnametomib("stats.arenas.0.pmuzzy", mib, &miblen), 0,
expect_d_eq(mallctlnametomib("stats.arenas.0.pmuzzy", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
mib[2] = (size_t)arena_ind;
size_t pmuzzy;
size_t sz = sizeof(pmuzzy);
assert_d_eq(mallctlbymib(mib, miblen, (void *)&pmuzzy, &sz, NULL, 0), 0,
expect_d_eq(mallctlbymib(mib, miblen, (void *)&pmuzzy, &sz, NULL, 0), 0,
"Unexpected mallctlbymib() failure");
return pmuzzy;
}
@ -173,7 +173,7 @@ get_arena_pmuzzy(unsigned arena_ind) {
static void *
do_mallocx(size_t size, int flags) {
void *p = mallocx(size, flags);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
expect_ptr_not_null(p, "Unexpected mallocx() failure");
return p;
}
@ -193,7 +193,7 @@ TEST_BEGIN(test_decay_ticks) {
void *p;
sz = sizeof(size_t);
assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL,
expect_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL,
0), 0, "Unexpected mallctl failure");
/* Set up a manually managed arena for test. */
@ -202,11 +202,11 @@ TEST_BEGIN(test_decay_ticks) {
/* Migrate to the new arena, and get the ticker. */
unsigned old_arena_ind;
size_t sz_arena_ind = sizeof(old_arena_ind);
assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind,
expect_d_eq(mallctl("thread.arena", (void *)&old_arena_ind,
&sz_arena_ind, (void *)&arena_ind, sizeof(arena_ind)), 0,
"Unexpected mallctl() failure");
decay_ticker = decay_ticker_get(tsd_fetch(), arena_ind);
assert_ptr_not_null(decay_ticker,
expect_ptr_not_null(decay_ticker,
"Unexpected failure getting decay ticker");
/*
@ -218,38 +218,38 @@ TEST_BEGIN(test_decay_ticks) {
/* malloc(). */
tick0 = ticker_read(decay_ticker);
p = malloc(large0);
assert_ptr_not_null(p, "Unexpected malloc() failure");
expect_ptr_not_null(p, "Unexpected malloc() failure");
tick1 = ticker_read(decay_ticker);
assert_u32_ne(tick1, tick0, "Expected ticker to tick during malloc()");
expect_u32_ne(tick1, tick0, "Expected ticker to tick during malloc()");
/* free(). */
tick0 = ticker_read(decay_ticker);
free(p);
tick1 = ticker_read(decay_ticker);
assert_u32_ne(tick1, tick0, "Expected ticker to tick during free()");
expect_u32_ne(tick1, tick0, "Expected ticker to tick during free()");
/* calloc(). */
tick0 = ticker_read(decay_ticker);
p = calloc(1, large0);
assert_ptr_not_null(p, "Unexpected calloc() failure");
expect_ptr_not_null(p, "Unexpected calloc() failure");
tick1 = ticker_read(decay_ticker);
assert_u32_ne(tick1, tick0, "Expected ticker to tick during calloc()");
expect_u32_ne(tick1, tick0, "Expected ticker to tick during calloc()");
free(p);
/* posix_memalign(). */
tick0 = ticker_read(decay_ticker);
assert_d_eq(posix_memalign(&p, sizeof(size_t), large0), 0,
expect_d_eq(posix_memalign(&p, sizeof(size_t), large0), 0,
"Unexpected posix_memalign() failure");
tick1 = ticker_read(decay_ticker);
assert_u32_ne(tick1, tick0,
expect_u32_ne(tick1, tick0,
"Expected ticker to tick during posix_memalign()");
free(p);
/* aligned_alloc(). */
tick0 = ticker_read(decay_ticker);
p = aligned_alloc(sizeof(size_t), large0);
assert_ptr_not_null(p, "Unexpected aligned_alloc() failure");
expect_ptr_not_null(p, "Unexpected aligned_alloc() failure");
tick1 = ticker_read(decay_ticker);
assert_u32_ne(tick1, tick0,
expect_u32_ne(tick1, tick0,
"Expected ticker to tick during aligned_alloc()");
free(p);
@ -257,20 +257,20 @@ TEST_BEGIN(test_decay_ticks) {
/* Allocate. */
tick0 = ticker_read(decay_ticker);
p = realloc(NULL, large0);
assert_ptr_not_null(p, "Unexpected realloc() failure");
expect_ptr_not_null(p, "Unexpected realloc() failure");
tick1 = ticker_read(decay_ticker);
assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
expect_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
/* Reallocate. */
tick0 = ticker_read(decay_ticker);
p = realloc(p, large0);
assert_ptr_not_null(p, "Unexpected realloc() failure");
expect_ptr_not_null(p, "Unexpected realloc() failure");
tick1 = ticker_read(decay_ticker);
assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
expect_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
/* Deallocate. */
tick0 = ticker_read(decay_ticker);
realloc(p, 0);
tick1 = ticker_read(decay_ticker);
assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
expect_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
/*
* Test the *allocx() APIs using large and small size classes, with
@ -288,40 +288,40 @@ TEST_BEGIN(test_decay_ticks) {
/* mallocx(). */
tick0 = ticker_read(decay_ticker);
p = mallocx(sz, MALLOCX_TCACHE_NONE);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
expect_ptr_not_null(p, "Unexpected mallocx() failure");
tick1 = ticker_read(decay_ticker);
assert_u32_ne(tick1, tick0,
expect_u32_ne(tick1, tick0,
"Expected ticker to tick during mallocx() (sz=%zu)",
sz);
/* rallocx(). */
tick0 = ticker_read(decay_ticker);
p = rallocx(p, sz, MALLOCX_TCACHE_NONE);
assert_ptr_not_null(p, "Unexpected rallocx() failure");
expect_ptr_not_null(p, "Unexpected rallocx() failure");
tick1 = ticker_read(decay_ticker);
assert_u32_ne(tick1, tick0,
expect_u32_ne(tick1, tick0,
"Expected ticker to tick during rallocx() (sz=%zu)",
sz);
/* xallocx(). */
tick0 = ticker_read(decay_ticker);
xallocx(p, sz, 0, MALLOCX_TCACHE_NONE);
tick1 = ticker_read(decay_ticker);
assert_u32_ne(tick1, tick0,
expect_u32_ne(tick1, tick0,
"Expected ticker to tick during xallocx() (sz=%zu)",
sz);
/* dallocx(). */
tick0 = ticker_read(decay_ticker);
dallocx(p, MALLOCX_TCACHE_NONE);
tick1 = ticker_read(decay_ticker);
assert_u32_ne(tick1, tick0,
expect_u32_ne(tick1, tick0,
"Expected ticker to tick during dallocx() (sz=%zu)",
sz);
/* sdallocx(). */
p = mallocx(sz, MALLOCX_TCACHE_NONE);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
expect_ptr_not_null(p, "Unexpected mallocx() failure");
tick0 = ticker_read(decay_ticker);
sdallocx(p, sz, MALLOCX_TCACHE_NONE);
tick1 = ticker_read(decay_ticker);
assert_u32_ne(tick1, tick0,
expect_u32_ne(tick1, tick0,
"Expected ticker to tick during sdallocx() "
"(sz=%zu)", sz);
}
@ -338,11 +338,11 @@ TEST_BEGIN(test_decay_ticks) {
size_t tcache_max, sz_tcache_max;
sz_tcache_max = sizeof(tcache_max);
assert_d_eq(mallctl("arenas.tcache_max", (void *)&tcache_max,
expect_d_eq(mallctl("arenas.tcache_max", (void *)&tcache_max,
&sz_tcache_max, NULL, 0), 0, "Unexpected mallctl() failure");
sz = sizeof(unsigned);
assert_d_eq(mallctl("tcache.create", (void *)&tcache_ind, &sz,
expect_d_eq(mallctl("tcache.create", (void *)&tcache_ind, &sz,
NULL, 0), 0, "Unexpected mallctl failure");
for (i = 0; i < sizeof(tcache_sizes) / sizeof(size_t); i++) {
@ -351,26 +351,26 @@ TEST_BEGIN(test_decay_ticks) {
/* tcache fill. */
tick0 = ticker_read(decay_ticker);
p = mallocx(sz, MALLOCX_TCACHE(tcache_ind));
assert_ptr_not_null(p, "Unexpected mallocx() failure");
expect_ptr_not_null(p, "Unexpected mallocx() failure");
tick1 = ticker_read(decay_ticker);
assert_u32_ne(tick1, tick0,
expect_u32_ne(tick1, tick0,
"Expected ticker to tick during tcache fill "
"(sz=%zu)", sz);
/* tcache flush. */
dallocx(p, MALLOCX_TCACHE(tcache_ind));
tick0 = ticker_read(decay_ticker);
assert_d_eq(mallctl("tcache.flush", NULL, NULL,
expect_d_eq(mallctl("tcache.flush", NULL, NULL,
(void *)&tcache_ind, sizeof(unsigned)), 0,
"Unexpected mallctl failure");
tick1 = ticker_read(decay_ticker);
/* Will only tick if it's in tcache. */
if (sz <= tcache_max) {
assert_u32_ne(tick1, tick0,
expect_u32_ne(tick1, tick0,
"Expected ticker to tick during tcache "
"flush (sz=%zu)", sz);
} else {
assert_u32_eq(tick1, tick0,
expect_u32_eq(tick1, tick0,
"Unexpected ticker tick during tcache "
"flush (sz=%zu)", sz);
}
@ -417,7 +417,7 @@ decay_ticker_helper(unsigned arena_ind, int flags, bool dirty, ssize_t dt,
dallocx(p, flags);
if (config_stats) {
assert_u64_gt(dirty_npurge1 + muzzy_npurge1, dirty_npurge0 +
expect_u64_gt(dirty_npurge1 + muzzy_npurge1, dirty_npurge0 +
muzzy_npurge0, "Expected purging to occur");
}
#undef NINTERVALS
@ -442,7 +442,7 @@ TEST_BEGIN(test_decay_ticker) {
size_t tcache_max;
size_t sz = sizeof(size_t);
assert_d_eq(mallctl("arenas.tcache_max", (void *)&tcache_max, &sz, NULL,
expect_d_eq(mallctl("arenas.tcache_max", (void *)&tcache_max, &sz, NULL,
0), 0, "Unexpected mallctl failure");
large = nallocx(tcache_max + 1, flags);
@ -467,7 +467,7 @@ TEST_BEGIN(test_decay_ticker) {
dallocx(ps[i], flags);
unsigned nupdates0 = nupdates_mock;
do_decay(arena_ind);
assert_u_gt(nupdates_mock, nupdates0,
expect_u_gt(nupdates_mock, nupdates0,
"Expected nstime_update() to be called");
}
@ -495,10 +495,10 @@ TEST_BEGIN(test_decay_nonmonotonic) {
unsigned i, nupdates0;
sz = sizeof(size_t);
assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL,
expect_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL,
0), 0, "Unexpected mallctl failure");
assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
expect_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl failure");
do_epoch();
sz = sizeof(uint64_t);
@ -515,15 +515,15 @@ TEST_BEGIN(test_decay_nonmonotonic) {
for (i = 0; i < NPS; i++) {
ps[i] = mallocx(large0, flags);
assert_ptr_not_null(ps[i], "Unexpected mallocx() failure");
expect_ptr_not_null(ps[i], "Unexpected mallocx() failure");
}
for (i = 0; i < NPS; i++) {
dallocx(ps[i], flags);
nupdates0 = nupdates_mock;
assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
expect_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
"Unexpected arena.0.decay failure");
assert_u_gt(nupdates_mock, nupdates0,
expect_u_gt(nupdates_mock, nupdates0,
"Expected nstime_update() to be called");
}
@ -532,7 +532,7 @@ TEST_BEGIN(test_decay_nonmonotonic) {
npurge1 = get_arena_npurge(0);
if (config_stats) {
assert_u64_eq(npurge0, npurge1, "Unexpected purging occurred");
expect_u64_eq(npurge0, npurge1, "Unexpected purging occurred");
}
nstime_monotonic = nstime_monotonic_orig;
@ -545,16 +545,16 @@ TEST_BEGIN(test_decay_now) {
test_skip_if(check_background_thread_enabled());
unsigned arena_ind = do_arena_create(0, 0);
assert_zu_eq(get_arena_pdirty(arena_ind), 0, "Unexpected dirty pages");
assert_zu_eq(get_arena_pmuzzy(arena_ind), 0, "Unexpected muzzy pages");
expect_zu_eq(get_arena_pdirty(arena_ind), 0, "Unexpected dirty pages");
expect_zu_eq(get_arena_pmuzzy(arena_ind), 0, "Unexpected muzzy pages");
size_t sizes[] = {16, PAGE<<2, HUGEPAGE<<2};
/* Verify that dirty/muzzy pages never linger after deallocation. */
for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) {
size_t size = sizes[i];
generate_dirty(arena_ind, size);
assert_zu_eq(get_arena_pdirty(arena_ind), 0,
expect_zu_eq(get_arena_pdirty(arena_ind), 0,
"Unexpected dirty pages");
assert_zu_eq(get_arena_pmuzzy(arena_ind), 0,
expect_zu_eq(get_arena_pmuzzy(arena_ind), 0,
"Unexpected muzzy pages");
}
do_arena_destroy(arena_ind);
@ -566,8 +566,8 @@ TEST_BEGIN(test_decay_never) {
unsigned arena_ind = do_arena_create(-1, -1);
int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
assert_zu_eq(get_arena_pdirty(arena_ind), 0, "Unexpected dirty pages");
assert_zu_eq(get_arena_pmuzzy(arena_ind), 0, "Unexpected muzzy pages");
expect_zu_eq(get_arena_pdirty(arena_ind), 0, "Unexpected dirty pages");
expect_zu_eq(get_arena_pmuzzy(arena_ind), 0, "Unexpected muzzy pages");
size_t sizes[] = {16, PAGE<<2, HUGEPAGE<<2};
void *ptrs[sizeof(sizes)/sizeof(size_t)];
for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) {
@ -576,15 +576,15 @@ TEST_BEGIN(test_decay_never) {
/* Verify that each deallocation generates additional dirty pages. */
size_t pdirty_prev = get_arena_pdirty(arena_ind);
size_t pmuzzy_prev = get_arena_pmuzzy(arena_ind);
assert_zu_eq(pdirty_prev, 0, "Unexpected dirty pages");
assert_zu_eq(pmuzzy_prev, 0, "Unexpected muzzy pages");
expect_zu_eq(pdirty_prev, 0, "Unexpected dirty pages");
expect_zu_eq(pmuzzy_prev, 0, "Unexpected muzzy pages");
for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) {
dallocx(ptrs[i], flags);
size_t pdirty = get_arena_pdirty(arena_ind);
size_t pmuzzy = get_arena_pmuzzy(arena_ind);
assert_zu_gt(pdirty + (size_t)get_arena_dirty_purged(arena_ind),
expect_zu_gt(pdirty + (size_t)get_arena_dirty_purged(arena_ind),
pdirty_prev, "Expected dirty pages to increase.");
assert_zu_eq(pmuzzy, 0, "Unexpected muzzy pages");
expect_zu_eq(pmuzzy, 0, "Unexpected muzzy pages");
pdirty_prev = pdirty;
}
do_arena_destroy(arena_ind);

@ -14,7 +14,7 @@ TEST_BEGIN(test_div_exhaustive) {
dividend += divisor) {
size_t quotient = div_compute(
&div_info, dividend);
assert_zu_eq(dividend, quotient * divisor,
expect_zu_eq(dividend, quotient * divisor,
"With divisor = %zu, dividend = %zu, "
"got quotient %zu", divisor, dividend, quotient);
}

@ -58,14 +58,14 @@ forwarding_cb(void *buf_descriptor_v, const char *str) {
size_t written = malloc_snprintf(buf_descriptor->buf,
buf_descriptor->len, "%s", str);
assert_zu_eq(written, strlen(str), "Buffer overflow!");
expect_zu_eq(written, strlen(str), "Buffer overflow!");
buf_descriptor->buf += written;
buf_descriptor->len -= written;
assert_zu_gt(buf_descriptor->len, 0, "Buffer out of space!");
expect_zu_gt(buf_descriptor->len, 0, "Buffer out of space!");
}
static void
assert_emit_output(void (*emit_fn)(emitter_t *),
expect_emit_output(void (*emit_fn)(emitter_t *),
const char *expected_json_output,
const char *expected_json_compact_output,
const char *expected_table_output) {
@ -80,7 +80,7 @@ assert_emit_output(void (*emit_fn)(emitter_t *),
emitter_init(&emitter, emitter_output_json, &forwarding_cb,
&buf_descriptor);
(*emit_fn)(&emitter);
assert_str_eq(expected_json_output, buf, "json output failure");
expect_str_eq(expected_json_output, buf, "json output failure");
buf_descriptor.buf = buf;
buf_descriptor.len = MALLOC_PRINTF_BUFSIZE;
@ -89,7 +89,7 @@ assert_emit_output(void (*emit_fn)(emitter_t *),
emitter_init(&emitter, emitter_output_json_compact, &forwarding_cb,
&buf_descriptor);
(*emit_fn)(&emitter);
assert_str_eq(expected_json_compact_output, buf,
expect_str_eq(expected_json_compact_output, buf,
"compact json output failure");
buf_descriptor.buf = buf;
@ -99,7 +99,7 @@ assert_emit_output(void (*emit_fn)(emitter_t *),
emitter_init(&emitter, emitter_output_table, &forwarding_cb,
&buf_descriptor);
(*emit_fn)(&emitter);
assert_str_eq(expected_table_output, buf, "table output failure");
expect_str_eq(expected_table_output, buf, "table output failure");
}
static void
@ -505,7 +505,7 @@ static const char *table_row_table =
#define GENERATE_TEST(feature) \
TEST_BEGIN(test_##feature) { \
assert_emit_output(emit_##feature, feature##_json, \
expect_emit_output(emit_##feature, feature##_json, \
feature##_json_compact, feature##_table); \
} \
TEST_END

@ -12,21 +12,21 @@ TEST_BEGIN(test_small_extent_size) {
*/
sz = sizeof(unsigned);
assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0,
expect_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0,
"Unexpected mallctl failure");
assert_d_eq(mallctlnametomib("arenas.bin.0.slab_size", mib, &miblen), 0,
expect_d_eq(mallctlnametomib("arenas.bin.0.slab_size", mib, &miblen), 0,
"Unexpected mallctlnametomib failure");
for (i = 0; i < nbins; i++) {
mib[2] = i;
sz = sizeof(size_t);
assert_d_eq(mallctlbymib(mib, miblen, (void *)&extent_size, &sz,
expect_d_eq(mallctlbymib(mib, miblen, (void *)&extent_size, &sz,
NULL, 0), 0, "Unexpected mallctlbymib failure");
assert_zu_eq(extent_size,
expect_zu_eq(extent_size,
sz_psz_quantize_floor(extent_size),
"Small extent quantization should be a no-op "
"(extent_size=%zu)", extent_size);
assert_zu_eq(extent_size,
expect_zu_eq(extent_size,
sz_psz_quantize_ceil(extent_size),
"Small extent quantization should be a no-op "
"(extent_size=%zu)", extent_size);
@ -47,42 +47,42 @@ TEST_BEGIN(test_large_extent_size) {
*/
sz = sizeof(bool);
assert_d_eq(mallctl("config.cache_oblivious", (void *)&cache_oblivious,
expect_d_eq(mallctl("config.cache_oblivious", (void *)&cache_oblivious,
&sz, NULL, 0), 0, "Unexpected mallctl failure");
sz = sizeof(unsigned);
assert_d_eq(mallctl("arenas.nlextents", (void *)&nlextents, &sz, NULL,
expect_d_eq(mallctl("arenas.nlextents", (void *)&nlextents, &sz, NULL,
0), 0, "Unexpected mallctl failure");
assert_d_eq(mallctlnametomib("arenas.lextent.0.size", mib, &miblen), 0,
expect_d_eq(mallctlnametomib("arenas.lextent.0.size", mib, &miblen), 0,
"Unexpected mallctlnametomib failure");
for (i = 0; i < nlextents; i++) {
size_t lextent_size, extent_size, floor, ceil;
mib[2] = i;
sz = sizeof(size_t);
assert_d_eq(mallctlbymib(mib, miblen, (void *)&lextent_size,
expect_d_eq(mallctlbymib(mib, miblen, (void *)&lextent_size,
&sz, NULL, 0), 0, "Unexpected mallctlbymib failure");
extent_size = cache_oblivious ? lextent_size + PAGE :
lextent_size;
floor = sz_psz_quantize_floor(extent_size);
ceil = sz_psz_quantize_ceil(extent_size);
assert_zu_eq(extent_size, floor,
expect_zu_eq(extent_size, floor,
"Extent quantization should be a no-op for precise size "
"(lextent_size=%zu, extent_size=%zu)", lextent_size,
extent_size);
assert_zu_eq(extent_size, ceil,
expect_zu_eq(extent_size, ceil,
"Extent quantization should be a no-op for precise size "
"(lextent_size=%zu, extent_size=%zu)", lextent_size,
extent_size);
if (i > 0) {
assert_zu_eq(extent_size_prev,
expect_zu_eq(extent_size_prev,
sz_psz_quantize_floor(extent_size - PAGE),
"Floor should be a precise size");
if (extent_size_prev < ceil_prev) {
assert_zu_eq(ceil_prev, extent_size,
expect_zu_eq(ceil_prev, extent_size,
"Ceiling should be a precise size "
"(extent_size_prev=%zu, ceil_prev=%zu, "
"extent_size=%zu)", extent_size_prev,
@ -112,17 +112,17 @@ TEST_BEGIN(test_monotonic) {
floor = sz_psz_quantize_floor(extent_size);
ceil = sz_psz_quantize_ceil(extent_size);
assert_zu_le(floor, extent_size,
expect_zu_le(floor, extent_size,
"Floor should be <= (floor=%zu, extent_size=%zu, ceil=%zu)",
floor, extent_size, ceil);
assert_zu_ge(ceil, extent_size,
expect_zu_ge(ceil, extent_size,
"Ceiling should be >= (floor=%zu, extent_size=%zu, "
"ceil=%zu)", floor, extent_size, ceil);
assert_zu_le(floor_prev, floor, "Floor should be monotonic "
expect_zu_le(floor_prev, floor, "Floor should be monotonic "
"(floor_prev=%zu, floor=%zu, extent_size=%zu, ceil=%zu)",
floor_prev, floor, extent_size, ceil);
assert_zu_le(ceil_prev, ceil, "Ceiling should be monotonic "
expect_zu_le(ceil_prev, ceil, "Ceiling should be monotonic "
"(floor=%zu, extent_size=%zu, ceil_prev=%zu, ceil=%zu)",
floor, extent_size, ceil_prev, ceil);

@ -36,25 +36,25 @@ TEST_BEGIN(test_fork) {
/* Set up a manually managed arena for test. */
unsigned arena_ind;
size_t sz = sizeof(unsigned);
assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
0, "Unexpected mallctl() failure");
/* Migrate to the new arena. */
unsigned old_arena_ind;
sz = sizeof(old_arena_ind);
assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
expect_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
(void *)&arena_ind, sizeof(arena_ind)), 0,
"Unexpected mallctl() failure");
p = malloc(1);
assert_ptr_not_null(p, "Unexpected malloc() failure");
expect_ptr_not_null(p, "Unexpected malloc() failure");
pid = fork();
free(p);
p = malloc(64);
assert_ptr_not_null(p, "Unexpected malloc() failure");
expect_ptr_not_null(p, "Unexpected malloc() failure");
free(p);
if (pid == -1) {

@ -131,7 +131,7 @@ hash_variant_verify_key(hash_variant_t variant, uint8_t *key) {
default: not_reached();
}
assert_u32_eq(computed, expected,
expect_u32_eq(computed, expected,
"Hash mismatch for %s(): expected %#x but got %#x",
hash_variant_string(variant), expected, computed);
}

@ -70,10 +70,10 @@ set_args_raw(uintptr_t *args_raw, int nargs) {
}
static void
assert_args_raw(uintptr_t *args_raw_expected, int nargs) {
expect_args_raw(uintptr_t *args_raw_expected, int nargs) {
int cmp = memcmp(args_raw_expected, arg_args_raw,
sizeof(uintptr_t) * nargs);
assert_d_eq(cmp, 0, "Raw args mismatch");
expect_d_eq(cmp, 0, "Raw args mismatch");
}
static void
@ -132,34 +132,34 @@ TEST_BEGIN(test_hooks_basic) {
reset_args();
hook_invoke_alloc(hook_alloc_posix_memalign, (void *)222, 333,
args_raw);
assert_ptr_eq(arg_extra, (void *)111, "Passed wrong user pointer");
assert_d_eq((int)hook_alloc_posix_memalign, arg_type,
expect_ptr_eq(arg_extra, (void *)111, "Passed wrong user pointer");
expect_d_eq((int)hook_alloc_posix_memalign, arg_type,
"Passed wrong alloc type");
assert_ptr_eq((void *)222, arg_result, "Passed wrong result address");
assert_u64_eq(333, arg_result_raw, "Passed wrong result");
assert_args_raw(args_raw, 3);
expect_ptr_eq((void *)222, arg_result, "Passed wrong result address");
expect_u64_eq(333, arg_result_raw, "Passed wrong result");
expect_args_raw(args_raw, 3);
/* Dalloc */
reset_args();
hook_invoke_dalloc(hook_dalloc_sdallocx, (void *)222, args_raw);
assert_d_eq((int)hook_dalloc_sdallocx, arg_type,
expect_d_eq((int)hook_dalloc_sdallocx, arg_type,
"Passed wrong dalloc type");
assert_ptr_eq((void *)111, arg_extra, "Passed wrong user pointer");
assert_ptr_eq((void *)222, arg_address, "Passed wrong address");
assert_args_raw(args_raw, 3);
expect_ptr_eq((void *)111, arg_extra, "Passed wrong user pointer");
expect_ptr_eq((void *)222, arg_address, "Passed wrong address");
expect_args_raw(args_raw, 3);
/* Expand */
reset_args();
hook_invoke_expand(hook_expand_xallocx, (void *)222, 333, 444, 555,
args_raw);
assert_d_eq((int)hook_expand_xallocx, arg_type,
expect_d_eq((int)hook_expand_xallocx, arg_type,
"Passed wrong expand type");
assert_ptr_eq((void *)111, arg_extra, "Passed wrong user pointer");
assert_ptr_eq((void *)222, arg_address, "Passed wrong address");
assert_zu_eq(333, arg_old_usize, "Passed wrong old usize");
assert_zu_eq(444, arg_new_usize, "Passed wrong new usize");
assert_zu_eq(555, arg_result_raw, "Passed wrong result");
assert_args_raw(args_raw, 4);
expect_ptr_eq((void *)111, arg_extra, "Passed wrong user pointer");
expect_ptr_eq((void *)222, arg_address, "Passed wrong address");
expect_zu_eq(333, arg_old_usize, "Passed wrong old usize");
expect_zu_eq(444, arg_new_usize, "Passed wrong new usize");
expect_zu_eq(555, arg_result_raw, "Passed wrong result");
expect_args_raw(args_raw, 4);
hook_remove(TSDN_NULL, handle);
}
@ -177,24 +177,24 @@ TEST_BEGIN(test_hooks_null) {
void *handle3 = hook_install(TSDN_NULL, &hooks3);
void *handle4 = hook_install(TSDN_NULL, &hooks4);
assert_ptr_ne(handle1, NULL, "Hook installation failed");
assert_ptr_ne(handle2, NULL, "Hook installation failed");
assert_ptr_ne(handle3, NULL, "Hook installation failed");
assert_ptr_ne(handle4, NULL, "Hook installation failed");
expect_ptr_ne(handle1, NULL, "Hook installation failed");
expect_ptr_ne(handle2, NULL, "Hook installation failed");
expect_ptr_ne(handle3, NULL, "Hook installation failed");
expect_ptr_ne(handle4, NULL, "Hook installation failed");
uintptr_t args_raw[4] = {10, 20, 30, 40};
call_count = 0;
hook_invoke_alloc(hook_alloc_malloc, NULL, 0, args_raw);
assert_d_eq(call_count, 1, "Called wrong number of times");
expect_d_eq(call_count, 1, "Called wrong number of times");
call_count = 0;
hook_invoke_dalloc(hook_dalloc_free, NULL, args_raw);
assert_d_eq(call_count, 1, "Called wrong number of times");
expect_d_eq(call_count, 1, "Called wrong number of times");
call_count = 0;
hook_invoke_expand(hook_expand_realloc, NULL, 0, 0, 0, args_raw);
assert_d_eq(call_count, 1, "Called wrong number of times");
expect_d_eq(call_count, 1, "Called wrong number of times");
hook_remove(TSDN_NULL, handle1);
hook_remove(TSDN_NULL, handle2);
@ -206,16 +206,16 @@ TEST_END
TEST_BEGIN(test_hooks_remove) {
hooks_t hooks = {&test_alloc_hook, NULL, NULL, NULL};
void *handle = hook_install(TSDN_NULL, &hooks);
assert_ptr_ne(handle, NULL, "Hook installation failed");
expect_ptr_ne(handle, NULL, "Hook installation failed");
call_count = 0;
uintptr_t args_raw[4] = {10, 20, 30, 40};
hook_invoke_alloc(hook_alloc_malloc, NULL, 0, args_raw);
assert_d_eq(call_count, 1, "Hook not invoked");
expect_d_eq(call_count, 1, "Hook not invoked");
call_count = 0;
hook_remove(TSDN_NULL, handle);
hook_invoke_alloc(hook_alloc_malloc, NULL, 0, NULL);
assert_d_eq(call_count, 0, "Hook invoked after removal");
expect_d_eq(call_count, 0, "Hook invoked after removal");
}
TEST_END
@ -224,7 +224,7 @@ TEST_BEGIN(test_hooks_alloc_simple) {
/* "Simple" in the sense that we're not in a realloc variant. */
hooks_t hooks = {&test_alloc_hook, NULL, NULL, (void *)123};
void *handle = hook_install(TSDN_NULL, &hooks);
assert_ptr_ne(handle, NULL, "Hook installation failed");
expect_ptr_ne(handle, NULL, "Hook installation failed");
/* Stop malloc from being optimized away. */
volatile int err;
@ -233,69 +233,69 @@ TEST_BEGIN(test_hooks_alloc_simple) {
/* malloc */
reset();
ptr = malloc(1);
assert_d_eq(call_count, 1, "Hook not called");
assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
assert_d_eq(arg_type, (int)hook_alloc_malloc, "Wrong hook type");
assert_ptr_eq(ptr, arg_result, "Wrong result");
assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
expect_d_eq(call_count, 1, "Hook not called");
expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
expect_d_eq(arg_type, (int)hook_alloc_malloc, "Wrong hook type");
expect_ptr_eq(ptr, arg_result, "Wrong result");
expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
"Wrong raw result");
assert_u64_eq((uintptr_t)1, arg_args_raw[0], "Wrong argument");
expect_u64_eq((uintptr_t)1, arg_args_raw[0], "Wrong argument");
free(ptr);
/* posix_memalign */
reset();
err = posix_memalign((void **)&ptr, 1024, 1);
assert_d_eq(call_count, 1, "Hook not called");
assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
assert_d_eq(arg_type, (int)hook_alloc_posix_memalign,
expect_d_eq(call_count, 1, "Hook not called");
expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
expect_d_eq(arg_type, (int)hook_alloc_posix_memalign,
"Wrong hook type");
assert_ptr_eq(ptr, arg_result, "Wrong result");
assert_u64_eq((uintptr_t)err, (uintptr_t)arg_result_raw,
expect_ptr_eq(ptr, arg_result, "Wrong result");
expect_u64_eq((uintptr_t)err, (uintptr_t)arg_result_raw,
"Wrong raw result");
assert_u64_eq((uintptr_t)&ptr, arg_args_raw[0], "Wrong argument");
assert_u64_eq((uintptr_t)1024, arg_args_raw[1], "Wrong argument");
assert_u64_eq((uintptr_t)1, arg_args_raw[2], "Wrong argument");
expect_u64_eq((uintptr_t)&ptr, arg_args_raw[0], "Wrong argument");
expect_u64_eq((uintptr_t)1024, arg_args_raw[1], "Wrong argument");
expect_u64_eq((uintptr_t)1, arg_args_raw[2], "Wrong argument");
free(ptr);
/* aligned_alloc */
reset();
ptr = aligned_alloc(1024, 1);
assert_d_eq(call_count, 1, "Hook not called");
assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
assert_d_eq(arg_type, (int)hook_alloc_aligned_alloc,
expect_d_eq(call_count, 1, "Hook not called");
expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
expect_d_eq(arg_type, (int)hook_alloc_aligned_alloc,
"Wrong hook type");
assert_ptr_eq(ptr, arg_result, "Wrong result");
assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
expect_ptr_eq(ptr, arg_result, "Wrong result");
expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
"Wrong raw result");
assert_u64_eq((uintptr_t)1024, arg_args_raw[0], "Wrong argument");
assert_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong argument");
expect_u64_eq((uintptr_t)1024, arg_args_raw[0], "Wrong argument");
expect_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong argument");
free(ptr);
/* calloc */
reset();
ptr = calloc(11, 13);
assert_d_eq(call_count, 1, "Hook not called");
assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
assert_d_eq(arg_type, (int)hook_alloc_calloc, "Wrong hook type");
assert_ptr_eq(ptr, arg_result, "Wrong result");
assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
expect_d_eq(call_count, 1, "Hook not called");
expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
expect_d_eq(arg_type, (int)hook_alloc_calloc, "Wrong hook type");
expect_ptr_eq(ptr, arg_result, "Wrong result");
expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
"Wrong raw result");
assert_u64_eq((uintptr_t)11, arg_args_raw[0], "Wrong argument");
assert_u64_eq((uintptr_t)13, arg_args_raw[1], "Wrong argument");
expect_u64_eq((uintptr_t)11, arg_args_raw[0], "Wrong argument");
expect_u64_eq((uintptr_t)13, arg_args_raw[1], "Wrong argument");
free(ptr);
/* memalign */
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
reset();
ptr = memalign(1024, 1);
assert_d_eq(call_count, 1, "Hook not called");
assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
assert_d_eq(arg_type, (int)hook_alloc_memalign, "Wrong hook type");
assert_ptr_eq(ptr, arg_result, "Wrong result");
assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
expect_d_eq(call_count, 1, "Hook not called");
expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
expect_d_eq(arg_type, (int)hook_alloc_memalign, "Wrong hook type");
expect_ptr_eq(ptr, arg_result, "Wrong result");
expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
"Wrong raw result");
assert_u64_eq((uintptr_t)1024, arg_args_raw[0], "Wrong argument");
assert_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong argument");
expect_u64_eq((uintptr_t)1024, arg_args_raw[0], "Wrong argument");
expect_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong argument");
free(ptr);
#endif /* JEMALLOC_OVERRIDE_MEMALIGN */
@ -303,27 +303,27 @@ TEST_BEGIN(test_hooks_alloc_simple) {
#ifdef JEMALLOC_OVERRIDE_VALLOC
reset();
ptr = valloc(1);
assert_d_eq(call_count, 1, "Hook not called");
assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
assert_d_eq(arg_type, (int)hook_alloc_valloc, "Wrong hook type");
assert_ptr_eq(ptr, arg_result, "Wrong result");
assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
expect_d_eq(call_count, 1, "Hook not called");
expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
expect_d_eq(arg_type, (int)hook_alloc_valloc, "Wrong hook type");
expect_ptr_eq(ptr, arg_result, "Wrong result");
expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
"Wrong raw result");
assert_u64_eq((uintptr_t)1, arg_args_raw[0], "Wrong argument");
expect_u64_eq((uintptr_t)1, arg_args_raw[0], "Wrong argument");
free(ptr);
#endif /* JEMALLOC_OVERRIDE_VALLOC */
/* mallocx */
reset();
ptr = mallocx(1, MALLOCX_LG_ALIGN(10));
assert_d_eq(call_count, 1, "Hook not called");
assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
assert_d_eq(arg_type, (int)hook_alloc_mallocx, "Wrong hook type");
assert_ptr_eq(ptr, arg_result, "Wrong result");
assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
expect_d_eq(call_count, 1, "Hook not called");
expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
expect_d_eq(arg_type, (int)hook_alloc_mallocx, "Wrong hook type");
expect_ptr_eq(ptr, arg_result, "Wrong result");
expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
"Wrong raw result");
assert_u64_eq((uintptr_t)1, arg_args_raw[0], "Wrong argument");
assert_u64_eq((uintptr_t)MALLOCX_LG_ALIGN(10), arg_args_raw[1],
expect_u64_eq((uintptr_t)1, arg_args_raw[0], "Wrong argument");
expect_u64_eq((uintptr_t)MALLOCX_LG_ALIGN(10), arg_args_raw[1],
"Wrong flags");
free(ptr);
@ -335,7 +335,7 @@ TEST_BEGIN(test_hooks_dalloc_simple) {
/* "Simple" in the sense that we're not in a realloc variant. */
hooks_t hooks = {NULL, &test_dalloc_hook, NULL, (void *)123};
void *handle = hook_install(TSDN_NULL, &hooks);
assert_ptr_ne(handle, NULL, "Hook installation failed");
expect_ptr_ne(handle, NULL, "Hook installation failed");
void *volatile ptr;
@ -343,35 +343,35 @@ TEST_BEGIN(test_hooks_dalloc_simple) {
reset();
ptr = malloc(1);
free(ptr);
assert_d_eq(call_count, 1, "Hook not called");
assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
assert_d_eq(arg_type, (int)hook_dalloc_free, "Wrong hook type");
assert_ptr_eq(ptr, arg_address, "Wrong pointer freed");
assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg");
expect_d_eq(call_count, 1, "Hook not called");
expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
expect_d_eq(arg_type, (int)hook_dalloc_free, "Wrong hook type");
expect_ptr_eq(ptr, arg_address, "Wrong pointer freed");
expect_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg");
/* dallocx() */
reset();
ptr = malloc(1);
dallocx(ptr, MALLOCX_TCACHE_NONE);
assert_d_eq(call_count, 1, "Hook not called");
assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
assert_d_eq(arg_type, (int)hook_dalloc_dallocx, "Wrong hook type");
assert_ptr_eq(ptr, arg_address, "Wrong pointer freed");
assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg");
assert_u64_eq((uintptr_t)MALLOCX_TCACHE_NONE, arg_args_raw[1],
expect_d_eq(call_count, 1, "Hook not called");
expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
expect_d_eq(arg_type, (int)hook_dalloc_dallocx, "Wrong hook type");
expect_ptr_eq(ptr, arg_address, "Wrong pointer freed");
expect_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg");
expect_u64_eq((uintptr_t)MALLOCX_TCACHE_NONE, arg_args_raw[1],
"Wrong raw arg");
/* sdallocx() */
reset();
ptr = malloc(1);
sdallocx(ptr, 1, MALLOCX_TCACHE_NONE);
assert_d_eq(call_count, 1, "Hook not called");
assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
assert_d_eq(arg_type, (int)hook_dalloc_sdallocx, "Wrong hook type");
assert_ptr_eq(ptr, arg_address, "Wrong pointer freed");
assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg");
assert_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong raw arg");
assert_u64_eq((uintptr_t)MALLOCX_TCACHE_NONE, arg_args_raw[2],
expect_d_eq(call_count, 1, "Hook not called");
expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
expect_d_eq(arg_type, (int)hook_dalloc_sdallocx, "Wrong hook type");
expect_ptr_eq(ptr, arg_address, "Wrong pointer freed");
expect_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg");
expect_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong raw arg");
expect_u64_eq((uintptr_t)MALLOCX_TCACHE_NONE, arg_args_raw[2],
"Wrong raw arg");
hook_remove(TSDN_NULL, handle);
@ -382,7 +382,7 @@ TEST_BEGIN(test_hooks_expand_simple) {
/* "Simple" in the sense that we're not in a realloc variant. */
hooks_t hooks = {NULL, NULL, &test_expand_hook, (void *)123};
void *handle = hook_install(TSDN_NULL, &hooks);
assert_ptr_ne(handle, NULL, "Hook installation failed");
expect_ptr_ne(handle, NULL, "Hook installation failed");
void *volatile ptr;
@ -390,17 +390,17 @@ TEST_BEGIN(test_hooks_expand_simple) {
reset();
ptr = malloc(1);
size_t new_usize = xallocx(ptr, 100, 200, MALLOCX_TCACHE_NONE);
assert_d_eq(call_count, 1, "Hook not called");
assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
assert_d_eq(arg_type, (int)hook_expand_xallocx, "Wrong hook type");
assert_ptr_eq(ptr, arg_address, "Wrong pointer expanded");
assert_u64_eq(arg_old_usize, nallocx(1, 0), "Wrong old usize");
assert_u64_eq(arg_new_usize, sallocx(ptr, 0), "Wrong new usize");
assert_u64_eq(new_usize, arg_result_raw, "Wrong result");
assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong arg");
assert_u64_eq(100, arg_args_raw[1], "Wrong arg");
assert_u64_eq(200, arg_args_raw[2], "Wrong arg");
assert_u64_eq(MALLOCX_TCACHE_NONE, arg_args_raw[3], "Wrong arg");
expect_d_eq(call_count, 1, "Hook not called");
expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
expect_d_eq(arg_type, (int)hook_expand_xallocx, "Wrong hook type");
expect_ptr_eq(ptr, arg_address, "Wrong pointer expanded");
expect_u64_eq(arg_old_usize, nallocx(1, 0), "Wrong old usize");
expect_u64_eq(arg_new_usize, sallocx(ptr, 0), "Wrong new usize");
expect_u64_eq(new_usize, arg_result_raw, "Wrong result");
expect_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong arg");
expect_u64_eq(100, arg_args_raw[1], "Wrong arg");
expect_u64_eq(200, arg_args_raw[2], "Wrong arg");
expect_u64_eq(MALLOCX_TCACHE_NONE, arg_args_raw[3], "Wrong arg");
hook_remove(TSDN_NULL, handle);
}
@ -410,21 +410,21 @@ TEST_BEGIN(test_hooks_realloc_as_malloc_or_free) {
hooks_t hooks = {&test_alloc_hook, &test_dalloc_hook,
&test_expand_hook, (void *)123};
void *handle = hook_install(TSDN_NULL, &hooks);
assert_ptr_ne(handle, NULL, "Hook installation failed");
expect_ptr_ne(handle, NULL, "Hook installation failed");
void *volatile ptr;
/* realloc(NULL, size) as malloc */
reset();
ptr = realloc(NULL, 1);
assert_d_eq(call_count, 1, "Hook not called");
assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
assert_d_eq(arg_type, (int)hook_alloc_realloc, "Wrong hook type");
assert_ptr_eq(ptr, arg_result, "Wrong result");
assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
expect_d_eq(call_count, 1, "Hook not called");
expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
expect_d_eq(arg_type, (int)hook_alloc_realloc, "Wrong hook type");
expect_ptr_eq(ptr, arg_result, "Wrong result");
expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
"Wrong raw result");
assert_u64_eq((uintptr_t)NULL, arg_args_raw[0], "Wrong argument");
assert_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong argument");
expect_u64_eq((uintptr_t)NULL, arg_args_raw[0], "Wrong argument");
expect_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong argument");
free(ptr);
/* realloc(ptr, 0) as free */
@ -432,29 +432,29 @@ TEST_BEGIN(test_hooks_realloc_as_malloc_or_free) {
ptr = malloc(1);
reset();
realloc(ptr, 0);
assert_d_eq(call_count, 1, "Hook not called");
assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
assert_d_eq(arg_type, (int)hook_dalloc_realloc,
expect_d_eq(call_count, 1, "Hook not called");
expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
expect_d_eq(arg_type, (int)hook_dalloc_realloc,
"Wrong hook type");
assert_ptr_eq(ptr, arg_address,
expect_ptr_eq(ptr, arg_address,
"Wrong pointer freed");
assert_u64_eq((uintptr_t)ptr, arg_args_raw[0],
expect_u64_eq((uintptr_t)ptr, arg_args_raw[0],
"Wrong raw arg");
assert_u64_eq((uintptr_t)0, arg_args_raw[1],
expect_u64_eq((uintptr_t)0, arg_args_raw[1],
"Wrong raw arg");
}
/* realloc(NULL, 0) as malloc(0) */
reset();
ptr = realloc(NULL, 0);
assert_d_eq(call_count, 1, "Hook not called");
assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
assert_d_eq(arg_type, (int)hook_alloc_realloc, "Wrong hook type");
assert_ptr_eq(ptr, arg_result, "Wrong result");
assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
expect_d_eq(call_count, 1, "Hook not called");
expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
expect_d_eq(arg_type, (int)hook_alloc_realloc, "Wrong hook type");
expect_ptr_eq(ptr, arg_result, "Wrong result");
expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
"Wrong raw result");
assert_u64_eq((uintptr_t)NULL, arg_args_raw[0], "Wrong argument");
assert_u64_eq((uintptr_t)0, arg_args_raw[1], "Wrong argument");
expect_u64_eq((uintptr_t)NULL, arg_args_raw[0], "Wrong argument");
expect_u64_eq((uintptr_t)0, arg_args_raw[1], "Wrong argument");
free(ptr);
hook_remove(TSDN_NULL, handle);
@ -467,7 +467,7 @@ do_realloc_test(void *(*ralloc)(void *, size_t, int), int flags,
hooks_t hooks = {&test_alloc_hook, &test_dalloc_hook,
&test_expand_hook, (void *)123};
void *handle = hook_install(TSDN_NULL, &hooks);
assert_ptr_ne(handle, NULL, "Hook installation failed");
expect_ptr_ne(handle, NULL, "Hook installation failed");
void *volatile ptr;
void *volatile ptr2;
@ -476,16 +476,16 @@ do_realloc_test(void *(*ralloc)(void *, size_t, int), int flags,
ptr = malloc(129);
reset();
ptr2 = ralloc(ptr, 130, flags);
assert_ptr_eq(ptr, ptr2, "Small realloc moved");
expect_ptr_eq(ptr, ptr2, "Small realloc moved");
assert_d_eq(call_count, 1, "Hook not called");
assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
assert_d_eq(arg_type, expand_type, "Wrong hook type");
assert_ptr_eq(ptr, arg_address, "Wrong address");
assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
expect_d_eq(call_count, 1, "Hook not called");
expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
expect_d_eq(arg_type, expand_type, "Wrong hook type");
expect_ptr_eq(ptr, arg_address, "Wrong address");
expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
"Wrong raw result");
assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong argument");
assert_u64_eq((uintptr_t)130, arg_args_raw[1], "Wrong argument");
expect_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong argument");
expect_u64_eq((uintptr_t)130, arg_args_raw[1], "Wrong argument");
free(ptr);
/*
@ -499,19 +499,19 @@ do_realloc_test(void *(*ralloc)(void *, size_t, int), int flags,
ptr = ralloc(ptr2, 2 * 1024 * 1024, flags);
/* ptr is the new address, ptr2 is the old address. */
if (ptr == ptr2) {
assert_d_eq(call_count, 1, "Hook not called");
assert_d_eq(arg_type, expand_type, "Wrong hook type");
expect_d_eq(call_count, 1, "Hook not called");
expect_d_eq(arg_type, expand_type, "Wrong hook type");
} else {
assert_d_eq(call_count, 2, "Wrong hooks called");
assert_ptr_eq(ptr, arg_result, "Wrong address");
assert_d_eq(arg_type, dalloc_type, "Wrong hook type");
expect_d_eq(call_count, 2, "Wrong hooks called");
expect_ptr_eq(ptr, arg_result, "Wrong address");
expect_d_eq(arg_type, dalloc_type, "Wrong hook type");
}
assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
assert_ptr_eq(ptr2, arg_address, "Wrong address");
assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
expect_ptr_eq(ptr2, arg_address, "Wrong address");
expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
"Wrong raw result");
assert_u64_eq((uintptr_t)ptr2, arg_args_raw[0], "Wrong argument");
assert_u64_eq((uintptr_t)2 * 1024 * 1024, arg_args_raw[1],
expect_u64_eq((uintptr_t)ptr2, arg_args_raw[0], "Wrong argument");
expect_u64_eq((uintptr_t)2 * 1024 * 1024, arg_args_raw[1],
"Wrong argument");
free(ptr);
@ -519,34 +519,34 @@ do_realloc_test(void *(*ralloc)(void *, size_t, int), int flags,
ptr = malloc(8);
reset();
ptr2 = ralloc(ptr, 128, flags);
assert_ptr_ne(ptr, ptr2, "Small realloc didn't move");
expect_ptr_ne(ptr, ptr2, "Small realloc didn't move");
assert_d_eq(call_count, 2, "Hook not called");
assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
assert_d_eq(arg_type, dalloc_type, "Wrong hook type");
assert_ptr_eq(ptr, arg_address, "Wrong address");
assert_ptr_eq(ptr2, arg_result, "Wrong address");
assert_u64_eq((uintptr_t)ptr2, (uintptr_t)arg_result_raw,
expect_d_eq(call_count, 2, "Hook not called");
expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
expect_d_eq(arg_type, dalloc_type, "Wrong hook type");
expect_ptr_eq(ptr, arg_address, "Wrong address");
expect_ptr_eq(ptr2, arg_result, "Wrong address");
expect_u64_eq((uintptr_t)ptr2, (uintptr_t)arg_result_raw,
"Wrong raw result");
assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong argument");
assert_u64_eq((uintptr_t)128, arg_args_raw[1], "Wrong argument");
expect_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong argument");
expect_u64_eq((uintptr_t)128, arg_args_raw[1], "Wrong argument");
free(ptr2);
/* Realloc with move, large. */
ptr = malloc(1);
reset();
ptr2 = ralloc(ptr, 2 * 1024 * 1024, flags);
assert_ptr_ne(ptr, ptr2, "Large realloc didn't move");
expect_ptr_ne(ptr, ptr2, "Large realloc didn't move");
assert_d_eq(call_count, 2, "Hook not called");
assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
assert_d_eq(arg_type, dalloc_type, "Wrong hook type");
assert_ptr_eq(ptr, arg_address, "Wrong address");
assert_ptr_eq(ptr2, arg_result, "Wrong address");
assert_u64_eq((uintptr_t)ptr2, (uintptr_t)arg_result_raw,
expect_d_eq(call_count, 2, "Hook not called");
expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
expect_d_eq(arg_type, dalloc_type, "Wrong hook type");
expect_ptr_eq(ptr, arg_address, "Wrong address");
expect_ptr_eq(ptr2, arg_result, "Wrong address");
expect_u64_eq((uintptr_t)ptr2, (uintptr_t)arg_result_raw,
"Wrong raw result");
assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong argument");
assert_u64_eq((uintptr_t)2 * 1024 * 1024, arg_args_raw[1],
expect_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong argument");
expect_u64_eq((uintptr_t)2 * 1024 * 1024, arg_args_raw[1],
"Wrong argument");
free(ptr2);

View File

@ -11,37 +11,37 @@ TEST_BEGIN(huge_bind_thread) {
size_t sz = sizeof(unsigned);
/* Bind to a manual arena. */
assert_d_eq(mallctl("arenas.create", &arena1, &sz, NULL, 0), 0,
expect_d_eq(mallctl("arenas.create", &arena1, &sz, NULL, 0), 0,
"Failed to create arena");
assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena1,
expect_d_eq(mallctl("thread.arena", NULL, NULL, &arena1,
sizeof(arena1)), 0, "Fail to bind thread");
void *ptr = mallocx(HUGE_SZ, 0);
assert_ptr_not_null(ptr, "Fail to allocate huge size");
assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr,
expect_ptr_not_null(ptr, "Fail to allocate huge size");
expect_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr,
sizeof(ptr)), 0, "Unexpected mallctl() failure");
assert_u_eq(arena1, arena2, "Wrong arena used after binding");
expect_u_eq(arena1, arena2, "Wrong arena used after binding");
dallocx(ptr, 0);
/* Switch back to arena 0. */
test_skip_if(have_percpu_arena &&
PERCPU_ARENA_ENABLED(opt_percpu_arena));
arena2 = 0;
assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena2,
expect_d_eq(mallctl("thread.arena", NULL, NULL, &arena2,
sizeof(arena2)), 0, "Fail to bind thread");
ptr = mallocx(SMALL_SZ, MALLOCX_TCACHE_NONE);
assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr,
expect_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr,
sizeof(ptr)), 0, "Unexpected mallctl() failure");
assert_u_eq(arena2, 0, "Wrong arena used after binding");
expect_u_eq(arena2, 0, "Wrong arena used after binding");
dallocx(ptr, MALLOCX_TCACHE_NONE);
/* Then huge allocation should use the huge arena. */
ptr = mallocx(HUGE_SZ, 0);
assert_ptr_not_null(ptr, "Fail to allocate huge size");
assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr,
expect_ptr_not_null(ptr, "Fail to allocate huge size");
expect_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr,
sizeof(ptr)), 0, "Unexpected mallctl() failure");
assert_u_ne(arena2, 0, "Wrong arena used after binding");
assert_u_ne(arena1, arena2, "Wrong arena used after binding");
expect_u_ne(arena2, 0, "Wrong arena used after binding");
expect_u_ne(arena1, arena2, "Wrong arena used after binding");
dallocx(ptr, 0);
}
TEST_END
@ -50,22 +50,22 @@ TEST_BEGIN(huge_mallocx) {
unsigned arena1, arena2;
size_t sz = sizeof(unsigned);
assert_d_eq(mallctl("arenas.create", &arena1, &sz, NULL, 0), 0,
expect_d_eq(mallctl("arenas.create", &arena1, &sz, NULL, 0), 0,
"Failed to create arena");
void *huge = mallocx(HUGE_SZ, MALLOCX_ARENA(arena1));
assert_ptr_not_null(huge, "Fail to allocate huge size");
assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &huge,
expect_ptr_not_null(huge, "Fail to allocate huge size");
expect_d_eq(mallctl("arenas.lookup", &arena2, &sz, &huge,
sizeof(huge)), 0, "Unexpected mallctl() failure");
assert_u_eq(arena1, arena2, "Wrong arena used for mallocx");
expect_u_eq(arena1, arena2, "Wrong arena used for mallocx");
dallocx(huge, MALLOCX_ARENA(arena1));
void *huge2 = mallocx(HUGE_SZ, 0);
assert_ptr_not_null(huge, "Fail to allocate huge size");
assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &huge2,
expect_ptr_not_null(huge, "Fail to allocate huge size");
expect_d_eq(mallctl("arenas.lookup", &arena2, &sz, &huge2,
sizeof(huge2)), 0, "Unexpected mallctl() failure");
assert_u_ne(arena1, arena2,
expect_u_ne(arena1, arena2,
"Huge allocation should not come from the manual arena.");
assert_u_ne(arena2, 0,
expect_u_ne(arena2, 0,
"Huge allocation should not come from the arena 0.");
dallocx(huge2, 0);
}
@ -75,25 +75,25 @@ TEST_BEGIN(huge_allocation) {
unsigned arena1, arena2;
void *ptr = mallocx(HUGE_SZ, 0);
assert_ptr_not_null(ptr, "Fail to allocate huge size");
expect_ptr_not_null(ptr, "Fail to allocate huge size");
size_t sz = sizeof(unsigned);
assert_d_eq(mallctl("arenas.lookup", &arena1, &sz, &ptr, sizeof(ptr)),
expect_d_eq(mallctl("arenas.lookup", &arena1, &sz, &ptr, sizeof(ptr)),
0, "Unexpected mallctl() failure");
assert_u_gt(arena1, 0, "Huge allocation should not come from arena 0");
expect_u_gt(arena1, 0, "Huge allocation should not come from arena 0");
dallocx(ptr, 0);
ptr = mallocx(HUGE_SZ >> 1, 0);
assert_ptr_not_null(ptr, "Fail to allocate half huge size");
assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr,
expect_ptr_not_null(ptr, "Fail to allocate half huge size");
expect_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr,
sizeof(ptr)), 0, "Unexpected mallctl() failure");
assert_u_ne(arena1, arena2, "Wrong arena used for half huge");
expect_u_ne(arena1, arena2, "Wrong arena used for half huge");
dallocx(ptr, 0);
ptr = mallocx(SMALL_SZ, MALLOCX_TCACHE_NONE);
assert_ptr_not_null(ptr, "Fail to allocate small size");
assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr,
expect_ptr_not_null(ptr, "Fail to allocate small size");
expect_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr,
sizeof(ptr)), 0, "Unexpected mallctl() failure");
assert_u_ne(arena1, arena2,
expect_u_ne(arena1, arena2,
"Huge and small should be from different arenas");
dallocx(ptr, 0);
}

View File

@ -1,11 +1,11 @@
#include "test/jemalloc_test.h"
#define TEST_UTIL_EINVAL(node, a, b, c, d, why_inval) do { \
assert_d_eq(mallctl("experimental.utilization." node, \
expect_d_eq(mallctl("experimental.utilization." node, \
a, b, c, d), EINVAL, "Should fail when " why_inval); \
assert_zu_eq(out_sz, out_sz_ref, \
expect_zu_eq(out_sz, out_sz_ref, \
"Output size touched when given invalid arguments"); \
assert_d_eq(memcmp(out, out_ref, out_sz_ref), 0, \
expect_d_eq(memcmp(out, out_ref, out_sz_ref), 0, \
"Output content touched when given invalid arguments"); \
} while (0)
@ -15,11 +15,11 @@
TEST_UTIL_EINVAL("batch_query", a, b, c, d, why_inval)
#define TEST_UTIL_VALID(node) do { \
assert_d_eq(mallctl("experimental.utilization." node, \
expect_d_eq(mallctl("experimental.utilization." node, \
out, &out_sz, in, in_sz), 0, \
"Should return 0 on correct arguments"); \
assert_zu_eq(out_sz, out_sz_ref, "incorrect output size"); \
assert_d_ne(memcmp(out, out_ref, out_sz_ref), 0, \
expect_zu_eq(out_sz, out_sz_ref, "incorrect output size"); \
expect_d_ne(memcmp(out, out_ref, out_sz_ref), 0, \
"Output content should be changed"); \
} while (0)
@ -43,11 +43,11 @@ TEST_BEGIN(test_query) {
void *out_ref = mallocx(out_sz, 0);
size_t out_sz_ref = out_sz;
assert_ptr_not_null(p,
expect_ptr_not_null(p,
"test pointer allocation failed");
assert_ptr_not_null(out,
expect_ptr_not_null(out,
"test output allocation failed");
assert_ptr_not_null(out_ref,
expect_ptr_not_null(out_ref,
"test reference output allocation failed");
#define SLABCUR_READ(out) (*(void **)out)
@ -83,60 +83,60 @@ TEST_BEGIN(test_query) {
/* Examine output for valid call */
TEST_UTIL_VALID("query");
assert_zu_le(sz, SIZE_READ(out),
expect_zu_le(sz, SIZE_READ(out),
"Extent size should be at least allocation size");
assert_zu_eq(SIZE_READ(out) & (PAGE - 1), 0,
expect_zu_eq(SIZE_READ(out) & (PAGE - 1), 0,
"Extent size should be a multiple of page size");
if (sz <= SC_SMALL_MAXCLASS) {
assert_zu_le(NFREE_READ(out), NREGS_READ(out),
expect_zu_le(NFREE_READ(out), NREGS_READ(out),
"Extent free count exceeded region count");
assert_zu_le(NREGS_READ(out), SIZE_READ(out),
expect_zu_le(NREGS_READ(out), SIZE_READ(out),
"Extent region count exceeded size");
assert_zu_ne(NREGS_READ(out), 0,
expect_zu_ne(NREGS_READ(out), 0,
"Extent region count must be positive");
assert_true(NFREE_READ(out) == 0 || (SLABCUR_READ(out)
expect_true(NFREE_READ(out) == 0 || (SLABCUR_READ(out)
!= NULL && SLABCUR_READ(out) <= p),
"Allocation should follow first fit principle");
if (config_stats) {
assert_zu_le(BIN_NFREE_READ(out),
expect_zu_le(BIN_NFREE_READ(out),
BIN_NREGS_READ(out),
"Bin free count exceeded region count");
assert_zu_ne(BIN_NREGS_READ(out), 0,
expect_zu_ne(BIN_NREGS_READ(out), 0,
"Bin region count must be positive");
assert_zu_le(NFREE_READ(out),
expect_zu_le(NFREE_READ(out),
BIN_NFREE_READ(out),
"Extent free count exceeded bin free count");
assert_zu_le(NREGS_READ(out),
expect_zu_le(NREGS_READ(out),
BIN_NREGS_READ(out),
"Extent region count exceeded "
"bin region count");
assert_zu_eq(BIN_NREGS_READ(out)
expect_zu_eq(BIN_NREGS_READ(out)
% NREGS_READ(out), 0,
"Bin region count isn't a multiple of "
"extent region count");
assert_zu_le(
expect_zu_le(
BIN_NFREE_READ(out) - NFREE_READ(out),
BIN_NREGS_READ(out) - NREGS_READ(out),
"Free count in other extents in the bin "
"exceeded region count in other extents "
"in the bin");
assert_zu_le(NREGS_READ(out) - NFREE_READ(out),
expect_zu_le(NREGS_READ(out) - NFREE_READ(out),
BIN_NREGS_READ(out) - BIN_NFREE_READ(out),
"Extent utilized count exceeded "
"bin utilized count");
}
} else {
assert_zu_eq(NFREE_READ(out), 0,
expect_zu_eq(NFREE_READ(out), 0,
"Extent free count should be zero");
assert_zu_eq(NREGS_READ(out), 1,
expect_zu_eq(NREGS_READ(out), 1,
"Extent region count should be one");
assert_ptr_null(SLABCUR_READ(out),
expect_ptr_null(SLABCUR_READ(out),
"Current slab must be null for large size classes");
if (config_stats) {
assert_zu_eq(BIN_NFREE_READ(out), 0,
expect_zu_eq(BIN_NFREE_READ(out), 0,
"Bin free count must be zero for "
"large sizes");
assert_zu_eq(BIN_NREGS_READ(out), 0,
expect_zu_eq(BIN_NREGS_READ(out), 0,
"Bin region count must be zero for "
"large sizes");
}
@ -174,8 +174,8 @@ TEST_BEGIN(test_batch) {
size_t out_ref[] = {-1, -1, -1, -1, -1, -1};
size_t out_sz_ref = out_sz;
assert_ptr_not_null(p, "test pointer allocation failed");
assert_ptr_not_null(q, "test pointer allocation failed");
expect_ptr_not_null(p, "test pointer allocation failed");
expect_ptr_not_null(q, "test pointer allocation failed");
/* Test invalid argument(s) errors */
TEST_UTIL_BATCH_EINVAL(NULL, &out_sz, in, in_sz,
@ -201,7 +201,7 @@ TEST_BEGIN(test_batch) {
/* Examine output for valid calls */
#define TEST_EQUAL_REF(i, message) \
assert_d_eq(memcmp(out + (i) * 3, out_ref + (i) * 3, 3), 0, message)
expect_d_eq(memcmp(out + (i) * 3, out_ref + (i) * 3, 3), 0, message)
#define NFREE_READ(out, i) out[(i) * 3]
#define NREGS_READ(out, i) out[(i) * 3 + 1]
@ -210,21 +210,21 @@ TEST_BEGIN(test_batch) {
out_sz_ref = out_sz /= 2;
in_sz /= 2;
TEST_UTIL_BATCH_VALID;
assert_zu_le(sz, SIZE_READ(out, 0),
expect_zu_le(sz, SIZE_READ(out, 0),
"Extent size should be at least allocation size");
assert_zu_eq(SIZE_READ(out, 0) & (PAGE - 1), 0,
expect_zu_eq(SIZE_READ(out, 0) & (PAGE - 1), 0,
"Extent size should be a multiple of page size");
if (sz <= SC_SMALL_MAXCLASS) {
assert_zu_le(NFREE_READ(out, 0), NREGS_READ(out, 0),
expect_zu_le(NFREE_READ(out, 0), NREGS_READ(out, 0),
"Extent free count exceeded region count");
assert_zu_le(NREGS_READ(out, 0), SIZE_READ(out, 0),
expect_zu_le(NREGS_READ(out, 0), SIZE_READ(out, 0),
"Extent region count exceeded size");
assert_zu_ne(NREGS_READ(out, 0), 0,
expect_zu_ne(NREGS_READ(out, 0), 0,
"Extent region count must be positive");
} else {
assert_zu_eq(NFREE_READ(out, 0), 0,
expect_zu_eq(NFREE_READ(out, 0), 0,
"Extent free count should be zero");
assert_zu_eq(NREGS_READ(out, 0), 1,
expect_zu_eq(NREGS_READ(out, 0), 1,
"Extent region count should be one");
}
TEST_EQUAL_REF(1,
@ -236,15 +236,15 @@ TEST_BEGIN(test_batch) {
TEST_UTIL_BATCH_VALID;
TEST_EQUAL_REF(0, "Statistics should be stable across calls");
if (sz <= SC_SMALL_MAXCLASS) {
assert_zu_le(NFREE_READ(out, 1), NREGS_READ(out, 1),
expect_zu_le(NFREE_READ(out, 1), NREGS_READ(out, 1),
"Extent free count exceeded region count");
} else {
assert_zu_eq(NFREE_READ(out, 0), 0,
expect_zu_eq(NFREE_READ(out, 0), 0,
"Extent free count should be zero");
}
assert_zu_eq(NREGS_READ(out, 0), NREGS_READ(out, 1),
expect_zu_eq(NREGS_READ(out, 0), NREGS_READ(out, 1),
"Extent region count should be same for same region size");
assert_zu_eq(SIZE_READ(out, 0), SIZE_READ(out, 1),
expect_zu_eq(SIZE_READ(out, 0), SIZE_READ(out, 1),
"Extent size should be same for same region size");
#undef SIZE_READ
@ -261,7 +261,7 @@ TEST_END
int
main(void) {
assert_zu_lt(SC_SMALL_MAXCLASS, TEST_MAX_SIZE,
expect_zu_lt(SC_SMALL_MAXCLASS, TEST_MAX_SIZE,
"Test case cannot cover large classes");
return test(test_query, test_batch);
}

View File

@ -20,7 +20,7 @@ arena_dalloc_junk_small_intercept(void *ptr, const bin_info_t *bin_info) {
arena_dalloc_junk_small_orig(ptr, bin_info);
for (i = 0; i < bin_info->reg_size; i++) {
assert_u_eq(((uint8_t *)ptr)[i], JEMALLOC_FREE_JUNK,
expect_u_eq(((uint8_t *)ptr)[i], JEMALLOC_FREE_JUNK,
"Missing junk fill for byte %zu/%zu of deallocated region",
i, bin_info->reg_size);
}
@ -35,7 +35,7 @@ large_dalloc_junk_intercept(void *ptr, size_t usize) {
large_dalloc_junk_orig(ptr, usize);
for (i = 0; i < usize; i++) {
assert_u_eq(((uint8_t *)ptr)[i], JEMALLOC_FREE_JUNK,
expect_u_eq(((uint8_t *)ptr)[i], JEMALLOC_FREE_JUNK,
"Missing junk fill for byte %zu/%zu of deallocated region",
i, usize);
}
@ -68,22 +68,22 @@ test_junk(size_t sz_min, size_t sz_max) {
sz_prev = 0;
s = (uint8_t *)mallocx(sz_min, 0);
assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");
expect_ptr_not_null((void *)s, "Unexpected mallocx() failure");
for (sz = sallocx(s, 0); sz <= sz_max;
sz_prev = sz, sz = sallocx(s, 0)) {
if (sz_prev > 0) {
assert_u_eq(s[0], 'a',
expect_u_eq(s[0], 'a',
"Previously allocated byte %zu/%zu is corrupted",
ZU(0), sz_prev);
assert_u_eq(s[sz_prev-1], 'a',
expect_u_eq(s[sz_prev-1], 'a',
"Previously allocated byte %zu/%zu is corrupted",
sz_prev-1, sz_prev);
}
for (i = sz_prev; i < sz; i++) {
if (opt_junk_alloc) {
assert_u_eq(s[i], JEMALLOC_ALLOC_JUNK,
expect_u_eq(s[i], JEMALLOC_ALLOC_JUNK,
"Newly allocated byte %zu/%zu isn't "
"junk-filled", i, sz);
}
@ -94,14 +94,14 @@ test_junk(size_t sz_min, size_t sz_max) {
uint8_t *t;
watch_junking(s);
t = (uint8_t *)rallocx(s, sz+1, 0);
assert_ptr_not_null((void *)t,
expect_ptr_not_null((void *)t,
"Unexpected rallocx() failure");
assert_zu_ge(sallocx(t, 0), sz+1,
expect_zu_ge(sallocx(t, 0), sz+1,
"Unexpectedly small rallocx() result");
if (!background_thread_enabled()) {
assert_ptr_ne(s, t,
expect_ptr_ne(s, t,
"Unexpected in-place rallocx()");
assert_true(!opt_junk_free || saw_junking,
expect_true(!opt_junk_free || saw_junking,
"Expected region of size %zu to be "
"junk-filled", sz);
}
@ -111,7 +111,7 @@ test_junk(size_t sz_min, size_t sz_max) {
watch_junking(s);
dallocx(s, 0);
assert_true(!opt_junk_free || saw_junking,
expect_true(!opt_junk_free || saw_junking,
"Expected region of size %zu to be junk-filled", sz);
if (opt_junk_free) {

View File

@ -30,7 +30,7 @@ expect_no_logging(const char *names) {
count++;
log_do_end(log_l2_a)
}
assert_d_eq(count, 0, "Disabled logging not ignored!");
expect_d_eq(count, 0, "Disabled logging not ignored!");
}
TEST_BEGIN(test_log_disabled) {
@ -61,7 +61,7 @@ TEST_BEGIN(test_log_enabled_direct) {
count++;
log_do_end(log_l1)
}
assert_d_eq(count, 10, "Mis-logged!");
expect_d_eq(count, 10, "Mis-logged!");
count = 0;
update_log_var_names("l1.a");
@ -70,7 +70,7 @@ TEST_BEGIN(test_log_enabled_direct) {
count++;
log_do_end(log_l1_a)
}
assert_d_eq(count, 10, "Mis-logged!");
expect_d_eq(count, 10, "Mis-logged!");
count = 0;
update_log_var_names("l1.a|abc|l2|def");
@ -83,7 +83,7 @@ TEST_BEGIN(test_log_enabled_direct) {
count++;
log_do_end(log_l2)
}
assert_d_eq(count, 20, "Mis-logged!");
expect_d_eq(count, 20, "Mis-logged!");
}
TEST_END
@ -133,7 +133,7 @@ TEST_BEGIN(test_log_enabled_indirect) {
log_do_end(log_l2_b_b)
}
assert_d_eq(count, 40, "Mis-logged!");
expect_d_eq(count, 40, "Mis-logged!");
}
TEST_END
@ -155,7 +155,7 @@ TEST_BEGIN(test_log_enabled_global) {
count++;
log_do_end(log_l2_a_a)
}
assert_d_eq(count, 20, "Mis-logged!");
expect_d_eq(count, 20, "Mis-logged!");
}
TEST_END
@ -171,7 +171,7 @@ TEST_BEGIN(test_logs_if_no_init) {
count++;
log_do_end(l)
}
assert_d_eq(count, 0, "Logging shouldn't happen if not initialized.");
expect_d_eq(count, 0, "Logging shouldn't happen if not initialized.");
}
TEST_END

View File

@ -7,25 +7,25 @@ TEST_BEGIN(test_mallctl_errors) {
uint64_t epoch;
size_t sz;
assert_d_eq(mallctl("no_such_name", NULL, NULL, NULL, 0), ENOENT,
expect_d_eq(mallctl("no_such_name", NULL, NULL, NULL, 0), ENOENT,
"mallctl() should return ENOENT for non-existent names");
assert_d_eq(mallctl("version", NULL, NULL, "0.0.0", strlen("0.0.0")),
expect_d_eq(mallctl("version", NULL, NULL, "0.0.0", strlen("0.0.0")),
EPERM, "mallctl() should return EPERM on attempt to write "
"read-only value");
assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
sizeof(epoch)-1), EINVAL,
"mallctl() should return EINVAL for input size mismatch");
assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
sizeof(epoch)+1), EINVAL,
"mallctl() should return EINVAL for input size mismatch");
sz = sizeof(epoch)-1;
assert_d_eq(mallctl("epoch", (void *)&epoch, &sz, NULL, 0), EINVAL,
expect_d_eq(mallctl("epoch", (void *)&epoch, &sz, NULL, 0), EINVAL,
"mallctl() should return EINVAL for output size mismatch");
sz = sizeof(epoch)+1;
assert_d_eq(mallctl("epoch", (void *)&epoch, &sz, NULL, 0), EINVAL,
expect_d_eq(mallctl("epoch", (void *)&epoch, &sz, NULL, 0), EINVAL,
"mallctl() should return EINVAL for output size mismatch");
}
TEST_END
@ -35,7 +35,7 @@ TEST_BEGIN(test_mallctlnametomib_errors) {
size_t miblen;
miblen = sizeof(mib)/sizeof(size_t);
assert_d_eq(mallctlnametomib("no_such_name", mib, &miblen), ENOENT,
expect_d_eq(mallctlnametomib("no_such_name", mib, &miblen), ENOENT,
"mallctlnametomib() should return ENOENT for non-existent names");
}
TEST_END
@ -47,30 +47,30 @@ TEST_BEGIN(test_mallctlbymib_errors) {
size_t miblen;
miblen = sizeof(mib)/sizeof(size_t);
assert_d_eq(mallctlnametomib("version", mib, &miblen), 0,
expect_d_eq(mallctlnametomib("version", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, "0.0.0",
expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, "0.0.0",
strlen("0.0.0")), EPERM, "mallctl() should return EPERM on "
"attempt to write read-only value");
miblen = sizeof(mib)/sizeof(size_t);
assert_d_eq(mallctlnametomib("epoch", mib, &miblen), 0,
expect_d_eq(mallctlnametomib("epoch", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&epoch,
expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&epoch,
sizeof(epoch)-1), EINVAL,
"mallctlbymib() should return EINVAL for input size mismatch");
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&epoch,
expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&epoch,
sizeof(epoch)+1), EINVAL,
"mallctlbymib() should return EINVAL for input size mismatch");
sz = sizeof(epoch)-1;
assert_d_eq(mallctlbymib(mib, miblen, (void *)&epoch, &sz, NULL, 0),
expect_d_eq(mallctlbymib(mib, miblen, (void *)&epoch, &sz, NULL, 0),
EINVAL,
"mallctlbymib() should return EINVAL for output size mismatch");
sz = sizeof(epoch)+1;
assert_d_eq(mallctlbymib(mib, miblen, (void *)&epoch, &sz, NULL, 0),
expect_d_eq(mallctlbymib(mib, miblen, (void *)&epoch, &sz, NULL, 0),
EINVAL,
"mallctlbymib() should return EINVAL for output size mismatch");
}
@ -81,25 +81,25 @@ TEST_BEGIN(test_mallctl_read_write) {
size_t sz = sizeof(old_epoch);
/* Blind. */
assert_d_eq(mallctl("epoch", NULL, NULL, NULL, 0), 0,
expect_d_eq(mallctl("epoch", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl() failure");
assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
expect_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
/* Read. */
assert_d_eq(mallctl("epoch", (void *)&old_epoch, &sz, NULL, 0), 0,
expect_d_eq(mallctl("epoch", (void *)&old_epoch, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
expect_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
/* Write. */
assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&new_epoch,
expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&new_epoch,
sizeof(new_epoch)), 0, "Unexpected mallctl() failure");
assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
expect_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
/* Read+write. */
assert_d_eq(mallctl("epoch", (void *)&old_epoch, &sz,
expect_d_eq(mallctl("epoch", (void *)&old_epoch, &sz,
(void *)&new_epoch, sizeof(new_epoch)), 0,
"Unexpected mallctl() failure");
assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
expect_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
}
TEST_END
@ -109,10 +109,10 @@ TEST_BEGIN(test_mallctlnametomib_short_mib) {
miblen = 3;
mib[3] = 42;
assert_d_eq(mallctlnametomib("arenas.bin.0.nregs", mib, &miblen), 0,
expect_d_eq(mallctlnametomib("arenas.bin.0.nregs", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
assert_zu_eq(miblen, 3, "Unexpected mib output length");
assert_zu_eq(mib[3], 42,
expect_zu_eq(miblen, 3, "Unexpected mib output length");
expect_zu_eq(mib[3], 42,
"mallctlnametomib() wrote past the end of the input mib");
}
TEST_END
@ -121,10 +121,10 @@ TEST_BEGIN(test_mallctl_config) {
#define TEST_MALLCTL_CONFIG(config, t) do { \
t oldval; \
size_t sz = sizeof(oldval); \
assert_d_eq(mallctl("config."#config, (void *)&oldval, &sz, \
expect_d_eq(mallctl("config."#config, (void *)&oldval, &sz, \
NULL, 0), 0, "Unexpected mallctl() failure"); \
assert_b_eq(oldval, config_##config, "Incorrect config value"); \
assert_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \
expect_b_eq(oldval, config_##config, "Incorrect config value"); \
expect_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \
} while (0)
TEST_MALLCTL_CONFIG(cache_oblivious, bool);
@ -152,9 +152,9 @@ TEST_BEGIN(test_mallctl_opt) {
int expected = config_##config ? 0 : ENOENT; \
int result = mallctl("opt."#opt, (void *)&oldval, &sz, NULL, \
0); \
assert_d_eq(result, expected, \
expect_d_eq(result, expected, \
"Unexpected mallctl() result for opt."#opt); \
assert_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \
expect_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \
} while (0)
TEST_MALLCTL_OPT(bool, abort, always);
@ -203,18 +203,18 @@ TEST_BEGIN(test_manpage_example) {
size_t len, miblen;
len = sizeof(nbins);
assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &len, NULL, 0), 0,
expect_d_eq(mallctl("arenas.nbins", (void *)&nbins, &len, NULL, 0), 0,
"Unexpected mallctl() failure");
miblen = 4;
assert_d_eq(mallctlnametomib("arenas.bin.0.size", mib, &miblen), 0,
expect_d_eq(mallctlnametomib("arenas.bin.0.size", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
for (i = 0; i < nbins; i++) {
size_t bin_size;
mib[2] = i;
len = sizeof(bin_size);
assert_d_eq(mallctlbymib(mib, miblen, (void *)&bin_size, &len,
expect_d_eq(mallctlbymib(mib, miblen, (void *)&bin_size, &len,
NULL, 0), 0, "Unexpected mallctlbymib() failure");
/* Do something with bin_size... */
}
@ -226,9 +226,9 @@ TEST_BEGIN(test_tcache_none) {
/* Allocate p and q. */
void *p0 = mallocx(42, 0);
assert_ptr_not_null(p0, "Unexpected mallocx() failure");
expect_ptr_not_null(p0, "Unexpected mallocx() failure");
void *q = mallocx(42, 0);
assert_ptr_not_null(q, "Unexpected mallocx() failure");
expect_ptr_not_null(q, "Unexpected mallocx() failure");
/* Deallocate p and q, but bypass the tcache for q. */
dallocx(p0, 0);
@ -236,8 +236,8 @@ TEST_BEGIN(test_tcache_none) {
/* Make sure that tcache-based allocation returns p, not q. */
void *p1 = mallocx(42, 0);
assert_ptr_not_null(p1, "Unexpected mallocx() failure");
assert_ptr_eq(p0, p1, "Expected tcache to allocate cached region");
expect_ptr_not_null(p1, "Unexpected mallocx() failure");
expect_ptr_eq(p0, p1, "Expected tcache to allocate cached region");
/* Clean up. */
dallocx(p1, MALLOCX_TCACHE_NONE);
@ -258,25 +258,25 @@ TEST_BEGIN(test_tcache) {
/* Create tcaches. */
for (i = 0; i < NTCACHES; i++) {
sz = sizeof(unsigned);
assert_d_eq(mallctl("tcache.create", (void *)&tis[i], &sz, NULL,
expect_d_eq(mallctl("tcache.create", (void *)&tis[i], &sz, NULL,
0), 0, "Unexpected mallctl() failure, i=%u", i);
}
/* Exercise tcache ID recycling. */
for (i = 0; i < NTCACHES; i++) {
assert_d_eq(mallctl("tcache.destroy", NULL, NULL,
expect_d_eq(mallctl("tcache.destroy", NULL, NULL,
(void *)&tis[i], sizeof(unsigned)), 0,
"Unexpected mallctl() failure, i=%u", i);
}
for (i = 0; i < NTCACHES; i++) {
sz = sizeof(unsigned);
assert_d_eq(mallctl("tcache.create", (void *)&tis[i], &sz, NULL,
expect_d_eq(mallctl("tcache.create", (void *)&tis[i], &sz, NULL,
0), 0, "Unexpected mallctl() failure, i=%u", i);
}
/* Flush empty tcaches. */
for (i = 0; i < NTCACHES; i++) {
assert_d_eq(mallctl("tcache.flush", NULL, NULL, (void *)&tis[i],
expect_d_eq(mallctl("tcache.flush", NULL, NULL, (void *)&tis[i],
sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u",
i);
}
@ -284,12 +284,12 @@ TEST_BEGIN(test_tcache) {
/* Cache some allocations. */
for (i = 0; i < NTCACHES; i++) {
ps[i] = mallocx(psz, MALLOCX_TCACHE(tis[i]));
assert_ptr_not_null(ps[i], "Unexpected mallocx() failure, i=%u",
expect_ptr_not_null(ps[i], "Unexpected mallocx() failure, i=%u",
i);
dallocx(ps[i], MALLOCX_TCACHE(tis[i]));
qs[i] = mallocx(qsz, MALLOCX_TCACHE(tis[i]));
assert_ptr_not_null(qs[i], "Unexpected mallocx() failure, i=%u",
expect_ptr_not_null(qs[i], "Unexpected mallocx() failure, i=%u",
i);
dallocx(qs[i], MALLOCX_TCACHE(tis[i]));
}
@ -298,9 +298,9 @@ TEST_BEGIN(test_tcache) {
for (i = 0; i < NTCACHES; i++) {
void *p0 = ps[i];
ps[i] = mallocx(psz, MALLOCX_TCACHE(tis[i]));
assert_ptr_not_null(ps[i], "Unexpected mallocx() failure, i=%u",
expect_ptr_not_null(ps[i], "Unexpected mallocx() failure, i=%u",
i);
assert_ptr_eq(ps[i], p0,
expect_ptr_eq(ps[i], p0,
"Expected mallocx() to allocate cached region, i=%u", i);
}
@ -308,9 +308,9 @@ TEST_BEGIN(test_tcache) {
for (i = 0; i < NTCACHES; i++) {
void *q0 = qs[i];
qs[i] = rallocx(ps[i], qsz, MALLOCX_TCACHE(tis[i]));
assert_ptr_not_null(qs[i], "Unexpected rallocx() failure, i=%u",
expect_ptr_not_null(qs[i], "Unexpected rallocx() failure, i=%u",
i);
assert_ptr_eq(qs[i], q0,
expect_ptr_eq(qs[i], q0,
"Expected rallocx() to allocate cached region, i=%u", i);
/* Avoid undefined behavior in case of test failure. */
if (qs[i] == NULL) {
@ -323,14 +323,14 @@ TEST_BEGIN(test_tcache) {
/* Flush some non-empty tcaches. */
for (i = 0; i < NTCACHES/2; i++) {
assert_d_eq(mallctl("tcache.flush", NULL, NULL, (void *)&tis[i],
expect_d_eq(mallctl("tcache.flush", NULL, NULL, (void *)&tis[i],
sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u",
i);
}
/* Destroy tcaches. */
for (i = 0; i < NTCACHES; i++) {
assert_d_eq(mallctl("tcache.destroy", NULL, NULL,
expect_d_eq(mallctl("tcache.destroy", NULL, NULL,
(void *)&tis[i], sizeof(unsigned)), 0,
"Unexpected mallctl() failure, i=%u", i);
}
@ -342,32 +342,32 @@ TEST_BEGIN(test_thread_arena) {
const char *opa;
size_t sz = sizeof(opa);
assert_d_eq(mallctl("opt.percpu_arena", (void *)&opa, &sz, NULL, 0), 0,
expect_d_eq(mallctl("opt.percpu_arena", (void *)&opa, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
sz = sizeof(unsigned);
assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
expect_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
0, "Unexpected mallctl() failure");
if (opt_oversize_threshold != 0) {
narenas--;
}
assert_u_eq(narenas, opt_narenas, "Number of arenas incorrect");
expect_u_eq(narenas, opt_narenas, "Number of arenas incorrect");
if (strcmp(opa, "disabled") == 0) {
new_arena_ind = narenas - 1;
assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
expect_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
(void *)&new_arena_ind, sizeof(unsigned)), 0,
"Unexpected mallctl() failure");
new_arena_ind = 0;
assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
expect_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
(void *)&new_arena_ind, sizeof(unsigned)), 0,
"Unexpected mallctl() failure");
} else {
assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
expect_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
NULL, 0), 0, "Unexpected mallctl() failure");
new_arena_ind = percpu_arena_ind_limit(opt_percpu_arena) - 1;
if (old_arena_ind != new_arena_ind) {
assert_d_eq(mallctl("thread.arena",
expect_d_eq(mallctl("thread.arena",
(void *)&old_arena_ind, &sz, (void *)&new_arena_ind,
sizeof(unsigned)), EPERM, "thread.arena ctl "
"should not be allowed with percpu arena");
@ -384,32 +384,32 @@ TEST_BEGIN(test_arena_i_initialized) {
bool initialized;
sz = sizeof(narenas);
assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
expect_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
0, "Unexpected mallctl() failure");
assert_d_eq(mallctlnametomib("arena.0.initialized", mib, &miblen), 0,
expect_d_eq(mallctlnametomib("arena.0.initialized", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
for (i = 0; i < narenas; i++) {
mib[1] = i;
sz = sizeof(initialized);
assert_d_eq(mallctlbymib(mib, miblen, &initialized, &sz, NULL,
expect_d_eq(mallctlbymib(mib, miblen, &initialized, &sz, NULL,
0), 0, "Unexpected mallctl() failure");
}
mib[1] = MALLCTL_ARENAS_ALL;
sz = sizeof(initialized);
assert_d_eq(mallctlbymib(mib, miblen, &initialized, &sz, NULL, 0), 0,
expect_d_eq(mallctlbymib(mib, miblen, &initialized, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
assert_true(initialized,
expect_true(initialized,
"Merged arena statistics should always be initialized");
/* Equivalent to the above but using mallctl() directly. */
sz = sizeof(initialized);
assert_d_eq(mallctl(
expect_d_eq(mallctl(
"arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".initialized",
(void *)&initialized, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
assert_true(initialized,
expect_true(initialized,
"Merged arena statistics should always be initialized");
}
TEST_END
@ -418,17 +418,17 @@ TEST_BEGIN(test_arena_i_dirty_decay_ms) {
ssize_t dirty_decay_ms, orig_dirty_decay_ms, prev_dirty_decay_ms;
size_t sz = sizeof(ssize_t);
assert_d_eq(mallctl("arena.0.dirty_decay_ms",
expect_d_eq(mallctl("arena.0.dirty_decay_ms",
(void *)&orig_dirty_decay_ms, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
dirty_decay_ms = -2;
assert_d_eq(mallctl("arena.0.dirty_decay_ms", NULL, NULL,
expect_d_eq(mallctl("arena.0.dirty_decay_ms", NULL, NULL,
(void *)&dirty_decay_ms, sizeof(ssize_t)), EFAULT,
"Unexpected mallctl() success");
dirty_decay_ms = 0x7fffffff;
assert_d_eq(mallctl("arena.0.dirty_decay_ms", NULL, NULL,
expect_d_eq(mallctl("arena.0.dirty_decay_ms", NULL, NULL,
(void *)&dirty_decay_ms, sizeof(ssize_t)), 0,
"Unexpected mallctl() failure");
@ -437,10 +437,10 @@ TEST_BEGIN(test_arena_i_dirty_decay_ms) {
dirty_decay_ms++) {
ssize_t old_dirty_decay_ms;
assert_d_eq(mallctl("arena.0.dirty_decay_ms",
expect_d_eq(mallctl("arena.0.dirty_decay_ms",
(void *)&old_dirty_decay_ms, &sz, (void *)&dirty_decay_ms,
sizeof(ssize_t)), 0, "Unexpected mallctl() failure");
assert_zd_eq(old_dirty_decay_ms, prev_dirty_decay_ms,
expect_zd_eq(old_dirty_decay_ms, prev_dirty_decay_ms,
"Unexpected old arena.0.dirty_decay_ms");
}
}
@ -450,17 +450,17 @@ TEST_BEGIN(test_arena_i_muzzy_decay_ms) {
ssize_t muzzy_decay_ms, orig_muzzy_decay_ms, prev_muzzy_decay_ms;
size_t sz = sizeof(ssize_t);
assert_d_eq(mallctl("arena.0.muzzy_decay_ms",
expect_d_eq(mallctl("arena.0.muzzy_decay_ms",
(void *)&orig_muzzy_decay_ms, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
muzzy_decay_ms = -2;
assert_d_eq(mallctl("arena.0.muzzy_decay_ms", NULL, NULL,
expect_d_eq(mallctl("arena.0.muzzy_decay_ms", NULL, NULL,
(void *)&muzzy_decay_ms, sizeof(ssize_t)), EFAULT,
"Unexpected mallctl() success");
muzzy_decay_ms = 0x7fffffff;
assert_d_eq(mallctl("arena.0.muzzy_decay_ms", NULL, NULL,
expect_d_eq(mallctl("arena.0.muzzy_decay_ms", NULL, NULL,
(void *)&muzzy_decay_ms, sizeof(ssize_t)), 0,
"Unexpected mallctl() failure");
@ -469,10 +469,10 @@ TEST_BEGIN(test_arena_i_muzzy_decay_ms) {
muzzy_decay_ms++) {
ssize_t old_muzzy_decay_ms;
assert_d_eq(mallctl("arena.0.muzzy_decay_ms",
expect_d_eq(mallctl("arena.0.muzzy_decay_ms",
(void *)&old_muzzy_decay_ms, &sz, (void *)&muzzy_decay_ms,
sizeof(ssize_t)), 0, "Unexpected mallctl() failure");
assert_zd_eq(old_muzzy_decay_ms, prev_muzzy_decay_ms,
expect_zd_eq(old_muzzy_decay_ms, prev_muzzy_decay_ms,
"Unexpected old arena.0.muzzy_decay_ms");
}
}
@ -484,19 +484,19 @@ TEST_BEGIN(test_arena_i_purge) {
size_t mib[3];
size_t miblen = 3;
assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
expect_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl() failure");
assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
expect_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
0, "Unexpected mallctl() failure");
assert_d_eq(mallctlnametomib("arena.0.purge", mib, &miblen), 0,
expect_d_eq(mallctlnametomib("arena.0.purge", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
mib[1] = narenas;
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
"Unexpected mallctlbymib() failure");
mib[1] = MALLCTL_ARENAS_ALL;
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
"Unexpected mallctlbymib() failure");
}
TEST_END
@ -507,19 +507,19 @@ TEST_BEGIN(test_arena_i_decay) {
size_t mib[3];
size_t miblen = 3;
assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
expect_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl() failure");
assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
expect_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
0, "Unexpected mallctl() failure");
assert_d_eq(mallctlnametomib("arena.0.decay", mib, &miblen), 0,
expect_d_eq(mallctlnametomib("arena.0.decay", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
mib[1] = narenas;
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
"Unexpected mallctlbymib() failure");
mib[1] = MALLCTL_ARENAS_ALL;
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
"Unexpected mallctlbymib() failure");
}
TEST_END
@ -531,40 +531,40 @@ TEST_BEGIN(test_arena_i_dss) {
size_t miblen;
miblen = sizeof(mib)/sizeof(size_t);
assert_d_eq(mallctlnametomib("arena.0.dss", mib, &miblen), 0,
expect_d_eq(mallctlnametomib("arena.0.dss", mib, &miblen), 0,
"Unexpected mallctlnametomib() error");
dss_prec_new = "disabled";
assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz,
expect_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz,
(void *)&dss_prec_new, sizeof(dss_prec_new)), 0,
"Unexpected mallctl() failure");
assert_str_ne(dss_prec_old, "primary",
expect_str_ne(dss_prec_old, "primary",
"Unexpected default for dss precedence");
assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_new, &sz,
expect_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_new, &sz,
(void *)&dss_prec_old, sizeof(dss_prec_old)), 0,
"Unexpected mallctl() failure");
assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, NULL,
expect_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, NULL,
0), 0, "Unexpected mallctl() failure");
assert_str_ne(dss_prec_old, "primary",
expect_str_ne(dss_prec_old, "primary",
"Unexpected value for dss precedence");
mib[1] = narenas_total_get();
dss_prec_new = "disabled";
assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz,
expect_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz,
(void *)&dss_prec_new, sizeof(dss_prec_new)), 0,
"Unexpected mallctl() failure");
assert_str_ne(dss_prec_old, "primary",
expect_str_ne(dss_prec_old, "primary",
"Unexpected default for dss precedence");
assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_new, &sz,
expect_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_new, &sz,
(void *)&dss_prec_old, sizeof(dss_prec_new)), 0,
"Unexpected mallctl() failure");
assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, NULL,
expect_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, NULL,
0), 0, "Unexpected mallctl() failure");
assert_str_ne(dss_prec_old, "primary",
expect_str_ne(dss_prec_old, "primary",
"Unexpected value for dss precedence");
}
TEST_END
@ -576,43 +576,43 @@ TEST_BEGIN(test_arena_i_retain_grow_limit) {
bool retain_enabled;
size_t sz = sizeof(retain_enabled);
assert_d_eq(mallctl("opt.retain", &retain_enabled, &sz, NULL, 0),
expect_d_eq(mallctl("opt.retain", &retain_enabled, &sz, NULL, 0),
0, "Unexpected mallctl() failure");
test_skip_if(!retain_enabled);
sz = sizeof(default_limit);
miblen = sizeof(mib)/sizeof(size_t);
assert_d_eq(mallctlnametomib("arena.0.retain_grow_limit", mib, &miblen),
expect_d_eq(mallctlnametomib("arena.0.retain_grow_limit", mib, &miblen),
0, "Unexpected mallctlnametomib() error");
assert_d_eq(mallctlbymib(mib, miblen, &default_limit, &sz, NULL, 0), 0,
expect_d_eq(mallctlbymib(mib, miblen, &default_limit, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
assert_zu_eq(default_limit, SC_LARGE_MAXCLASS,
expect_zu_eq(default_limit, SC_LARGE_MAXCLASS,
"Unexpected default for retain_grow_limit");
new_limit = PAGE - 1;
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &new_limit,
expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &new_limit,
sizeof(new_limit)), EFAULT, "Unexpected mallctl() success");
new_limit = PAGE + 1;
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &new_limit,
expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &new_limit,
sizeof(new_limit)), 0, "Unexpected mallctl() failure");
assert_d_eq(mallctlbymib(mib, miblen, &old_limit, &sz, NULL, 0), 0,
expect_d_eq(mallctlbymib(mib, miblen, &old_limit, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
assert_zu_eq(old_limit, PAGE,
expect_zu_eq(old_limit, PAGE,
"Unexpected value for retain_grow_limit");
/* Expect grow less than psize class 10. */
new_limit = sz_pind2sz(10) - 1;
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &new_limit,
expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &new_limit,
sizeof(new_limit)), 0, "Unexpected mallctl() failure");
assert_d_eq(mallctlbymib(mib, miblen, &old_limit, &sz, NULL, 0), 0,
expect_d_eq(mallctlbymib(mib, miblen, &old_limit, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
assert_zu_eq(old_limit, sz_pind2sz(9),
expect_zu_eq(old_limit, sz_pind2sz(9),
"Unexpected value for retain_grow_limit");
/* Restore to default. */
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &default_limit,
expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &default_limit,
sizeof(default_limit)), 0, "Unexpected mallctl() failure");
}
TEST_END
@ -621,17 +621,17 @@ TEST_BEGIN(test_arenas_dirty_decay_ms) {
ssize_t dirty_decay_ms, orig_dirty_decay_ms, prev_dirty_decay_ms;
size_t sz = sizeof(ssize_t);
assert_d_eq(mallctl("arenas.dirty_decay_ms",
expect_d_eq(mallctl("arenas.dirty_decay_ms",
(void *)&orig_dirty_decay_ms, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
dirty_decay_ms = -2;
assert_d_eq(mallctl("arenas.dirty_decay_ms", NULL, NULL,
expect_d_eq(mallctl("arenas.dirty_decay_ms", NULL, NULL,
(void *)&dirty_decay_ms, sizeof(ssize_t)), EFAULT,
"Unexpected mallctl() success");
dirty_decay_ms = 0x7fffffff;
assert_d_eq(mallctl("arenas.dirty_decay_ms", NULL, NULL,
expect_d_eq(mallctl("arenas.dirty_decay_ms", NULL, NULL,
(void *)&dirty_decay_ms, sizeof(ssize_t)), 0,
"Expected mallctl() failure");
@ -640,10 +640,10 @@ TEST_BEGIN(test_arenas_dirty_decay_ms) {
dirty_decay_ms++) {
ssize_t old_dirty_decay_ms;
assert_d_eq(mallctl("arenas.dirty_decay_ms",
expect_d_eq(mallctl("arenas.dirty_decay_ms",
(void *)&old_dirty_decay_ms, &sz, (void *)&dirty_decay_ms,
sizeof(ssize_t)), 0, "Unexpected mallctl() failure");
assert_zd_eq(old_dirty_decay_ms, prev_dirty_decay_ms,
expect_zd_eq(old_dirty_decay_ms, prev_dirty_decay_ms,
"Unexpected old arenas.dirty_decay_ms");
}
}
@ -653,17 +653,17 @@ TEST_BEGIN(test_arenas_muzzy_decay_ms) {
ssize_t muzzy_decay_ms, orig_muzzy_decay_ms, prev_muzzy_decay_ms;
size_t sz = sizeof(ssize_t);
assert_d_eq(mallctl("arenas.muzzy_decay_ms",
expect_d_eq(mallctl("arenas.muzzy_decay_ms",
(void *)&orig_muzzy_decay_ms, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
muzzy_decay_ms = -2;
assert_d_eq(mallctl("arenas.muzzy_decay_ms", NULL, NULL,
expect_d_eq(mallctl("arenas.muzzy_decay_ms", NULL, NULL,
(void *)&muzzy_decay_ms, sizeof(ssize_t)), EFAULT,
"Unexpected mallctl() success");
muzzy_decay_ms = 0x7fffffff;
assert_d_eq(mallctl("arenas.muzzy_decay_ms", NULL, NULL,
expect_d_eq(mallctl("arenas.muzzy_decay_ms", NULL, NULL,
(void *)&muzzy_decay_ms, sizeof(ssize_t)), 0,
"Expected mallctl() failure");
@ -672,10 +672,10 @@ TEST_BEGIN(test_arenas_muzzy_decay_ms) {
muzzy_decay_ms++) {
ssize_t old_muzzy_decay_ms;
assert_d_eq(mallctl("arenas.muzzy_decay_ms",
expect_d_eq(mallctl("arenas.muzzy_decay_ms",
(void *)&old_muzzy_decay_ms, &sz, (void *)&muzzy_decay_ms,
sizeof(ssize_t)), 0, "Unexpected mallctl() failure");
assert_zd_eq(old_muzzy_decay_ms, prev_muzzy_decay_ms,
expect_zd_eq(old_muzzy_decay_ms, prev_muzzy_decay_ms,
"Unexpected old arenas.muzzy_decay_ms");
}
}
@ -685,9 +685,9 @@ TEST_BEGIN(test_arenas_constants) {
#define TEST_ARENAS_CONSTANT(t, name, expected) do { \
t name; \
size_t sz = sizeof(t); \
assert_d_eq(mallctl("arenas."#name, (void *)&name, &sz, NULL, \
expect_d_eq(mallctl("arenas."#name, (void *)&name, &sz, NULL, \
0), 0, "Unexpected mallctl() failure"); \
assert_zu_eq(name, expected, "Incorrect "#name" size"); \
expect_zu_eq(name, expected, "Incorrect "#name" size"); \
} while (0)
TEST_ARENAS_CONSTANT(size_t, quantum, QUANTUM);
@ -703,9 +703,9 @@ TEST_BEGIN(test_arenas_bin_constants) {
#define TEST_ARENAS_BIN_CONSTANT(t, name, expected) do { \
t name; \
size_t sz = sizeof(t); \
assert_d_eq(mallctl("arenas.bin.0."#name, (void *)&name, &sz, \
expect_d_eq(mallctl("arenas.bin.0."#name, (void *)&name, &sz, \
NULL, 0), 0, "Unexpected mallctl() failure"); \
assert_zu_eq(name, expected, "Incorrect "#name" size"); \
expect_zu_eq(name, expected, "Incorrect "#name" size"); \
} while (0)
TEST_ARENAS_BIN_CONSTANT(size_t, size, bin_infos[0].reg_size);
@ -722,9 +722,9 @@ TEST_BEGIN(test_arenas_lextent_constants) {
#define TEST_ARENAS_LEXTENT_CONSTANT(t, name, expected) do { \
t name; \
size_t sz = sizeof(t); \
assert_d_eq(mallctl("arenas.lextent.0."#name, (void *)&name, \
expect_d_eq(mallctl("arenas.lextent.0."#name, (void *)&name, \
&sz, NULL, 0), 0, "Unexpected mallctl() failure"); \
assert_zu_eq(name, expected, "Incorrect "#name" size"); \
expect_zu_eq(name, expected, "Incorrect "#name" size"); \
} while (0)
TEST_ARENAS_LEXTENT_CONSTANT(size_t, size,
@ -738,16 +738,16 @@ TEST_BEGIN(test_arenas_create) {
unsigned narenas_before, arena, narenas_after;
size_t sz = sizeof(unsigned);
assert_d_eq(mallctl("arenas.narenas", (void *)&narenas_before, &sz,
expect_d_eq(mallctl("arenas.narenas", (void *)&narenas_before, &sz,
NULL, 0), 0, "Unexpected mallctl() failure");
assert_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0,
expect_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
assert_d_eq(mallctl("arenas.narenas", (void *)&narenas_after, &sz, NULL,
expect_d_eq(mallctl("arenas.narenas", (void *)&narenas_after, &sz, NULL,
0), 0, "Unexpected mallctl() failure");
assert_u_eq(narenas_before+1, narenas_after,
expect_u_eq(narenas_before+1, narenas_after,
"Unexpected number of arenas before versus after extension");
assert_u_eq(arena, narenas_after-1, "Unexpected arena index");
expect_u_eq(arena, narenas_after-1, "Unexpected arena index");
}
TEST_END
@ -756,13 +756,13 @@ TEST_BEGIN(test_arenas_lookup) {
void *ptr;
size_t sz = sizeof(unsigned);
assert_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0,
expect_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
ptr = mallocx(42, MALLOCX_ARENA(arena) | MALLOCX_TCACHE_NONE);
assert_ptr_not_null(ptr, "Unexpected mallocx() failure");
assert_d_eq(mallctl("arenas.lookup", &arena1, &sz, &ptr, sizeof(ptr)),
expect_ptr_not_null(ptr, "Unexpected mallocx() failure");
expect_d_eq(mallctl("arenas.lookup", &arena1, &sz, &ptr, sizeof(ptr)),
0, "Unexpected mallctl() failure");
assert_u_eq(arena, arena1, "Unexpected arena index");
expect_u_eq(arena, arena1, "Unexpected arena index");
dallocx(ptr, 0);
}
TEST_END
@ -778,18 +778,18 @@ TEST_BEGIN(test_prof_active) {
size_t len = sizeof(bool);
active = true;
assert_d_eq(mallctl("prof.active", NULL, NULL, &active, len), ENOENT,
expect_d_eq(mallctl("prof.active", NULL, NULL, &active, len), ENOENT,
"Setting prof_active to true should fail when opt_prof is off");
old = true;
assert_d_eq(mallctl("prof.active", &old, &len, &active, len), ENOENT,
expect_d_eq(mallctl("prof.active", &old, &len, &active, len), ENOENT,
"Setting prof_active to true should fail when opt_prof is off");
	assert_true(old, "old value should not be touched when mallctl fails");
	expect_true(old, "old value should not be touched when mallctl fails");
active = false;
assert_d_eq(mallctl("prof.active", NULL, NULL, &active, len), 0,
expect_d_eq(mallctl("prof.active", NULL, NULL, &active, len), 0,
"Setting prof_active to false should succeed when opt_prof is off");
assert_d_eq(mallctl("prof.active", &old, &len, &active, len), 0,
expect_d_eq(mallctl("prof.active", &old, &len, &active, len), 0,
"Setting prof_active to false should succeed when opt_prof is off");
assert_false(old, "prof_active should be false when opt_prof is off");
expect_false(old, "prof_active should be false when opt_prof is off");
}
TEST_END
@ -797,7 +797,7 @@ TEST_BEGIN(test_stats_arenas) {
#define TEST_STATS_ARENAS(t, name) do { \
t name; \
size_t sz = sizeof(t); \
assert_d_eq(mallctl("stats.arenas.0."#name, (void *)&name, &sz, \
expect_d_eq(mallctl("stats.arenas.0."#name, (void *)&name, &sz, \
NULL, 0), 0, "Unexpected mallctl() failure"); \
} while (0)
@ -831,21 +831,21 @@ TEST_BEGIN(test_hooks) {
size_t sz = sizeof(handle);
int err = mallctl("experimental.hooks.install", &handle, &sz, &hooks,
sizeof(hooks));
assert_d_eq(err, 0, "Hook installation failed");
assert_ptr_ne(handle, NULL, "Hook installation gave null handle");
expect_d_eq(err, 0, "Hook installation failed");
expect_ptr_ne(handle, NULL, "Hook installation gave null handle");
void *ptr = mallocx(1, 0);
assert_true(hook_called, "Alloc hook not called");
expect_true(hook_called, "Alloc hook not called");
hook_called = false;
free(ptr);
assert_true(hook_called, "Free hook not called");
expect_true(hook_called, "Free hook not called");
err = mallctl("experimental.hooks.remove", NULL, NULL, &handle,
sizeof(handle));
assert_d_eq(err, 0, "Hook removal failed");
expect_d_eq(err, 0, "Hook removal failed");
hook_called = false;
ptr = mallocx(1, 0);
free(ptr);
assert_false(hook_called, "Hook called after removal");
expect_false(hook_called, "Hook called after removal");
}
TEST_END
@ -861,27 +861,27 @@ TEST_BEGIN(test_hooks_exhaustion) {
handle = NULL;
err = mallctl("experimental.hooks.install", &handle, &sz,
&hooks, sizeof(hooks));
	assert_d_eq(err, 0, "Error installing hooks");
assert_ptr_ne(handle, NULL, "Got NULL handle");
	expect_d_eq(err, 0, "Error installing hooks");
expect_ptr_ne(handle, NULL, "Got NULL handle");
handles[i] = handle;
}
err = mallctl("experimental.hooks.install", &handle, &sz, &hooks,
sizeof(hooks));
assert_d_eq(err, EAGAIN, "Should have failed hook installation");
expect_d_eq(err, EAGAIN, "Should have failed hook installation");
for (int i = 0; i < HOOK_MAX; i++) {
err = mallctl("experimental.hooks.remove", NULL, NULL,
&handles[i], sizeof(handles[i]));
assert_d_eq(err, 0, "Hook removal failed");
expect_d_eq(err, 0, "Hook removal failed");
}
/* Insertion failed, but then we removed some; it should work now. */
handle = NULL;
err = mallctl("experimental.hooks.install", &handle, &sz, &hooks,
sizeof(hooks));
assert_d_eq(err, 0, "Hook insertion failed");
assert_ptr_ne(handle, NULL, "Got NULL handle");
expect_d_eq(err, 0, "Hook insertion failed");
expect_ptr_ne(handle, NULL, "Got NULL handle");
err = mallctl("experimental.hooks.remove", NULL, NULL, &handle,
sizeof(handle));
assert_d_eq(err, 0, "Hook removal failed");
expect_d_eq(err, 0, "Hook removal failed");
}
TEST_END
@ -901,25 +901,25 @@ TEST_BEGIN(test_thread_idle) {
bool tcache_enabled = false;
sz = sizeof(tcache_enabled);
err = mallctl("thread.tcache.enabled", &tcache_enabled, &sz, NULL, 0);
assert_d_eq(err, 0, "");
expect_d_eq(err, 0, "");
test_skip_if(!tcache_enabled);
size_t tcache_max;
sz = sizeof(tcache_max);
err = mallctl("arenas.tcache_max", &tcache_max, &sz, NULL, 0);
assert_d_eq(err, 0, "");
expect_d_eq(err, 0, "");
test_skip_if(tcache_max == 0);
unsigned arena_ind;
sz = sizeof(arena_ind);
err = mallctl("thread.arena", &arena_ind, &sz, NULL, 0);
assert_d_eq(err, 0, "");
expect_d_eq(err, 0, "");
/* We're going to do an allocation of size 1, which we know is small. */
size_t mib[5];
miblen = sizeof(mib)/sizeof(mib[0]);
err = mallctlnametomib("stats.arenas.0.small.ndalloc", mib, &miblen);
assert_d_eq(err, 0, "");
expect_d_eq(err, 0, "");
mib[2] = arena_ind;
/*
@ -931,25 +931,25 @@ TEST_BEGIN(test_thread_idle) {
uint64_t epoch;
err = mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));
assert_d_eq(err, 0, "");
expect_d_eq(err, 0, "");
uint64_t small_dalloc_pre_idle;
sz = sizeof(small_dalloc_pre_idle);
err = mallctlbymib(mib, miblen, &small_dalloc_pre_idle, &sz, NULL, 0);
assert_d_eq(err, 0, "");
expect_d_eq(err, 0, "");
err = mallctl("thread.idle", NULL, NULL, NULL, 0);
assert_d_eq(err, 0, "");
expect_d_eq(err, 0, "");
err = mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));
assert_d_eq(err, 0, "");
expect_d_eq(err, 0, "");
uint64_t small_dalloc_post_idle;
sz = sizeof(small_dalloc_post_idle);
err = mallctlbymib(mib, miblen, &small_dalloc_post_idle, &sz, NULL, 0);
assert_d_eq(err, 0, "");
expect_d_eq(err, 0, "");
assert_u64_lt(small_dalloc_pre_idle, small_dalloc_post_idle,
expect_u64_lt(small_dalloc_pre_idle, small_dalloc_post_idle,
"Purge didn't flush the tcache");
}
TEST_END
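
The test above resolves stats.arenas.0.small.ndalloc to a MIB, points it at the current thread's arena, refreshes the epoch, and checks that the deallocation counter only advances once "thread.idle" flushes the tcache. Below is a minimal stand-alone sketch of that same mallctl/MIB sequence; it assumes jemalloc is linked in with its default unprefixed public API and built with stats, and it is an illustration rather than part of the test suite.

```
/*
 * Sketch: observe a per-arena small-dalloc counter around "thread.idle".
 * Assumes an unprefixed jemalloc build with stats enabled.
 */
#include <jemalloc/jemalloc.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint64_t
small_ndalloc(size_t *mib, size_t miblen) {
	/* Refresh cached stats, then read the counter by MIB. */
	uint64_t epoch = 1;
	if (mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)) != 0) {
		abort();
	}
	uint64_t v;
	size_t sz = sizeof(v);
	if (mallctlbymib(mib, miblen, &v, &sz, NULL, 0) != 0) {
		abort();
	}
	return v;
}

int
main(void) {
	unsigned arena;
	size_t sz = sizeof(arena);
	if (mallctl("thread.arena", &arena, &sz, NULL, 0) != 0) {
		abort();
	}

	size_t mib[5];
	size_t miblen = sizeof(mib) / sizeof(mib[0]);
	if (mallctlnametomib("stats.arenas.0.small.ndalloc", mib,
	    &miblen) != 0) {
		abort();
	}
	mib[2] = arena;	/* Point the MIB at this thread's arena. */

	void *p = malloc(1);
	free(p);	/* Likely parked in the tcache, not yet counted. */
	uint64_t before = small_ndalloc(mib, miblen);
	mallctl("thread.idle", NULL, NULL, NULL, 0);	/* Flush tcache. */
	uint64_t after = small_ndalloc(mib, miblen);
	printf("small ndalloc: %llu -> %llu\n",
	    (unsigned long long)before, (unsigned long long)after);
	return 0;
}
```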

View File

@ -4,9 +4,9 @@ TEST_BEGIN(test_malloc_strtoumax_no_endptr) {
int err;
set_errno(0);
assert_ju_eq(malloc_strtoumax("0", NULL, 0), 0, "Unexpected result");
expect_ju_eq(malloc_strtoumax("0", NULL, 0), 0, "Unexpected result");
err = get_errno();
assert_d_eq(err, 0, "Unexpected failure");
expect_d_eq(err, 0, "Unexpected failure");
}
TEST_END
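
malloc_strtoumax() is jemalloc's internal counterpart of the standard strtoumax(), and the table-driven test in the next hunk checks the same three observable effects: the parsed value, errno, and where the remainder pointer lands. A small sketch of those semantics using the libc function (an assumption for illustration; the test never calls libc strtoumax):

```
/* Parsed value, errno, and remainder pointer, via libc strtoumax(). */
#include <errno.h>
#include <inttypes.h>
#include <stdio.h>

int
main(void) {
	char *remainder;

	errno = 0;
	uintmax_t v = strtoumax("0x12zzz", &remainder, 0);	/* base 0: auto-detect */
	printf("value=%ju errno=%d remainder=%s\n", v, errno, remainder);
	/* Expected: value=18, errno=0, remainder="zzz". */

	errno = 0;
	v = strtoumax("99999999999999999999999999", &remainder, 10);
	printf("value=%ju errno=%d (ERANGE=%d)\n", v, errno, ERANGE);
	/* Overflow: value=UINTMAX_MAX, errno=ERANGE. */
	return 0;
}
```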
@ -89,14 +89,14 @@ TEST_BEGIN(test_malloc_strtoumax) {
set_errno(0);
result = malloc_strtoumax(test->input, &remainder, test->base);
err = get_errno();
assert_d_eq(err, test->expected_errno,
expect_d_eq(err, test->expected_errno,
"Expected errno %s for \"%s\", base %d",
test->expected_errno_name, test->input, test->base);
assert_str_eq(remainder, test->expected_remainder,
expect_str_eq(remainder, test->expected_remainder,
"Unexpected remainder for \"%s\", base %d",
test->input, test->base);
if (err == 0) {
assert_ju_eq(result, test->expected_x,
expect_ju_eq(result, test->expected_x,
"Unexpected result for \"%s\", base %d",
test->input, test->base);
}
@ -111,10 +111,10 @@ TEST_BEGIN(test_malloc_snprintf_truncated) {
size_t len;
#define TEST(expected_str_untruncated, ...) do { \
result = malloc_snprintf(buf, len, __VA_ARGS__); \
assert_d_eq(strncmp(buf, expected_str_untruncated, len-1), 0, \
expect_d_eq(strncmp(buf, expected_str_untruncated, len-1), 0, \
"Unexpected string inequality (\"%s\" vs \"%s\")", \
buf, expected_str_untruncated); \
assert_zu_eq(result, strlen(expected_str_untruncated), \
expect_zu_eq(result, strlen(expected_str_untruncated), \
"Unexpected result"); \
} while (0)
@ -142,8 +142,8 @@ TEST_BEGIN(test_malloc_snprintf) {
size_t result;
#define TEST(expected_str, ...) do { \
result = malloc_snprintf(buf, sizeof(buf), __VA_ARGS__); \
assert_str_eq(buf, expected_str, "Unexpected output"); \
assert_zu_eq(result, strlen(expected_str), "Unexpected result");\
expect_str_eq(buf, expected_str, "Unexpected output"); \
expect_zu_eq(result, strlen(expected_str), "Unexpected result");\
} while (0)
TEST("hello", "hello");

View File

@ -41,7 +41,7 @@ TEST_BEGIN(test_ln_gamma_factorial) {
/* exp(ln_gamma(x)) == (x-1)! for integer x. */
for (x = 1; x <= 21; x++) {
assert_true(double_eq_rel(exp(ln_gamma(x)),
expect_true(double_eq_rel(exp(ln_gamma(x)),
(double)factorial(x-1), MAX_REL_ERR, MAX_ABS_ERR),
"Incorrect factorial result for x=%u", x);
}
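
The identity being checked in this loop is Gamma(x) = (x-1)! for positive integers, i.e. exp(ln_gamma(x)) == (x-1)!. A quick stand-alone numeric check of the same identity using the C library's lgamma(), not the test's own ln_gamma() helper (compile with -lm):

```
/* exp(lgamma(n)) should match (n-1)! for small integer n. */
#include <math.h>
#include <stdio.h>

int
main(void) {
	double fact = 1.0;	/* Holds (n-1)! at the top of each iteration. */
	for (unsigned n = 1; n <= 10; n++) {
		printf("n=%2u  exp(lgamma(n))=%.6f  (n-1)!=%.0f\n",
		    n, exp(lgamma(n)), fact);
		fact *= n;
	}
	return 0;
}
```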
@ -192,7 +192,7 @@ TEST_BEGIN(test_ln_gamma_misc) {
for (i = 1; i < sizeof(ln_gamma_misc_expected)/sizeof(double); i++) {
double x = (double)i * 0.25;
assert_true(double_eq_rel(ln_gamma(x),
expect_true(double_eq_rel(ln_gamma(x),
ln_gamma_misc_expected[i], MAX_REL_ERR, MAX_ABS_ERR),
"Incorrect ln_gamma result for i=%u", i);
}
@ -242,7 +242,7 @@ TEST_BEGIN(test_pt_norm) {
for (i = 1; i < sizeof(pt_norm_expected)/sizeof(double); i++) {
double p = (double)i * 0.01;
assert_true(double_eq_rel(pt_norm(p), pt_norm_expected[i],
expect_true(double_eq_rel(pt_norm(p), pt_norm_expected[i],
MAX_REL_ERR, MAX_ABS_ERR),
"Incorrect pt_norm result for i=%u", i);
}
@ -295,7 +295,7 @@ TEST_BEGIN(test_pt_chi2) {
double ln_gamma_df = ln_gamma(df * 0.5);
for (j = 1; j < 100; j += 7) {
double p = (double)j * 0.01;
assert_true(double_eq_rel(pt_chi2(p, df, ln_gamma_df),
expect_true(double_eq_rel(pt_chi2(p, df, ln_gamma_df),
pt_chi2_expected[e], MAX_REL_ERR, MAX_ABS_ERR),
"Incorrect pt_chi2 result for i=%u, j=%u", i, j);
e++;
@ -356,7 +356,7 @@ TEST_BEGIN(test_pt_gamma_shape) {
double ln_gamma_shape = ln_gamma(shape);
for (j = 1; j < 100; j += 7) {
double p = (double)j * 0.01;
assert_true(double_eq_rel(pt_gamma(p, shape, 1.0,
expect_true(double_eq_rel(pt_gamma(p, shape, 1.0,
ln_gamma_shape), pt_gamma_expected[e], MAX_REL_ERR,
MAX_ABS_ERR),
"Incorrect pt_gamma result for i=%u, j=%u", i, j);
@ -370,7 +370,7 @@ TEST_BEGIN(test_pt_gamma_scale) {
double shape = 1.0;
double ln_gamma_shape = ln_gamma(shape);
assert_true(double_eq_rel(
expect_true(double_eq_rel(
pt_gamma(0.5, shape, 1.0, ln_gamma_shape) * 10.0,
pt_gamma(0.5, shape, 10.0, ln_gamma_shape), MAX_REL_ERR,
MAX_ABS_ERR),

View File

@ -13,17 +13,17 @@ TEST_BEGIN(test_mq_basic) {
mq_t mq;
mq_msg_t msg;
assert_false(mq_init(&mq), "Unexpected mq_init() failure");
assert_u_eq(mq_count(&mq), 0, "mq should be empty");
assert_ptr_null(mq_tryget(&mq),
expect_false(mq_init(&mq), "Unexpected mq_init() failure");
expect_u_eq(mq_count(&mq), 0, "mq should be empty");
expect_ptr_null(mq_tryget(&mq),
"mq_tryget() should fail when the queue is empty");
mq_put(&mq, &msg);
assert_u_eq(mq_count(&mq), 1, "mq should contain one message");
assert_ptr_eq(mq_tryget(&mq), &msg, "mq_tryget() should return msg");
expect_u_eq(mq_count(&mq), 1, "mq should contain one message");
expect_ptr_eq(mq_tryget(&mq), &msg, "mq_tryget() should return msg");
mq_put(&mq, &msg);
assert_ptr_eq(mq_get(&mq), &msg, "mq_get() should return msg");
expect_ptr_eq(mq_get(&mq), &msg, "mq_get() should return msg");
mq_fini(&mq);
}
@ -36,7 +36,7 @@ thd_receiver_start(void *arg) {
for (i = 0; i < (NSENDERS * NMSGS); i++) {
mq_msg_t *msg = mq_get(mq);
assert_ptr_not_null(msg, "mq_get() should never return NULL");
expect_ptr_not_null(msg, "mq_get() should never return NULL");
dallocx(msg, 0);
}
return NULL;
@ -51,7 +51,7 @@ thd_sender_start(void *arg) {
mq_msg_t *msg;
void *p;
p = mallocx(sizeof(mq_msg_t), 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
expect_ptr_not_null(p, "Unexpected mallocx() failure");
msg = (mq_msg_t *)p;
mq_put(mq, msg);
}
@ -64,7 +64,7 @@ TEST_BEGIN(test_mq_threaded) {
thd_t senders[NSENDERS];
unsigned i;
assert_false(mq_init(&mq), "Unexpected mq_init() failure");
expect_false(mq_init(&mq), "Unexpected mq_init() failure");
thd_create(&receiver, thd_receiver_start, (void *)&mq);
for (i = 0; i < NSENDERS; i++) {

View File

@ -6,7 +6,7 @@
TEST_BEGIN(test_mtx_basic) {
mtx_t mtx;
assert_false(mtx_init(&mtx), "Unexpected mtx_init() failure");
expect_false(mtx_init(&mtx), "Unexpected mtx_init() failure");
mtx_lock(&mtx);
mtx_unlock(&mtx);
mtx_fini(&mtx);
@ -36,7 +36,7 @@ TEST_BEGIN(test_mtx_race) {
thd_t thds[NTHREADS];
unsigned i;
assert_false(mtx_init(&arg.mtx), "Unexpected mtx_init() failure");
expect_false(mtx_init(&arg.mtx), "Unexpected mtx_init() failure");
arg.x = 0;
for (i = 0; i < NTHREADS; i++) {
thd_create(&thds[i], thd_start, (void *)&arg);
@ -44,7 +44,7 @@ TEST_BEGIN(test_mtx_race) {
for (i = 0; i < NTHREADS; i++) {
thd_join(thds[i], NULL);
}
assert_u_eq(arg.x, NTHREADS * NINCRS,
expect_u_eq(arg.x, NTHREADS * NINCRS,
"Race-related counter corruption");
}
TEST_END
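
test_mtx_race is the classic lost-update check: NTHREADS threads each perform NINCRS increments under the mutex, and the final counter must equal exactly NTHREADS * NINCRS. The same shape in plain pthreads, for reference; the counts below are arbitrary stand-ins and pthread_mutex_t replaces the harness's mtx_t/thd_t wrappers.

```
/* Lost-update check with pthreads instead of the test harness's wrappers. */
#include <pthread.h>
#include <stdio.h>

#define NTHREADS 8
#define NINCRS 100000

static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
static unsigned long x = 0;

static void *
thd_start(void *arg) {
	(void)arg;
	for (int i = 0; i < NINCRS; i++) {
		pthread_mutex_lock(&mtx);
		x++;
		pthread_mutex_unlock(&mtx);
	}
	return NULL;
}

int
main(void) {
	pthread_t thds[NTHREADS];
	for (int i = 0; i < NTHREADS; i++) {
		pthread_create(&thds[i], NULL, thd_start, NULL);
	}
	for (int i = 0; i < NTHREADS; i++) {
		pthread_join(thds[i], NULL);
	}
	/* Without the lock, lost updates would leave x short of this. */
	printf("x=%lu expected=%lu\n", x, (unsigned long)NTHREADS * NINCRS);
	return 0;
}
```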

View File

@ -6,9 +6,9 @@ TEST_BEGIN(test_nstime_init) {
nstime_t nst;
nstime_init(&nst, 42000000043);
assert_u64_eq(nstime_ns(&nst), 42000000043, "ns incorrectly read");
assert_u64_eq(nstime_sec(&nst), 42, "sec incorrectly read");
assert_u64_eq(nstime_nsec(&nst), 43, "nsec incorrectly read");
expect_u64_eq(nstime_ns(&nst), 42000000043, "ns incorrectly read");
expect_u64_eq(nstime_sec(&nst), 42, "sec incorrectly read");
expect_u64_eq(nstime_nsec(&nst), 43, "nsec incorrectly read");
}
TEST_END
@ -16,8 +16,8 @@ TEST_BEGIN(test_nstime_init2) {
nstime_t nst;
nstime_init2(&nst, 42, 43);
assert_u64_eq(nstime_sec(&nst), 42, "sec incorrectly read");
assert_u64_eq(nstime_nsec(&nst), 43, "nsec incorrectly read");
expect_u64_eq(nstime_sec(&nst), 42, "sec incorrectly read");
expect_u64_eq(nstime_nsec(&nst), 43, "nsec incorrectly read");
}
TEST_END
@ -27,8 +27,8 @@ TEST_BEGIN(test_nstime_copy) {
nstime_init2(&nsta, 42, 43);
nstime_init_zero(&nstb);
nstime_copy(&nstb, &nsta);
assert_u64_eq(nstime_sec(&nstb), 42, "sec incorrectly copied");
assert_u64_eq(nstime_nsec(&nstb), 43, "nsec incorrectly copied");
expect_u64_eq(nstime_sec(&nstb), 42, "sec incorrectly copied");
expect_u64_eq(nstime_nsec(&nstb), 43, "nsec incorrectly copied");
}
TEST_END
@ -37,31 +37,31 @@ TEST_BEGIN(test_nstime_compare) {
nstime_init2(&nsta, 42, 43);
nstime_copy(&nstb, &nsta);
assert_d_eq(nstime_compare(&nsta, &nstb), 0, "Times should be equal");
assert_d_eq(nstime_compare(&nstb, &nsta), 0, "Times should be equal");
expect_d_eq(nstime_compare(&nsta, &nstb), 0, "Times should be equal");
expect_d_eq(nstime_compare(&nstb, &nsta), 0, "Times should be equal");
nstime_init2(&nstb, 42, 42);
assert_d_eq(nstime_compare(&nsta, &nstb), 1,
expect_d_eq(nstime_compare(&nsta, &nstb), 1,
"nsta should be greater than nstb");
assert_d_eq(nstime_compare(&nstb, &nsta), -1,
expect_d_eq(nstime_compare(&nstb, &nsta), -1,
"nstb should be less than nsta");
nstime_init2(&nstb, 42, 44);
assert_d_eq(nstime_compare(&nsta, &nstb), -1,
expect_d_eq(nstime_compare(&nsta, &nstb), -1,
"nsta should be less than nstb");
assert_d_eq(nstime_compare(&nstb, &nsta), 1,
expect_d_eq(nstime_compare(&nstb, &nsta), 1,
"nstb should be greater than nsta");
nstime_init2(&nstb, 41, BILLION - 1);
assert_d_eq(nstime_compare(&nsta, &nstb), 1,
expect_d_eq(nstime_compare(&nsta, &nstb), 1,
"nsta should be greater than nstb");
assert_d_eq(nstime_compare(&nstb, &nsta), -1,
expect_d_eq(nstime_compare(&nstb, &nsta), -1,
"nstb should be less than nsta");
nstime_init2(&nstb, 43, 0);
assert_d_eq(nstime_compare(&nsta, &nstb), -1,
expect_d_eq(nstime_compare(&nsta, &nstb), -1,
"nsta should be less than nstb");
assert_d_eq(nstime_compare(&nstb, &nsta), 1,
expect_d_eq(nstime_compare(&nstb, &nsta), 1,
"nstb should be greater than nsta");
}
TEST_END
@ -73,14 +73,14 @@ TEST_BEGIN(test_nstime_add) {
nstime_copy(&nstb, &nsta);
nstime_add(&nsta, &nstb);
nstime_init2(&nstb, 84, 86);
assert_d_eq(nstime_compare(&nsta, &nstb), 0,
expect_d_eq(nstime_compare(&nsta, &nstb), 0,
"Incorrect addition result");
nstime_init2(&nsta, 42, BILLION - 1);
nstime_copy(&nstb, &nsta);
nstime_add(&nsta, &nstb);
nstime_init2(&nstb, 85, BILLION - 2);
assert_d_eq(nstime_compare(&nsta, &nstb), 0,
expect_d_eq(nstime_compare(&nsta, &nstb), 0,
"Incorrect addition result");
}
TEST_END
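
The second case in this hunk expects 42 s + 999999999 ns added to itself to come out as 85 s + 999999998 ns; that is plain nanosecond arithmetic with a one-second carry. A stand-alone check of the expected value (it does not use jemalloc's nstime_t):

```
/* The expected sum above, computed directly in nanoseconds. */
#include <inttypes.h>
#include <stdio.h>

#define BILLION UINT64_C(1000000000)

int
main(void) {
	uint64_t a = 42 * BILLION + (BILLION - 1);	/* 42 s, 999999999 ns */
	uint64_t sum = a + a;
	/* Prints 85 s and 999999998 ns: the extra second is the nsec carry. */
	printf("%" PRIu64 " s, %" PRIu64 " ns\n", sum / BILLION, sum % BILLION);
	return 0;
}
```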
@ -91,13 +91,13 @@ TEST_BEGIN(test_nstime_iadd) {
nstime_init2(&nsta, 42, BILLION - 1);
nstime_iadd(&nsta, 1);
nstime_init2(&nstb, 43, 0);
assert_d_eq(nstime_compare(&nsta, &nstb), 0,
expect_d_eq(nstime_compare(&nsta, &nstb), 0,
"Incorrect addition result");
nstime_init2(&nsta, 42, 1);
nstime_iadd(&nsta, BILLION + 1);
nstime_init2(&nstb, 43, 2);
assert_d_eq(nstime_compare(&nsta, &nstb), 0,
expect_d_eq(nstime_compare(&nsta, &nstb), 0,
"Incorrect addition result");
}
TEST_END
@ -109,14 +109,14 @@ TEST_BEGIN(test_nstime_subtract) {
nstime_copy(&nstb, &nsta);
nstime_subtract(&nsta, &nstb);
nstime_init_zero(&nstb);
assert_d_eq(nstime_compare(&nsta, &nstb), 0,
expect_d_eq(nstime_compare(&nsta, &nstb), 0,
"Incorrect subtraction result");
nstime_init2(&nsta, 42, 43);
nstime_init2(&nstb, 41, 44);
nstime_subtract(&nsta, &nstb);
nstime_init2(&nstb, 0, BILLION - 1);
assert_d_eq(nstime_compare(&nsta, &nstb), 0,
expect_d_eq(nstime_compare(&nsta, &nstb), 0,
"Incorrect subtraction result");
}
TEST_END
@ -127,13 +127,13 @@ TEST_BEGIN(test_nstime_isubtract) {
nstime_init2(&nsta, 42, 43);
nstime_isubtract(&nsta, 42*BILLION + 43);
nstime_init_zero(&nstb);
assert_d_eq(nstime_compare(&nsta, &nstb), 0,
expect_d_eq(nstime_compare(&nsta, &nstb), 0,
"Incorrect subtraction result");
nstime_init2(&nsta, 42, 43);
nstime_isubtract(&nsta, 41*BILLION + 44);
nstime_init2(&nstb, 0, BILLION - 1);
assert_d_eq(nstime_compare(&nsta, &nstb), 0,
expect_d_eq(nstime_compare(&nsta, &nstb), 0,
"Incorrect subtraction result");
}
TEST_END
@ -144,13 +144,13 @@ TEST_BEGIN(test_nstime_imultiply) {
nstime_init2(&nsta, 42, 43);
nstime_imultiply(&nsta, 10);
nstime_init2(&nstb, 420, 430);
assert_d_eq(nstime_compare(&nsta, &nstb), 0,
expect_d_eq(nstime_compare(&nsta, &nstb), 0,
"Incorrect multiplication result");
nstime_init2(&nsta, 42, 666666666);
nstime_imultiply(&nsta, 3);
nstime_init2(&nstb, 127, 999999998);
assert_d_eq(nstime_compare(&nsta, &nstb), 0,
expect_d_eq(nstime_compare(&nsta, &nstb), 0,
"Incorrect multiplication result");
}
TEST_END
@ -162,14 +162,14 @@ TEST_BEGIN(test_nstime_idivide) {
nstime_copy(&nstb, &nsta);
nstime_imultiply(&nsta, 10);
nstime_idivide(&nsta, 10);
assert_d_eq(nstime_compare(&nsta, &nstb), 0,
expect_d_eq(nstime_compare(&nsta, &nstb), 0,
"Incorrect division result");
nstime_init2(&nsta, 42, 666666666);
nstime_copy(&nstb, &nsta);
nstime_imultiply(&nsta, 3);
nstime_idivide(&nsta, 3);
assert_d_eq(nstime_compare(&nsta, &nstb), 0,
expect_d_eq(nstime_compare(&nsta, &nstb), 0,
"Incorrect division result");
}
TEST_END
@ -180,7 +180,7 @@ TEST_BEGIN(test_nstime_divide) {
nstime_init2(&nsta, 42, 43);
nstime_copy(&nstb, &nsta);
nstime_imultiply(&nsta, 10);
assert_u64_eq(nstime_divide(&nsta, &nstb), 10,
expect_u64_eq(nstime_divide(&nsta, &nstb), 10,
"Incorrect division result");
nstime_init2(&nsta, 42, 43);
@ -188,7 +188,7 @@ TEST_BEGIN(test_nstime_divide) {
nstime_imultiply(&nsta, 10);
nstime_init(&nstc, 1);
nstime_add(&nsta, &nstc);
assert_u64_eq(nstime_divide(&nsta, &nstb), 10,
expect_u64_eq(nstime_divide(&nsta, &nstb), 10,
"Incorrect division result");
nstime_init2(&nsta, 42, 43);
@ -196,7 +196,7 @@ TEST_BEGIN(test_nstime_divide) {
nstime_imultiply(&nsta, 10);
nstime_init(&nstc, 1);
nstime_subtract(&nsta, &nstc);
assert_u64_eq(nstime_divide(&nsta, &nstb), 9,
expect_u64_eq(nstime_divide(&nsta, &nstb), 9,
"Incorrect division result");
}
TEST_END
@ -209,7 +209,7 @@ TEST_END
TEST_BEGIN(test_nstime_update) {
nstime_t nst;
assert_false(nstime_init_update(&nst), "Basic time update failed.");
expect_false(nstime_init_update(&nst), "Basic time update failed.");
/* Only Rip Van Winkle sleeps this long. */
{
@ -220,9 +220,9 @@ TEST_BEGIN(test_nstime_update) {
{
nstime_t nst0;
nstime_copy(&nst0, &nst);
assert_true(nstime_update(&nst),
expect_true(nstime_update(&nst),
"Update should detect time roll-back.");
assert_d_eq(nstime_compare(&nst, &nst0), 0,
expect_d_eq(nstime_compare(&nst, &nst0), 0,
"Time should not have been modified");
}
}

View File

@ -22,7 +22,7 @@ binind_compute(void) {
unsigned nbins, i;
sz = sizeof(nbins);
assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0,
expect_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0,
"Unexpected mallctl failure");
for (i = 0; i < nbins; i++) {
@ -30,12 +30,12 @@ binind_compute(void) {
size_t miblen = sizeof(mib)/sizeof(size_t);
size_t size;
assert_d_eq(mallctlnametomib("arenas.bin.0.size", mib,
	expect_d_eq(mallctlnametomib("arenas.bin.0.size", mib,
	    &miblen), 0, "Unexpected mallctlnametomib failure");
mib[2] = (size_t)i;
sz = sizeof(size);
assert_d_eq(mallctlbymib(mib, miblen, (void *)&size, &sz, NULL,
expect_d_eq(mallctlbymib(mib, miblen, (void *)&size, &sz, NULL,
0), 0, "Unexpected mallctlbymib failure");
if (size == SZ) {
return i;
@ -54,11 +54,11 @@ nregs_per_run_compute(void) {
size_t mib[4];
size_t miblen = sizeof(mib)/sizeof(size_t);
assert_d_eq(mallctlnametomib("arenas.bin.0.nregs", mib, &miblen), 0,
	expect_d_eq(mallctlnametomib("arenas.bin.0.nregs", mib, &miblen), 0,
	    "Unexpected mallctlnametomib failure");
mib[2] = (size_t)binind;
sz = sizeof(nregs);
assert_d_eq(mallctlbymib(mib, miblen, (void *)&nregs, &sz, NULL,
expect_d_eq(mallctlbymib(mib, miblen, (void *)&nregs, &sz, NULL,
0), 0, "Unexpected mallctlbymib failure");
return nregs;
}
@ -69,7 +69,7 @@ arenas_create_mallctl(void) {
size_t sz;
sz = sizeof(arena_ind);
assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
0, "Error in arenas.create");
return arena_ind;
@ -80,10 +80,10 @@ arena_reset_mallctl(unsigned arena_ind) {
size_t mib[3];
size_t miblen = sizeof(mib)/sizeof(size_t);
assert_d_eq(mallctlnametomib("arena.0.reset", mib, &miblen), 0,
expect_d_eq(mallctlnametomib("arena.0.reset", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
mib[1] = (size_t)arena_ind;
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
"Unexpected mallctlbymib() failure");
}
@ -105,7 +105,7 @@ TEST_BEGIN(test_pack) {
for (j = 0; j < nregs_per_run; j++) {
void *p = mallocx(SZ, MALLOCX_ARENA(arena_ind) |
MALLOCX_TCACHE_NONE);
assert_ptr_not_null(p,
expect_ptr_not_null(p,
"Unexpected mallocx(%zu, MALLOCX_ARENA(%u) |"
" MALLOCX_TCACHE_NONE) failure, run=%zu, reg=%zu",
SZ, arena_ind, i, j);
@ -148,7 +148,7 @@ TEST_BEGIN(test_pack) {
}
p = mallocx(SZ, MALLOCX_ARENA(arena_ind) |
MALLOCX_TCACHE_NONE);
assert_ptr_eq(p, ptrs[(i * nregs_per_run) + j],
expect_ptr_eq(p, ptrs[(i * nregs_per_run) + j],
"Unexpected refill discrepancy, run=%zu, reg=%zu\n",
i, j);
}

View File

@ -8,13 +8,13 @@ TEST_BEGIN(test_pages_huge) {
alloc_size = HUGEPAGE * 2 - PAGE;
commit = true;
pages = pages_map(NULL, alloc_size, PAGE, &commit);
assert_ptr_not_null(pages, "Unexpected pages_map() error");
expect_ptr_not_null(pages, "Unexpected pages_map() error");
if (init_system_thp_mode == thp_mode_default) {
hugepage = (void *)(ALIGNMENT_CEILING((uintptr_t)pages, HUGEPAGE));
assert_b_ne(pages_huge(hugepage, HUGEPAGE), have_madvise_huge,
expect_b_ne(pages_huge(hugepage, HUGEPAGE), have_madvise_huge,
"Unexpected pages_huge() result");
assert_false(pages_nohuge(hugepage, HUGEPAGE),
expect_false(pages_nohuge(hugepage, HUGEPAGE),
"Unexpected pages_nohuge() result");
}

View File

@ -30,8 +30,8 @@ node_cmp(const node_t *a, const node_t *b) {
static int
node_cmp_magic(const node_t *a, const node_t *b) {
assert_u32_eq(a->magic, NODE_MAGIC, "Bad magic");
assert_u32_eq(b->magic, NODE_MAGIC, "Bad magic");
expect_u32_eq(a->magic, NODE_MAGIC, "Bad magic");
expect_u32_eq(b->magic, NODE_MAGIC, "Bad magic");
return node_cmp(a, b);
}
@ -74,7 +74,7 @@ heap_print(const heap_t *heap) {
for (auxelm = phn_next_get(node_t, link, heap->ph_root); auxelm != NULL;
auxelm = phn_next_get(node_t, link, auxelm)) {
assert_ptr_eq(phn_next_get(node_t, link, phn_prev_get(node_t,
expect_ptr_eq(phn_next_get(node_t, link, phn_prev_get(node_t,
link, auxelm)), auxelm,
"auxelm's prev doesn't link to auxelm");
node_print(auxelm, 0);
@ -90,7 +90,7 @@ node_validate(const node_t *node, const node_t *parent) {
node_t *leftmost_child, *sibling;
if (parent != NULL) {
assert_d_ge(node_cmp_magic(node, parent), 0,
expect_d_ge(node_cmp_magic(node, parent), 0,
"Child is less than parent");
}
@ -98,13 +98,13 @@ node_validate(const node_t *node, const node_t *parent) {
if (leftmost_child == NULL) {
return nnodes;
}
assert_ptr_eq((void *)phn_prev_get(node_t, link, leftmost_child),
expect_ptr_eq((void *)phn_prev_get(node_t, link, leftmost_child),
(void *)node, "Leftmost child does not link to node");
nnodes += node_validate(leftmost_child, node);
for (sibling = phn_next_get(node_t, link, leftmost_child); sibling !=
NULL; sibling = phn_next_get(node_t, link, sibling)) {
assert_ptr_eq(phn_next_get(node_t, link, phn_prev_get(node_t,
expect_ptr_eq(phn_next_get(node_t, link, phn_prev_get(node_t,
link, sibling)), sibling,
"sibling's prev doesn't link to sibling");
nnodes += node_validate(sibling, node);
@ -125,7 +125,7 @@ heap_validate(const heap_t *heap) {
for (auxelm = phn_next_get(node_t, link, heap->ph_root); auxelm != NULL;
auxelm = phn_next_get(node_t, link, auxelm)) {
assert_ptr_eq(phn_next_get(node_t, link, phn_prev_get(node_t,
expect_ptr_eq(phn_next_get(node_t, link, phn_prev_get(node_t,
link, auxelm)), auxelm,
"auxelm's prev doesn't link to auxelm");
nnodes += node_validate(auxelm, NULL);
@ -142,9 +142,9 @@ TEST_BEGIN(test_ph_empty) {
heap_t heap;
heap_new(&heap);
assert_true(heap_empty(&heap), "Heap should be empty");
assert_ptr_null(heap_first(&heap), "Unexpected node");
assert_ptr_null(heap_any(&heap), "Unexpected node");
expect_true(heap_empty(&heap), "Heap should be empty");
expect_ptr_null(heap_first(&heap), "Unexpected node");
expect_ptr_null(heap_any(&heap), "Unexpected node");
}
TEST_END
@ -203,7 +203,7 @@ TEST_BEGIN(test_ph_random) {
for (j = 1; j <= NNODES; j++) {
/* Initialize heap and nodes. */
heap_new(&heap);
assert_u_eq(heap_validate(&heap), 0,
expect_u_eq(heap_validate(&heap), 0,
"Incorrect node count");
for (k = 0; k < j; k++) {
nodes[k].magic = NODE_MAGIC;
@ -214,34 +214,34 @@ TEST_BEGIN(test_ph_random) {
for (k = 0; k < j; k++) {
heap_insert(&heap, &nodes[k]);
if (i % 13 == 12) {
assert_ptr_not_null(heap_any(&heap),
expect_ptr_not_null(heap_any(&heap),
"Heap should not be empty");
/* Trigger merging. */
assert_ptr_not_null(heap_first(&heap),
expect_ptr_not_null(heap_first(&heap),
"Heap should not be empty");
}
assert_u_eq(heap_validate(&heap), k + 1,
expect_u_eq(heap_validate(&heap), k + 1,
"Incorrect node count");
}
assert_false(heap_empty(&heap),
expect_false(heap_empty(&heap),
"Heap should not be empty");
/* Remove nodes. */
switch (i % 6) {
case 0:
for (k = 0; k < j; k++) {
assert_u_eq(heap_validate(&heap), j - k,
expect_u_eq(heap_validate(&heap), j - k,
"Incorrect node count");
node_remove(&heap, &nodes[k]);
assert_u_eq(heap_validate(&heap), j - k
expect_u_eq(heap_validate(&heap), j - k
- 1, "Incorrect node count");
}
break;
case 1:
for (k = j; k > 0; k--) {
node_remove(&heap, &nodes[k-1]);
assert_u_eq(heap_validate(&heap), k - 1,
expect_u_eq(heap_validate(&heap), k - 1,
"Incorrect node count");
}
break;
@ -249,10 +249,10 @@ TEST_BEGIN(test_ph_random) {
node_t *prev = NULL;
for (k = 0; k < j; k++) {
node_t *node = node_remove_first(&heap);
assert_u_eq(heap_validate(&heap), j - k
expect_u_eq(heap_validate(&heap), j - k
- 1, "Incorrect node count");
if (prev != NULL) {
assert_d_ge(node_cmp(node,
expect_d_ge(node_cmp(node,
prev), 0,
"Bad removal order");
}
@ -263,15 +263,15 @@ TEST_BEGIN(test_ph_random) {
node_t *prev = NULL;
for (k = 0; k < j; k++) {
node_t *node = heap_first(&heap);
assert_u_eq(heap_validate(&heap), j - k,
expect_u_eq(heap_validate(&heap), j - k,
"Incorrect node count");
if (prev != NULL) {
assert_d_ge(node_cmp(node,
expect_d_ge(node_cmp(node,
prev), 0,
"Bad removal order");
}
node_remove(&heap, node);
assert_u_eq(heap_validate(&heap), j - k
expect_u_eq(heap_validate(&heap), j - k
- 1, "Incorrect node count");
prev = node;
}
@ -279,17 +279,17 @@ TEST_BEGIN(test_ph_random) {
} case 4: {
for (k = 0; k < j; k++) {
node_remove_any(&heap);
assert_u_eq(heap_validate(&heap), j - k
expect_u_eq(heap_validate(&heap), j - k
- 1, "Incorrect node count");
}
break;
} case 5: {
for (k = 0; k < j; k++) {
node_t *node = heap_any(&heap);
assert_u_eq(heap_validate(&heap), j - k,
expect_u_eq(heap_validate(&heap), j - k,
"Incorrect node count");
node_remove(&heap, node);
assert_u_eq(heap_validate(&heap), j - k
expect_u_eq(heap_validate(&heap), j - k
- 1, "Incorrect node count");
}
break;
@ -297,11 +297,11 @@ TEST_BEGIN(test_ph_random) {
not_reached();
}
assert_ptr_null(heap_first(&heap),
expect_ptr_null(heap_first(&heap),
"Heap should be empty");
assert_ptr_null(heap_any(&heap),
expect_ptr_null(heap_any(&heap),
"Heap should be empty");
assert_true(heap_empty(&heap), "Heap should be empty");
expect_true(heap_empty(&heap), "Heap should be empty");
}
}
fini_gen_rand(sfmt);

View File

@ -10,18 +10,18 @@ test_prng_lg_range_u32(bool atomic) {
ra = prng_lg_range_u32(&sa, 32, atomic);
atomic_store_u32(&sa, 42, ATOMIC_RELAXED);
rb = prng_lg_range_u32(&sa, 32, atomic);
assert_u32_eq(ra, rb,
expect_u32_eq(ra, rb,
"Repeated generation should produce repeated results");
atomic_store_u32(&sb, 42, ATOMIC_RELAXED);
rb = prng_lg_range_u32(&sb, 32, atomic);
assert_u32_eq(ra, rb,
expect_u32_eq(ra, rb,
"Equivalent generation should produce equivalent results");
atomic_store_u32(&sa, 42, ATOMIC_RELAXED);
ra = prng_lg_range_u32(&sa, 32, atomic);
rb = prng_lg_range_u32(&sa, 32, atomic);
assert_u32_ne(ra, rb,
expect_u32_ne(ra, rb,
"Full-width results must not immediately repeat");
atomic_store_u32(&sa, 42, ATOMIC_RELAXED);
@ -29,9 +29,9 @@ test_prng_lg_range_u32(bool atomic) {
for (lg_range = 31; lg_range > 0; lg_range--) {
atomic_store_u32(&sb, 42, ATOMIC_RELAXED);
rb = prng_lg_range_u32(&sb, lg_range, atomic);
assert_u32_eq((rb & (UINT32_C(0xffffffff) << lg_range)),
expect_u32_eq((rb & (UINT32_C(0xffffffff) << lg_range)),
0, "High order bits should be 0, lg_range=%u", lg_range);
assert_u32_eq(rb, (ra >> (32 - lg_range)),
expect_u32_eq(rb, (ra >> (32 - lg_range)),
"Expected high order bits of full-width result, "
"lg_range=%u", lg_range);
}
@ -46,18 +46,18 @@ test_prng_lg_range_u64(void) {
ra = prng_lg_range_u64(&sa, 64);
sa = 42;
rb = prng_lg_range_u64(&sa, 64);
assert_u64_eq(ra, rb,
expect_u64_eq(ra, rb,
"Repeated generation should produce repeated results");
sb = 42;
rb = prng_lg_range_u64(&sb, 64);
assert_u64_eq(ra, rb,
expect_u64_eq(ra, rb,
"Equivalent generation should produce equivalent results");
sa = 42;
ra = prng_lg_range_u64(&sa, 64);
rb = prng_lg_range_u64(&sa, 64);
assert_u64_ne(ra, rb,
expect_u64_ne(ra, rb,
"Full-width results must not immediately repeat");
sa = 42;
@ -65,9 +65,9 @@ test_prng_lg_range_u64(void) {
for (lg_range = 63; lg_range > 0; lg_range--) {
sb = 42;
rb = prng_lg_range_u64(&sb, lg_range);
assert_u64_eq((rb & (UINT64_C(0xffffffffffffffff) << lg_range)),
expect_u64_eq((rb & (UINT64_C(0xffffffffffffffff) << lg_range)),
0, "High order bits should be 0, lg_range=%u", lg_range);
assert_u64_eq(rb, (ra >> (64 - lg_range)),
expect_u64_eq(rb, (ra >> (64 - lg_range)),
"Expected high order bits of full-width result, "
"lg_range=%u", lg_range);
}
@ -83,18 +83,18 @@ test_prng_lg_range_zu(bool atomic) {
ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
atomic_store_zu(&sa, 42, ATOMIC_RELAXED);
rb = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
assert_zu_eq(ra, rb,
expect_zu_eq(ra, rb,
"Repeated generation should produce repeated results");
atomic_store_zu(&sb, 42, ATOMIC_RELAXED);
rb = prng_lg_range_zu(&sb, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
assert_zu_eq(ra, rb,
expect_zu_eq(ra, rb,
"Equivalent generation should produce equivalent results");
atomic_store_zu(&sa, 42, ATOMIC_RELAXED);
ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
rb = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
assert_zu_ne(ra, rb,
expect_zu_ne(ra, rb,
"Full-width results must not immediately repeat");
atomic_store_zu(&sa, 42, ATOMIC_RELAXED);
@ -103,9 +103,9 @@ test_prng_lg_range_zu(bool atomic) {
lg_range--) {
atomic_store_zu(&sb, 42, ATOMIC_RELAXED);
rb = prng_lg_range_zu(&sb, lg_range, atomic);
assert_zu_eq((rb & (SIZE_T_MAX << lg_range)),
expect_zu_eq((rb & (SIZE_T_MAX << lg_range)),
0, "High order bits should be 0, lg_range=%u", lg_range);
assert_zu_eq(rb, (ra >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) -
expect_zu_eq(rb, (ra >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) -
lg_range)), "Expected high order bits of full-width "
"result, lg_range=%u", lg_range);
}
@ -151,7 +151,7 @@ test_prng_range_u32(bool atomic) {
for (rep = 0; rep < NREPS; rep++) {
uint32_t r = prng_range_u32(&s, range, atomic);
assert_u32_lt(r, range, "Out of range");
expect_u32_lt(r, range, "Out of range");
}
}
}
@ -171,7 +171,7 @@ test_prng_range_u64(void) {
for (rep = 0; rep < NREPS; rep++) {
uint64_t r = prng_range_u64(&s, range);
assert_u64_lt(r, range, "Out of range");
expect_u64_lt(r, range, "Out of range");
}
}
}
@ -191,7 +191,7 @@ test_prng_range_zu(bool atomic) {
for (rep = 0; rep < NREPS; rep++) {
size_t r = prng_range_zu(&s, range, atomic);
assert_zu_lt(r, range, "Out of range");
expect_zu_lt(r, range, "Out of range");
}
}
}

View File

@ -10,7 +10,7 @@ prof_dump_open_intercept(bool propagate_err, const char *filename) {
int fd;
fd = open("/dev/null", O_WRONLY);
assert_d_ne(fd, -1, "Unexpected open() failure");
expect_d_ne(fd, -1, "Unexpected open() failure");
return fd;
}
@ -32,14 +32,14 @@ thd_start(void *varg) {
void *p = alloc_from_permuted_backtrace(thd_ind, i);
dallocx(p, 0);
if (i % DUMP_INTERVAL == 0) {
assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
expect_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
0, "Unexpected error while dumping heap profile");
}
if (i % BT_COUNT_CHECK_INTERVAL == 0 ||
i+1 == NALLOCS_PER_THREAD) {
bt_count = prof_bt_count();
assert_zu_le(bt_count_prev+(i-i_prev), bt_count,
expect_zu_le(bt_count_prev+(i-i_prev), bt_count,
"Expected larger backtrace count increase");
i_prev = i;
bt_count_prev = bt_count;
@ -58,7 +58,7 @@ TEST_BEGIN(test_idump) {
test_skip_if(!config_prof);
active = true;
assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
expect_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
sizeof(active)), 0,
"Unexpected mallctl failure while activating profiling");

View File

@ -6,9 +6,9 @@ mallctl_bool_get(const char *name, bool expected, const char *func, int line) {
size_t sz;
sz = sizeof(old);
assert_d_eq(mallctl(name, (void *)&old, &sz, NULL, 0), 0,
expect_d_eq(mallctl(name, (void *)&old, &sz, NULL, 0), 0,
"%s():%d: Unexpected mallctl failure reading %s", func, line, name);
assert_b_eq(old, expected, "%s():%d: Unexpected %s value", func, line,
expect_b_eq(old, expected, "%s():%d: Unexpected %s value", func, line,
name);
}
@ -19,11 +19,11 @@ mallctl_bool_set(const char *name, bool old_expected, bool val_new,
size_t sz;
sz = sizeof(old);
assert_d_eq(mallctl(name, (void *)&old, &sz, (void *)&val_new,
expect_d_eq(mallctl(name, (void *)&old, &sz, (void *)&val_new,
sizeof(val_new)), 0,
"%s():%d: Unexpected mallctl failure reading/writing %s", func,
line, name);
assert_b_eq(old, old_expected, "%s():%d: Unexpected %s value", func,
expect_b_eq(old, old_expected, "%s():%d: Unexpected %s value", func,
line, name);
}
@ -67,11 +67,11 @@ prof_sampling_probe_impl(bool expect_sample, const char *func, int line) {
void *p;
size_t expected_backtraces = expect_sample ? 1 : 0;
assert_zu_eq(prof_bt_count(), 0, "%s():%d: Expected 0 backtraces", func,
expect_zu_eq(prof_bt_count(), 0, "%s():%d: Expected 0 backtraces", func,
line);
p = mallocx(1, 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
assert_zu_eq(prof_bt_count(), expected_backtraces,
expect_ptr_not_null(p, "Unexpected mallocx() failure");
expect_zu_eq(prof_bt_count(), expected_backtraces,
"%s():%d: Unexpected backtrace count", func, line);
dallocx(p, 0);
}

View File

@ -9,7 +9,7 @@ prof_dump_open_intercept(bool propagate_err, const char *filename) {
did_prof_dump_open = true;
fd = open("/dev/null", O_WRONLY);
assert_d_ne(fd, -1, "Unexpected open() failure");
expect_d_ne(fd, -1, "Unexpected open() failure");
return fd;
}
@ -22,7 +22,7 @@ TEST_BEGIN(test_gdump) {
test_skip_if(!config_prof);
active = true;
assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
expect_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
sizeof(active)), 0,
"Unexpected mallctl failure while activating profiling");
@ -30,35 +30,35 @@ TEST_BEGIN(test_gdump) {
did_prof_dump_open = false;
p = mallocx((1U << SC_LG_LARGE_MINCLASS), 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
assert_true(did_prof_dump_open, "Expected a profile dump");
expect_ptr_not_null(p, "Unexpected mallocx() failure");
expect_true(did_prof_dump_open, "Expected a profile dump");
did_prof_dump_open = false;
q = mallocx((1U << SC_LG_LARGE_MINCLASS), 0);
assert_ptr_not_null(q, "Unexpected mallocx() failure");
assert_true(did_prof_dump_open, "Expected a profile dump");
expect_ptr_not_null(q, "Unexpected mallocx() failure");
expect_true(did_prof_dump_open, "Expected a profile dump");
gdump = false;
sz = sizeof(gdump_old);
assert_d_eq(mallctl("prof.gdump", (void *)&gdump_old, &sz,
expect_d_eq(mallctl("prof.gdump", (void *)&gdump_old, &sz,
(void *)&gdump, sizeof(gdump)), 0,
"Unexpected mallctl failure while disabling prof.gdump");
assert(gdump_old);
did_prof_dump_open = false;
r = mallocx((1U << SC_LG_LARGE_MINCLASS), 0);
	assert_ptr_not_null(r, "Unexpected mallocx() failure");
assert_false(did_prof_dump_open, "Unexpected profile dump");
	expect_ptr_not_null(r, "Unexpected mallocx() failure");
expect_false(did_prof_dump_open, "Unexpected profile dump");
gdump = true;
sz = sizeof(gdump_old);
assert_d_eq(mallctl("prof.gdump", (void *)&gdump_old, &sz,
expect_d_eq(mallctl("prof.gdump", (void *)&gdump_old, &sz,
(void *)&gdump, sizeof(gdump)), 0,
"Unexpected mallctl failure while enabling prof.gdump");
assert(!gdump_old);
did_prof_dump_open = false;
s = mallocx((1U << SC_LG_LARGE_MINCLASS), 0);
	assert_ptr_not_null(s, "Unexpected mallocx() failure");
assert_true(did_prof_dump_open, "Expected a profile dump");
	expect_ptr_not_null(s, "Unexpected mallocx() failure");
expect_true(did_prof_dump_open, "Expected a profile dump");
dallocx(p, 0);
dallocx(q, 0);

View File

@ -11,11 +11,11 @@ prof_dump_open_intercept(bool propagate_err, const char *filename) {
did_prof_dump_open = true;
const char filename_prefix[] = TEST_PREFIX ".";
assert_d_eq(strncmp(filename_prefix, filename, sizeof(filename_prefix)
expect_d_eq(strncmp(filename_prefix, filename, sizeof(filename_prefix)
- 1), 0, "Dump file name should start with \"" TEST_PREFIX ".\"");
fd = open("/dev/null", O_WRONLY);
assert_d_ne(fd, -1, "Unexpected open() failure");
expect_d_ne(fd, -1, "Unexpected open() failure");
return fd;
}
@ -30,11 +30,11 @@ TEST_BEGIN(test_idump) {
active = true;
assert_d_eq(mallctl("prof.dump_prefix", NULL, NULL,
expect_d_eq(mallctl("prof.dump_prefix", NULL, NULL,
(void *)&dump_prefix, sizeof(dump_prefix)), 0,
"Unexpected mallctl failure while overwriting dump prefix");
assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
expect_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
sizeof(active)), 0,
"Unexpected mallctl failure while activating profiling");
@ -42,9 +42,9 @@ TEST_BEGIN(test_idump) {
did_prof_dump_open = false;
p = mallocx(1, 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
expect_ptr_not_null(p, "Unexpected mallocx() failure");
dallocx(p, 0);
assert_true(did_prof_dump_open, "Expected a profile dump");
expect_true(did_prof_dump_open, "Expected a profile dump");
}
TEST_END

View File

@ -4,16 +4,16 @@
#define N_PARAM 100
#define N_THREADS 10
static void assert_rep() {
assert_b_eq(prof_log_rep_check(), false, "Rep check failed");
static void expect_rep() {
expect_b_eq(prof_log_rep_check(), false, "Rep check failed");
}
static void assert_log_empty() {
assert_zu_eq(prof_log_bt_count(), 0,
static void expect_log_empty() {
expect_zu_eq(prof_log_bt_count(), 0,
"The log has backtraces; it isn't empty");
assert_zu_eq(prof_log_thr_count(), 0,
expect_zu_eq(prof_log_thr_count(), 0,
"The log has threads; it isn't empty");
assert_zu_eq(prof_log_alloc_count(), 0,
expect_zu_eq(prof_log_alloc_count(), 0,
"The log has allocations; it isn't empty");
}
@ -35,22 +35,22 @@ TEST_BEGIN(test_prof_log_many_logs) {
test_skip_if(!config_prof);
for (i = 0; i < N_PARAM; i++) {
assert_b_eq(prof_log_is_logging(), false,
expect_b_eq(prof_log_is_logging(), false,
"Logging shouldn't have started yet");
assert_d_eq(mallctl("prof.log_start", NULL, NULL, NULL, 0), 0,
expect_d_eq(mallctl("prof.log_start", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl failure when starting logging");
assert_b_eq(prof_log_is_logging(), true,
expect_b_eq(prof_log_is_logging(), true,
"Logging should be started by now");
assert_log_empty();
assert_rep();
expect_log_empty();
expect_rep();
f();
assert_zu_eq(prof_log_thr_count(), 1, "Wrong thread count");
assert_rep();
assert_b_eq(prof_log_is_logging(), true,
expect_zu_eq(prof_log_thr_count(), 1, "Wrong thread count");
expect_rep();
expect_b_eq(prof_log_is_logging(), true,
"Logging should still be on");
assert_d_eq(mallctl("prof.log_stop", NULL, NULL, NULL, 0), 0,
expect_d_eq(mallctl("prof.log_stop", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl failure when stopping logging");
assert_b_eq(prof_log_is_logging(), false,
expect_b_eq(prof_log_is_logging(), false,
"Logging should have turned off");
}
}
@ -74,7 +74,7 @@ TEST_BEGIN(test_prof_log_many_threads) {
test_skip_if(!config_prof);
int i;
assert_d_eq(mallctl("prof.log_start", NULL, NULL, NULL, 0), 0,
expect_d_eq(mallctl("prof.log_start", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl failure when starting logging");
for (i = 0; i < N_THREADS; i++) {
thd_create(&thr_buf[i], &f_thread, NULL);
@ -83,10 +83,10 @@ TEST_BEGIN(test_prof_log_many_threads) {
for (i = 0; i < N_THREADS; i++) {
thd_join(thr_buf[i], NULL);
}
assert_zu_eq(prof_log_thr_count(), N_THREADS,
expect_zu_eq(prof_log_thr_count(), N_THREADS,
"Wrong number of thread entries");
assert_rep();
assert_d_eq(mallctl("prof.log_stop", NULL, NULL, NULL, 0), 0,
expect_rep();
expect_d_eq(mallctl("prof.log_stop", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl failure when stopping logging");
}
TEST_END
@ -111,19 +111,19 @@ TEST_BEGIN(test_prof_log_many_traces) {
test_skip_if(!config_prof);
assert_d_eq(mallctl("prof.log_start", NULL, NULL, NULL, 0), 0,
expect_d_eq(mallctl("prof.log_start", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl failure when starting logging");
int i;
assert_rep();
assert_log_empty();
expect_rep();
expect_log_empty();
for (i = 0; i < N_PARAM; i++) {
assert_rep();
expect_rep();
f1();
assert_rep();
expect_rep();
f2();
assert_rep();
expect_rep();
f3();
assert_rep();
expect_rep();
}
/*
* There should be 8 total backtraces: two for malloc/free in f1(), two
@ -132,9 +132,9 @@ TEST_BEGIN(test_prof_log_many_traces) {
* optimizations such as loop unrolling might generate more call sites.
* So >= 8 traces are expected.
*/
assert_zu_ge(prof_log_bt_count(), 8,
expect_zu_ge(prof_log_bt_count(), 8,
"Expect at least 8 backtraces given sample workload");
assert_d_eq(mallctl("prof.log_stop", NULL, NULL, NULL, 0), 0,
expect_d_eq(mallctl("prof.log_stop", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl failure when stopping logging");
}
TEST_END

View File

@ -8,14 +8,14 @@
/* Invariant before and after every test (when config_prof is on) */
static void confirm_prof_setup(tsd_t *tsd) {
/* Options */
assert_true(opt_prof, "opt_prof not on");
assert_true(opt_prof_active, "opt_prof_active not on");
assert_zd_eq(opt_prof_recent_alloc_max, OPT_ALLOC_MAX,
expect_true(opt_prof, "opt_prof not on");
expect_true(opt_prof_active, "opt_prof_active not on");
expect_zd_eq(opt_prof_recent_alloc_max, OPT_ALLOC_MAX,
"opt_prof_recent_alloc_max not set correctly");
/* Dynamics */
assert_true(prof_active, "prof_active not on");
assert_zd_eq(prof_recent_alloc_max_ctl_read(tsd), OPT_ALLOC_MAX,
expect_true(prof_active, "prof_active not on");
expect_zd_eq(prof_recent_alloc_max_ctl_read(tsd), OPT_ALLOC_MAX,
"prof_recent_alloc_max not set correctly");
}
@ -35,11 +35,11 @@ TEST_BEGIN(test_prof_recent_off) {
size_t len = len_ref;
#define ASSERT_SHOULD_FAIL(opt, a, b, c, d) do { \
assert_d_eq(mallctl("experimental.prof_recent." opt, a, b, c, \
expect_d_eq(mallctl("experimental.prof_recent." opt, a, b, c, \
d), ENOENT, "Should return ENOENT when config_prof is off");\
assert_zd_eq(past, past_ref, "output was touched"); \
assert_zu_eq(len, len_ref, "output length was touched"); \
assert_zd_eq(future, future_ref, "input was touched"); \
expect_zd_eq(past, past_ref, "output was touched"); \
expect_zu_eq(len, len_ref, "output length was touched"); \
expect_zd_eq(future, future_ref, "input was touched"); \
} while (0)
ASSERT_SHOULD_FAIL("alloc_max", NULL, NULL, NULL, 0);
@ -61,35 +61,35 @@ TEST_BEGIN(test_prof_recent_on) {
confirm_prof_setup(tsd);
assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
expect_d_eq(mallctl("experimental.prof_recent.alloc_max",
NULL, NULL, NULL, 0), 0, "no-op mallctl should be allowed");
confirm_prof_setup(tsd);
assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
expect_d_eq(mallctl("experimental.prof_recent.alloc_max",
&past, &len, NULL, 0), 0, "Read error");
assert_zd_eq(past, OPT_ALLOC_MAX, "Wrong read result");
expect_zd_eq(past, OPT_ALLOC_MAX, "Wrong read result");
future = OPT_ALLOC_MAX + 1;
assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
expect_d_eq(mallctl("experimental.prof_recent.alloc_max",
NULL, NULL, &future, len), 0, "Write error");
future = -1;
assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
expect_d_eq(mallctl("experimental.prof_recent.alloc_max",
&past, &len, &future, len), 0, "Read/write error");
assert_zd_eq(past, OPT_ALLOC_MAX + 1, "Wrong read result");
expect_zd_eq(past, OPT_ALLOC_MAX + 1, "Wrong read result");
future = -2;
assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
expect_d_eq(mallctl("experimental.prof_recent.alloc_max",
&past, &len, &future, len), EINVAL,
"Invalid write should return EINVAL");
assert_zd_eq(past, OPT_ALLOC_MAX + 1,
expect_zd_eq(past, OPT_ALLOC_MAX + 1,
"Output should not be touched given invalid write");
future = OPT_ALLOC_MAX;
assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
expect_d_eq(mallctl("experimental.prof_recent.alloc_max",
&past, &len, &future, len), 0, "Read/write error");
assert_zd_eq(past, -1, "Wrong read result");
expect_zd_eq(past, -1, "Wrong read result");
future = OPT_ALLOC_MAX + 2;
assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
expect_d_eq(mallctl("experimental.prof_recent.alloc_max",
&past, &len, &future, len * 2), EINVAL,
"Invalid write should return EINVAL");
assert_zd_eq(past, -1,
expect_zd_eq(past, -1,
"Output should not be touched given invalid write");
confirm_prof_setup(tsd);
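
This hunk leans on the combined form of mallctl(): passing both an old-value buffer and a new value reads the previous experimental.prof_recent.alloc_max and installs the new limit in one call. A minimal sketch of that read-old/write-new pattern, assuming a profiling-enabled (--enable-prof) jemalloc build; otherwise the control returns ENOENT, as the config_prof-off test above shows.

```
/* Read the old last-N limit and install a new one in a single mallctl. */
#include <jemalloc/jemalloc.h>
#include <sys/types.h>
#include <stdio.h>

int
main(void) {
	ssize_t past, future = 7;	/* 7 is an arbitrary illustrative limit. */
	size_t len = sizeof(past);
	int err = mallctl("experimental.prof_recent.alloc_max",
	    &past, &len, &future, sizeof(future));
	if (err != 0) {
		printf("mallctl failed: %d\n", err);	/* ENOENT without prof. */
		return 1;
	}
	printf("old limit %zd, new limit %zd\n", past, future);
	return 0;
}
```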
@ -100,44 +100,44 @@ TEST_END
#define NTH_REQ_SIZE(n) ((n) * 97 + 101)
static void confirm_malloc(tsd_t *tsd, void *p) {
assert_ptr_not_null(p, "malloc failed unexpectedly");
expect_ptr_not_null(p, "malloc failed unexpectedly");
edata_t *e = emap_edata_lookup(TSDN_NULL, &emap_global, p);
assert_ptr_not_null(e, "NULL edata for living pointer");
expect_ptr_not_null(e, "NULL edata for living pointer");
malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
prof_recent_t *n = edata_prof_recent_alloc_get(tsd, e);
assert_ptr_not_null(n, "Record in edata should not be NULL");
assert_ptr_not_null(n->alloc_tctx,
expect_ptr_not_null(n, "Record in edata should not be NULL");
expect_ptr_not_null(n->alloc_tctx,
"alloc_tctx in record should not be NULL");
assert_ptr_eq(e, n->alloc_edata,
expect_ptr_eq(e, n->alloc_edata,
"edata pointer in record is not correct");
assert_ptr_null(n->dalloc_tctx, "dalloc_tctx in record should be NULL");
expect_ptr_null(n->dalloc_tctx, "dalloc_tctx in record should be NULL");
malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
}
static void confirm_record_size(tsd_t *tsd, prof_recent_t *n, unsigned kth) {
malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
assert_zu_eq(n->size, NTH_REQ_SIZE(kth),
expect_zu_eq(n->size, NTH_REQ_SIZE(kth),
"Recorded allocation size is wrong");
}
static void confirm_record_living(tsd_t *tsd, prof_recent_t *n) {
malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
assert_ptr_not_null(n->alloc_tctx,
expect_ptr_not_null(n->alloc_tctx,
"alloc_tctx in record should not be NULL");
assert_ptr_not_null(n->alloc_edata,
expect_ptr_not_null(n->alloc_edata,
"Recorded edata should not be NULL for living pointer");
assert_ptr_eq(n, edata_prof_recent_alloc_get(tsd, n->alloc_edata),
expect_ptr_eq(n, edata_prof_recent_alloc_get(tsd, n->alloc_edata),
"Record in edata is not correct");
assert_ptr_null(n->dalloc_tctx, "dalloc_tctx in record should be NULL");
expect_ptr_null(n->dalloc_tctx, "dalloc_tctx in record should be NULL");
}
static void confirm_record_released(tsd_t *tsd, prof_recent_t *n) {
malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
assert_ptr_not_null(n->alloc_tctx,
expect_ptr_not_null(n->alloc_tctx,
"alloc_tctx in record should not be NULL");
assert_ptr_null(n->alloc_edata,
expect_ptr_null(n->alloc_edata,
"Recorded edata should be NULL for released pointer");
assert_ptr_not_null(n->dalloc_tctx,
expect_ptr_not_null(n->dalloc_tctx,
"dalloc_tctx in record should not be NULL for released pointer");
}
@ -167,7 +167,7 @@ TEST_BEGIN(test_prof_recent_alloc) {
if (i < OPT_ALLOC_MAX - 1) {
malloc_mutex_lock(tsd_tsdn(tsd),
&prof_recent_alloc_mtx);
assert_ptr_ne(prof_recent_alloc_begin(tsd),
expect_ptr_ne(prof_recent_alloc_begin(tsd),
prof_recent_alloc_end(tsd),
"Empty recent allocation");
malloc_mutex_unlock(tsd_tsdn(tsd),
@ -194,7 +194,7 @@ TEST_BEGIN(test_prof_recent_alloc) {
}
}
malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
assert_u_eq(c, OPT_ALLOC_MAX,
expect_u_eq(c, OPT_ALLOC_MAX,
"Incorrect total number of allocations");
free(p);
}
@ -202,7 +202,7 @@ TEST_BEGIN(test_prof_recent_alloc) {
confirm_prof_setup(tsd);
b = false;
assert_d_eq(mallctl("prof.active", NULL, NULL, &b, sizeof(bool)), 0,
expect_d_eq(mallctl("prof.active", NULL, NULL, &b, sizeof(bool)), 0,
"mallctl for turning off prof_active failed");
/*
@ -212,7 +212,7 @@ TEST_BEGIN(test_prof_recent_alloc) {
for (; i < 3 * OPT_ALLOC_MAX; ++i) {
req_size = NTH_REQ_SIZE(i);
p = malloc(req_size);
assert_ptr_not_null(p, "malloc failed unexpectedly");
expect_ptr_not_null(p, "malloc failed unexpectedly");
c = 0;
malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
for (n = prof_recent_alloc_begin(tsd);
@ -223,13 +223,13 @@ TEST_BEGIN(test_prof_recent_alloc) {
++c;
}
malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
assert_u_eq(c, OPT_ALLOC_MAX,
expect_u_eq(c, OPT_ALLOC_MAX,
"Incorrect total number of allocations");
free(p);
}
b = true;
assert_d_eq(mallctl("prof.active", NULL, NULL, &b, sizeof(bool)), 0,
expect_d_eq(mallctl("prof.active", NULL, NULL, &b, sizeof(bool)), 0,
"mallctl for turning on prof_active failed");
confirm_prof_setup(tsd);
@ -267,14 +267,14 @@ TEST_BEGIN(test_prof_recent_alloc) {
}
}
malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
assert_u_eq(c, OPT_ALLOC_MAX,
expect_u_eq(c, OPT_ALLOC_MAX,
"Incorrect total number of allocations");
free(p);
}
/* Increasing the limit shouldn't alter the list of records. */
future = OPT_ALLOC_MAX + 1;
assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
expect_d_eq(mallctl("experimental.prof_recent.alloc_max",
NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
c = 0;
malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
@ -286,7 +286,7 @@ TEST_BEGIN(test_prof_recent_alloc) {
++c;
}
malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
assert_u_eq(c, OPT_ALLOC_MAX,
expect_u_eq(c, OPT_ALLOC_MAX,
"Incorrect total number of allocations");
/*
@ -294,7 +294,7 @@ TEST_BEGIN(test_prof_recent_alloc) {
* the new limit is still no less than the length of the list.
*/
future = OPT_ALLOC_MAX;
assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
expect_d_eq(mallctl("experimental.prof_recent.alloc_max",
NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
c = 0;
malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
@ -306,7 +306,7 @@ TEST_BEGIN(test_prof_recent_alloc) {
++c;
}
malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
assert_u_eq(c, OPT_ALLOC_MAX,
expect_u_eq(c, OPT_ALLOC_MAX,
"Incorrect total number of allocations");
/*
@ -314,7 +314,7 @@ TEST_BEGIN(test_prof_recent_alloc) {
* limit is less than the length of the list.
*/
future = OPT_ALLOC_MAX - 1;
assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
expect_d_eq(mallctl("experimental.prof_recent.alloc_max",
NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
c = 0;
malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
@ -326,12 +326,12 @@ TEST_BEGIN(test_prof_recent_alloc) {
confirm_record_released(tsd, n);
}
malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
assert_u_eq(c, OPT_ALLOC_MAX - 1,
expect_u_eq(c, OPT_ALLOC_MAX - 1,
"Incorrect total number of allocations");
/* Setting to unlimited shouldn't alter the list of records. */
future = -1;
assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
expect_d_eq(mallctl("experimental.prof_recent.alloc_max",
NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
c = 0;
malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
@ -343,12 +343,12 @@ TEST_BEGIN(test_prof_recent_alloc) {
confirm_record_released(tsd, n);
}
malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
assert_u_eq(c, OPT_ALLOC_MAX - 1,
expect_u_eq(c, OPT_ALLOC_MAX - 1,
"Incorrect total number of allocations");
/* Downshift to only one record. */
future = 1;
assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
expect_d_eq(mallctl("experimental.prof_recent.alloc_max",
NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
n = prof_recent_alloc_begin(tsd);
@ -361,7 +361,7 @@ TEST_BEGIN(test_prof_recent_alloc) {
/* Completely turn off. */
future = 0;
assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
expect_d_eq(mallctl("experimental.prof_recent.alloc_max",
NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
assert(prof_recent_alloc_begin(tsd) == prof_recent_alloc_end(tsd));
@ -369,7 +369,7 @@ TEST_BEGIN(test_prof_recent_alloc) {
/* Restore the settings. */
future = OPT_ALLOC_MAX;
assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
expect_d_eq(mallctl("experimental.prof_recent.alloc_max",
NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
assert(prof_recent_alloc_begin(tsd) == prof_recent_alloc_end(tsd));
@ -395,7 +395,7 @@ static void test_dump_write_cb(void *not_used, const char *str) {
static void call_dump() {
static void *in[2] = {test_dump_write_cb, NULL};
dump_out_len = 0;
assert_d_eq(mallctl("experimental.prof_recent.alloc_dump",
expect_d_eq(mallctl("experimental.prof_recent.alloc_dump",
NULL, NULL, in, sizeof(in)), 0, "Dump mallctl raised error");
}
@ -418,9 +418,9 @@ static void confirm_record(const char *template,
* "{\"recent_alloc_max\":XYZ,\"recent_alloc\":[...]}".
* Using "- 2" serves to cut right before the ending "]}".
*/
assert_d_eq(memcmp(dump_out, template, strlen(template) - 2), 0,
expect_d_eq(memcmp(dump_out, template, strlen(template) - 2), 0,
DUMP_ERROR);
assert_d_eq(memcmp(dump_out + strlen(dump_out) - 2,
expect_d_eq(memcmp(dump_out + strlen(dump_out) - 2,
template + strlen(template) - 2, 2), 0, DUMP_ERROR);
const char *start = dump_out + strlen(template) - 2;
@ -429,14 +429,14 @@ static void confirm_record(const char *template,
for (record = records; record < records + n_records; ++record) {
#define ASSERT_CHAR(c) do { \
assert_true(start < end, DUMP_ERROR); \
assert_c_eq(*start++, c, DUMP_ERROR); \
expect_true(start < end, DUMP_ERROR); \
expect_c_eq(*start++, c, DUMP_ERROR); \
} while (0)
#define ASSERT_STR(s) do { \
const size_t len = strlen(s); \
assert_true(start + len <= end, DUMP_ERROR); \
assert_d_eq(memcmp(start, s, len), 0, DUMP_ERROR); \
expect_true(start + len <= end, DUMP_ERROR); \
expect_d_eq(memcmp(start, s, len), 0, DUMP_ERROR); \
start += len; \
} while (0)
@ -512,8 +512,8 @@ static void confirm_record(const char *template,
#undef ASSERT_CHAR
}
assert_ptr_eq(record, records + n_records, DUMP_ERROR);
assert_ptr_eq(start, end, DUMP_ERROR);
expect_ptr_eq(record, records + n_records, DUMP_ERROR);
expect_ptr_eq(start, end, DUMP_ERROR);
}
TEST_BEGIN(test_prof_recent_alloc_dump) {
@ -527,18 +527,18 @@ TEST_BEGIN(test_prof_recent_alloc_dump) {
confirm_record_t records[2];
future = 0;
assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
expect_d_eq(mallctl("experimental.prof_recent.alloc_max",
NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
call_dump();
assert_str_eq(dump_out, "{\"recent_alloc_max\":0,\"recent_alloc\":[]}",
expect_str_eq(dump_out, "{\"recent_alloc_max\":0,\"recent_alloc\":[]}",
DUMP_ERROR);
future = 2;
assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
expect_d_eq(mallctl("experimental.prof_recent.alloc_max",
NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
call_dump();
const char *template = "{\"recent_alloc_max\":2,\"recent_alloc\":[]}";
assert_str_eq(dump_out, template, DUMP_ERROR);
expect_str_eq(dump_out, template, DUMP_ERROR);
p = malloc(7);
call_dump();
@ -563,7 +563,7 @@ TEST_BEGIN(test_prof_recent_alloc_dump) {
confirm_record(template, records, 2);
future = OPT_ALLOC_MAX;
assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
expect_d_eq(mallctl("experimental.prof_recent.alloc_max",
NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
confirm_prof_setup(tsd);
}
@ -632,7 +632,7 @@ static void *f_thread(void *arg) {
last_max =
prof_recent_alloc_max_ctl_write(tsd, test_max / 2);
}
assert_zd_ge(last_max, -1, "Illegal last-N max");
expect_zd_ge(last_max, -1, "Illegal last-N max");
}
while (data_p->count > 0) {
@ -660,7 +660,7 @@ TEST_BEGIN(test_prof_recent_stress) {
}
test_max = STRESS_ALLOC_MAX;
assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
expect_d_eq(mallctl("experimental.prof_recent.alloc_max",
NULL, NULL, &test_max, sizeof(ssize_t)), 0, "Write error");
for (size_t i = 0; i < N_THREADS; i++) {
thd_data_t *data_p = thd_data + i;
@ -673,7 +673,7 @@ TEST_BEGIN(test_prof_recent_stress) {
}
test_max = OPT_ALLOC_MAX;
assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
expect_d_eq(mallctl("experimental.prof_recent.alloc_max",
NULL, NULL, &test_max, sizeof(ssize_t)), 0, "Write error");
confirm_prof_setup(tsd);
}
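The rename is mechanical, but the two prefixes are meant to carry different semantics: an expect_* check records the failure and lets the test keep running, whereas an assert_*-style check stops the test immediately. The sketch below shows one way such a macro pair could be structured; the failure flag, the reporting, and the fatal wrapper are illustrative assumptions, not the project's actual test harness.

```
/*
 * Minimal sketch: a non-fatal expect_* check plus a fatal assert_*
 * wrapper built on top of it.  Names and details are assumed for
 * illustration; they are not the real test harness.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static bool test_failed = false; /* assumed per-test failure flag */

/* Record the mismatch, report it, and keep running the test. */
#define expect_d_eq(a, b, ...) do { \
    int a_ = (a); \
    int b_ = (b); \
    if (a_ != b_) { \
        test_failed = true; \
        fprintf(stderr, "%s:%d: %d != %d: ", __FILE__, __LINE__, a_, b_); \
        fprintf(stderr, __VA_ARGS__); \
        fputc('\n', stderr); \
    } \
} while (0)

/* Fatal variant: same check, but stop the test on failure. */
#define assert_d_eq(a, b, ...) do { \
    expect_d_eq(a, b, __VA_ARGS__); \
    if (test_failed) { \
        abort(); \
    } \
} while (0)

int
main(void) {
    expect_d_eq(1, 2, "non-fatal: execution continues");
    assert_d_eq(3, 3, "fatal only on mismatch");
    return test_failed ? 1 : 0;
}
```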

@ -5,14 +5,14 @@ prof_dump_open_intercept(bool propagate_err, const char *filename) {
int fd;
fd = open("/dev/null", O_WRONLY);
assert_d_ne(fd, -1, "Unexpected open() failure");
expect_d_ne(fd, -1, "Unexpected open() failure");
return fd;
}
static void
set_prof_active(bool active) {
assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
expect_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
sizeof(active)), 0, "Unexpected mallctl failure");
}
@ -21,7 +21,7 @@ get_lg_prof_sample(void) {
size_t lg_prof_sample;
size_t sz = sizeof(size_t);
assert_d_eq(mallctl("prof.lg_sample", (void *)&lg_prof_sample, &sz,
expect_d_eq(mallctl("prof.lg_sample", (void *)&lg_prof_sample, &sz,
NULL, 0), 0,
"Unexpected mallctl failure while reading profiling sample rate");
return lg_prof_sample;
@ -29,10 +29,10 @@ get_lg_prof_sample(void) {
static void
do_prof_reset(size_t lg_prof_sample) {
assert_d_eq(mallctl("prof.reset", NULL, NULL,
expect_d_eq(mallctl("prof.reset", NULL, NULL,
(void *)&lg_prof_sample, sizeof(size_t)), 0,
"Unexpected mallctl failure while resetting profile data");
assert_zu_eq(lg_prof_sample, get_lg_prof_sample(),
expect_zu_eq(lg_prof_sample, get_lg_prof_sample(),
"Expected profile sample rate change");
}
@ -44,22 +44,22 @@ TEST_BEGIN(test_prof_reset_basic) {
test_skip_if(!config_prof);
sz = sizeof(size_t);
assert_d_eq(mallctl("opt.lg_prof_sample", (void *)&lg_prof_sample_orig,
expect_d_eq(mallctl("opt.lg_prof_sample", (void *)&lg_prof_sample_orig,
&sz, NULL, 0), 0,
"Unexpected mallctl failure while reading profiling sample rate");
assert_zu_eq(lg_prof_sample_orig, 0,
expect_zu_eq(lg_prof_sample_orig, 0,
"Unexpected profiling sample rate");
lg_prof_sample = get_lg_prof_sample();
assert_zu_eq(lg_prof_sample_orig, lg_prof_sample,
expect_zu_eq(lg_prof_sample_orig, lg_prof_sample,
"Unexpected disagreement between \"opt.lg_prof_sample\" and "
"\"prof.lg_sample\"");
/* Test simple resets. */
for (i = 0; i < 2; i++) {
assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0,
expect_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl failure while resetting profile data");
lg_prof_sample = get_lg_prof_sample();
assert_zu_eq(lg_prof_sample_orig, lg_prof_sample,
expect_zu_eq(lg_prof_sample_orig, lg_prof_sample,
"Unexpected profile sample rate change");
}
@ -68,14 +68,14 @@ TEST_BEGIN(test_prof_reset_basic) {
for (i = 0; i < 2; i++) {
do_prof_reset(lg_prof_sample_next);
lg_prof_sample = get_lg_prof_sample();
assert_zu_eq(lg_prof_sample, lg_prof_sample_next,
expect_zu_eq(lg_prof_sample, lg_prof_sample_next,
"Expected profile sample rate change");
lg_prof_sample_next = lg_prof_sample_orig;
}
/* Make sure the test code restored prof.lg_sample. */
lg_prof_sample = get_lg_prof_sample();
assert_zu_eq(lg_prof_sample_orig, lg_prof_sample,
expect_zu_eq(lg_prof_sample_orig, lg_prof_sample,
"Unexpected disagreement between \"opt.lg_prof_sample\" and "
"\"prof.lg_sample\"");
}
@ -100,31 +100,31 @@ TEST_BEGIN(test_prof_reset_cleanup) {
set_prof_active(true);
assert_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces");
expect_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces");
p = mallocx(1, 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
assert_zu_eq(prof_bt_count(), 1, "Expected 1 backtrace");
expect_ptr_not_null(p, "Unexpected mallocx() failure");
expect_zu_eq(prof_bt_count(), 1, "Expected 1 backtrace");
prof_dump_header_orig = prof_dump_header;
prof_dump_header = prof_dump_header_intercept;
assert_false(prof_dump_header_intercepted, "Unexpected intercept");
expect_false(prof_dump_header_intercepted, "Unexpected intercept");
assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
expect_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
0, "Unexpected error while dumping heap profile");
assert_true(prof_dump_header_intercepted, "Expected intercept");
assert_u64_eq(cnt_all_copy.curobjs, 1, "Expected 1 allocation");
expect_true(prof_dump_header_intercepted, "Expected intercept");
expect_u64_eq(cnt_all_copy.curobjs, 1, "Expected 1 allocation");
assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0,
expect_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0,
"Unexpected error while resetting heap profile data");
assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
expect_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
0, "Unexpected error while dumping heap profile");
assert_u64_eq(cnt_all_copy.curobjs, 0, "Expected 0 allocations");
assert_zu_eq(prof_bt_count(), 1, "Expected 1 backtrace");
expect_u64_eq(cnt_all_copy.curobjs, 0, "Expected 0 allocations");
expect_zu_eq(prof_bt_count(), 1, "Expected 1 backtrace");
prof_dump_header = prof_dump_header_orig;
dallocx(p, 0);
assert_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces");
expect_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces");
set_prof_active(false);
}
@ -145,13 +145,13 @@ thd_start(void *varg) {
for (i = 0; i < NALLOCS_PER_THREAD; i++) {
if (i % RESET_INTERVAL == 0) {
assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0),
expect_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0),
0, "Unexpected error while resetting heap profile "
"data");
}
if (i % DUMP_INTERVAL == 0) {
assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
expect_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
0, "Unexpected error while dumping heap profile");
}
@ -162,7 +162,7 @@ thd_start(void *varg) {
*pp = NULL;
}
*pp = btalloc(1, thd_ind*NALLOCS_PER_THREAD + i);
assert_ptr_not_null(*pp,
expect_ptr_not_null(*pp,
"Unexpected btalloc() failure");
}
}
@ -189,7 +189,7 @@ TEST_BEGIN(test_prof_reset) {
test_skip_if(!config_prof);
bt_count = prof_bt_count();
assert_zu_eq(bt_count, 0,
expect_zu_eq(bt_count, 0,
"Unexpected pre-existing tdata structures");
tdata_count = prof_tdata_count();
@ -206,9 +206,9 @@ TEST_BEGIN(test_prof_reset) {
thd_join(thds[i], NULL);
}
assert_zu_eq(prof_bt_count(), bt_count,
expect_zu_eq(prof_bt_count(), bt_count,
"Unexpected bactrace count change");
assert_zu_eq(prof_tdata_count(), tdata_count,
expect_zu_eq(prof_tdata_count(), tdata_count,
"Unexpected remaining tdata structures");
set_prof_active(false);
@ -246,19 +246,19 @@ TEST_BEGIN(test_xallocx) {
/* Allocate small object (which will be promoted). */
p = ptrs[i] = mallocx(1, 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
expect_ptr_not_null(p, "Unexpected mallocx() failure");
/* Reset profiling. */
do_prof_reset(0);
/* Perform successful xallocx(). */
sz = sallocx(p, 0);
assert_zu_eq(xallocx(p, sz, 0, 0), sz,
expect_zu_eq(xallocx(p, sz, 0, 0), sz,
"Unexpected xallocx() failure");
/* Perform unsuccessful xallocx(). */
nsz = nallocx(sz+1, 0);
assert_zu_eq(xallocx(p, nsz, 0, 0), sz,
expect_zu_eq(xallocx(p, nsz, 0, 0), sz,
"Unexpected xallocx() success");
}
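Nearly every check in these files goes through mallctl(), which reads and writes named controls in a single call: oldp/oldlenp receive the previous value, newp/newlen supply the new one, and either side may be NULL to skip it. Below is a small standalone sketch of the pattern behind get_lg_prof_sample() and do_prof_reset() above; it assumes a program linked against a jemalloc built with heap profiling enabled, since these controls otherwise return ENOENT.

```
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
    size_t lg_sample;
    size_t sz = sizeof(lg_sample);

    /* Read-only: oldp/oldlenp are set, newp is NULL. */
    if (mallctl("prof.lg_sample", &lg_sample, &sz, NULL, 0) != 0) {
        return 1;
    }
    printf("current lg_prof_sample: %zu\n", lg_sample);

    /* Write-only: reset profile data and install a new sample rate. */
    size_t new_lg_sample = 5;
    if (mallctl("prof.reset", NULL, NULL, &new_lg_sample,
        sizeof(new_lg_sample)) != 0) {
        return 1;
    }
    return 0;
}
```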

@ -14,27 +14,27 @@ TEST_BEGIN(test_prof_realloc) {
prof_cnt_all(&curobjs_0, NULL, NULL, NULL);
p = mallocx(1024, flags);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
expect_ptr_not_null(p, "Unexpected mallocx() failure");
prof_info_get(tsd, p, NULL, &prof_info_p);
assert_ptr_ne(prof_info_p.alloc_tctx, (prof_tctx_t *)(uintptr_t)1U,
expect_ptr_ne(prof_info_p.alloc_tctx, (prof_tctx_t *)(uintptr_t)1U,
"Expected valid tctx");
prof_cnt_all(&curobjs_1, NULL, NULL, NULL);
assert_u64_eq(curobjs_0 + 1, curobjs_1,
expect_u64_eq(curobjs_0 + 1, curobjs_1,
"Allocation should have increased sample size");
q = rallocx(p, 2048, flags);
assert_ptr_ne(p, q, "Expected move");
assert_ptr_not_null(p, "Unexpected rmallocx() failure");
expect_ptr_ne(p, q, "Expected move");
expect_ptr_not_null(p, "Unexpected rmallocx() failure");
prof_info_get(tsd, q, NULL, &prof_info_q);
assert_ptr_ne(prof_info_q.alloc_tctx, (prof_tctx_t *)(uintptr_t)1U,
expect_ptr_ne(prof_info_q.alloc_tctx, (prof_tctx_t *)(uintptr_t)1U,
"Expected valid tctx");
prof_cnt_all(&curobjs_2, NULL, NULL, NULL);
assert_u64_eq(curobjs_1, curobjs_2,
expect_u64_eq(curobjs_1, curobjs_2,
"Reallocation should not have changed sample size");
dallocx(q, flags);
prof_cnt_all(&curobjs_3, NULL, NULL, NULL);
assert_u64_eq(curobjs_0, curobjs_3,
expect_u64_eq(curobjs_0, curobjs_3,
"Sample size should have returned to base level");
}
TEST_END

@ -7,11 +7,11 @@ mallctl_thread_name_get_impl(const char *thread_name_expected, const char *func,
size_t sz;
sz = sizeof(thread_name_old);
assert_d_eq(mallctl("thread.prof.name", (void *)&thread_name_old, &sz,
expect_d_eq(mallctl("thread.prof.name", (void *)&thread_name_old, &sz,
NULL, 0), 0,
"%s():%d: Unexpected mallctl failure reading thread.prof.name",
func, line);
assert_str_eq(thread_name_old, thread_name_expected,
expect_str_eq(thread_name_old, thread_name_expected,
"%s():%d: Unexpected thread.prof.name value", func, line);
}
#define mallctl_thread_name_get(a) \
@ -20,7 +20,7 @@ mallctl_thread_name_get_impl(const char *thread_name_expected, const char *func,
static void
mallctl_thread_name_set_impl(const char *thread_name, const char *func,
int line) {
assert_d_eq(mallctl("thread.prof.name", NULL, NULL,
expect_d_eq(mallctl("thread.prof.name", NULL, NULL,
(void *)&thread_name, sizeof(thread_name)), 0,
"%s():%d: Unexpected mallctl failure reading thread.prof.name",
func, line);
@ -39,14 +39,14 @@ TEST_BEGIN(test_prof_thread_name_validation) {
/* NULL input shouldn't be allowed. */
thread_name = NULL;
assert_d_eq(mallctl("thread.prof.name", NULL, NULL,
expect_d_eq(mallctl("thread.prof.name", NULL, NULL,
(void *)&thread_name, sizeof(thread_name)), EFAULT,
"Unexpected mallctl result writing \"%s\" to thread.prof.name",
thread_name);
/* '\n' shouldn't be allowed. */
thread_name = "hi\nthere";
assert_d_eq(mallctl("thread.prof.name", NULL, NULL,
expect_d_eq(mallctl("thread.prof.name", NULL, NULL,
(void *)&thread_name, sizeof(thread_name)), EFAULT,
"Unexpected mallctl result writing \"%s\" to thread.prof.name",
thread_name);
@ -57,7 +57,7 @@ TEST_BEGIN(test_prof_thread_name_validation) {
size_t sz;
sz = sizeof(thread_name_old);
assert_d_eq(mallctl("thread.prof.name",
expect_d_eq(mallctl("thread.prof.name",
(void *)&thread_name_old, &sz, (void *)&thread_name,
sizeof(thread_name)), EPERM,
"Unexpected mallctl result writing \"%s\" to "
@ -82,7 +82,7 @@ thd_start(void *varg) {
mallctl_thread_name_set(thread_name);
for (i = 0; i < NRESET; i++) {
assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0,
expect_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0,
"Unexpected error while resetting heap profile data");
mallctl_thread_name_get(thread_name);
}

@ -18,21 +18,21 @@ test_empty_list(list_head_t *head) {
list_t *t;
unsigned i;
assert_ptr_null(ql_first(head), "Unexpected element for empty list");
assert_ptr_null(ql_last(head, link),
expect_ptr_null(ql_first(head), "Unexpected element for empty list");
expect_ptr_null(ql_last(head, link),
"Unexpected element for empty list");
i = 0;
ql_foreach(t, head, link) {
i++;
}
assert_u_eq(i, 0, "Unexpected element for empty list");
expect_u_eq(i, 0, "Unexpected element for empty list");
i = 0;
ql_reverse_foreach(t, head, link) {
i++;
}
assert_u_eq(i, 0, "Unexpected element for empty list");
expect_u_eq(i, 0, "Unexpected element for empty list");
}
TEST_BEGIN(test_ql_empty) {
@ -58,34 +58,34 @@ test_entries_list(list_head_t *head, list_t *entries, unsigned nentries) {
list_t *t;
unsigned i;
assert_c_eq(ql_first(head)->id, entries[0].id, "Element id mismatch");
assert_c_eq(ql_last(head, link)->id, entries[nentries-1].id,
expect_c_eq(ql_first(head)->id, entries[0].id, "Element id mismatch");
expect_c_eq(ql_last(head, link)->id, entries[nentries-1].id,
"Element id mismatch");
i = 0;
ql_foreach(t, head, link) {
assert_c_eq(t->id, entries[i].id, "Element id mismatch");
expect_c_eq(t->id, entries[i].id, "Element id mismatch");
i++;
}
i = 0;
ql_reverse_foreach(t, head, link) {
assert_c_eq(t->id, entries[nentries-i-1].id,
expect_c_eq(t->id, entries[nentries-i-1].id,
"Element id mismatch");
i++;
}
for (i = 0; i < nentries-1; i++) {
t = ql_next(head, &entries[i], link);
assert_c_eq(t->id, entries[i+1].id, "Element id mismatch");
expect_c_eq(t->id, entries[i+1].id, "Element id mismatch");
}
assert_ptr_null(ql_next(head, &entries[nentries-1], link),
expect_ptr_null(ql_next(head, &entries[nentries-1], link),
"Unexpected element");
assert_ptr_null(ql_prev(head, &entries[0], link), "Unexpected element");
expect_ptr_null(ql_prev(head, &entries[0], link), "Unexpected element");
for (i = 1; i < nentries; i++) {
t = ql_prev(head, &entries[i], link);
assert_c_eq(t->id, entries[i-1].id, "Element id mismatch");
expect_c_eq(t->id, entries[i-1].id, "Element id mismatch");
}
}

@ -34,7 +34,7 @@ test_independent_entries(ring_t *entries) {
qr_foreach(t, &entries[i], link) {
j++;
}
assert_u_eq(j, 1,
expect_u_eq(j, 1,
"Iteration over single-element ring should visit precisely "
"one element");
}
@ -43,19 +43,19 @@ test_independent_entries(ring_t *entries) {
qr_reverse_foreach(t, &entries[i], link) {
j++;
}
assert_u_eq(j, 1,
expect_u_eq(j, 1,
"Iteration over single-element ring should visit precisely "
"one element");
}
for (i = 0; i < NENTRIES; i++) {
t = qr_next(&entries[i], link);
assert_ptr_eq(t, &entries[i],
expect_ptr_eq(t, &entries[i],
"Next element in single-element ring should be same as "
"current element");
}
for (i = 0; i < NENTRIES; i++) {
t = qr_prev(&entries[i], link);
assert_ptr_eq(t, &entries[i],
expect_ptr_eq(t, &entries[i],
"Previous element in single-element ring should be same as "
"current element");
}
@ -77,7 +77,7 @@ test_entries_ring(ring_t *entries) {
for (i = 0; i < NENTRIES; i++) {
j = 0;
qr_foreach(t, &entries[i], link) {
assert_c_eq(t->id, entries[(i+j) % NENTRIES].id,
expect_c_eq(t->id, entries[(i+j) % NENTRIES].id,
"Element id mismatch");
j++;
}
@ -85,19 +85,19 @@ test_entries_ring(ring_t *entries) {
for (i = 0; i < NENTRIES; i++) {
j = 0;
qr_reverse_foreach(t, &entries[i], link) {
assert_c_eq(t->id, entries[(NENTRIES+i-j-1) %
expect_c_eq(t->id, entries[(NENTRIES+i-j-1) %
NENTRIES].id, "Element id mismatch");
j++;
}
}
for (i = 0; i < NENTRIES; i++) {
t = qr_next(&entries[i], link);
assert_c_eq(t->id, entries[(i+1) % NENTRIES].id,
expect_c_eq(t->id, entries[(i+1) % NENTRIES].id,
"Element id mismatch");
}
for (i = 0; i < NENTRIES; i++) {
t = qr_prev(&entries[i], link);
assert_c_eq(t->id, entries[(NENTRIES+i-1) % NENTRIES].id,
expect_c_eq(t->id, entries[(NENTRIES+i-1) % NENTRIES].id,
"Element id mismatch");
}
}
@ -127,13 +127,13 @@ TEST_BEGIN(test_qr_remove) {
for (i = 0; i < NENTRIES; i++) {
j = 0;
qr_foreach(t, &entries[i], link) {
assert_c_eq(t->id, entries[i+j].id,
expect_c_eq(t->id, entries[i+j].id,
"Element id mismatch");
j++;
}
j = 0;
qr_reverse_foreach(t, &entries[i], link) {
assert_c_eq(t->id, entries[NENTRIES - 1 - j].id,
expect_c_eq(t->id, entries[NENTRIES - 1 - j].id,
"Element id mismatch");
j++;
}
@ -155,7 +155,7 @@ TEST_BEGIN(test_qr_before_insert) {
for (i = 0; i < NENTRIES; i++) {
j = 0;
qr_foreach(t, &entries[i], link) {
assert_c_eq(t->id, entries[(NENTRIES+i-j) %
expect_c_eq(t->id, entries[(NENTRIES+i-j) %
NENTRIES].id, "Element id mismatch");
j++;
}
@ -163,19 +163,19 @@ TEST_BEGIN(test_qr_before_insert) {
for (i = 0; i < NENTRIES; i++) {
j = 0;
qr_reverse_foreach(t, &entries[i], link) {
assert_c_eq(t->id, entries[(i+j+1) % NENTRIES].id,
expect_c_eq(t->id, entries[(i+j+1) % NENTRIES].id,
"Element id mismatch");
j++;
}
}
for (i = 0; i < NENTRIES; i++) {
t = qr_next(&entries[i], link);
assert_c_eq(t->id, entries[(NENTRIES+i-1) % NENTRIES].id,
expect_c_eq(t->id, entries[(NENTRIES+i-1) % NENTRIES].id,
"Element id mismatch");
}
for (i = 0; i < NENTRIES; i++) {
t = qr_prev(&entries[i], link);
assert_c_eq(t->id, entries[(i+1) % NENTRIES].id,
expect_c_eq(t->id, entries[(i+1) % NENTRIES].id,
"Element id mismatch");
}
}
@ -190,11 +190,11 @@ test_split_entries(ring_t *entries) {
j = 0;
qr_foreach(t, &entries[i], link) {
if (i < SPLIT_INDEX) {
assert_c_eq(t->id,
expect_c_eq(t->id,
entries[(i+j) % SPLIT_INDEX].id,
"Element id mismatch");
} else {
assert_c_eq(t->id, entries[(i+j-SPLIT_INDEX) %
expect_c_eq(t->id, entries[(i+j-SPLIT_INDEX) %
(NENTRIES-SPLIT_INDEX) + SPLIT_INDEX].id,
"Element id mismatch");
}

@ -26,8 +26,8 @@ static int
node_cmp(const node_t *a, const node_t *b) {
int ret;
assert_u32_eq(a->magic, NODE_MAGIC, "Bad magic");
assert_u32_eq(b->magic, NODE_MAGIC, "Bad magic");
expect_u32_eq(a->magic, NODE_MAGIC, "Bad magic");
expect_u32_eq(b->magic, NODE_MAGIC, "Bad magic");
ret = (a->key > b->key) - (a->key < b->key);
if (ret == 0) {
@ -50,21 +50,21 @@ TEST_BEGIN(test_rb_empty) {
tree_new(&tree);
assert_true(tree_empty(&tree), "Tree should be empty");
assert_ptr_null(tree_first(&tree), "Unexpected node");
assert_ptr_null(tree_last(&tree), "Unexpected node");
expect_true(tree_empty(&tree), "Tree should be empty");
expect_ptr_null(tree_first(&tree), "Unexpected node");
expect_ptr_null(tree_last(&tree), "Unexpected node");
key.key = 0;
key.magic = NODE_MAGIC;
assert_ptr_null(tree_search(&tree, &key), "Unexpected node");
expect_ptr_null(tree_search(&tree, &key), "Unexpected node");
key.key = 0;
key.magic = NODE_MAGIC;
assert_ptr_null(tree_nsearch(&tree, &key), "Unexpected node");
expect_ptr_null(tree_nsearch(&tree, &key), "Unexpected node");
key.key = 0;
key.magic = NODE_MAGIC;
assert_ptr_null(tree_psearch(&tree, &key), "Unexpected node");
expect_ptr_null(tree_psearch(&tree, &key), "Unexpected node");
}
TEST_END
@ -88,17 +88,17 @@ tree_recurse(node_t *node, unsigned black_height, unsigned black_depth) {
/* Red nodes must be interleaved with black nodes. */
if (rbtn_red_get(node_t, link, node)) {
if (left_node != NULL) {
assert_false(rbtn_red_get(node_t, link, left_node),
expect_false(rbtn_red_get(node_t, link, left_node),
"Node should be black");
}
if (right_node != NULL) {
assert_false(rbtn_red_get(node_t, link, right_node),
expect_false(rbtn_red_get(node_t, link, right_node),
"Node should be black");
}
}
/* Self. */
assert_u32_eq(node->magic, NODE_MAGIC, "Bad magic");
expect_u32_eq(node->magic, NODE_MAGIC, "Bad magic");
/* Left subtree. */
if (left_node != NULL) {
@ -122,21 +122,21 @@ tree_iterate_cb(tree_t *tree, node_t *node, void *data) {
unsigned *i = (unsigned *)data;
node_t *search_node;
assert_u32_eq(node->magic, NODE_MAGIC, "Bad magic");
expect_u32_eq(node->magic, NODE_MAGIC, "Bad magic");
/* Test rb_search(). */
search_node = tree_search(tree, node);
assert_ptr_eq(search_node, node,
expect_ptr_eq(search_node, node,
"tree_search() returned unexpected node");
/* Test rb_nsearch(). */
search_node = tree_nsearch(tree, node);
assert_ptr_eq(search_node, node,
expect_ptr_eq(search_node, node,
"tree_nsearch() returned unexpected node");
/* Test rb_psearch(). */
search_node = tree_psearch(tree, node);
assert_ptr_eq(search_node, node,
expect_ptr_eq(search_node, node,
"tree_psearch() returned unexpected node");
(*i)++;
@ -174,14 +174,14 @@ node_remove(tree_t *tree, node_t *node, unsigned nnodes) {
/* Test rb_nsearch(). */
search_node = tree_nsearch(tree, node);
if (search_node != NULL) {
assert_u64_ge(search_node->key, node->key,
expect_u64_ge(search_node->key, node->key,
"Key ordering error");
}
/* Test rb_psearch(). */
search_node = tree_psearch(tree, node);
if (search_node != NULL) {
assert_u64_le(search_node->key, node->key,
expect_u64_le(search_node->key, node->key,
"Key ordering error");
}
@ -189,10 +189,10 @@ node_remove(tree_t *tree, node_t *node, unsigned nnodes) {
rbtn_black_height(node_t, link, tree, black_height);
imbalances = tree_recurse(tree->rbt_root, black_height, 0);
assert_u_eq(imbalances, 0, "Tree is unbalanced");
assert_u_eq(tree_iterate(tree), nnodes-1,
expect_u_eq(imbalances, 0, "Tree is unbalanced");
expect_u_eq(tree_iterate(tree), nnodes-1,
"Unexpected node iteration count");
assert_u_eq(tree_iterate_reverse(tree), nnodes-1,
expect_u_eq(tree_iterate_reverse(tree), nnodes-1,
"Unexpected node iteration count");
}
@ -220,7 +220,7 @@ static void
destroy_cb(node_t *node, void *data) {
unsigned *nnodes = (unsigned *)data;
assert_u_gt(*nnodes, 0, "Destruction removed too many nodes");
expect_u_gt(*nnodes, 0, "Destruction removed too many nodes");
(*nnodes)--;
}
@ -271,19 +271,19 @@ TEST_BEGIN(test_rb_random) {
black_height);
imbalances = tree_recurse(tree.rbt_root,
black_height, 0);
assert_u_eq(imbalances, 0,
expect_u_eq(imbalances, 0,
"Tree is unbalanced");
assert_u_eq(tree_iterate(&tree), k+1,
expect_u_eq(tree_iterate(&tree), k+1,
"Unexpected node iteration count");
assert_u_eq(tree_iterate_reverse(&tree), k+1,
expect_u_eq(tree_iterate_reverse(&tree), k+1,
"Unexpected node iteration count");
assert_false(tree_empty(&tree),
expect_false(tree_empty(&tree),
"Tree should not be empty");
assert_ptr_not_null(tree_first(&tree),
expect_ptr_not_null(tree_first(&tree),
"Tree should not be empty");
assert_ptr_not_null(tree_last(&tree),
expect_ptr_not_null(tree_last(&tree),
"Tree should not be empty");
tree_next(&tree, &nodes[k]);
@ -312,7 +312,7 @@ TEST_BEGIN(test_rb_random) {
remove_iterate_cb, (void *)&nnodes);
nnodes--;
} while (start != NULL);
assert_u_eq(nnodes, 0,
expect_u_eq(nnodes, 0,
"Removal terminated early");
break;
} case 3: {
@ -326,13 +326,13 @@ TEST_BEGIN(test_rb_random) {
(void *)&nnodes);
nnodes--;
} while (start != NULL);
assert_u_eq(nnodes, 0,
expect_u_eq(nnodes, 0,
"Removal terminated early");
break;
} case 4: {
unsigned nnodes = j;
tree_destroy(&tree, destroy_cb, &nnodes);
assert_u_eq(nnodes, 0,
expect_u_eq(nnodes, 0,
"Destruction terminated early");
break;
} default:

@ -14,7 +14,7 @@ static unsigned
do_arena_create(extent_hooks_t *h) {
unsigned arena_ind;
size_t sz = sizeof(unsigned);
assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz,
expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz,
(void *)(h != NULL ? &h : NULL), (h != NULL ? sizeof(h) : 0)), 0,
"Unexpected mallctl() failure");
return arena_ind;
@ -26,17 +26,17 @@ do_arena_destroy(unsigned arena_ind) {
size_t miblen;
miblen = sizeof(mib)/sizeof(size_t);
assert_d_eq(mallctlnametomib("arena.0.destroy", mib, &miblen), 0,
expect_d_eq(mallctlnametomib("arena.0.destroy", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
mib[1] = (size_t)arena_ind;
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
"Unexpected mallctlbymib() failure");
}
static void
do_refresh(void) {
uint64_t epoch = 1;
assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
sizeof(epoch)), 0, "Unexpected mallctl() failure");
}
@ -46,11 +46,11 @@ do_get_size_impl(const char *cmd, unsigned arena_ind) {
size_t miblen = sizeof(mib) / sizeof(size_t);
size_t z = sizeof(size_t);
assert_d_eq(mallctlnametomib(cmd, mib, &miblen),
expect_d_eq(mallctlnametomib(cmd, mib, &miblen),
0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
mib[2] = arena_ind;
size_t size;
assert_d_eq(mallctlbymib(mib, miblen, (void *)&size, &z, NULL, 0),
expect_d_eq(mallctlbymib(mib, miblen, (void *)&size, &z, NULL, 0),
0, "Unexpected mallctlbymib([\"%s\"], ...) failure", cmd);
return size;
@ -76,7 +76,7 @@ thd_start(void *arg) {
next_epoch) {
spin_adaptive(&spinner);
}
assert_u_eq(cur_epoch, next_epoch, "Unexpected epoch");
expect_u_eq(cur_epoch, next_epoch, "Unexpected epoch");
/*
* Allocate. The main thread will reset the arena, so there's
@ -86,7 +86,7 @@ thd_start(void *arg) {
void *p = mallocx(sz, MALLOCX_ARENA(arena_ind) |
MALLOCX_TCACHE_NONE
);
assert_ptr_not_null(p,
expect_ptr_not_null(p,
"Unexpected mallocx() failure\n");
}
@ -134,9 +134,9 @@ TEST_BEGIN(test_retained) {
size_t allocated = esz * nthreads * PER_THD_NALLOCS;
size_t active = do_get_active(arena_ind);
assert_zu_le(allocated, active, "Unexpected active memory");
expect_zu_le(allocated, active, "Unexpected active memory");
size_t mapped = do_get_mapped(arena_ind);
assert_zu_le(active, mapped, "Unexpected mapped memory");
expect_zu_le(active, mapped, "Unexpected mapped memory");
arena_t *arena = arena_get(tsdn_fetch(), arena_ind, false);
size_t usable = 0;
@ -150,7 +150,7 @@ TEST_BEGIN(test_retained) {
* Only consider size classes that wouldn't be skipped.
*/
if (psz_usable > 0) {
assert_zu_lt(usable, allocated,
expect_zu_lt(usable, allocated,
"Excessive retained memory "
"(%#zx[+%#zx] > %#zx)", usable, psz_usable,
allocated);
@ -165,7 +165,7 @@ TEST_BEGIN(test_retained) {
* (rather than retaining) during reset.
*/
do_arena_destroy(arena_ind);
assert_u_eq(do_arena_create(NULL), arena_ind,
expect_u_eq(do_arena_create(NULL), arena_ind,
"Unexpected arena index");
}

@ -13,14 +13,14 @@ TEST_BEGIN(test_rtree_read_empty) {
tsdn = tsdn_fetch();
base_t *base = base_new(tsdn, 0, &ehooks_default_extent_hooks);
assert_ptr_not_null(base, "Unexpected base_new failure");
expect_ptr_not_null(base, "Unexpected base_new failure");
rtree_t *rtree = &test_rtree;
rtree_ctx_t rtree_ctx;
rtree_ctx_data_init(&rtree_ctx);
assert_false(rtree_new(rtree, base, false),
expect_false(rtree_new(rtree, base, false),
"Unexpected rtree_new() failure");
assert_ptr_null(rtree_edata_read(tsdn, rtree, &rtree_ctx, PAGE,
expect_ptr_null(rtree_edata_read(tsdn, rtree, &rtree_ctx, PAGE,
false), "rtree_edata_read() should return NULL for empty tree");
base_delete(tsdn, base);
@ -42,27 +42,27 @@ TEST_BEGIN(test_rtree_extrema) {
tsdn_t *tsdn = tsdn_fetch();
base_t *base = base_new(tsdn, 0, &ehooks_default_extent_hooks);
assert_ptr_not_null(base, "Unexpected base_new failure");
expect_ptr_not_null(base, "Unexpected base_new failure");
rtree_t *rtree = &test_rtree;
rtree_ctx_t rtree_ctx;
rtree_ctx_data_init(&rtree_ctx);
assert_false(rtree_new(rtree, base, false),
expect_false(rtree_new(rtree, base, false),
"Unexpected rtree_new() failure");
assert_false(rtree_write(tsdn, rtree, &rtree_ctx, PAGE, &edata_a,
expect_false(rtree_write(tsdn, rtree, &rtree_ctx, PAGE, &edata_a,
edata_szind_get(&edata_a), edata_slab_get(&edata_a)),
"Unexpected rtree_write() failure");
rtree_szind_slab_update(tsdn, rtree, &rtree_ctx, PAGE,
edata_szind_get(&edata_a), edata_slab_get(&edata_a));
assert_ptr_eq(rtree_edata_read(tsdn, rtree, &rtree_ctx, PAGE, true),
expect_ptr_eq(rtree_edata_read(tsdn, rtree, &rtree_ctx, PAGE, true),
&edata_a,
"rtree_edata_read() should return previously set value");
assert_false(rtree_write(tsdn, rtree, &rtree_ctx, ~((uintptr_t)0),
expect_false(rtree_write(tsdn, rtree, &rtree_ctx, ~((uintptr_t)0),
&edata_b, edata_szind_get_maybe_invalid(&edata_b),
edata_slab_get(&edata_b)), "Unexpected rtree_write() failure");
assert_ptr_eq(rtree_edata_read(tsdn, rtree, &rtree_ctx,
expect_ptr_eq(rtree_edata_read(tsdn, rtree, &rtree_ctx,
~((uintptr_t)0), true), &edata_b,
"rtree_edata_read() should return previously set value");
@ -73,7 +73,7 @@ TEST_END
TEST_BEGIN(test_rtree_bits) {
tsdn_t *tsdn = tsdn_fetch();
base_t *base = base_new(tsdn, 0, &ehooks_default_extent_hooks);
assert_ptr_not_null(base, "Unexpected base_new failure");
expect_ptr_not_null(base, "Unexpected base_new failure");
uintptr_t keys[] = {PAGE, PAGE + 1,
PAGE + (((uintptr_t)1) << LG_PAGE) - 1};
@ -85,22 +85,22 @@ TEST_BEGIN(test_rtree_bits) {
rtree_t *rtree = &test_rtree;
rtree_ctx_t rtree_ctx;
rtree_ctx_data_init(&rtree_ctx);
assert_false(rtree_new(rtree, base, false),
expect_false(rtree_new(rtree, base, false),
"Unexpected rtree_new() failure");
for (unsigned i = 0; i < sizeof(keys)/sizeof(uintptr_t); i++) {
assert_false(rtree_write(tsdn, rtree, &rtree_ctx, keys[i],
expect_false(rtree_write(tsdn, rtree, &rtree_ctx, keys[i],
&edata, SC_NSIZES, false),
"Unexpected rtree_write() failure");
for (unsigned j = 0; j < sizeof(keys)/sizeof(uintptr_t); j++) {
assert_ptr_eq(rtree_edata_read(tsdn, rtree, &rtree_ctx,
expect_ptr_eq(rtree_edata_read(tsdn, rtree, &rtree_ctx,
keys[j], true), &edata,
"rtree_edata_read() should return previously set "
"value and ignore insignificant key bits; i=%u, "
"j=%u, set key=%#"FMTxPTR", get key=%#"FMTxPTR, i,
j, keys[i], keys[j]);
}
assert_ptr_null(rtree_edata_read(tsdn, rtree, &rtree_ctx,
expect_ptr_null(rtree_edata_read(tsdn, rtree, &rtree_ctx,
(((uintptr_t)2) << LG_PAGE), false),
"Only leftmost rtree leaf should be set; i=%u", i);
rtree_clear(tsdn, rtree, &rtree_ctx, keys[i]);
@ -117,7 +117,7 @@ TEST_BEGIN(test_rtree_random) {
tsdn_t *tsdn = tsdn_fetch();
base_t *base = base_new(tsdn, 0, &ehooks_default_extent_hooks);
assert_ptr_not_null(base, "Unexpected base_new failure");
expect_ptr_not_null(base, "Unexpected base_new failure");
uintptr_t keys[NSET];
rtree_t *rtree = &test_rtree;
@ -128,23 +128,23 @@ TEST_BEGIN(test_rtree_random) {
edata_init(&edata, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0,
extent_state_active, false, false, true, EXTENT_NOT_HEAD);
assert_false(rtree_new(rtree, base, false),
expect_false(rtree_new(rtree, base, false),
"Unexpected rtree_new() failure");
for (unsigned i = 0; i < NSET; i++) {
keys[i] = (uintptr_t)gen_rand64(sfmt);
rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree,
&rtree_ctx, keys[i], false, true);
assert_ptr_not_null(elm,
expect_ptr_not_null(elm,
"Unexpected rtree_leaf_elm_lookup() failure");
rtree_leaf_elm_write(tsdn, rtree, elm, &edata, SC_NSIZES,
false);
assert_ptr_eq(rtree_edata_read(tsdn, rtree, &rtree_ctx,
expect_ptr_eq(rtree_edata_read(tsdn, rtree, &rtree_ctx,
keys[i], true), &edata,
"rtree_edata_read() should return previously set value");
}
for (unsigned i = 0; i < NSET; i++) {
assert_ptr_eq(rtree_edata_read(tsdn, rtree, &rtree_ctx,
expect_ptr_eq(rtree_edata_read(tsdn, rtree, &rtree_ctx,
keys[i], true), &edata,
"rtree_edata_read() should return previously set value, "
"i=%u", i);
@ -152,12 +152,12 @@ TEST_BEGIN(test_rtree_random) {
for (unsigned i = 0; i < NSET; i++) {
rtree_clear(tsdn, rtree, &rtree_ctx, keys[i]);
assert_ptr_null(rtree_edata_read(tsdn, rtree, &rtree_ctx,
expect_ptr_null(rtree_edata_read(tsdn, rtree, &rtree_ctx,
keys[i], true),
"rtree_edata_read() should return previously set value");
}
for (unsigned i = 0; i < NSET; i++) {
assert_ptr_null(rtree_edata_read(tsdn, rtree, &rtree_ctx,
expect_ptr_null(rtree_edata_read(tsdn, rtree, &rtree_ctx,
keys[i], true),
"rtree_edata_read() should return previously set value");
}

@ -24,7 +24,7 @@ TEST_BEGIN(test_malloc_free_overflow) {
free(ptr);
safety_check_set_abort(NULL);
assert_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
expect_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
fake_abort_called = false;
}
TEST_END
@ -40,7 +40,7 @@ TEST_BEGIN(test_mallocx_dallocx_overflow) {
dallocx(ptr, 0);
safety_check_set_abort(NULL);
assert_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
expect_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
fake_abort_called = false;
}
TEST_END
@ -56,7 +56,7 @@ TEST_BEGIN(test_malloc_sdallocx_overflow) {
sdallocx(ptr, 128, 0);
safety_check_set_abort(NULL);
assert_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
expect_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
fake_abort_called = false;
}
TEST_END
@ -73,7 +73,7 @@ TEST_BEGIN(test_realloc_overflow) {
safety_check_set_abort(NULL);
free(ptr);
assert_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
expect_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
fake_abort_called = false;
}
TEST_END
@ -90,7 +90,7 @@ TEST_BEGIN(test_rallocx_overflow) {
safety_check_set_abort(NULL);
free(ptr);
assert_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
expect_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
fake_abort_called = false;
}
TEST_END
@ -104,9 +104,9 @@ TEST_BEGIN(test_xallocx_overflow) {
char* ptr = malloc(128);
ptr[128] = 0;
size_t result = xallocx(ptr, 129, 0, 0);
assert_zu_eq(result, 128, "");
expect_zu_eq(result, 128, "");
free(ptr);
assert_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
expect_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
fake_abort_called = false;
safety_check_set_abort(NULL);
}
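The fragments above all share one shape: install a fake abort hook, write one byte past the end of an allocation, and expect the redzone validation (run when the object is deallocated or resized) to fire the hook rather than kill the process. A condensed sketch of that shape follows; it assumes a build with the optional safety checks enabled, and the hook signature passed to safety_check_set_abort() is itself an assumption.

```
#include "test/jemalloc_test.h"

static bool fake_abort_called;

/* Assumed hook signature: invoked with the failure message. */
static void
fake_abort(const char *message) {
    (void)message;
    fake_abort_called = true;
}

TEST_BEGIN(test_write_past_end) {
    safety_check_set_abort(&fake_abort);

    char *ptr = malloc(128);
    ptr[128] = 0; /* one byte into the redzone */
    free(ptr);    /* the redzone is validated here */

    safety_check_set_abort(NULL);
    expect_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
    fake_abort_called = false;
}
TEST_END
```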

@ -9,7 +9,7 @@ TEST_BEGIN(test_update_slab_size) {
+ (ZU(tiny->ndelta) << tiny->lg_delta);
size_t pgs_too_big = (tiny_size * BITMAP_MAXBITS + PAGE - 1) / PAGE + 1;
sc_data_update_slab_size(&data, tiny_size, tiny_size, (int)pgs_too_big);
assert_zu_lt((size_t)tiny->pgs, pgs_too_big, "Allowed excessive pages");
expect_zu_lt((size_t)tiny->pgs, pgs_too_big, "Allowed excessive pages");
sc_data_update_slab_size(&data, 1, 10 * PAGE, 1);
for (int i = 0; i < data.nbins; i++) {
@ -17,9 +17,9 @@ TEST_BEGIN(test_update_slab_size) {
size_t reg_size = (ZU(1) << sc->lg_base)
+ (ZU(sc->ndelta) << sc->lg_delta);
if (reg_size <= PAGE) {
assert_d_eq(sc->pgs, 1, "Ignored valid page size hint");
expect_d_eq(sc->pgs, 1, "Ignored valid page size hint");
} else {
assert_d_gt(sc->pgs, 1,
expect_d_gt(sc->pgs, 1,
"Allowed invalid page size hint");
}
}

@ -15,10 +15,10 @@ set_data(data_t *data, int num) {
}
static void
assert_data(data_t *data) {
expect_data(data_t *data) {
int num = data->arr[0];
for (int i = 0; i < 10; i++) {
assert_d_eq(num, data->arr[i], "Data consistency error");
expect_d_eq(num, data->arr[i], "Data consistency error");
}
}
@ -37,8 +37,8 @@ seq_reader_thd(void *arg) {
while (iter < 1000 * 1000 - 1) {
bool success = seq_try_load_data(&local_data, &thd_data->data);
if (success) {
assert_data(&local_data);
assert_d_le(iter, local_data.arr[0],
expect_data(&local_data);
expect_d_le(iter, local_data.arr[0],
"Seq read went back in time.");
iter = local_data.arr[0];
}
@ -82,8 +82,8 @@ TEST_BEGIN(test_seq_simple) {
seq_store_data(&seq, &data);
set_data(&data, 0);
bool success = seq_try_load_data(&data, &seq);
assert_b_eq(success, true, "Failed non-racing read");
assert_data(&data);
expect_b_eq(success, true, "Failed non-racing read");
expect_data(&data);
}
}
TEST_END

@ -7,16 +7,16 @@ get_max_size_class(void) {
size_t sz, miblen, max_size_class;
sz = sizeof(unsigned);
assert_d_eq(mallctl("arenas.nlextents", (void *)&nlextents, &sz, NULL,
expect_d_eq(mallctl("arenas.nlextents", (void *)&nlextents, &sz, NULL,
0), 0, "Unexpected mallctl() error");
miblen = sizeof(mib) / sizeof(size_t);
assert_d_eq(mallctlnametomib("arenas.lextent.0.size", mib, &miblen), 0,
expect_d_eq(mallctlnametomib("arenas.lextent.0.size", mib, &miblen), 0,
"Unexpected mallctlnametomib() error");
mib[2] = nlextents - 1;
sz = sizeof(size_t);
assert_d_eq(mallctlbymib(mib, miblen, (void *)&max_size_class, &sz,
expect_d_eq(mallctlbymib(mib, miblen, (void *)&max_size_class, &sz,
NULL, 0), 0, "Unexpected mallctlbymib() error");
return max_size_class;
@ -32,50 +32,50 @@ TEST_BEGIN(test_size_classes) {
for (index = 0, size_class = sz_index2size(index); index < max_index ||
size_class < max_size_class; index++, size_class =
sz_index2size(index)) {
assert_true(index < max_index,
expect_true(index < max_index,
"Loop conditionals should be equivalent; index=%u, "
"size_class=%zu (%#zx)", index, size_class, size_class);
assert_true(size_class < max_size_class,
expect_true(size_class < max_size_class,
"Loop conditionals should be equivalent; index=%u, "
"size_class=%zu (%#zx)", index, size_class, size_class);
assert_u_eq(index, sz_size2index(size_class),
expect_u_eq(index, sz_size2index(size_class),
"sz_size2index() does not reverse sz_index2size(): index=%u"
" --> size_class=%zu --> index=%u --> size_class=%zu",
index, size_class, sz_size2index(size_class),
sz_index2size(sz_size2index(size_class)));
assert_zu_eq(size_class,
expect_zu_eq(size_class,
sz_index2size(sz_size2index(size_class)),
"sz_index2size() does not reverse sz_size2index(): index=%u"
" --> size_class=%zu --> index=%u --> size_class=%zu",
index, size_class, sz_size2index(size_class),
sz_index2size(sz_size2index(size_class)));
assert_u_eq(index+1, sz_size2index(size_class+1),
expect_u_eq(index+1, sz_size2index(size_class+1),
"Next size_class does not round up properly");
assert_zu_eq(size_class, (index > 0) ?
expect_zu_eq(size_class, (index > 0) ?
sz_s2u(sz_index2size(index-1)+1) : sz_s2u(1),
"sz_s2u() does not round up to size class");
assert_zu_eq(size_class, sz_s2u(size_class-1),
expect_zu_eq(size_class, sz_s2u(size_class-1),
"sz_s2u() does not round up to size class");
assert_zu_eq(size_class, sz_s2u(size_class),
expect_zu_eq(size_class, sz_s2u(size_class),
"sz_s2u() does not compute same size class");
assert_zu_eq(sz_s2u(size_class+1), sz_index2size(index+1),
expect_zu_eq(sz_s2u(size_class+1), sz_index2size(index+1),
"sz_s2u() does not round up to next size class");
}
assert_u_eq(index, sz_size2index(sz_index2size(index)),
expect_u_eq(index, sz_size2index(sz_index2size(index)),
"sz_size2index() does not reverse sz_index2size()");
assert_zu_eq(max_size_class, sz_index2size(
expect_zu_eq(max_size_class, sz_index2size(
sz_size2index(max_size_class)),
"sz_index2size() does not reverse sz_size2index()");
assert_zu_eq(size_class, sz_s2u(sz_index2size(index-1)+1),
expect_zu_eq(size_class, sz_s2u(sz_index2size(index-1)+1),
"sz_s2u() does not round up to size class");
assert_zu_eq(size_class, sz_s2u(size_class-1),
expect_zu_eq(size_class, sz_s2u(size_class-1),
"sz_s2u() does not round up to size class");
assert_zu_eq(size_class, sz_s2u(size_class),
expect_zu_eq(size_class, sz_s2u(size_class),
"sz_s2u() does not compute same size class");
}
TEST_END
@ -90,53 +90,53 @@ TEST_BEGIN(test_psize_classes) {
for (pind = 0, size_class = sz_pind2sz(pind);
pind < max_pind || size_class < max_psz;
pind++, size_class = sz_pind2sz(pind)) {
assert_true(pind < max_pind,
expect_true(pind < max_pind,
"Loop conditionals should be equivalent; pind=%u, "
"size_class=%zu (%#zx)", pind, size_class, size_class);
assert_true(size_class < max_psz,
expect_true(size_class < max_psz,
"Loop conditionals should be equivalent; pind=%u, "
"size_class=%zu (%#zx)", pind, size_class, size_class);
assert_u_eq(pind, sz_psz2ind(size_class),
expect_u_eq(pind, sz_psz2ind(size_class),
"sz_psz2ind() does not reverse sz_pind2sz(): pind=%u -->"
" size_class=%zu --> pind=%u --> size_class=%zu", pind,
size_class, sz_psz2ind(size_class),
sz_pind2sz(sz_psz2ind(size_class)));
assert_zu_eq(size_class, sz_pind2sz(sz_psz2ind(size_class)),
expect_zu_eq(size_class, sz_pind2sz(sz_psz2ind(size_class)),
"sz_pind2sz() does not reverse sz_psz2ind(): pind=%u -->"
" size_class=%zu --> pind=%u --> size_class=%zu", pind,
size_class, sz_psz2ind(size_class),
sz_pind2sz(sz_psz2ind(size_class)));
if (size_class == SC_LARGE_MAXCLASS) {
assert_u_eq(SC_NPSIZES, sz_psz2ind(size_class + 1),
expect_u_eq(SC_NPSIZES, sz_psz2ind(size_class + 1),
"Next size_class does not round up properly");
} else {
assert_u_eq(pind + 1, sz_psz2ind(size_class + 1),
expect_u_eq(pind + 1, sz_psz2ind(size_class + 1),
"Next size_class does not round up properly");
}
assert_zu_eq(size_class, (pind > 0) ?
expect_zu_eq(size_class, (pind > 0) ?
sz_psz2u(sz_pind2sz(pind-1)+1) : sz_psz2u(1),
"sz_psz2u() does not round up to size class");
assert_zu_eq(size_class, sz_psz2u(size_class-1),
expect_zu_eq(size_class, sz_psz2u(size_class-1),
"sz_psz2u() does not round up to size class");
assert_zu_eq(size_class, sz_psz2u(size_class),
expect_zu_eq(size_class, sz_psz2u(size_class),
"sz_psz2u() does not compute same size class");
assert_zu_eq(sz_psz2u(size_class+1), sz_pind2sz(pind+1),
expect_zu_eq(sz_psz2u(size_class+1), sz_pind2sz(pind+1),
"sz_psz2u() does not round up to next size class");
}
assert_u_eq(pind, sz_psz2ind(sz_pind2sz(pind)),
expect_u_eq(pind, sz_psz2ind(sz_pind2sz(pind)),
"sz_psz2ind() does not reverse sz_pind2sz()");
assert_zu_eq(max_psz, sz_pind2sz(sz_psz2ind(max_psz)),
expect_zu_eq(max_psz, sz_pind2sz(sz_psz2ind(max_psz)),
"sz_pind2sz() does not reverse sz_psz2ind()");
assert_zu_eq(size_class, sz_psz2u(sz_pind2sz(pind-1)+1),
expect_zu_eq(size_class, sz_psz2u(sz_pind2sz(pind-1)+1),
"sz_psz2u() does not round up to size class");
assert_zu_eq(size_class, sz_psz2u(size_class-1),
expect_zu_eq(size_class, sz_psz2u(size_class-1),
"sz_psz2u() does not round up to size class");
assert_zu_eq(size_class, sz_psz2u(size_class),
expect_zu_eq(size_class, sz_psz2u(size_class),
"sz_psz2u() does not compute same size class");
}
TEST_END
@ -147,34 +147,34 @@ TEST_BEGIN(test_overflow) {
max_size_class = get_max_size_class();
max_psz = max_size_class + PAGE;
assert_u_eq(sz_size2index(max_size_class+1), SC_NSIZES,
expect_u_eq(sz_size2index(max_size_class+1), SC_NSIZES,
"sz_size2index() should return NSIZES on overflow");
assert_u_eq(sz_size2index(ZU(PTRDIFF_MAX)+1), SC_NSIZES,
expect_u_eq(sz_size2index(ZU(PTRDIFF_MAX)+1), SC_NSIZES,
"sz_size2index() should return NSIZES on overflow");
assert_u_eq(sz_size2index(SIZE_T_MAX), SC_NSIZES,
expect_u_eq(sz_size2index(SIZE_T_MAX), SC_NSIZES,
"sz_size2index() should return NSIZES on overflow");
assert_zu_eq(sz_s2u(max_size_class+1), 0,
expect_zu_eq(sz_s2u(max_size_class+1), 0,
"sz_s2u() should return 0 for unsupported size");
assert_zu_eq(sz_s2u(ZU(PTRDIFF_MAX)+1), 0,
expect_zu_eq(sz_s2u(ZU(PTRDIFF_MAX)+1), 0,
"sz_s2u() should return 0 for unsupported size");
assert_zu_eq(sz_s2u(SIZE_T_MAX), 0,
expect_zu_eq(sz_s2u(SIZE_T_MAX), 0,
"sz_s2u() should return 0 on overflow");
assert_u_eq(sz_psz2ind(max_size_class+1), SC_NPSIZES,
expect_u_eq(sz_psz2ind(max_size_class+1), SC_NPSIZES,
"sz_psz2ind() should return NPSIZES on overflow");
assert_u_eq(sz_psz2ind(ZU(PTRDIFF_MAX)+1), SC_NPSIZES,
expect_u_eq(sz_psz2ind(ZU(PTRDIFF_MAX)+1), SC_NPSIZES,
"sz_psz2ind() should return NPSIZES on overflow");
assert_u_eq(sz_psz2ind(SIZE_T_MAX), SC_NPSIZES,
expect_u_eq(sz_psz2ind(SIZE_T_MAX), SC_NPSIZES,
"sz_psz2ind() should return NPSIZES on overflow");
assert_zu_eq(sz_psz2u(max_size_class+1), max_psz,
expect_zu_eq(sz_psz2u(max_size_class+1), max_psz,
"sz_psz2u() should return (LARGE_MAXCLASS + PAGE) for unsupported"
" size");
assert_zu_eq(sz_psz2u(ZU(PTRDIFF_MAX)+1), max_psz,
expect_zu_eq(sz_psz2u(ZU(PTRDIFF_MAX)+1), max_psz,
"sz_psz2u() should return (LARGE_MAXCLASS + PAGE) for unsupported "
"size");
assert_zu_eq(sz_psz2u(SIZE_T_MAX), max_psz,
expect_zu_eq(sz_psz2u(SIZE_T_MAX), max_psz,
"sz_psz2u() should return (LARGE_MAXCLASS + PAGE) on overflow");
}
TEST_END
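The sz_* round trips above exercise internal helpers, but the same rounding behavior is observable through the public API: nallocx() reports the real (size-class) size a request would receive, so it never returns less than the request and is a fixed point under repeated application. A quick sketch of those two properties:

```
#include <assert.h>
#include <stddef.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
    for (size_t sz = 1; sz <= 64 * 1024; sz++) {
        size_t usize = nallocx(sz, 0);
        assert(usize >= sz);               /* rounds up */
        assert(nallocx(usize, 0) == usize); /* a size class maps to itself */
    }
    return 0;
}
```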

@ -14,12 +14,12 @@ TEST_BEGIN(test_arena_slab_regind) {
bin_info->slab_size, true,
binind, 0, extent_state_active, false, true, true,
EXTENT_NOT_HEAD);
assert_ptr_not_null(edata_addr_get(&slab),
expect_ptr_not_null(edata_addr_get(&slab),
"Unexpected malloc() failure");
for (regind = 0; regind < bin_info->nregs; regind++) {
void *reg = (void *)((uintptr_t)edata_addr_get(&slab) +
(bin_info->reg_size * regind));
assert_zu_eq(arena_slab_regind(&slab, binind, reg),
expect_zu_eq(arena_slab_regind(&slab, binind, reg),
regind,
"Incorrect region index computed for size %zu",
bin_info->reg_size);

@ -26,9 +26,9 @@ TEST_BEGIN(test_smoothstep_integral) {
max = (KQU(1) << (SMOOTHSTEP_BFP-1)) * (SMOOTHSTEP_NSTEPS+1);
min = max - SMOOTHSTEP_NSTEPS;
assert_u64_ge(sum, min,
expect_u64_ge(sum, min,
"Integral too small, even accounting for truncation");
assert_u64_le(sum, max, "Integral exceeds 1/2");
expect_u64_le(sum, max, "Integral exceeds 1/2");
if (false) {
malloc_printf("%"FMTu64" ulps under 1/2 (limit %d)\n",
max - sum, SMOOTHSTEP_NSTEPS);
@ -49,10 +49,10 @@ TEST_BEGIN(test_smoothstep_monotonic) {
prev_h = 0;
for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
uint64_t h = smoothstep_tab[i];
assert_u64_ge(h, prev_h, "Piecewise non-monotonic, i=%u", i);
expect_u64_ge(h, prev_h, "Piecewise non-monotonic, i=%u", i);
prev_h = h;
}
assert_u64_eq(smoothstep_tab[SMOOTHSTEP_NSTEPS-1],
expect_u64_eq(smoothstep_tab[SMOOTHSTEP_NSTEPS-1],
(KQU(1) << SMOOTHSTEP_BFP), "Last step must equal 1");
}
TEST_END
@ -72,7 +72,7 @@ TEST_BEGIN(test_smoothstep_slope) {
for (i = 0; i < SMOOTHSTEP_NSTEPS / 2 + SMOOTHSTEP_NSTEPS % 2; i++) {
uint64_t h = smoothstep_tab[i];
uint64_t delta = h - prev_h;
assert_u64_ge(delta, prev_delta,
expect_u64_ge(delta, prev_delta,
"Slope must monotonically increase in 0.0 <= x <= 0.5, "
"i=%u", i);
prev_h = h;
@ -84,7 +84,7 @@ TEST_BEGIN(test_smoothstep_slope) {
for (i = SMOOTHSTEP_NSTEPS-1; i >= SMOOTHSTEP_NSTEPS / 2; i--) {
uint64_t h = smoothstep_tab[i];
uint64_t delta = prev_h - h;
assert_u64_ge(delta, prev_delta,
expect_u64_ge(delta, prev_delta,
"Slope must monotonically decrease in 0.5 <= x <= 1.0, "
"i=%u", i);
prev_h = h;
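In continuous terms, the three smoothstep tests pin down the shape of the tabulated sigmoid $h$ on $[0,1]$, stored at SMOOTHSTEP_NSTEPS points in SMOOTHSTEP_BFP-bit fixed point. Up to truncation error, they encode (this is a sketch of the continuous statement, not of the fixed-point arithmetic itself):

$$h(1)=1,\qquad h\ \text{nondecreasing},\qquad \int_0^1 h(x)\,dx=\tfrac{1}{2},\qquad h'\ \text{nondecreasing on }[0,\tfrac{1}{2}]\ \text{and nonincreasing on }[\tfrac{1}{2},1].$$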

@ -5,21 +5,21 @@ TEST_BEGIN(test_stats_summary) {
int expected = config_stats ? 0 : ENOENT;
sz = sizeof(size_t);
assert_d_eq(mallctl("stats.allocated", (void *)&allocated, &sz, NULL,
expect_d_eq(mallctl("stats.allocated", (void *)&allocated, &sz, NULL,
0), expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("stats.active", (void *)&active, &sz, NULL, 0),
expect_d_eq(mallctl("stats.active", (void *)&active, &sz, NULL, 0),
expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("stats.resident", (void *)&resident, &sz, NULL, 0),
expect_d_eq(mallctl("stats.resident", (void *)&resident, &sz, NULL, 0),
expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("stats.mapped", (void *)&mapped, &sz, NULL, 0),
expect_d_eq(mallctl("stats.mapped", (void *)&mapped, &sz, NULL, 0),
expected, "Unexpected mallctl() result");
if (config_stats) {
assert_zu_le(allocated, active,
expect_zu_le(allocated, active,
"allocated should be no larger than active");
assert_zu_lt(active, resident,
expect_zu_lt(active, resident,
"active should be less than resident");
assert_zu_lt(active, mapped,
expect_zu_lt(active, mapped,
"active should be less than mapped");
}
}
@ -34,30 +34,30 @@ TEST_BEGIN(test_stats_large) {
int expected = config_stats ? 0 : ENOENT;
p = mallocx(SC_SMALL_MAXCLASS + 1, MALLOCX_ARENA(0));
assert_ptr_not_null(p, "Unexpected mallocx() failure");
expect_ptr_not_null(p, "Unexpected mallocx() failure");
assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
0, "Unexpected mallctl() failure");
sz = sizeof(size_t);
assert_d_eq(mallctl("stats.arenas.0.large.allocated",
expect_d_eq(mallctl("stats.arenas.0.large.allocated",
(void *)&allocated, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
sz = sizeof(uint64_t);
assert_d_eq(mallctl("stats.arenas.0.large.nmalloc", (void *)&nmalloc,
expect_d_eq(mallctl("stats.arenas.0.large.nmalloc", (void *)&nmalloc,
&sz, NULL, 0), expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("stats.arenas.0.large.ndalloc", (void *)&ndalloc,
expect_d_eq(mallctl("stats.arenas.0.large.ndalloc", (void *)&ndalloc,
&sz, NULL, 0), expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("stats.arenas.0.large.nrequests",
expect_d_eq(mallctl("stats.arenas.0.large.nrequests",
(void *)&nrequests, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
if (config_stats) {
assert_zu_gt(allocated, 0,
expect_zu_gt(allocated, 0,
"allocated should be greater than zero");
assert_u64_ge(nmalloc, ndalloc,
expect_u64_ge(nmalloc, ndalloc,
"nmalloc should be at least as large as ndalloc");
assert_u64_le(nmalloc, nrequests,
expect_u64_le(nmalloc, nrequests,
"nmalloc should no larger than nrequests");
}
@ -75,54 +75,54 @@ TEST_BEGIN(test_stats_arenas_summary) {
uint64_t muzzy_npurge, muzzy_nmadvise, muzzy_purged;
little = mallocx(SC_SMALL_MAXCLASS, MALLOCX_ARENA(0));
assert_ptr_not_null(little, "Unexpected mallocx() failure");
expect_ptr_not_null(little, "Unexpected mallocx() failure");
large = mallocx((1U << SC_LG_LARGE_MINCLASS),
MALLOCX_ARENA(0));
assert_ptr_not_null(large, "Unexpected mallocx() failure");
expect_ptr_not_null(large, "Unexpected mallocx() failure");
dallocx(little, 0);
dallocx(large, 0);
assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
expect_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
opt_tcache ? 0 : EFAULT, "Unexpected mallctl() result");
assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
expect_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl() failure");
assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
0, "Unexpected mallctl() failure");
sz = sizeof(size_t);
assert_d_eq(mallctl("stats.arenas.0.mapped", (void *)&mapped, &sz, NULL,
expect_d_eq(mallctl("stats.arenas.0.mapped", (void *)&mapped, &sz, NULL,
0), expected, "Unexepected mallctl() result");
sz = sizeof(uint64_t);
assert_d_eq(mallctl("stats.arenas.0.dirty_npurge",
expect_d_eq(mallctl("stats.arenas.0.dirty_npurge",
(void *)&dirty_npurge, &sz, NULL, 0), expected,
"Unexepected mallctl() result");
assert_d_eq(mallctl("stats.arenas.0.dirty_nmadvise",
expect_d_eq(mallctl("stats.arenas.0.dirty_nmadvise",
(void *)&dirty_nmadvise, &sz, NULL, 0), expected,
"Unexepected mallctl() result");
assert_d_eq(mallctl("stats.arenas.0.dirty_purged",
expect_d_eq(mallctl("stats.arenas.0.dirty_purged",
(void *)&dirty_purged, &sz, NULL, 0), expected,
"Unexepected mallctl() result");
assert_d_eq(mallctl("stats.arenas.0.muzzy_npurge",
expect_d_eq(mallctl("stats.arenas.0.muzzy_npurge",
(void *)&muzzy_npurge, &sz, NULL, 0), expected,
"Unexepected mallctl() result");
assert_d_eq(mallctl("stats.arenas.0.muzzy_nmadvise",
expect_d_eq(mallctl("stats.arenas.0.muzzy_nmadvise",
(void *)&muzzy_nmadvise, &sz, NULL, 0), expected,
"Unexepected mallctl() result");
assert_d_eq(mallctl("stats.arenas.0.muzzy_purged",
expect_d_eq(mallctl("stats.arenas.0.muzzy_purged",
(void *)&muzzy_purged, &sz, NULL, 0), expected,
"Unexepected mallctl() result");
if (config_stats) {
if (!background_thread_enabled()) {
assert_u64_gt(dirty_npurge + muzzy_npurge, 0,
expect_u64_gt(dirty_npurge + muzzy_npurge, 0,
"At least one purge should have occurred");
}
assert_u64_le(dirty_nmadvise, dirty_purged,
expect_u64_le(dirty_nmadvise, dirty_purged,
"dirty_nmadvise should be no greater than dirty_purged");
assert_u64_le(muzzy_nmadvise, muzzy_purged,
expect_u64_le(muzzy_nmadvise, muzzy_purged,
"muzzy_nmadvise should be no greater than muzzy_purged");
}
}
@ -150,35 +150,35 @@ TEST_BEGIN(test_stats_arenas_small) {
no_lazy_lock(); /* Lazy locking would dodge tcache testing. */
p = mallocx(SC_SMALL_MAXCLASS, MALLOCX_ARENA(0));
assert_ptr_not_null(p, "Unexpected mallocx() failure");
expect_ptr_not_null(p, "Unexpected mallocx() failure");
assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
expect_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
opt_tcache ? 0 : EFAULT, "Unexpected mallctl() result");
assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
0, "Unexpected mallctl() failure");
sz = sizeof(size_t);
assert_d_eq(mallctl("stats.arenas.0.small.allocated",
expect_d_eq(mallctl("stats.arenas.0.small.allocated",
(void *)&allocated, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
sz = sizeof(uint64_t);
assert_d_eq(mallctl("stats.arenas.0.small.nmalloc", (void *)&nmalloc,
expect_d_eq(mallctl("stats.arenas.0.small.nmalloc", (void *)&nmalloc,
&sz, NULL, 0), expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("stats.arenas.0.small.ndalloc", (void *)&ndalloc,
expect_d_eq(mallctl("stats.arenas.0.small.ndalloc", (void *)&ndalloc,
&sz, NULL, 0), expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("stats.arenas.0.small.nrequests",
expect_d_eq(mallctl("stats.arenas.0.small.nrequests",
(void *)&nrequests, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
if (config_stats) {
assert_zu_gt(allocated, 0,
expect_zu_gt(allocated, 0,
"allocated should be greater than zero");
assert_u64_gt(nmalloc, 0,
expect_u64_gt(nmalloc, 0,
"nmalloc should be no greater than zero");
assert_u64_ge(nmalloc, ndalloc,
expect_u64_ge(nmalloc, ndalloc,
"nmalloc should be at least as large as ndalloc");
assert_u64_gt(nrequests, 0,
expect_u64_gt(nrequests, 0,
"nrequests should be greater than zero");
}
@ -193,27 +193,27 @@ TEST_BEGIN(test_stats_arenas_large) {
int expected = config_stats ? 0 : ENOENT;
p = mallocx((1U << SC_LG_LARGE_MINCLASS), MALLOCX_ARENA(0));
assert_ptr_not_null(p, "Unexpected mallocx() failure");
expect_ptr_not_null(p, "Unexpected mallocx() failure");
assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
0, "Unexpected mallctl() failure");
sz = sizeof(size_t);
assert_d_eq(mallctl("stats.arenas.0.large.allocated",
expect_d_eq(mallctl("stats.arenas.0.large.allocated",
(void *)&allocated, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
sz = sizeof(uint64_t);
assert_d_eq(mallctl("stats.arenas.0.large.nmalloc", (void *)&nmalloc,
expect_d_eq(mallctl("stats.arenas.0.large.nmalloc", (void *)&nmalloc,
&sz, NULL, 0), expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("stats.arenas.0.large.ndalloc", (void *)&ndalloc,
expect_d_eq(mallctl("stats.arenas.0.large.ndalloc", (void *)&ndalloc,
&sz, NULL, 0), expected, "Unexpected mallctl() result");
if (config_stats) {
assert_zu_gt(allocated, 0,
expect_zu_gt(allocated, 0,
"allocated should be greater than zero");
assert_u64_gt(nmalloc, 0,
expect_u64_gt(nmalloc, 0,
"nmalloc should be greater than zero");
assert_u64_ge(nmalloc, ndalloc,
expect_u64_ge(nmalloc, ndalloc,
"nmalloc should be at least as large as ndalloc");
}
@ -234,85 +234,85 @@ TEST_BEGIN(test_stats_arenas_bins) {
int expected = config_stats ? 0 : ENOENT;
/* Make sure allocation below isn't satisfied by tcache. */
assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
expect_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
opt_tcache ? 0 : EFAULT, "Unexpected mallctl() result");
unsigned arena_ind, old_arena_ind;
sz = sizeof(unsigned);
assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
0, "Arena creation failure");
sz = sizeof(arena_ind);
assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
expect_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
(void *)&arena_ind, sizeof(arena_ind)), 0,
"Unexpected mallctl() failure");
p = malloc(bin_infos[0].reg_size);
assert_ptr_not_null(p, "Unexpected malloc() failure");
expect_ptr_not_null(p, "Unexpected malloc() failure");
assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
expect_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
opt_tcache ? 0 : EFAULT, "Unexpected mallctl() result");
assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
0, "Unexpected mallctl() failure");
char cmd[128];
sz = sizeof(uint64_t);
gen_mallctl_str(cmd, "nmalloc", arena_ind);
assert_d_eq(mallctl(cmd, (void *)&nmalloc, &sz, NULL, 0), expected,
expect_d_eq(mallctl(cmd, (void *)&nmalloc, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
gen_mallctl_str(cmd, "ndalloc", arena_ind);
assert_d_eq(mallctl(cmd, (void *)&ndalloc, &sz, NULL, 0), expected,
expect_d_eq(mallctl(cmd, (void *)&ndalloc, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
gen_mallctl_str(cmd, "nrequests", arena_ind);
assert_d_eq(mallctl(cmd, (void *)&nrequests, &sz, NULL, 0), expected,
expect_d_eq(mallctl(cmd, (void *)&nrequests, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
sz = sizeof(size_t);
gen_mallctl_str(cmd, "curregs", arena_ind);
assert_d_eq(mallctl(cmd, (void *)&curregs, &sz, NULL, 0), expected,
expect_d_eq(mallctl(cmd, (void *)&curregs, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
sz = sizeof(uint64_t);
gen_mallctl_str(cmd, "nfills", arena_ind);
assert_d_eq(mallctl(cmd, (void *)&nfills, &sz, NULL, 0), expected,
expect_d_eq(mallctl(cmd, (void *)&nfills, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
gen_mallctl_str(cmd, "nflushes", arena_ind);
assert_d_eq(mallctl(cmd, (void *)&nflushes, &sz, NULL, 0), expected,
expect_d_eq(mallctl(cmd, (void *)&nflushes, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
gen_mallctl_str(cmd, "nslabs", arena_ind);
assert_d_eq(mallctl(cmd, (void *)&nslabs, &sz, NULL, 0), expected,
expect_d_eq(mallctl(cmd, (void *)&nslabs, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
gen_mallctl_str(cmd, "nreslabs", arena_ind);
assert_d_eq(mallctl(cmd, (void *)&nreslabs, &sz, NULL, 0), expected,
expect_d_eq(mallctl(cmd, (void *)&nreslabs, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
sz = sizeof(size_t);
gen_mallctl_str(cmd, "curslabs", arena_ind);
assert_d_eq(mallctl(cmd, (void *)&curslabs, &sz, NULL, 0), expected,
expect_d_eq(mallctl(cmd, (void *)&curslabs, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
gen_mallctl_str(cmd, "nonfull_slabs", arena_ind);
assert_d_eq(mallctl(cmd, (void *)&nonfull_slabs, &sz, NULL, 0),
expect_d_eq(mallctl(cmd, (void *)&nonfull_slabs, &sz, NULL, 0),
expected, "Unexpected mallctl() result");
if (config_stats) {
assert_u64_gt(nmalloc, 0,
expect_u64_gt(nmalloc, 0,
"nmalloc should be greater than zero");
assert_u64_ge(nmalloc, ndalloc,
expect_u64_ge(nmalloc, ndalloc,
"nmalloc should be at least as large as ndalloc");
assert_u64_gt(nrequests, 0,
expect_u64_gt(nrequests, 0,
"nrequests should be greater than zero");
assert_zu_gt(curregs, 0,
expect_zu_gt(curregs, 0,
"allocated should be greater than zero");
if (opt_tcache) {
assert_u64_gt(nfills, 0,
expect_u64_gt(nfills, 0,
"At least one fill should have occurred");
assert_u64_gt(nflushes, 0,
expect_u64_gt(nflushes, 0,
"At least one flush should have occurred");
}
assert_u64_gt(nslabs, 0,
expect_u64_gt(nslabs, 0,
"At least one slab should have been allocated");
assert_zu_gt(curslabs, 0,
expect_zu_gt(curslabs, 0,
"At least one slab should be currently allocated");
assert_zu_eq(nonfull_slabs, 0,
expect_zu_eq(nonfull_slabs, 0,
"slabs_nonfull should be empty");
}
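
Every read in the bins hunk above follows the same shape: format a per-arena stat name with the test file's `gen_mallctl_str()` helper, read it through `mallctl()`, and compare the return code against `expected`. A minimal sketch of that shape, assuming the helper plus the `expect_*` and `config_stats` definitions provided by the surrounding test file and harness (`read_bin_stat_u64` is a hypothetical name, not part of the patch):

```
/*
 * Sketch, not from the patch: read one uint64_t per-arena bin counter the
 * way the checks above do.
 */
static uint64_t
read_bin_stat_u64(const char *stat, unsigned arena_ind) {
	char cmd[128];
	uint64_t val = 0;
	size_t sz = sizeof(val);
	int expected = config_stats ? 0 : ENOENT;

	gen_mallctl_str(cmd, stat, arena_ind);
	expect_d_eq(mallctl(cmd, (void *)&val, &sz, NULL, 0), expected,
	    "Unexpected mallctl() result");
	return val;
}
```

The size_t counters in the same hunk (curregs, curslabs, nonfull_slabs) would need an analogous size_t variant, since the test resets `sz` before reading them.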
@ -327,33 +327,33 @@ TEST_BEGIN(test_stats_arenas_lextents) {
int expected = config_stats ? 0 : ENOENT;
sz = sizeof(size_t);
assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&hsize, &sz, NULL,
expect_d_eq(mallctl("arenas.lextent.0.size", (void *)&hsize, &sz, NULL,
0), 0, "Unexpected mallctl() failure");
p = mallocx(hsize, MALLOCX_ARENA(0));
assert_ptr_not_null(p, "Unexpected mallocx() failure");
expect_ptr_not_null(p, "Unexpected mallocx() failure");
assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
0, "Unexpected mallctl() failure");
sz = sizeof(uint64_t);
assert_d_eq(mallctl("stats.arenas.0.lextents.0.nmalloc",
expect_d_eq(mallctl("stats.arenas.0.lextents.0.nmalloc",
(void *)&nmalloc, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
assert_d_eq(mallctl("stats.arenas.0.lextents.0.ndalloc",
expect_d_eq(mallctl("stats.arenas.0.lextents.0.ndalloc",
(void *)&ndalloc, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
sz = sizeof(size_t);
assert_d_eq(mallctl("stats.arenas.0.lextents.0.curlextents",
expect_d_eq(mallctl("stats.arenas.0.lextents.0.curlextents",
(void *)&curlextents, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
if (config_stats) {
assert_u64_gt(nmalloc, 0,
expect_u64_gt(nmalloc, 0,
"nmalloc should be greater than zero");
assert_u64_ge(nmalloc, ndalloc,
expect_u64_ge(nmalloc, ndalloc,
"nmalloc should be at least as large as ndalloc");
assert_u64_gt(curlextents, 0,
expect_u64_gt(curlextents, 0,
"At least one extent should be currently allocated");
}

View File

@ -136,7 +136,7 @@ parser_tokenize(parser_t *parser) {
size_t token_line JEMALLOC_CC_SILENCE_INIT(1);
size_t token_col JEMALLOC_CC_SILENCE_INIT(0);
assert_zu_le(parser->pos, parser->len,
expect_zu_le(parser->pos, parser->len,
"Position is past end of buffer");
while (state != STATE_ACCEPT) {
@ -686,7 +686,7 @@ parser_parse_value(parser_t *parser) {
static bool
parser_parse_pair(parser_t *parser) {
assert_d_eq(parser->token.token_type, TOKEN_TYPE_STRING,
expect_d_eq(parser->token.token_type, TOKEN_TYPE_STRING,
"Pair should start with string");
if (parser_tokenize(parser)) {
return true;
@ -731,7 +731,7 @@ parser_parse_values(parser_t *parser) {
static bool
parser_parse_array(parser_t *parser) {
assert_d_eq(parser->token.token_type, TOKEN_TYPE_LBRACKET,
expect_d_eq(parser->token.token_type, TOKEN_TYPE_LBRACKET,
"Array should start with [");
if (parser_tokenize(parser)) {
return true;
@ -747,7 +747,7 @@ parser_parse_array(parser_t *parser) {
static bool
parser_parse_pairs(parser_t *parser) {
assert_d_eq(parser->token.token_type, TOKEN_TYPE_STRING,
expect_d_eq(parser->token.token_type, TOKEN_TYPE_STRING,
"Object should start with string");
if (parser_parse_pair(parser)) {
return true;
@ -782,7 +782,7 @@ parser_parse_pairs(parser_t *parser) {
static bool
parser_parse_object(parser_t *parser) {
assert_d_eq(parser->token.token_type, TOKEN_TYPE_LBRACE,
expect_d_eq(parser->token.token_type, TOKEN_TYPE_LBRACE,
"Object should start with {");
if (parser_tokenize(parser)) {
return true;
@ -899,9 +899,9 @@ TEST_BEGIN(test_json_parser) {
const char *input = invalid_inputs[i];
parser_t parser;
parser_init(&parser, false);
assert_false(parser_append(&parser, input),
expect_false(parser_append(&parser, input),
"Unexpected input appending failure");
assert_true(parser_parse(&parser),
expect_true(parser_parse(&parser),
"Unexpected parse success for input: %s", input);
parser_fini(&parser);
}
@ -910,9 +910,9 @@ TEST_BEGIN(test_json_parser) {
const char *input = valid_inputs[i];
parser_t parser;
parser_init(&parser, true);
assert_false(parser_append(&parser, input),
expect_false(parser_append(&parser, input),
"Unexpected input appending failure");
assert_false(parser_parse(&parser),
expect_false(parser_parse(&parser),
"Unexpected parse error for input: %s", input);
parser_fini(&parser);
}
@ -961,17 +961,17 @@ TEST_BEGIN(test_stats_print_json) {
break;
case 1: {
size_t sz = sizeof(arena_ind);
assert_d_eq(mallctl("arenas.create", (void *)&arena_ind,
expect_d_eq(mallctl("arenas.create", (void *)&arena_ind,
&sz, NULL, 0), 0, "Unexpected mallctl failure");
break;
} case 2: {
size_t mib[3];
size_t miblen = sizeof(mib)/sizeof(size_t);
assert_d_eq(mallctlnametomib("arena.0.destroy",
expect_d_eq(mallctlnametomib("arena.0.destroy",
mib, &miblen), 0,
"Unexpected mallctlnametomib failure");
mib[1] = arena_ind;
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL,
expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL,
0), 0, "Unexpected mallctlbymib failure");
break;
} default:
@ -983,7 +983,7 @@ TEST_BEGIN(test_stats_print_json) {
parser_init(&parser, true);
malloc_stats_print(write_cb, (void *)&parser, opts[j]);
assert_false(parser_parse(&parser),
expect_false(parser_parse(&parser),
"Unexpected parse error, opts=\"%s\"", opts[j]);
parser_fini(&parser);
}
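
The stats_print hunks drive the test's JSON parser through the same four calls each time: parser_init, parser_append, parser_parse, parser_fini. A hedged consolidation of that cycle (`check_input` is a hypothetical helper; `parser_t` and the `parser_*` functions live in the same test file, and the meaning of parser_init's second argument is not shown in these hunks):

```
/*
 * Sketch, not from the patch: run one input through the parser and check
 * the expected outcome, mirroring the valid/invalid-input loops above.
 */
static void
check_input(const char *input, bool init_flag, bool expect_parse_error) {
	parser_t parser;

	parser_init(&parser, init_flag);
	expect_false(parser_append(&parser, input),
	    "Unexpected input appending failure");
	if (expect_parse_error) {
		expect_true(parser_parse(&parser),
		    "Unexpected parse success for input: %s", input);
	} else {
		expect_false(parser_parse(&parser),
		    "Unexpected parse error for input: %s", input);
	}
	parser_fini(&parser);
}
```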

View File

@ -17,16 +17,16 @@ func_to_hook(int arg1, int arg2) {
TEST_BEGIN(unhooked_call) {
test_hooks_libc_hook = NULL;
hook_called = false;
assert_d_eq(3, func_to_hook(1, 2), "Hooking changed return value.");
assert_false(hook_called, "Nulling out hook didn't take.");
expect_d_eq(3, func_to_hook(1, 2), "Hooking changed return value.");
expect_false(hook_called, "Nulling out hook didn't take.");
}
TEST_END
TEST_BEGIN(hooked_call) {
test_hooks_libc_hook = &hook;
hook_called = false;
assert_d_eq(3, func_to_hook(1, 2), "Hooking changed return value.");
assert_true(hook_called, "Hook should have executed.");
expect_d_eq(3, func_to_hook(1, 2), "Hooking changed return value.");
expect_true(hook_called, "Hook should have executed.");
}
TEST_END

View File

@ -15,7 +15,7 @@ TEST_BEGIN(test_next_event_fast_roll_back) {
ITERATE_OVER_ALL_EVENTS
#undef E
void *p = malloc(16U);
assert_ptr_not_null(p, "malloc() failed");
expect_ptr_not_null(p, "malloc() failed");
free(p);
}
TEST_END
@ -37,7 +37,7 @@ TEST_BEGIN(test_next_event_fast_resume) {
ITERATE_OVER_ALL_EVENTS
#undef E
void *p = malloc(SC_LOOKUP_MAXCLASS);
assert_ptr_not_null(p, "malloc() failed");
expect_ptr_not_null(p, "malloc() failed");
free(p);
}
TEST_END
@ -50,7 +50,7 @@ TEST_BEGIN(test_event_rollback) {
while (count-- != 0) {
te_alloc_rollback(tsd, diff);
uint64_t thread_allocated_after = thread_allocated_get(tsd);
assert_u64_eq(thread_allocated - thread_allocated_after, diff,
expect_u64_eq(thread_allocated - thread_allocated_after, diff,
"thread event counters are not properly rolled back");
thread_allocated = thread_allocated_after;
}

View File

@ -11,16 +11,16 @@ TEST_BEGIN(test_ticker_tick) {
ticker_init(&ticker, NTICKS);
for (i = 0; i < NREPS; i++) {
for (j = 0; j < NTICKS; j++) {
assert_u_eq(ticker_read(&ticker), NTICKS - j,
expect_u_eq(ticker_read(&ticker), NTICKS - j,
"Unexpected ticker value (i=%d, j=%d)", i, j);
assert_false(ticker_tick(&ticker),
expect_false(ticker_tick(&ticker),
"Unexpected ticker fire (i=%d, j=%d)", i, j);
}
assert_u32_eq(ticker_read(&ticker), 0,
expect_u32_eq(ticker_read(&ticker), 0,
"Expected ticker depletion");
assert_true(ticker_tick(&ticker),
expect_true(ticker_tick(&ticker),
"Expected ticker fire (i=%d)", i);
assert_u32_eq(ticker_read(&ticker), NTICKS,
expect_u32_eq(ticker_read(&ticker), NTICKS,
"Expected ticker reset");
}
#undef NTICKS
@ -33,14 +33,14 @@ TEST_BEGIN(test_ticker_ticks) {
ticker_init(&ticker, NTICKS);
assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value");
assert_false(ticker_ticks(&ticker, NTICKS), "Unexpected ticker fire");
assert_u_eq(ticker_read(&ticker), 0, "Unexpected ticker value");
assert_true(ticker_ticks(&ticker, NTICKS), "Expected ticker fire");
assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value");
expect_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value");
expect_false(ticker_ticks(&ticker, NTICKS), "Unexpected ticker fire");
expect_u_eq(ticker_read(&ticker), 0, "Unexpected ticker value");
expect_true(ticker_ticks(&ticker, NTICKS), "Expected ticker fire");
expect_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value");
assert_true(ticker_ticks(&ticker, NTICKS + 1), "Expected ticker fire");
assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value");
expect_true(ticker_ticks(&ticker, NTICKS + 1), "Expected ticker fire");
expect_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value");
#undef NTICKS
}
TEST_END
@ -51,15 +51,15 @@ TEST_BEGIN(test_ticker_copy) {
ticker_init(&ta, NTICKS);
ticker_copy(&tb, &ta);
assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value");
assert_true(ticker_ticks(&tb, NTICKS + 1), "Expected ticker fire");
assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value");
expect_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value");
expect_true(ticker_ticks(&tb, NTICKS + 1), "Expected ticker fire");
expect_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value");
ticker_tick(&ta);
ticker_copy(&tb, &ta);
assert_u_eq(ticker_read(&tb), NTICKS - 1, "Unexpected ticker value");
assert_true(ticker_ticks(&tb, NTICKS), "Expected ticker fire");
assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value");
expect_u_eq(ticker_read(&tb), NTICKS - 1, "Unexpected ticker value");
expect_true(ticker_ticks(&tb, NTICKS), "Expected ticker fire");
expect_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value");
#undef NTICKS
}
TEST_END
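
Read together, the ticker expectations above describe the API under test: a ticker armed with ticker_init() counts down through ticker_tick()/ticker_ticks(), reports its remaining ticks through ticker_read(), and on exhaustion reports a fire (returning true) and re-arms; ticker_copy() clones the state. A hedged one-pass restatement of the contract those expectations encode (the `ticker_t` type name is assumed from jemalloc's ticker header, and 3 is an arbitrary small count standing in for NTICKS):

```
/* Sketch, not from the patch: the ticker contract in one pass. */
static void
ticker_contract_sketch(void) {
	ticker_t t;

	ticker_init(&t, 3);
	expect_u_eq(ticker_read(&t), 3, "Fresh ticker should be full");
	expect_false(ticker_ticks(&t, 3), "No fire while ticks remain");
	expect_u_eq(ticker_read(&t), 0, "Count should be depleted");
	expect_true(ticker_ticks(&t, 3), "Ticking an empty ticker should fire");
	expect_u_eq(ticker_read(&t), 3, "Firing should re-arm the ticker");
}
```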

View File

@ -10,7 +10,7 @@ static int data_cleanup_count;
void
data_cleanup(int *data) {
if (data_cleanup_count == 0) {
assert_x_eq(*data, MALLOC_TSD_TEST_DATA_INIT,
expect_x_eq(*data, MALLOC_TSD_TEST_DATA_INIT,
"Argument passed into cleanup function should match tsd "
"value");
}
@ -38,7 +38,7 @@ data_cleanup(int *data) {
if (reincarnate) {
void *p = mallocx(1, 0);
assert_ptr_not_null(p, "Unexpeced mallocx() failure");
expect_ptr_not_null(p, "Unexpeced mallocx() failure");
dallocx(p, 0);
}
}
@ -49,18 +49,18 @@ thd_start(void *arg) {
void *p;
tsd_t *tsd = tsd_fetch();
assert_x_eq(tsd_test_data_get(tsd), MALLOC_TSD_TEST_DATA_INIT,
expect_x_eq(tsd_test_data_get(tsd), MALLOC_TSD_TEST_DATA_INIT,
"Initial tsd get should return initialization value");
p = malloc(1);
assert_ptr_not_null(p, "Unexpected malloc() failure");
expect_ptr_not_null(p, "Unexpected malloc() failure");
tsd_test_data_set(tsd, d);
assert_x_eq(tsd_test_data_get(tsd), d,
expect_x_eq(tsd_test_data_get(tsd), d,
"After tsd set, tsd get should return value that was set");
d = 0;
assert_x_eq(tsd_test_data_get(tsd), (int)(uintptr_t)arg,
expect_x_eq(tsd_test_data_get(tsd), (int)(uintptr_t)arg,
"Resetting local data should have no effect on tsd");
tsd_test_callback_set(tsd, &data_cleanup);
@ -84,7 +84,7 @@ TEST_BEGIN(test_tsd_sub_thread) {
* We reincarnate twice in the data cleanup, so it should execute at
* least 3 times.
*/
assert_x_ge(data_cleanup_count, 3,
expect_x_ge(data_cleanup_count, 3,
"Cleanup function should have executed multiple times.");
}
TEST_END
@ -95,28 +95,28 @@ thd_start_reincarnated(void *arg) {
assert(tsd);
void *p = malloc(1);
assert_ptr_not_null(p, "Unexpected malloc() failure");
expect_ptr_not_null(p, "Unexpected malloc() failure");
/* Manually trigger reincarnation. */
assert_ptr_not_null(tsd_arena_get(tsd),
expect_ptr_not_null(tsd_arena_get(tsd),
"Should have tsd arena set.");
tsd_cleanup((void *)tsd);
assert_ptr_null(*tsd_arenap_get_unsafe(tsd),
expect_ptr_null(*tsd_arenap_get_unsafe(tsd),
"TSD arena should have been cleared.");
assert_u_eq(tsd_state_get(tsd), tsd_state_purgatory,
expect_u_eq(tsd_state_get(tsd), tsd_state_purgatory,
"TSD state should be purgatory\n");
free(p);
assert_u_eq(tsd_state_get(tsd), tsd_state_reincarnated,
expect_u_eq(tsd_state_get(tsd), tsd_state_reincarnated,
"TSD state should be reincarnated\n");
p = mallocx(1, MALLOCX_TCACHE_NONE);
assert_ptr_not_null(p, "Unexpected malloc() failure");
assert_ptr_null(*tsd_arenap_get_unsafe(tsd),
expect_ptr_not_null(p, "Unexpected malloc() failure");
expect_ptr_null(*tsd_arenap_get_unsafe(tsd),
"Should not have tsd arena set after reincarnation.");
free(p);
tsd_cleanup((void *)tsd);
assert_ptr_null(*tsd_arenap_get_unsafe(tsd),
expect_ptr_null(*tsd_arenap_get_unsafe(tsd),
"TSD arena should have been cleared after 2nd cleanup.");
return NULL;
@ -206,46 +206,46 @@ TEST_BEGIN(test_tsd_global_slow) {
* Spin-wait.
*/
}
assert_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
expect_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
tsd_global_slow_inc(tsd_tsdn(tsd));
free(mallocx(1, 0));
assert_false(tsd_fast(tsd), "");
expect_false(tsd_fast(tsd), "");
atomic_store_u32(&data.phase, 2, ATOMIC_SEQ_CST);
/* PHASE 3 */
while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 3) {
}
assert_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
expect_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
/* Increase again, so that we can test multiple fast/slow changes. */
tsd_global_slow_inc(tsd_tsdn(tsd));
atomic_store_u32(&data.phase, 4, ATOMIC_SEQ_CST);
free(mallocx(1, 0));
assert_false(tsd_fast(tsd), "");
expect_false(tsd_fast(tsd), "");
/* PHASE 5 */
while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 5) {
}
assert_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
expect_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
tsd_global_slow_dec(tsd_tsdn(tsd));
atomic_store_u32(&data.phase, 6, ATOMIC_SEQ_CST);
/* We only decreased once; things should still be slow. */
free(mallocx(1, 0));
assert_false(tsd_fast(tsd), "");
expect_false(tsd_fast(tsd), "");
/* PHASE 7 */
while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 7) {
}
assert_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
expect_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
tsd_global_slow_dec(tsd_tsdn(tsd));
atomic_store_u32(&data.phase, 8, ATOMIC_SEQ_CST);
/* We incremented and then decremented twice; we should be fast now. */
free(mallocx(1, 0));
assert_true(!originally_fast || tsd_fast(tsd), "");
expect_true(!originally_fast || tsd_fast(tsd), "");
/* PHASE 9 */
while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 9) {
}
assert_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
expect_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
thd_join(thd, NULL);
}

View File

@ -34,7 +34,7 @@ witness_depth_error_intercept(const witness_list_t *witnesses,
static int
witness_comp(const witness_t *a, void *oa, const witness_t *b, void *ob) {
assert_u_eq(a->rank, b->rank, "Witnesses should have equal rank");
expect_u_eq(a->rank, b->rank, "Witnesses should have equal rank");
assert(oa == (void *)a);
assert(ob == (void *)b);
@ -45,7 +45,7 @@ witness_comp(const witness_t *a, void *oa, const witness_t *b, void *ob) {
static int
witness_comp_reverse(const witness_t *a, void *oa, const witness_t *b,
void *ob) {
assert_u_eq(a->rank, b->rank, "Witnesses should have equal rank");
expect_u_eq(a->rank, b->rank, "Witnesses should have equal rank");
assert(oa == (void *)a);
assert(ob == (void *)b);
@ -121,9 +121,9 @@ TEST_BEGIN(test_witness_comp) {
witness_init(&c, "c", 1, witness_comp_reverse, &c);
witness_assert_not_owner(&witness_tsdn, &c);
assert_false(saw_lock_error, "Unexpected witness lock error");
expect_false(saw_lock_error, "Unexpected witness lock error");
witness_lock(&witness_tsdn, &c);
assert_true(saw_lock_error, "Expected witness lock error");
expect_true(saw_lock_error, "Expected witness lock error");
witness_unlock(&witness_tsdn, &c);
witness_assert_depth(&witness_tsdn, 1);
@ -131,9 +131,9 @@ TEST_BEGIN(test_witness_comp) {
witness_init(&d, "d", 1, NULL, NULL);
witness_assert_not_owner(&witness_tsdn, &d);
assert_false(saw_lock_error, "Unexpected witness lock error");
expect_false(saw_lock_error, "Unexpected witness lock error");
witness_lock(&witness_tsdn, &d);
assert_true(saw_lock_error, "Expected witness lock error");
expect_true(saw_lock_error, "Expected witness lock error");
witness_unlock(&witness_tsdn, &d);
witness_assert_depth(&witness_tsdn, 1);
@ -162,9 +162,9 @@ TEST_BEGIN(test_witness_reversal) {
witness_lock(&witness_tsdn, &b);
witness_assert_depth(&witness_tsdn, 1);
assert_false(saw_lock_error, "Unexpected witness lock error");
expect_false(saw_lock_error, "Unexpected witness lock error");
witness_lock(&witness_tsdn, &a);
assert_true(saw_lock_error, "Expected witness lock error");
expect_true(saw_lock_error, "Expected witness lock error");
witness_unlock(&witness_tsdn, &a);
witness_assert_depth(&witness_tsdn, 1);
@ -195,11 +195,11 @@ TEST_BEGIN(test_witness_recursive) {
witness_init(&a, "a", 1, NULL, NULL);
witness_lock(&witness_tsdn, &a);
assert_false(saw_lock_error, "Unexpected witness lock error");
assert_false(saw_not_owner_error, "Unexpected witness not owner error");
expect_false(saw_lock_error, "Unexpected witness lock error");
expect_false(saw_not_owner_error, "Unexpected witness not owner error");
witness_lock(&witness_tsdn, &a);
assert_true(saw_lock_error, "Expected witness lock error");
assert_true(saw_not_owner_error, "Expected witness not owner error");
expect_true(saw_lock_error, "Expected witness lock error");
expect_true(saw_not_owner_error, "Expected witness not owner error");
witness_unlock(&witness_tsdn, &a);
@ -225,9 +225,9 @@ TEST_BEGIN(test_witness_unlock_not_owned) {
witness_init(&a, "a", 1, NULL, NULL);
assert_false(saw_owner_error, "Unexpected owner error");
expect_false(saw_owner_error, "Unexpected owner error");
witness_unlock(&witness_tsdn, &a);
assert_true(saw_owner_error, "Expected owner error");
expect_true(saw_owner_error, "Expected owner error");
witness_assert_lockless(&witness_tsdn);
@ -250,14 +250,14 @@ TEST_BEGIN(test_witness_depth) {
witness_init(&a, "a", 1, NULL, NULL);
assert_false(saw_depth_error, "Unexpected depth error");
expect_false(saw_depth_error, "Unexpected depth error");
witness_assert_lockless(&witness_tsdn);
witness_assert_depth(&witness_tsdn, 0);
witness_lock(&witness_tsdn, &a);
witness_assert_lockless(&witness_tsdn);
witness_assert_depth(&witness_tsdn, 0);
assert_true(saw_depth_error, "Expected depth error");
expect_true(saw_depth_error, "Expected depth error");
witness_unlock(&witness_tsdn, &a);

View File

@ -8,21 +8,21 @@ test_zero(size_t sz_min, size_t sz_max) {
sz_prev = 0;
s = (uint8_t *)mallocx(sz_min, 0);
assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");
expect_ptr_not_null((void *)s, "Unexpected mallocx() failure");
for (sz = sallocx(s, 0); sz <= sz_max;
sz_prev = sz, sz = sallocx(s, 0)) {
if (sz_prev > 0) {
assert_u_eq(s[0], MAGIC,
expect_u_eq(s[0], MAGIC,
"Previously allocated byte %zu/%zu is corrupted",
ZU(0), sz_prev);
assert_u_eq(s[sz_prev-1], MAGIC,
expect_u_eq(s[sz_prev-1], MAGIC,
"Previously allocated byte %zu/%zu is corrupted",
sz_prev-1, sz_prev);
}
for (i = sz_prev; i < sz; i++) {
assert_u_eq(s[i], 0x0,
expect_u_eq(s[i], 0x0,
"Newly allocated byte %zu/%zu isn't zero-filled",
i, sz);
s[i] = MAGIC;
@ -30,7 +30,7 @@ test_zero(size_t sz_min, size_t sz_max) {
if (xallocx(s, sz+1, 0, 0) == sz) {
s = (uint8_t *)rallocx(s, sz+1, 0);
assert_ptr_not_null((void *)s,
expect_ptr_not_null((void *)s,
"Unexpected rallocx() failure");
}
}

View File

@ -12,9 +12,9 @@ TEST_BEGIN(test_realloc_abort) {
abort_called = false;
safety_check_set_abort(&set_abort_called);
void *ptr = mallocx(42, 0);
assert_ptr_not_null(ptr, "Unexpected mallocx error");
expect_ptr_not_null(ptr, "Unexpected mallocx error");
ptr = realloc(ptr, 0);
assert_true(abort_called, "Realloc with zero size didn't abort");
expect_true(abort_called, "Realloc with zero size didn't abort");
}
TEST_END

View File

@ -7,20 +7,20 @@ deallocated() {
}
uint64_t deallocated;
size_t sz = sizeof(deallocated);
assert_d_eq(mallctl("thread.deallocated", (void *)&deallocated, &sz,
expect_d_eq(mallctl("thread.deallocated", (void *)&deallocated, &sz,
NULL, 0), 0, "Unexpected mallctl failure");
return deallocated;
}
TEST_BEGIN(test_realloc_free) {
void *ptr = mallocx(42, 0);
assert_ptr_not_null(ptr, "Unexpected mallocx error");
expect_ptr_not_null(ptr, "Unexpected mallocx error");
uint64_t deallocated_before = deallocated();
ptr = realloc(ptr, 0);
uint64_t deallocated_after = deallocated();
assert_ptr_null(ptr, "Realloc didn't free");
expect_ptr_null(ptr, "Realloc didn't free");
if (config_stats) {
assert_u64_gt(deallocated_after, deallocated_before,
expect_u64_gt(deallocated_after, deallocated_before,
"Realloc didn't free");
}
}

View File

@ -7,7 +7,7 @@ allocated() {
}
uint64_t allocated;
size_t sz = sizeof(allocated);
assert_d_eq(mallctl("thread.allocated", (void *)&allocated, &sz, NULL,
expect_d_eq(mallctl("thread.allocated", (void *)&allocated, &sz, NULL,
0), 0, "Unexpected mallctl failure");
return allocated;
}
@ -19,23 +19,23 @@ deallocated() {
}
uint64_t deallocated;
size_t sz = sizeof(deallocated);
assert_d_eq(mallctl("thread.deallocated", (void *)&deallocated, &sz,
expect_d_eq(mallctl("thread.deallocated", (void *)&deallocated, &sz,
NULL, 0), 0, "Unexpected mallctl failure");
return deallocated;
}
TEST_BEGIN(test_realloc_strict) {
void *ptr = mallocx(1, 0);
assert_ptr_not_null(ptr, "Unexpected mallocx error");
expect_ptr_not_null(ptr, "Unexpected mallocx error");
uint64_t allocated_before = allocated();
uint64_t deallocated_before = deallocated();
ptr = realloc(ptr, 0);
uint64_t allocated_after = allocated();
uint64_t deallocated_after = deallocated();
if (config_stats) {
assert_u64_lt(allocated_before, allocated_after,
expect_u64_lt(allocated_before, allocated_after,
"Unexpected stats change");
assert_u64_lt(deallocated_before, deallocated_after,
expect_u64_lt(deallocated_before, deallocated_after,
"Unexpected stats change");
}
dallocx(ptr, 0);

View File

@ -8,7 +8,7 @@ zero_reallocs() {
size_t count = 12345;
size_t sz = sizeof(count);
assert_d_eq(mallctl("stats.zero_reallocs", (void *)&count, &sz,
expect_d_eq(mallctl("stats.zero_reallocs", (void *)&count, &sz,
NULL, 0), 0, "Unexpected mallctl failure");
return count;
}
@ -18,13 +18,13 @@ TEST_BEGIN(test_zero_reallocs) {
for (size_t i = 0; i < 100; ++i) {
void *ptr = mallocx(i * i + 1, 0);
assert_ptr_not_null(ptr, "Unexpected mallocx error");
expect_ptr_not_null(ptr, "Unexpected mallocx error");
size_t count = zero_reallocs();
assert_zu_eq(i, count, "Incorrect zero realloc count");
expect_zu_eq(i, count, "Incorrect zero realloc count");
ptr = realloc(ptr, 0);
assert_ptr_null(ptr, "Realloc didn't free");
expect_ptr_null(ptr, "Realloc didn't free");
count = zero_reallocs();
assert_zu_eq(i + 1, count, "Realloc didn't adjust count");
expect_zu_eq(i + 1, count, "Realloc didn't adjust count");
}
}
TEST_END
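
The final file's loop is split across several hunks; pieced together, each iteration allocates, notes the stats.zero_reallocs counter via the zero_reallocs() helper shown above, frees through realloc(ptr, 0), and expects the counter to advance by one. A hedged single-iteration sketch (`check_one_zero_realloc` is a hypothetical name; the free-and-return-NULL behavior of realloc(ptr, 0) reflects whatever configuration this test runs under, as the expectations above assume):

```
/* Sketch, not from the patch: one iteration of the accounting check above. */
static void
check_one_zero_realloc(size_t usize) {
	void *ptr = mallocx(usize, 0);	/* usize must be nonzero */
	expect_ptr_not_null(ptr, "Unexpected mallocx error");

	size_t before = zero_reallocs();
	ptr = realloc(ptr, 0);		/* frees under this test's config */
	expect_ptr_null(ptr, "Realloc didn't free");
	expect_zu_eq(before + 1, zero_reallocs(),
	    "Realloc didn't adjust count");
}
```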