#include "test/jemalloc_test.h"
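
/*
 * Test parameters: number of worker threads, allocations per thread, how
 * often to trigger a heap profile dump, and how often to verify that the
 * cumulative backtrace count is still growing.
 */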
#define NTHREADS 4
#define NALLOCS_PER_THREAD 50
#define DUMP_INTERVAL 1
#define BT_COUNT_CHECK_INTERVAL 5
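
/*
 * Configure profiling via malloc_conf: enable the profiler with cumulative
 * (accumulated) counts, leave it inactive until the test switches it on via
 * the "prof.active" mallctl, and sample every allocation (lg_prof_sample:0).
 */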
#ifdef JEMALLOC_PROF
const char *malloc_conf =
    "prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0";
#endif
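
/*
 * Intercept profile dump file creation and redirect the output to /dev/null,
 * so that the frequent "prof.dump" requests issued below do not leave files
 * behind.
 */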
static int
prof_dump_open_intercept(bool propagate_err, const char *filename)
{
	int fd;

	fd = open("/dev/null", O_WRONLY);
	assert_d_ne(fd, -1, "Unexpected open() failure");

	return (fd);
}
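
/*
 * alloc_0() and alloc_1() are mutually recursive: each bit of the "bits"
 * argument selects which of the two functions makes the next call, so the
 * final mallocx() occurs under a call chain that is unique to the bit
 * pattern.  JEMALLOC_NOINLINE and the post-call assertion (which defeats
 * tail call optimization) keep every frame visible to the profiler.
 */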
#define alloc_n_proto(n)						\
JEMALLOC_NOINLINE static void *alloc_##n(unsigned bits);

#define alloc_n_gen(n)							\
JEMALLOC_NOINLINE static void *						\
alloc_##n(unsigned bits)						\
{									\
	void *p;							\
									\
	if (bits == 0)							\
		p = mallocx(1, 0);					\
	else {								\
		switch (bits & 0x1U) {					\
		case 0:							\
			p = alloc_0(bits >> 1);				\
			break;						\
		case 1:							\
			p = alloc_1(bits >> 1);				\
			break;						\
		default: not_reached();					\
		}							\
	}								\
	/* Intentionally sabotage tail call optimization. */		\
	assert_ptr_not_null(p, "Unexpected mallocx() failure");	\
	return (p);							\
}
alloc_n_proto(0)
alloc_n_proto(1)
alloc_n_gen(0)
alloc_n_gen(1)
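
/*
 * Encode (thd_ind, iteration) into a bit pattern that is unique across all
 * threads and iterations, so that every allocation in the test is made from
 * a distinct backtrace.
 */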
static void *
alloc_from_permuted_backtrace(unsigned thd_ind, unsigned iteration)
{

	return (alloc_0(thd_ind*NALLOCS_PER_THREAD + iteration));
}
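
/*
 * Worker thread: repeatedly allocate from a distinct backtrace and
 * immediately free, dumping the heap profile every DUMP_INTERVAL iterations
 * and periodically asserting that the global backtrace count keeps growing;
 * with prof_accum enabled, backtrace records are expected to persist even
 * after the corresponding objects have been freed.
 */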
static void *
thd_start(void *varg)
{
	unsigned thd_ind = *(unsigned *)varg;
	size_t bt_count_prev, bt_count;
	unsigned i_prev, i;

	i_prev = 0;
	bt_count_prev = 0;
	for (i = 0; i < NALLOCS_PER_THREAD; i++) {
		void *p = alloc_from_permuted_backtrace(thd_ind, i);
		dallocx(p, 0);
		if (i % DUMP_INTERVAL == 0) {
			assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
			    0, "Unexpected error while dumping heap profile");
		}

		if (i % BT_COUNT_CHECK_INTERVAL == 0 ||
		    i+1 == NALLOCS_PER_THREAD) {
			bt_count = prof_bt_count();
			assert_zu_le(bt_count_prev+(i-i_prev), bt_count,
			    "Expected larger backtrace count increase");
			i_prev = i;
			bt_count_prev = bt_count;
		}
	}

	return (NULL);
}
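
/*
 * Activate profiling, install the dump interceptor, and run NTHREADS threads
 * concurrently to exercise backtrace accumulation and repeated profile
 * dumping.
 */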
TEST_BEGIN(test_idump)
{
	bool active;
	thd_t thds[NTHREADS];
	unsigned thd_args[NTHREADS];
	unsigned i;

	test_skip_if(!config_prof);

	active = true;
	assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
	    0, "Unexpected mallctl failure while activating profiling");

	prof_dump_open = prof_dump_open_intercept;

	for (i = 0; i < NTHREADS; i++) {
		thd_args[i] = i;
		thd_create(&thds[i], thd_start, (void *)&thd_args[i]);
	}
	for (i = 0; i < NTHREADS; i++)
		thd_join(thds[i], NULL);
}
TEST_END

int
main(void)
{

	return (test(
	    test_idump));
}