Implement thread-aware allocation event tracing.

Jason Evans 2009-12-29 00:09:15 -08:00
parent 90895cf859
commit 569432cffd
8 changed files with 580 additions and 259 deletions

View File

@@ -37,8 +37,7 @@ any of the following arguments (not a definitive list) to 'configure':
--enable-stats
Enable statistics gathering functionality. Use the 'P' option to print
-detailed allocation statistics at exit, and/or the 'U' option to print a
-detailed allocation trace log.
detailed allocation statistics at exit.
--disable-tiny
Disable tiny (sub-quantum-sized) object support. Technically it is not

View File

@@ -312,6 +312,28 @@ else
fi
AC_SUBST([roff_stats])
dnl Do not enable tracing by default.
AC_ARG_ENABLE([trace],
[AS_HELP_STRING([--enable-trace], [Enable allocation tracing (logging)])],
[if test "x$enable_trace" = "xno" ; then
enable_trace="0"
else
enable_trace="1"
fi
],
[enable_trace="0"]
)
if test "x$enable_trace" = "x1" ; then
AC_DEFINE([JEMALLOC_TRACE], [ ])
fi
AC_SUBST([enable_trace])
if test "x$enable_trace" = "x0" ; then
roff_trace=".\\\" "
else
roff_trace=""
fi
AC_SUBST([roff_trace])
dnl Enable tiny allocations by default.
AC_ARG_ENABLE([tiny],
[AS_HELP_STRING([--disable-tiny], [Disable tiny (sub-quantum) allocations])],
@@ -567,7 +589,7 @@ AC_SUBST([enable_lazy_lock])
dnl ============================================================================
dnl Configure libgd for mtrgraph.
-bins="${objroot}bin/mtrplay"
bins="${objroot}bin/jemtr2mtr ${objroot}bin/mtrplay"
GDLIBS=""
have_libgd="yes"
@@ -629,6 +651,7 @@ AC_MSG_RESULT([JEMALLOC_PREFIX : ${JEMALLOC_PREFIX}])
AC_MSG_RESULT([autogen : ${enable_autogen}])
AC_MSG_RESULT([debug : ${enable_debug}])
AC_MSG_RESULT([stats : ${enable_stats}])
AC_MSG_RESULT([trace : ${enable_trace}])
AC_MSG_RESULT([tiny : ${enable_tiny}])
AC_MSG_RESULT([mag : ${enable_mag}])
AC_MSG_RESULT([balance : ${enable_balance}])

View File

@@ -290,10 +290,21 @@ The default value is 128 bytes.
@roff_mag@@roff_tls@to acquire and release objects in bulk.
@roff_mag@@roff_tls@Increasing the magazine size decreases locking overhead, at
@roff_mag@@roff_tls@the expense of increased memory usage.
-@roff_stats@.It U
-@roff_stats@Generate a verbose trace log via
-@roff_stats@.Fn @jemalloc_prefix@malloc_message
-@roff_stats@for all allocation operations.
@roff_trace@.It T
@roff_trace@Write a verbose trace log to a set of files named according to the
@roff_trace@pattern
@roff_trace@.Pa jemtr.<pid>.<arena>
@roff_trace@for all allocation operations.
@roff_trace@The result can be converted from
@roff_trace@.Nm jemtr
@roff_trace@to
@roff_trace@.Nm mtr
@roff_trace@format via
@roff_trace@.Xr jemtr2mtr 1 ,
@roff_trace@the output of which can be used by
@roff_trace@.Xr mtrplay 1
@roff_trace@and
@roff_trace@.Xr mtrgraph 1 .
@roff_sysv@.It V
@roff_sysv@Attempting to allocate zero bytes will return a
@roff_sysv@.Dv NULL
@@ -555,6 +566,9 @@ on calls to these functions:
@jemalloc_prefix@malloc_options = "X";
.Ed
.Sh SEE ALSO
.Xr mtrgraph 1 ,
.Xr mtrplay 1 ,
.Xr jemtr2mtr 1 ,
.Xr madvise 2 ,
.Xr mmap 2 ,
.Xr sbrk 2 ,
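
For reference (not part of the diff): the new T flag can also be enabled at run time the same way the manual's existing example sets the X flag, by assigning to the option string before the first allocation. A minimal sketch, assuming a library built with --enable-trace; the symbol name carries whatever prefix was configured:

    @jemalloc_prefix@malloc_options = "T";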

View File

@@ -634,6 +634,13 @@ struct arena_s {
arena_stats_t stats;
#endif
#ifdef JEMALLOC_TRACE
# define TRACE_BUF_SIZE 65536
unsigned trace_buf_end;
char trace_buf[TRACE_BUF_SIZE];
int trace_fd;
#endif
/* Tree of dirty-page-containing chunks this arena manages. */
arena_chunk_tree_t chunks_dirty;
@@ -752,6 +759,13 @@ static bool isthreaded = false;
/* Number of CPUs. */
static unsigned ncpus;
#ifdef JEMALLOC_TRACE
static malloc_mutex_t trace_mtx;
static unsigned trace_next_tid = 1;
static unsigned __thread trace_tid;
#endif
/*
* Page size. STATIC_PAGE_SHIFT is determined by the configure script. If
* DYNAMIC_PAGE_SHIFT is enabled, only use the STATIC_PAGE_* macros where
@@ -1076,8 +1090,8 @@ static bool opt_print_stats = false;
static size_t opt_qspace_max_2pow = QSPACE_MAX_2POW_DEFAULT;
static size_t opt_cspace_max_2pow = CSPACE_MAX_2POW_DEFAULT;
static size_t opt_chunk_2pow = CHUNK_2POW_DEFAULT;
-#ifdef JEMALLOC_STATS
-static bool opt_utrace = false;
#ifdef JEMALLOC_TRACE
static bool opt_trace = false;
#endif
#ifdef JEMALLOC_SYSV
static bool opt_sysv = false;
@@ -1090,25 +1104,6 @@ static bool opt_zero = false;
#endif
static int opt_narenas_lshift = 0;
-#ifdef JEMALLOC_STATS
-typedef struct {
-void *p;
-size_t s;
-void *r;
-} malloc_utrace_t;
-#define UTRACE(a, b, c) \
-if (opt_utrace) { \
-malloc_utrace_t ut; \
-ut.p = (a); \
-ut.s = (b); \
-ut.r = (c); \
-utrace(&ut, sizeof(ut)); \
-}
-#else
-#define UTRACE(a, b, c)
-#endif
/******************************************************************************/
/*
* Begin function prototypes for non-inline static functions.
@@ -1121,7 +1116,7 @@ static void wrtmessage(const char *p1, const char *p2, const char *p3,
#ifdef JEMALLOC_STATS
static void malloc_printf(const char *format, ...);
#endif
-static char *umax2s(uintmax_t x, char *s);
static char *umax2s(uintmax_t x, unsigned base, char *s);
#ifdef JEMALLOC_DSS
static bool base_pages_alloc_dss(size_t minsize);
#endif
@@ -1187,7 +1182,7 @@ static bool arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk,
void *ptr, size_t size, size_t oldsize);
static bool arena_ralloc_large(void *ptr, size_t size, size_t oldsize);
static void *arena_ralloc(void *ptr, size_t size, size_t oldsize);
-static bool arena_new(arena_t *arena);
static bool arena_new(arena_t *arena, unsigned ind);
static arena_t *arenas_extend(unsigned ind);
#ifdef JEMALLOC_MAG
static mag_t *mag_create(arena_t *arena, size_t binind);
@@ -1199,6 +1194,22 @@ static void *huge_malloc(size_t size, bool zero);
static void *huge_palloc(size_t alignment, size_t size);
static void *huge_ralloc(void *ptr, size_t size, size_t oldsize);
static void huge_dalloc(void *ptr);
#ifdef JEMALLOC_TRACE
static arena_t *trace_arena(const void *ptr);
static void trace_flush(arena_t *arena);
static void trace_write(arena_t *arena, const char *s);
static unsigned trace_get_tid(void);
static void malloc_trace_flush_all(void);
static void trace_malloc(const void *ptr, size_t size);
static void trace_calloc(const void *ptr, size_t number, size_t size);
static void trace_posix_memalign(const void *ptr, size_t alignment,
size_t size);
static void trace_realloc(const void *ptr, const void *old_ptr,
size_t size, size_t old_size);
static void trace_free(const void *ptr, size_t size);
static void trace_malloc_usable_size(size_t size, const void *ptr);
static void trace_thread_exit(void);
#endif
static void malloc_print_stats(void);
#ifdef JEMALLOC_DEBUG
static void size2bin_validate(void);
@@ -1236,19 +1247,36 @@ void (*malloc_message)(const char *p1, const char *p2, const char *p3,
* integer printing functionality, so that malloc_printf() use can be limited to
* JEMALLOC_STATS code.
*/
-#define UMAX2S_BUFSIZE 21
#define UMAX2S_BUFSIZE 65
static char *
-umax2s(uintmax_t x, char *s)
umax2s(uintmax_t x, unsigned base, char *s)
{
unsigned i;
i = UMAX2S_BUFSIZE - 1;
s[i] = '\0';
switch (base) {
case 10:
do {
i--;
s[i] = "0123456789"[x % 10];
x /= 10;
} while (x > 0);
break;
case 16:
do {
i--;
s[i] = "0123456789abcdef"[x & 0xf];
x >>= 4;
} while (x > 0);
break;
default:
do {
i--;
s[i] = "0123456789abcdefghijklmnopqrstuvwxyz"[x % base];
x /= base;
} while (x > 0);
}
return (&s[i]);
}
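
A quick illustration of the reworked helper (a sketch, not part of the diff): the new base argument lets the same UMAX2S_BUFSIZE-sized buffer format both the decimal values (sizes, thread ids) and the hexadecimal values (pointers) that the trace writers below emit.

    char buf[UMAX2S_BUFSIZE];

    /* Decimal, as used for sizes and thread ids: prints "4096". */
    malloc_message("size: ", umax2s(4096, 10, buf), "\n", "");
    /* Hexadecimal, as used for pointers: prints "ff". */
    malloc_message("hex: 0x", umax2s(255, 16, buf), "\n", "");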
@@ -1262,7 +1290,7 @@ umax2s(uintmax_t x, char *s)
if (!(e)) { \
char line_buf[UMAX2S_BUFSIZE]; \
malloc_message("<jemalloc>: ", __FILE__, ":", \
-umax2s(__LINE__, line_buf)); \
umax2s(__LINE__, 10, line_buf)); \
malloc_message(": Failed assertion: ", "\"", #e, \
"\"\n"); \
abort(); \
@@ -1272,31 +1300,6 @@ umax2s(uintmax_t x, char *s)
#define assert(e)
#endif
-#ifdef JEMALLOC_STATS
-static int
-utrace(const void *addr, size_t len)
-{
-malloc_utrace_t *ut = (malloc_utrace_t *)addr;
-assert(len == sizeof(malloc_utrace_t));
-if (ut->p == NULL && ut->s == 0 && ut->r == NULL)
-malloc_printf("<jemalloc>:utrace: %d malloc_init()\n",
-getpid());
-else if (ut->p == NULL && ut->r != NULL) {
-malloc_printf("<jemalloc>:utrace: %d %p = malloc(%zu)\n",
-getpid(), ut->r, ut->s);
-} else if (ut->p != NULL && ut->r != NULL) {
-malloc_printf("<jemalloc>:utrace: %d %p = realloc(%p, %zu)\n",
-getpid(), ut->r, ut->p, ut->s);
-} else
-malloc_printf("<jemalloc>:utrace: %d free(%p)\n", getpid(),
-ut->p);
-return (0);
-}
-#endif
#ifdef JEMALLOC_STATS
/*
* Print to stderr in such a way as to (hopefully) avoid memory allocation.
@@ -4216,7 +4219,7 @@ iralloc(void *ptr, size_t size)
}
static bool
-arena_new(arena_t *arena)
arena_new(arena_t *arena, unsigned ind)
{
unsigned i;
arena_bin_t *bin;
@@ -4229,6 +4232,49 @@ arena_new(arena_t *arena)
memset(&arena->stats, 0, sizeof(arena_stats_t));
#endif
#ifdef JEMALLOC_TRACE
if (opt_trace) {
/* "jemtr.<pid>.<arena>" */
char buf[UMAX2S_BUFSIZE];
char filename[6 + UMAX2S_BUFSIZE + 1 + UMAX2S_BUFSIZE + 1];
char *s;
unsigned i, slen;
arena->trace_buf_end = 0;
i = 0;
s = "jemtr.";
slen = strlen(s);
memcpy(&filename[i], s, slen);
i += slen;
s = umax2s(getpid(), 10, buf);
slen = strlen(s);
memcpy(&filename[i], s, slen);
i += slen;
s = ".";
slen = strlen(s);
memcpy(&filename[i], s, slen);
i += slen;
s = umax2s(ind, 10, buf);
slen = strlen(s);
memcpy(&filename[i], s, slen);
i += slen;
filename[i] = '\0';
arena->trace_fd = creat(filename, 0644);
if (arena->trace_fd == -1) {
malloc_message("<jemalloc>",
": creat(\"", filename, "\", O_RDWR) failed\n");
abort();
}
}
#endif
/* Initialize chunks. */
arena_chunk_tree_dirty_new(&arena->chunks_dirty);
arena->spare = NULL;
@@ -4325,7 +4371,7 @@ arenas_extend(unsigned ind)
/* Allocate enough space for trailing bins. */
ret = (arena_t *)base_alloc(sizeof(arena_t)
+ (sizeof(arena_bin_t) * (nbins - 1)));
-if (ret != NULL && arena_new(ret) == false) {
if (ret != NULL && arena_new(ret, ind) == false) {
arenas[ind] = ret;
return (ret);
}
@@ -4643,14 +4689,229 @@ huge_dalloc(void *ptr)
base_node_dealloc(node);
}
#ifdef JEMALLOC_TRACE
static arena_t *
trace_arena(const void *ptr)
{
arena_t *arena;
arena_chunk_t *chunk;
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if ((void *)chunk == ptr)
arena = arenas[0];
else
arena = chunk->arena;
return (arena);
}
static void
trace_flush(arena_t *arena)
{
ssize_t err;
err = write(arena->trace_fd, arena->trace_buf, arena->trace_buf_end);
if (err == -1) {
malloc_message("<jemalloc>",
": write() failed during trace flush", "\n", "");
abort();
}
arena->trace_buf_end = 0;
}
static void
trace_write(arena_t *arena, const char *s)
{
unsigned i, slen, n;
i = 0;
slen = strlen(s);
while (i < slen) {
/* Flush the trace buffer if it is full. */
if (arena->trace_buf_end == TRACE_BUF_SIZE)
trace_flush(arena);
if (arena->trace_buf_end + slen <= TRACE_BUF_SIZE) {
/* Finish writing. */
n = slen - i;
} else {
/* Write as much of s as will fit. */
n = TRACE_BUF_SIZE - arena->trace_buf_end;
}
memcpy(&arena->trace_buf[arena->trace_buf_end], &s[i], n);
arena->trace_buf_end += n;
i += n;
}
}
static unsigned
trace_get_tid(void)
{
unsigned ret = trace_tid;
if (ret == 0) {
malloc_mutex_lock(&trace_mtx);
trace_tid = trace_next_tid;
trace_next_tid++;
malloc_mutex_unlock(&trace_mtx);
ret = trace_tid;
}
return (ret);
}
static void
malloc_trace_flush_all(void)
{
unsigned i;
for (i = 0; i < narenas; i++) {
if (arenas[i] != NULL) {
malloc_spin_lock(&arenas[i]->lock);
trace_flush(arenas[i]);
malloc_spin_unlock(&arenas[i]->lock);
}
}
}
static void
trace_malloc(const void *ptr, size_t size)
{
char buf[UMAX2S_BUFSIZE];
arena_t *arena = trace_arena(ptr);
malloc_spin_lock(&arena->lock);
trace_write(arena, umax2s(trace_get_tid(), 10, buf));
trace_write(arena, " m 0x");
trace_write(arena, umax2s((uintptr_t)ptr, 16, buf));
trace_write(arena, " ");
trace_write(arena, umax2s(size, 10, buf));
trace_write(arena, "\n");
malloc_spin_unlock(&arena->lock);
}
static void
trace_calloc(const void *ptr, size_t number, size_t size)
{
char buf[UMAX2S_BUFSIZE];
arena_t *arena = trace_arena(ptr);
malloc_spin_lock(&arena->lock);
trace_write(arena, umax2s(trace_get_tid(), 10, buf));
trace_write(arena, " c 0x");
trace_write(arena, umax2s((uintptr_t)ptr, 16, buf));
trace_write(arena, " ");
trace_write(arena, umax2s(number, 10, buf));
trace_write(arena, " ");
trace_write(arena, umax2s(size, 10, buf));
trace_write(arena, "\n");
malloc_spin_unlock(&arena->lock);
}
static void
trace_posix_memalign(const void *ptr, size_t alignment, size_t size)
{
char buf[UMAX2S_BUFSIZE];
arena_t *arena = trace_arena(ptr);
malloc_spin_lock(&arena->lock);
trace_write(arena, umax2s(trace_get_tid(), 10, buf));
trace_write(arena, " a 0x");
trace_write(arena, umax2s((uintptr_t)ptr, 16, buf));
trace_write(arena, " ");
trace_write(arena, umax2s(alignment, 10, buf));
trace_write(arena, " ");
trace_write(arena, umax2s(size, 10, buf));
trace_write(arena, "\n");
malloc_spin_unlock(&arena->lock);
}
static void
trace_realloc(const void *ptr, const void *old_ptr, size_t size,
size_t old_size)
{
char buf[UMAX2S_BUFSIZE];
arena_t *arena = trace_arena(ptr);
malloc_spin_lock(&arena->lock);
trace_write(arena, umax2s(trace_get_tid(), 10, buf));
trace_write(arena, " r 0x");
trace_write(arena, umax2s((uintptr_t)ptr, 16, buf));
trace_write(arena, " 0x");
trace_write(arena, umax2s((uintptr_t)old_ptr, 16, buf));
trace_write(arena, " ");
trace_write(arena, umax2s(size, 10, buf));
trace_write(arena, " ");
trace_write(arena, umax2s(old_size, 10, buf));
trace_write(arena, "\n");
malloc_spin_unlock(&arena->lock);
}
static void
trace_free(const void *ptr, size_t size)
{
char buf[UMAX2S_BUFSIZE];
arena_t *arena = trace_arena(ptr);
malloc_spin_lock(&arena->lock);
trace_write(arena, umax2s(trace_get_tid(), 10, buf));
trace_write(arena, " f 0x");
trace_write(arena, umax2s((uintptr_t)ptr, 16, buf));
trace_write(arena, " ");
trace_write(arena, umax2s(isalloc(ptr), 10, buf));
trace_write(arena, "\n");
malloc_spin_unlock(&arena->lock);
}
static void
trace_malloc_usable_size(size_t size, const void *ptr)
{
char buf[UMAX2S_BUFSIZE];
arena_t *arena = trace_arena(ptr);
malloc_spin_lock(&arena->lock);
trace_write(arena, umax2s(trace_get_tid(), 10, buf));
trace_write(arena, " s ");
trace_write(arena, umax2s(size, 10, buf));
trace_write(arena, " 0x");
trace_write(arena, umax2s((uintptr_t)ptr, 16, buf));
trace_write(arena, "\n");
malloc_spin_unlock(&arena->lock);
}
static void
trace_thread_exit(void)
{
char buf[UMAX2S_BUFSIZE];
arena_t *arena = choose_arena();
malloc_spin_lock(&arena->lock);
trace_write(arena, umax2s(trace_get_tid(), 10, buf));
trace_write(arena, " x\n");
malloc_spin_unlock(&arena->lock);
}
#endif
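
For orientation (not part of the diff): each arena buffers its records in trace_buf and flushes them to a file named jemtr.<pid>.<arena> in the current working directory (e.g. jemtr.1234.0 for arena 0 of pid 1234). Every record is one line: the writing thread's id, a one-letter opcode (m, c, a, r, f, s, or x for malloc, calloc, posix_memalign, realloc, free, malloc_usable_size, and thread exit), and its operands. An illustrative excerpt, with made-up addresses and sizes:

    1 m 0x7f3a2c001040 128
    1 r 0x7f3a2c002080 0x7f3a2c001040 256 128
    1 f 0x7f3a2c002080 256
    2 x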
static void
malloc_print_stats(void)
{
-if (opt_print_stats) {
char s[UMAX2S_BUFSIZE];
-malloc_message("___ Begin jemalloc statistics ___\n", "", "",
-"");
malloc_message("___ Begin jemalloc statistics ___\n", "", "", "");
malloc_message("Assertions ",
#ifdef NDEBUG
"disabled",
@@ -4673,8 +4934,8 @@ malloc_print_stats(void)
malloc_message(opt_mmap ? "M" : "m", "", "", "");
#endif
malloc_message("P", "", "", "");
-#ifdef JEMALLOC_STATS
-malloc_message(opt_utrace ? "U" : "u", "", "", "");
#ifdef JEMALLOC_TRACE
malloc_message(opt_trace ? "T" : "t", "", "", "");
#endif
#ifdef JEMALLOC_SYSV
malloc_message(opt_sysv ? "V" : "v", "", "", "");
@@ -4687,41 +4948,40 @@ malloc_print_stats(void)
#endif
malloc_message("\n", "", "", "");
-malloc_message("CPUs: ", umax2s(ncpus, s), "\n", "");
malloc_message("CPUs: ", umax2s(ncpus, 10, s), "\n", "");
-malloc_message("Max arenas: ", umax2s(narenas, s), "\n", "");
malloc_message("Max arenas: ", umax2s(narenas, 10, s), "\n", "");
#ifdef JEMALLOC_BALANCE
malloc_message("Arena balance threshold: ",
-umax2s(opt_balance_threshold, s), "\n", "");
umax2s(opt_balance_threshold, 10, s), "\n", "");
#endif
-malloc_message("Pointer size: ", umax2s(sizeof(void *), s),
-"\n", "");
-malloc_message("Quantum size: ", umax2s(QUANTUM, s), "\n",
-"");
malloc_message("Pointer size: ", umax2s(sizeof(void *), 10, s), "\n",
"");
malloc_message("Quantum size: ", umax2s(QUANTUM, 10, s), "\n", "");
malloc_message("Cacheline size (assumed): ",
-umax2s(CACHELINE, s), "\n", "");
umax2s(CACHELINE, 10, s), "\n", "");
#ifdef JEMALLOC_TINY
-malloc_message("Tiny 2^n-spaced sizes: [", umax2s((1U <<
-TINY_MIN_2POW), s), "..", "");
-malloc_message(umax2s((qspace_min >> 1), s), "]\n", "", "");
malloc_message("Tiny 2^n-spaced sizes: [", umax2s((1U << TINY_MIN_2POW),
10, s), "..", "");
malloc_message(umax2s((qspace_min >> 1), 10, s), "]\n", "", "");
#endif
-malloc_message("Quantum-spaced sizes: [", umax2s(qspace_min,
-s), "..", "");
-malloc_message(umax2s(qspace_max, s), "]\n", "", "");
malloc_message("Quantum-spaced sizes: [", umax2s(qspace_min, 10, s),
"..", "");
malloc_message(umax2s(qspace_max, 10, s), "]\n", "", "");
malloc_message("Cacheline-spaced sizes: [",
-umax2s(cspace_min, s), "..", "");
-malloc_message(umax2s(cspace_max, s), "]\n", "", "");
umax2s(cspace_min, 10, s), "..", "");
malloc_message(umax2s(cspace_max, 10, s), "]\n", "", "");
-malloc_message("Subpage-spaced sizes: [", umax2s(sspace_min,
-s), "..", "");
-malloc_message(umax2s(sspace_max, s), "]\n", "", "");
malloc_message("Subpage-spaced sizes: [", umax2s(sspace_min, 10, s),
"..", "");
malloc_message(umax2s(sspace_max, 10, s), "]\n", "", "");
#ifdef JEMALLOC_MAG
-malloc_message("Rounds per magazine: ", umax2s(max_rounds,
-s), "\n", "");
malloc_message("Rounds per magazine: ", umax2s(max_rounds, 10, s), "\n",
"");
#endif
malloc_message("Max dirty pages per arena: ",
-umax2s(opt_dirty_max, s), "\n", "");
umax2s(opt_dirty_max, 10, s), "\n", "");
-malloc_message("Chunk size: ", umax2s(chunksize, s), "", "");
malloc_message("Chunk size: ", umax2s(chunksize, 10, s), "", "");
-malloc_message(" (2^", umax2s(opt_chunk_2pow, s), ")\n", "");
malloc_message(" (2^", umax2s(opt_chunk_2pow, 10, s), ")\n", "");
#ifdef JEMALLOC_STATS
{
@@ -4738,10 +4998,8 @@ malloc_print_stats(void)
for (i = 0, allocated = 0; i < narenas; i++) {
if (arenas[i] != NULL) {
malloc_spin_lock(&arenas[i]->lock);
-allocated +=
-arenas[i]->stats.allocated_small;
-allocated +=
-arenas[i]->stats.allocated_large;
allocated += arenas[i]->stats.allocated_small;
allocated += arenas[i]->stats.allocated_large;
#ifdef JEMALLOC_BALANCE
nbalance += arenas[i]->stats.nbalance;
#endif
@@ -4759,12 +5017,11 @@ malloc_print_stats(void)
mapped += base_mapped;
malloc_mutex_unlock(&base_mtx);
-malloc_printf("Allocated: %zu, mapped: %zu\n",
-allocated, mapped);
malloc_printf("Allocated: %zu, mapped: %zu\n", allocated,
mapped);
#ifdef JEMALLOC_BALANCE
-malloc_printf("Arena balance reassignments: %llu\n",
-nbalance);
malloc_printf("Arena balance reassignments: %llu\n", nbalance);
#endif
/* Print chunk stats. */
@@ -4778,23 +5035,21 @@ malloc_print_stats(void)
malloc_printf("chunks: nchunks "
"highchunks curchunks\n");
malloc_printf(" %13llu%13lu%13lu\n",
-chunks_stats.nchunks,
-chunks_stats.highchunks,
chunks_stats.nchunks, chunks_stats.highchunks,
chunks_stats.curchunks);
}
/* Print chunk stats. */
malloc_printf(
"huge: nmalloc ndalloc allocated\n");
-malloc_printf(" %12llu %12llu %12zu\n",
-huge_nmalloc, huge_ndalloc, huge_allocated);
malloc_printf(" %12llu %12llu %12zu\n", huge_nmalloc,
huge_ndalloc, huge_allocated);
/* Print stats for each arena. */
for (i = 0; i < narenas; i++) {
arena = arenas[i];
if (arena != NULL) {
-malloc_printf(
-"\narenas[%u]:\n", i);
malloc_printf("\narenas[%u]:\n", i);
malloc_spin_lock(&arena->lock);
stats_print(arena);
malloc_spin_unlock(&arena->lock);
@@ -4802,9 +5057,7 @@ malloc_print_stats(void)
}
}
#endif /* #ifdef JEMALLOC_STATS */
-malloc_message("--- End jemalloc statistics ---\n", "", "",
-"");
malloc_message("--- End jemalloc statistics ---\n", "", "", "");
-}
}
#ifdef JEMALLOC_DEBUG
@@ -5206,12 +5459,12 @@ MALLOC_OUT:
opt_mag_size_2pow--;
break;
#endif
-#ifdef JEMALLOC_STATS
-case 'u':
-opt_utrace = false;
#ifdef JEMALLOC_TRACE
case 't':
opt_trace = false;
break;
-case 'U':
-opt_utrace = true;
case 'T':
opt_trace = true;
break;
#endif
#ifdef JEMALLOC_SYSV
@@ -5259,7 +5512,13 @@ MALLOC_OUT:
opt_mmap = true;
#endif
-/* Take care to call atexit() only once. */
#ifdef JEMALLOC_TRACE
if (opt_trace) {
malloc_mutex_init(&trace_mtx);
/* Flush trace buffers at exit. */
atexit(malloc_trace_flush_all);
}
#endif
if (opt_print_stats) {
/* Print statistics at exit. */
atexit(malloc_print_stats);
@@ -5305,7 +5564,7 @@ MALLOC_OUT:
if (nbins > 256) {
char line_buf[UMAX2S_BUFSIZE];
malloc_message("<jemalloc>: Too many size classes (",
-umax2s(nbins, line_buf), " > 256)\n", "");
umax2s(nbins, 10, line_buf), " > 256)\n", "");
abort();
}
@@ -5333,8 +5592,6 @@ MALLOC_OUT:
arena_maxclass = chunksize - (arena_chunk_header_npages <<
PAGE_SHIFT);
-UTRACE(0, 0, 0);
#ifdef JEMALLOC_STATS
memset(&stats_chunks, 0, sizeof(chunk_stats_t));
#endif
@@ -5565,7 +5822,10 @@ RETURN:
errno = ENOMEM;
}
-UTRACE(0, size, ret);
#ifdef JEMALLOC_TRACE
if (opt_trace)
trace_malloc(ret, size);
#endif
return (ret);
}
@@ -5614,7 +5874,10 @@ posix_memalign(void **memptr, size_t alignment, size_t size)
ret = 0;
RETURN:
-UTRACE(0, size, result);
#ifdef JEMALLOC_TRACE
if (opt_trace)
trace_posix_memalign(result, alignment, size);
#endif
return (ret);
}
@@ -5669,7 +5932,10 @@ RETURN:
errno = ENOMEM;
}
-UTRACE(0, num_size, ret);
#ifdef JEMALLOC_TRACE
if (opt_trace)
trace_calloc(ret, num, size);
#endif
return (ret);
}
@@ -5677,6 +5943,9 @@ void *
realloc(void *ptr, size_t size)
{
void *ret;
#ifdef JEMALLOC_TRACE
size_t old_size;
#endif
if (size == 0) {
#ifdef JEMALLOC_SYSV
@@ -5697,6 +5966,11 @@ realloc(void *ptr, size_t size)
assert(malloc_initialized || malloc_initializer ==
pthread_self());
#ifdef JEMALLOC_TRACE
if (opt_trace)
old_size = isalloc(ptr);
#endif
ret = iralloc(ptr, size);
if (ret == NULL) {
@@ -5716,6 +5990,11 @@ realloc(void *ptr, size_t size)
else
ret = imalloc(size);
#ifdef JEMALLOC_TRACE
if (opt_trace)
old_size = 0;
#endif
if (ret == NULL) {
#ifdef JEMALLOC_XMALLOC
if (opt_xmalloc) {
@@ -5732,7 +6011,10 @@ realloc(void *ptr, size_t size)
#ifdef JEMALLOC_SYSV
RETURN:
#endif
-UTRACE(ptr, size, ret);
#ifdef JEMALLOC_TRACE
if (opt_trace)
trace_realloc(ret, ptr, size, old_size);
#endif
return (ret);
}
@@ -5744,8 +6026,11 @@ free(void *ptr)
assert(malloc_initialized || malloc_initializer ==
pthread_self());
#ifdef JEMALLOC_TRACE
if (opt_trace)
trace_free(ptr, isalloc(ptr));
#endif
idalloc(ptr);
-UTRACE(ptr, 0, 0);
}
}
@@ -5760,10 +6045,16 @@ free(void *ptr)
size_t
malloc_usable_size(const void *ptr)
{
size_t ret;
assert(ptr != NULL);
ret = isalloc(ptr);
-return (isalloc(ptr));
#ifdef JEMALLOC_TRACE
if (opt_trace)
trace_malloc_usable_size(ret, ptr);
#endif
return (ret);
}
/*
@@ -5796,6 +6087,10 @@ thread_cleanup(void *arg)
mag_rack = (void *)(uintptr_t)1;
}
#endif
#ifdef JEMALLOC_TRACE
if (opt_trace)
trace_thread_exit();
#endif
}
/*

View File

@@ -81,6 +81,9 @@
/* JEMALLOC_STATS enables statistics calculation. */
#undef JEMALLOC_STATS
/* JEMALLOC_TRACE enables allocation tracing. */
#undef JEMALLOC_TRACE
/*
* JEMALLOC_TINY enables support for tiny objects, which are smaller than one
* quantum.

jemalloc/src/jemtr2mtr.c (new file, 43 lines)
View File

@@ -0,0 +1,43 @@
/*-
* Copyright (C) 2009 Jason Evans <jasone@canonware.com>.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice(s), this list of conditions and the following disclaimer as
* the first lines of this file unmodified other than the possible
* addition of one or more copyright notices.
* 2. Redistributions in binary form must reproduce the above copyright
* notice(s), this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*******************************************************************************
*/
#include <stdio.h>
#include <stdlib.h>
int
main(int argc, char **argv)
{
// XXX Parse a jemtr trace and convert it to a mtr trace.
fprintf(stderr, "XXX Not implemented\n");
abort();
return 0;
}
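
Since the converter is still a stub, the following is a rough, hypothetical sketch (not part of the commit) of the reader side it will eventually need, based on the record layout produced by the trace_*() writers in jemalloc.c; the mtr output format itself is not addressed here.

    #include <stdio.h>

    int
    main(void)
    {
        char line[128];
        unsigned long tid;
        char op;

        /* Each jemtr record is "<tid> <opcode> [operands ...]". */
        while (fgets(line, sizeof(line), stdin) != NULL) {
            if (sscanf(line, "%lu %c", &tid, &op) != 2) {
                fprintf(stderr, "malformed record: %s", line);
                return (1);
            }
            switch (op) {
            case 'm': case 'c': case 'a': case 'r': case 'f': case 's':
            case 'x':
                /* XXX Emit the corresponding mtr record here. */
                break;
            default:
                fprintf(stderr, "unknown opcode '%c'\n", op);
                return (1);
            }
        }
        return (0);
    }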

View File

@@ -1,33 +1,5 @@
/*-
-* Copyright (C) 2009 Facebook, Inc.
-* All rights reserved.
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions are met:
-* * Redistributions of source code must retain the above copyright notice,
-* this list of conditions and the following disclaimer.
-* * Redistributions in binary form must reproduce the above copyright notice,
-* this list of conditions and the following disclaimer in the documentation
-* and/or other materials provided with the distribution.
-* * Neither the name of Facebook, Inc. nor the names of its contributors may
-* be used to endorse or promote products derived from this software without
-* specific prior written permission.
-*
-* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-* POSSIBILITY OF SUCH DAMAGE.
-*
-*******************************************************************************
-*
-* Copyright (C) 2006-2007 Jason Evans <jasone@FreeBSD.org>.
* Copyright (C) 2006-2009 Jason Evans <jasone@FreeBSD.org>.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

View File

@@ -1,33 +1,5 @@
/*-
-* Copyright (C) 2009 Facebook, Inc.
-* All rights reserved.
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions are met:
-* * Redistributions of source code must retain the above copyright notice,
-* this list of conditions and the following disclaimer.
-* * Redistributions in binary form must reproduce the above copyright notice,
-* this list of conditions and the following disclaimer in the documentation
-* and/or other materials provided with the distribution.
-* * Neither the name of Facebook, Inc. nor the names of its contributors may
-* be used to endorse or promote products derived from this software without
-* specific prior written permission.
-*
-* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-* POSSIBILITY OF SUCH DAMAGE.
-*
-*******************************************************************************
-*
-* Copyright (C) 2006-2007 Jason Evans <jasone@FreeBSD.org>.
* Copyright (C) 2006-2009 Jason Evans <jasone@FreeBSD.org>.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without