Refactor jemalloc.c into multiple source files.
Fix a stats bug in large object curruns accounting. Replace tcache_bin_fill() with arena_tcache_fill(), and fix a bug in an OOM error path. Fix API name mangling to coexist with __attribute__((malloc)).
parent 64bd7661a8
commit e476f8a161
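The name-mangling fix is the subtle part of this commit: the old scheme (removed in the configure.ac hunk below) defined each public symbol directly, e.g. #define malloc je_malloc, and an object-like macro like that also rewrites the malloc token inside __attribute__((malloc)), corrupting the annotated prototypes. The new JEMALLOC_P() macro pastes the prefix onto a name only where the header asks for it. A minimal compilable sketch of the scheme (not from the commit; it assumes a hypothetical je_ prefix and a simplified JEMALLOC_ATTR):

#include <stdlib.h>
#include <string.h>

/* What configure emits via jemalloc_defs.h when JEMALLOC_PREFIX is "je_": */
#define JEMALLOC_P(n) je_##n
/* Without a prefix, jemalloc.h falls back to: #define JEMALLOC_P(s) s */

#define JEMALLOC_ATTR(s) __attribute__((s))

/* The public prototype composes cleanly with the attribute... */
void *JEMALLOC_P(malloc)(size_t size) JEMALLOC_ATTR(malloc);
/* ...expanding to: void *je_malloc(size_t size) __attribute__((malloc)); */

void *
JEMALLOC_P(malloc)(size_t size) /* Defines je_malloc(). */
{
	return (calloc(1, size)); /* Stand-in body, for illustration only. */
}

int
main(void)
{
	char *p = JEMALLOC_P(malloc)(16);
	if (p != NULL) {
		strcpy(p, "ok");
		free(p);
	}
	return (0);
}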
@@ -10,5 +10,5 @@ syntax: regexp
 ^jemalloc/lib$
 ^jemalloc/Makefile$
 ^jemalloc/src/jemalloc_defs\.h$
-^jemalloc/src/[a-z0-9]+.o$
-^jemalloc/src/[a-z0-9]+.d$
+^jemalloc/src/[a-z0-9_]+.o$
+^jemalloc/src/[a-z0-9_]+.d$
@@ -36,21 +36,25 @@ REV := 0
 # List of files to be installed.
 BINS := @bins@
 CHDRS := @srcroot@src/jemalloc.h @objroot@src/jemalloc_defs.h
-CSRCS := @srcroot@src/jemalloc.c
+CSRCS := @srcroot@src/jemalloc.c @srcroot@src/jemalloc_arena.c \
+	@srcroot@src/jemalloc_base.c @srcroot@src/jemalloc_chunk.c \
+	@srcroot@src/jemalloc_extent.c @srcroot@src/jemalloc_huge.c \
+	@srcroot@src/jemalloc_mutex.c @srcroot@src/jemalloc_stats.c \
+	@srcroot@src/jemalloc_tcache.c @srcroot@src/jemalloc_trace.c
 DSOS := @objroot@lib/libjemalloc.so.$(REV) @objroot@lib/libjemalloc.so \
 	@objroot@lib/libjemalloc_pic.a
 MAN3 := @objroot@doc/jemalloc.3
 
-#
-# Include generated dependency files.
-#
--include $(CSRCS:@srcroot@%.c=@objroot@%.d)
-
 .PHONY: all dist install check clean distclean relclean
 
 # Default target.
 all: $(DSOS) bins
 
+#
+# Include generated dependency files.
+#
+-include $(CSRCS:@srcroot@%.c=@objroot@%.d)
+
 @objroot@src/%.o: @srcroot@src/%.c
 	$(CC) $(CFLAGS) -c $(CPPFLAGS) -o $@ $<
 	@$(SHELL) -ec "$(CC) -MM $(CPPFLAGS) $< | sed \"s/\($(subst /,\/,$(notdir $(basename $@)))\)\.o\([ :]*\)/$(subst /,\/,$(strip $(dir $@)))\1.o \2/g\" > $(@:%.o=%.d)"
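The sed pipeline in that dependency rule rewrites the gcc -MM output so that the target of each generated .d file names the object under @objroot@ rather than the bare file name; otherwise make would read dependency rules for a foo.o it never builds at that path. Illustratively, with obj/ standing in for @objroot@:

    gcc -MM emits:      jemalloc_base.o: jemalloc_base.c jemalloc_internal.h ...
    the .d file stores: obj/src/jemalloc_base.o: jemalloc_base.c jemalloc_internal.h ...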
@@ -152,6 +152,9 @@ JE_COMPILABLE([__attribute__ syntax],
               [attribute])
 if test "x${attribute}" = "xyes" ; then
   AC_DEFINE([JEMALLOC_HAVE_ATTR], [ ])
+  if test "x$GCC" = "xyes" ; then
+    JE_CFLAGS_APPEND([-fvisibility=internal])
+  fi
 fi
 
 dnl Platform-specific settings.  abi and RPATH can probably be determined
@@ -254,16 +257,7 @@ if test "x$JEMALLOC_PREFIX" != "x" ; then
   AC_DEFINE([JEMALLOC_PREFIX], [ ])
   jemalloc_prefix=$JEMALLOC_PREFIX
   AC_SUBST([jemalloc_prefix])
-  AC_DEFINE_UNQUOTED([malloc], [${JEMALLOC_PREFIX}malloc])
-  AC_DEFINE_UNQUOTED([calloc], [${JEMALLOC_PREFIX}calloc])
-  AC_DEFINE_UNQUOTED([posix_memalign], [${JEMALLOC_PREFIX}posix_memalign])
-  AC_DEFINE_UNQUOTED([realloc], [${JEMALLOC_PREFIX}realloc])
-  AC_DEFINE_UNQUOTED([free], [${JEMALLOC_PREFIX}free])
-  AC_DEFINE_UNQUOTED([malloc_usable_size], [${JEMALLOC_PREFIX}malloc_usable_size])
-  AC_DEFINE_UNQUOTED([malloc_tcache_flush], [${JEMALLOC_PREFIX}malloc_tcache_flush])
-  AC_DEFINE_UNQUOTED([malloc_stats_print], [${JEMALLOC_PREFIX}malloc_stats_print])
-  AC_DEFINE_UNQUOTED([malloc_options], [${JEMALLOC_PREFIX}malloc_options])
-  AC_DEFINE_UNQUOTED([malloc_message], [${JEMALLOC_PREFIX}malloc_message])
+  AC_DEFINE_UNQUOTED([JEMALLOC_P(string_that_no_one_should_want_to_use_as_a_jemalloc_API_prefix)], [${JEMALLOC_PREFIX}##string_that_no_one_should_want_to_use_as_a_jemalloc_API_prefix])
 fi
 
 dnl Do not compile with debugging by default.
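For reference, with JEMALLOC_PREFIX set to je_ the single AC_DEFINE_UNQUOTED above should land in jemalloc_defs.h as a function-like macro along the lines of:

#define JEMALLOC_P(string_that_no_one_should_want_to_use_as_a_jemalloc_API_prefix) je_##string_that_no_one_should_want_to_use_as_a_jemalloc_API_prefix

so JEMALLOC_P(malloc) expands to je_malloc, while builds without a prefix leave JEMALLOC_P undefined and pick up the identity fallback from jemalloc.h.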
File diff suppressed because it is too large
@@ -5,23 +5,26 @@ extern "C" {
 #endif
 
 #include "jemalloc_defs.h"
+#ifndef JEMALLOC_P
+# define JEMALLOC_P(s) s
+#endif
 
-extern const char *malloc_options;
-extern void (*malloc_message)(const char *p1,
+extern const char *JEMALLOC_P(malloc_options);
+extern void (*JEMALLOC_P(malloc_message))(const char *p1,
 	const char *p2, const char *p3, const char *p4);
 
-void *malloc(size_t size) JEMALLOC_ATTR(malloc);
-void *calloc(size_t num, size_t size) JEMALLOC_ATTR(malloc);
-int posix_memalign(void **memptr, size_t alignment, size_t size)
+void *JEMALLOC_P(malloc)(size_t size) JEMALLOC_ATTR(malloc);
+void *JEMALLOC_P(calloc)(size_t num, size_t size) JEMALLOC_ATTR(malloc);
+int JEMALLOC_P(posix_memalign)(void **memptr, size_t alignment, size_t size)
 	JEMALLOC_ATTR(nonnull(1));
-void *realloc(void *ptr, size_t size);
-void free(void *ptr);
+void *JEMALLOC_P(realloc)(void *ptr, size_t size);
+void JEMALLOC_P(free)(void *ptr);
 
-size_t malloc_usable_size(const void *ptr);
+size_t JEMALLOC_P(malloc_usable_size)(const void *ptr);
 #ifdef JEMALLOC_TCACHE
-void malloc_tcache_flush(void);
+void JEMALLOC_P(malloc_tcache_flush)(void);
 #endif
-void malloc_stats_print(const char *opts);
+void JEMALLOC_P(malloc_stats_print)(const char *opts);
 
 #ifdef __cplusplus
 };
jemalloc/src/jemalloc_arena.c (new file, 2299 lines)
File diff suppressed because it is too large

jemalloc/src/jemalloc_arena.h (new file, 460 lines)
@@ -0,0 +1,460 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

/*
 * Subpages are an artificially designated partitioning of pages.  Their only
 * purpose is to support subpage-spaced size classes.
 *
 * There must be at least 4 subpages per page, due to the way size classes are
 * handled.
 */
#define LG_SUBPAGE 8
#define SUBPAGE ((size_t)(1U << LG_SUBPAGE))
#define SUBPAGE_MASK (SUBPAGE - 1)

/* Return the smallest subpage multiple that is >= s. */
#define SUBPAGE_CEILING(s) \
	(((s) + SUBPAGE_MASK) & ~SUBPAGE_MASK)

#ifdef JEMALLOC_TINY
/* Smallest size class to support. */
#  define LG_TINY_MIN 1
#endif

/*
 * Maximum size class that is a multiple of the quantum, but not (necessarily)
 * a power of 2.  Above this size, allocations are rounded up to the nearest
 * power of 2.
 */
#define LG_QSPACE_MAX_DEFAULT 7

/*
 * Maximum size class that is a multiple of the cacheline, but not (necessarily)
 * a power of 2.  Above this size, allocations are rounded up to the nearest
 * power of 2.
 */
#define LG_CSPACE_MAX_DEFAULT 9

/*
 * Maximum medium size class.  This must not be more than 1/4 of a chunk
 * (LG_MEDIUM_MAX_DEFAULT <= LG_CHUNK_DEFAULT - 2).
 */
#define LG_MEDIUM_MAX_DEFAULT 15

/* Return the smallest medium size class that is >= s. */
#define MEDIUM_CEILING(s) \
	(((s) + mspace_mask) & ~mspace_mask)

/*
 * Soft limit on the number of medium size classes.  Spacing between medium
 * size classes never exceeds pagesize, which can force more than NMBINS_MAX
 * medium size classes.
 */
#define NMBINS_MAX 16

/*
 * RUN_MAX_OVRHD indicates maximum desired run header overhead.  Runs are sized
 * as small as possible such that this setting is still honored, without
 * violating other constraints.  The goal is to make runs as small as possible
 * without exceeding a per run external fragmentation threshold.
 *
 * We use binary fixed point math for overhead computations, where the binary
 * point is implicitly RUN_BFP bits to the left.
 *
 * Note that it is possible to set RUN_MAX_OVRHD low enough that it cannot be
 * honored for some/all object sizes, since there is one bit of header overhead
 * per object (plus a constant).  This constraint is relaxed (ignored) for runs
 * that are so small that the per-region overhead is greater than:
 *
 *   (RUN_MAX_OVRHD / (reg_size << (3+RUN_BFP)))
 */
#define RUN_BFP 12
/*                          \/   Implicit binary fixed point. */
#define RUN_MAX_OVRHD 0x0000003dU
#define RUN_MAX_OVRHD_RELAX 0x00001800U

/* Put a cap on small object run size.  This overrides RUN_MAX_OVRHD. */
#define RUN_MAX_SMALL \
	(arena_maxclass <= (1U << (CHUNK_MAP_LG_PG_RANGE + PAGE_SHIFT)) \
	    ? arena_maxclass : (1U << (CHUNK_MAP_LG_PG_RANGE + \
	    PAGE_SHIFT)))

/*
 * The minimum ratio of active:dirty pages per arena is computed as:
 *
 *   (nactive >> opt_lg_dirty_mult) >= ndirty
 *
 * So, supposing that opt_lg_dirty_mult is 5, there can be no less than 32
 * times as many active pages as dirty pages.
 */
#define LG_DIRTY_MULT_DEFAULT 5

typedef struct arena_chunk_map_s arena_chunk_map_t;
typedef struct arena_chunk_s arena_chunk_t;
typedef struct arena_run_s arena_run_t;
typedef struct arena_bin_s arena_bin_t;
typedef struct arena_s arena_t;

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

/* Each element of the chunk map corresponds to one page within the chunk. */
struct arena_chunk_map_s {
	/*
	 * Linkage for run trees.  There are two disjoint uses:
	 *
	 * 1) arena_t's runs_avail tree.
	 * 2) arena_run_t conceptually uses this linkage for in-use non-full
	 *    runs, rather than directly embedding linkage.
	 */
	rb_node(arena_chunk_map_t) link;

	/*
	 * Run address (or size) and various flags are stored together.  The
	 * bit layout looks like (assuming 32-bit system):
	 *
	 *   ???????? ????cccc cccccccc ccccdzla
	 *
	 * ? : Unallocated: Run address for first/last pages, unset for
	 *                  internal pages.
	 *     Small/medium: Don't care.
	 *     Large: Run size for first page, unset for trailing pages.
	 * - : Unused.
	 * c : refcount (could overflow for PAGE_SIZE >= 128 KiB)
	 * d : dirty?
	 * z : zeroed?
	 * l : large?
	 * a : allocated?
	 *
	 * Following are example bit patterns for the three types of runs.
	 *
	 * p : run page offset
	 * s : run size
	 * x : don't care
	 * - : 0
	 * [dzla] : bit set
	 *
	 *   Unallocated:
	 *     ssssssss ssssssss ssss---- --------
	 *     xxxxxxxx xxxxxxxx xxxx---- ----d---
	 *     ssssssss ssssssss ssss---- -----z--
	 *
	 *   Small/medium:
	 *     pppppppp ppppcccc cccccccc cccc---a
	 *     pppppppp ppppcccc cccccccc cccc---a
	 *     pppppppp ppppcccc cccccccc cccc---a
	 *
	 *   Large:
	 *     ssssssss ssssssss ssss---- ------la
	 *     -------- -------- -------- ------la
	 *     -------- -------- -------- ------la
	 */
	size_t bits;
#define CHUNK_MAP_PG_MASK ((size_t)0xfff00000U)
#define CHUNK_MAP_PG_SHIFT 20
#define CHUNK_MAP_LG_PG_RANGE 12

#define CHUNK_MAP_RC_MASK ((size_t)0xffff0U)
#define CHUNK_MAP_RC_ONE ((size_t)0x00010U)

#define CHUNK_MAP_FLAGS_MASK ((size_t)0xfU)
#define CHUNK_MAP_DIRTY ((size_t)0x8U)
#define CHUNK_MAP_ZEROED ((size_t)0x4U)
#define CHUNK_MAP_LARGE ((size_t)0x2U)
#define CHUNK_MAP_ALLOCATED ((size_t)0x1U)
#define CHUNK_MAP_KEY (CHUNK_MAP_DIRTY | CHUNK_MAP_ALLOCATED)
};
typedef rb_tree(arena_chunk_map_t) arena_avail_tree_t;
typedef rb_tree(arena_chunk_map_t) arena_run_tree_t;

/* Arena chunk header. */
struct arena_chunk_s {
	/* Arena that owns the chunk. */
	arena_t *arena;

	/* Linkage for the arena's chunks_dirty tree. */
	rb_node(arena_chunk_t) link_dirty;

	/*
	 * True if the chunk is currently in the chunks_dirty tree, due to
	 * having at some point contained one or more dirty pages.  Removal
	 * from chunks_dirty is lazy, so (dirtied && ndirty == 0) is possible.
	 */
	bool dirtied;

	/* Number of dirty pages. */
	size_t ndirty;

	/* Map of pages within chunk that keeps track of free/large/small. */
	arena_chunk_map_t map[1]; /* Dynamically sized. */
};
typedef rb_tree(arena_chunk_t) arena_chunk_tree_t;

struct arena_run_s {
#ifdef JEMALLOC_DEBUG
	uint32_t magic;
#  define ARENA_RUN_MAGIC 0x384adf93
#endif

	/* Bin this run is associated with. */
	arena_bin_t *bin;

	/* Index of first element that might have a free region. */
	unsigned regs_minelm;

	/* Number of free regions in run. */
	unsigned nfree;

	/* Bitmask of in-use regions (0: in use, 1: free). */
	unsigned regs_mask[1]; /* Dynamically sized. */
};

struct arena_bin_s {
	/*
	 * Current run being used to service allocations of this bin's size
	 * class.
	 */
	arena_run_t *runcur;

	/*
	 * Tree of non-full runs.  This tree is used when looking for an
	 * existing run when runcur is no longer usable.  We choose the
	 * non-full run that is lowest in memory; this policy tends to keep
	 * objects packed well, and it can also help reduce the number of
	 * almost-empty chunks.
	 */
	arena_run_tree_t runs;

	/* Size of regions in a run for this bin's size class. */
	size_t reg_size;

	/* Total size of a run for this bin's size class. */
	size_t run_size;

	/* Total number of regions in a run for this bin's size class. */
	uint32_t nregs;

	/* Number of elements in a run's regs_mask for this bin's size class. */
	uint32_t regs_mask_nelms;

	/* Offset of first region in a run for this bin's size class. */
	uint32_t reg0_offset;

#ifdef JEMALLOC_STATS
	/* Bin statistics. */
	malloc_bin_stats_t stats;
#endif
};

struct arena_s {
#ifdef JEMALLOC_DEBUG
	uint32_t magic;
#  define ARENA_MAGIC 0x947d3d24
#endif

	/* All operations on this arena require that lock be locked. */
	malloc_mutex_t lock;

#ifdef JEMALLOC_STATS
	arena_stats_t stats;
#  ifdef JEMALLOC_TCACHE
	/*
	 * List of tcaches for extant threads associated with this arena.
	 * Stats from these are merged incrementally, and at exit.
	 */
	ql_head(tcache_t) tcache_ql;
#  endif
#endif

#ifdef JEMALLOC_TRACE
#  define TRACE_BUF_SIZE 65536
	unsigned trace_buf_end;
	char trace_buf[TRACE_BUF_SIZE];
	int trace_fd;
#endif

	/* Tree of dirty-page-containing chunks this arena manages. */
	arena_chunk_tree_t chunks_dirty;

	/*
	 * In order to avoid rapid chunk allocation/deallocation when an arena
	 * oscillates right on the cusp of needing a new chunk, cache the most
	 * recently freed chunk.  The spare is left in the arena's chunk trees
	 * until it is deleted.
	 *
	 * There is one spare chunk per arena, rather than one spare total, in
	 * order to avoid interactions between multiple threads that could make
	 * a single spare inadequate.
	 */
	arena_chunk_t *spare;

	/* Number of pages in active runs. */
	size_t nactive;

	/*
	 * Current count of pages within unused runs that are potentially
	 * dirty, and for which madvise(... MADV_DONTNEED) has not been called.
	 * By tracking this, we can institute a limit on how much dirty unused
	 * memory is mapped for each arena.
	 */
	size_t ndirty;

	/*
	 * Size/address-ordered tree of this arena's available runs.  This tree
	 * is used for first-best-fit run allocation.
	 */
	arena_avail_tree_t runs_avail;

	/*
	 * bins is used to store trees of free regions of the following sizes,
	 * assuming a 16-byte quantum, 4 KiB page size, and default
	 * JEMALLOC_OPTIONS.
	 *
	 *   bins[i] |   size |
	 *   --------+--------+
	 *        0  |      2 |
	 *        1  |      4 |
	 *        2  |      8 |
	 *   --------+--------+
	 *        3  |     16 |
	 *        4  |     32 |
	 *        5  |     48 |
	 *        :        :
	 *        8  |     96 |
	 *        9  |    112 |
	 *       10  |    128 |
	 *   --------+--------+
	 *       11  |    192 |
	 *       12  |    256 |
	 *       13  |    320 |
	 *       14  |    384 |
	 *       15  |    448 |
	 *       16  |    512 |
	 *   --------+--------+
	 *       17  |    768 |
	 *       18  |   1024 |
	 *       19  |   1280 |
	 *        :        :
	 *       27  |   3328 |
	 *       28  |   3584 |
	 *       29  |   3840 |
	 *   --------+--------+
	 *       30  |  4 KiB |
	 *       31  |  6 KiB |
	 *       32  |  8 KiB |
	 *        :        :
	 *       42  | 28 KiB |
	 *       43  | 30 KiB |
	 *       44  | 32 KiB |
	 *   --------+--------+
	 */
	arena_bin_t bins[1]; /* Dynamically sized. */
};

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

extern size_t opt_lg_qspace_max;
extern size_t opt_lg_cspace_max;
extern size_t opt_lg_medium_max;
extern ssize_t opt_lg_dirty_mult;
extern uint8_t const *small_size2bin;

/* Various bin-related settings. */
#ifdef JEMALLOC_TINY /* Number of (2^n)-spaced tiny bins. */
#  define ntbins ((unsigned)(LG_QUANTUM - LG_TINY_MIN))
#else
#  define ntbins 0
#endif
extern unsigned nqbins; /* Number of quantum-spaced bins. */
extern unsigned ncbins; /* Number of cacheline-spaced bins. */
extern unsigned nsbins; /* Number of subpage-spaced bins. */
extern unsigned nmbins; /* Number of medium bins. */
extern unsigned nbins;
extern unsigned mbin0; /* mbin offset (nbins - nmbins). */
#ifdef JEMALLOC_TINY
#  define tspace_max ((size_t)(QUANTUM >> 1))
#endif
#define qspace_min QUANTUM
extern size_t qspace_max;
extern size_t cspace_min;
extern size_t cspace_max;
extern size_t sspace_min;
extern size_t sspace_max;
#define small_maxclass sspace_max
#define medium_min PAGE_SIZE
extern size_t medium_max;
#define bin_maxclass medium_max

/* Spacing between medium size classes. */
extern size_t lg_mspace;
extern size_t mspace_mask;

#ifdef JEMALLOC_TCACHE
void arena_tcache_fill(arena_t *arena, tcache_bin_t *tbin, size_t binind);
#endif
void *arena_malloc_small(arena_t *arena, size_t size, bool zero);
void *arena_malloc_medium(arena_t *arena, size_t size, bool zero);
void *arena_malloc(size_t size, bool zero);
void *arena_palloc(arena_t *arena, size_t alignment, size_t size,
    size_t alloc_size);
size_t arena_salloc(const void *ptr);
void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    arena_chunk_map_t *mapelm);
void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr);
#ifdef JEMALLOC_STATS
void arena_stats_print(arena_t *arena, bool bins, bool large);
#endif
void *arena_ralloc(void *ptr, size_t size, size_t oldsize);
bool arena_new(arena_t *arena, unsigned ind);
bool arena_boot0(void);
void arena_boot1(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
void arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
JEMALLOC_INLINE void
arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
{
	size_t pageind;
	arena_chunk_map_t *mapelm;

	assert(arena != NULL);
	assert(arena->magic == ARENA_MAGIC);
	assert(chunk->arena == arena);
	assert(ptr != NULL);
	assert(CHUNK_ADDR2BASE(ptr) != ptr);

	pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT);
	mapelm = &chunk->map[pageind];
	assert((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0);
	if ((mapelm->bits & CHUNK_MAP_LARGE) == 0) {
		/* Small allocation. */
#ifdef JEMALLOC_TCACHE
		tcache_t *tcache;

		if ((tcache = tcache_get()) != NULL)
			tcache_dalloc(tcache, ptr);
		else {
#endif
			malloc_mutex_lock(&arena->lock);
			arena_dalloc_bin(arena, chunk, ptr, mapelm);
			malloc_mutex_unlock(&arena->lock);
#ifdef JEMALLOC_TCACHE
		}
#endif
	} else
		arena_dalloc_large(arena, chunk, ptr);
}
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
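Two of the constants above are easiest to digest with concrete numbers: RUN_MAX_OVRHD = 0x3d under RUN_BFP = 12 encodes 61/4096 ≈ 1.5% maximum desired run-header overhead, and RUN_MAX_OVRHD_RELAX = 0x1800 encodes 6144/4096 = 1.5. The mask-based ceiling macros and the active:dirty ratio are likewise mechanical; the following stand-alone sketch (not part of the commit; constants mirror the defaults above) rounds a request up the same way SUBPAGE_CEILING() does and applies the dirty-page test exactly as described for opt_lg_dirty_mult:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

#define LG_SUBPAGE 8
#define SUBPAGE ((size_t)(1U << LG_SUBPAGE))
#define SUBPAGE_MASK (SUBPAGE - 1)
/* Rounds s up to a SUBPAGE multiple; works because SUBPAGE is a power of 2. */
#define SUBPAGE_CEILING(s) (((s) + SUBPAGE_MASK) & ~SUBPAGE_MASK)

/* Purge when the ratio (nactive >> lg_dirty_mult) >= ndirty stops holding. */
static bool
arena_should_purge(size_t nactive, size_t ndirty, size_t lg_dirty_mult)
{
	return ((nactive >> lg_dirty_mult) < ndirty);
}

int
main(void)
{
	assert(SUBPAGE_CEILING(1) == 256);
	assert(SUBPAGE_CEILING(768) == 768);  /* Already subpage-aligned. */
	assert(SUBPAGE_CEILING(769) == 1024);

	/* With lg_dirty_mult = 5, 32768 active pages tolerate 1024 dirty. */
	assert(arena_should_purge(32768, 1024, 5) == false);
	assert(arena_should_purge(32768, 1025, 5) == true);
	return (0);
}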
jemalloc/src/jemalloc_base.c (new file, 186 lines)
@@ -0,0 +1,186 @@
#define JEMALLOC_BASE_C_
#include "jemalloc_internal.h"

#ifdef JEMALLOC_STATS
size_t base_mapped;
#endif

malloc_mutex_t base_mtx;

/*
 * Current pages that are being used for internal memory allocations.  These
 * pages are carved up in cacheline-size quanta, so that there is no chance of
 * false cache line sharing.
 */
static void *base_pages;
static void *base_next_addr;
static void *base_past_addr; /* Addr immediately past base_pages. */
static extent_node_t *base_nodes;

#ifdef JEMALLOC_DSS
static bool base_pages_alloc_dss(size_t minsize);
#endif
static bool base_pages_alloc_mmap(size_t minsize);
static bool base_pages_alloc(size_t minsize);

#ifdef JEMALLOC_DSS
static bool
base_pages_alloc_dss(size_t minsize)
{

	/*
	 * Do special DSS allocation here, since base allocations don't need to
	 * be chunk-aligned.
	 */
	malloc_mutex_lock(&dss_mtx);
	if (dss_prev != (void *)-1) {
		intptr_t incr;
		size_t csize = CHUNK_CEILING(minsize);

		do {
			/* Get the current end of the DSS. */
			dss_max = sbrk(0);

			/*
			 * Calculate how much padding is necessary to
			 * chunk-align the end of the DSS.  Don't worry about
			 * dss_max not being chunk-aligned though.
			 */
			incr = (intptr_t)chunksize
			    - (intptr_t)CHUNK_ADDR2OFFSET(dss_max);
			assert(incr >= 0);
			if ((size_t)incr < minsize)
				incr += csize;

			dss_prev = sbrk(incr);
			if (dss_prev == dss_max) {
				/* Success. */
				dss_max = (void *)((intptr_t)dss_prev + incr);
				base_pages = dss_prev;
				base_next_addr = base_pages;
				base_past_addr = dss_max;
#ifdef JEMALLOC_STATS
				base_mapped += incr;
#endif
				malloc_mutex_unlock(&dss_mtx);
				return (false);
			}
		} while (dss_prev != (void *)-1);
	}
	malloc_mutex_unlock(&dss_mtx);

	return (true);
}
#endif

static bool
base_pages_alloc_mmap(size_t minsize)
{
	size_t csize;

	assert(minsize != 0);
	csize = PAGE_CEILING(minsize);
	base_pages = pages_map(NULL, csize);
	if (base_pages == NULL)
		return (true);
	base_next_addr = base_pages;
	base_past_addr = (void *)((uintptr_t)base_pages + csize);
#ifdef JEMALLOC_STATS
	base_mapped += csize;
#endif

	return (false);
}

static bool
base_pages_alloc(size_t minsize)
{

#ifdef JEMALLOC_DSS
	if (base_pages_alloc_dss(minsize) == false)
		return (false);

	if (minsize != 0)
#endif
	{
		if (base_pages_alloc_mmap(minsize) == false)
			return (false);
	}

	return (true);
}

void *
base_alloc(size_t size)
{
	void *ret;
	size_t csize;

	/* Round size up to nearest multiple of the cacheline size. */
	csize = CACHELINE_CEILING(size);

	malloc_mutex_lock(&base_mtx);
	/* Make sure there's enough space for the allocation. */
	if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) {
		if (base_pages_alloc(csize)) {
			malloc_mutex_unlock(&base_mtx);
			return (NULL);
		}
	}
	/* Allocate. */
	ret = base_next_addr;
	base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
	malloc_mutex_unlock(&base_mtx);

	return (ret);
}

extent_node_t *
base_node_alloc(void)
{
	extent_node_t *ret;

	malloc_mutex_lock(&base_mtx);
	if (base_nodes != NULL) {
		ret = base_nodes;
		base_nodes = *(extent_node_t **)ret;
		malloc_mutex_unlock(&base_mtx);
	} else {
		malloc_mutex_unlock(&base_mtx);
		ret = (extent_node_t *)base_alloc(sizeof(extent_node_t));
	}

	return (ret);
}

void
base_node_dealloc(extent_node_t *node)
{

	malloc_mutex_lock(&base_mtx);
	*(extent_node_t **)node = base_nodes;
	base_nodes = node;
	malloc_mutex_unlock(&base_mtx);
}

bool
base_boot(void)
{

#ifdef JEMALLOC_STATS
	base_mapped = 0;
#endif
#ifdef JEMALLOC_DSS
	/*
	 * Allocate a base chunk here, since it doesn't actually have to be
	 * chunk-aligned.  Doing this before allocating any other chunks allows
	 * the use of space that would otherwise be wasted.
	 */
	base_pages_alloc(0);
#endif
	base_nodes = NULL;
	if (malloc_mutex_init(&base_mtx))
		return (true);

	return (false);
}
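The base allocator above is a simple bump allocator: it carves cacheline-sized quanta out of whole pages and never frees them; recycled extent nodes instead go on base_nodes, an intrusive free list threaded through the nodes' own storage. A minimal self-contained model of that free-list trick (names are illustrative, not jemalloc's; malloc() stands in for base_alloc()):

#include <assert.h>
#include <stdlib.h>

typedef struct node_s { void *addr; size_t size; } node_t;

static node_t *free_nodes; /* Head of an intrusive free list. */

/* Recycle a node: reuse its first pointer-sized bytes as the "next" link. */
static void
node_dealloc(node_t *node)
{
	*(node_t **)node = free_nodes;
	free_nodes = node;
}

static node_t *
node_alloc(void)
{
	node_t *ret;

	if (free_nodes != NULL) {
		ret = free_nodes;
		free_nodes = *(node_t **)ret; /* Pop the head link. */
		return (ret);
	}
	return (malloc(sizeof(node_t))); /* Stand-in for base_alloc(). */
}

int
main(void)
{
	node_t *a = node_alloc();
	node_dealloc(a);
	assert(node_alloc() == a); /* The freed node is recycled first. */
	return (0);
}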
jemalloc/src/jemalloc_base.h (new file, 27 lines)
@@ -0,0 +1,27 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

#ifdef JEMALLOC_STATS
extern size_t base_mapped;
#endif
extern malloc_mutex_t base_mtx;

void *base_alloc(size_t size);
extent_node_t *base_node_alloc(void);
void base_node_dealloc(extent_node_t *node);
bool base_boot(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
jemalloc/src/jemalloc_chunk.c (new file, 525 lines)
@@ -0,0 +1,525 @@
#define JEMALLOC_CHUNK_C_
#include "jemalloc_internal.h"

/******************************************************************************/
/* Data. */

size_t opt_lg_chunk = LG_CHUNK_DEFAULT;

#ifdef JEMALLOC_STATS
chunk_stats_t stats_chunks;
#endif

/* Various chunk-related settings. */
size_t chunksize;
size_t chunksize_mask; /* (chunksize - 1). */
size_t chunk_npages;
size_t arena_chunk_header_npages;
size_t arena_maxclass; /* Max size class for arenas. */

#ifdef JEMALLOC_DSS
malloc_mutex_t dss_mtx;
void *dss_base;
void *dss_prev;
void *dss_max;

/*
 * Trees of chunks that were previously allocated (trees differ only in node
 * ordering).  These are used when allocating chunks, in an attempt to re-use
 * address space.  Depending on function, different tree orderings are needed,
 * which is why there are two trees with the same contents.
 */
static extent_tree_t dss_chunks_szad;
static extent_tree_t dss_chunks_ad;
#endif

/*
 * Used by chunk_alloc_mmap() to decide whether to attempt the fast path and
 * potentially avoid some system calls.  We can get away without TLS here,
 * since the state of mmap_unaligned only affects performance, rather than
 * correct function.
 */
static
#ifndef NO_TLS
    __thread
#endif
    bool mmap_unaligned
#ifndef NO_TLS
    JEMALLOC_ATTR(tls_model("initial-exec"))
#endif
    ;
/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void pages_unmap(void *addr, size_t size);
#ifdef JEMALLOC_DSS
static void *chunk_alloc_dss(size_t size);
static void *chunk_recycle_dss(size_t size, bool zero);
#endif
static void *chunk_alloc_mmap_slow(size_t size, bool unaligned);
static void *chunk_alloc_mmap(size_t size);
#ifdef JEMALLOC_DSS
static extent_node_t *chunk_dealloc_dss_record(void *chunk, size_t size);
static bool chunk_dealloc_dss(void *chunk, size_t size);
#endif
static void chunk_dealloc_mmap(void *chunk, size_t size);

/******************************************************************************/

void *
pages_map(void *addr, size_t size)
{
	void *ret;

	/*
	 * We don't use MAP_FIXED here, because it can cause the *replacement*
	 * of existing mappings, and we only want to create new mappings.
	 */
	ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
	    -1, 0);
	assert(ret != NULL);

	if (ret == MAP_FAILED)
		ret = NULL;
	else if (addr != NULL && ret != addr) {
		/*
		 * We succeeded in mapping memory, but not in the right place.
		 */
		if (munmap(ret, size) == -1) {
			char buf[STRERROR_BUF];

			strerror_r(errno, buf, sizeof(buf));
			malloc_write4("<jemalloc>", ": Error in munmap(): ",
			    buf, "\n");
			if (opt_abort)
				abort();
		}
		ret = NULL;
	}

	assert(ret == NULL || (addr == NULL && ret != addr)
	    || (addr != NULL && ret == addr));
	return (ret);
}

static void
pages_unmap(void *addr, size_t size)
{

	if (munmap(addr, size) == -1) {
		char buf[STRERROR_BUF];

		strerror_r(errno, buf, sizeof(buf));
		malloc_write4("<jemalloc>", ": Error in munmap(): ", buf, "\n");
		if (opt_abort)
			abort();
	}
}

#ifdef JEMALLOC_DSS
static void *
chunk_alloc_dss(size_t size)
{

	/*
	 * sbrk() uses a signed increment argument, so take care not to
	 * interpret a huge allocation request as a negative increment.
	 */
	if ((intptr_t)size < 0)
		return (NULL);

	malloc_mutex_lock(&dss_mtx);
	if (dss_prev != (void *)-1) {
		intptr_t incr;

		/*
		 * The loop is necessary to recover from races with other
		 * threads that are using the DSS for something other than
		 * malloc.
		 */
		do {
			void *ret;

			/* Get the current end of the DSS. */
			dss_max = sbrk(0);

			/*
			 * Calculate how much padding is necessary to
			 * chunk-align the end of the DSS.
			 */
			incr = (intptr_t)size
			    - (intptr_t)CHUNK_ADDR2OFFSET(dss_max);
			if (incr == (intptr_t)size)
				ret = dss_max;
			else {
				ret = (void *)((intptr_t)dss_max + incr);
				incr += size;
			}

			dss_prev = sbrk(incr);
			if (dss_prev == dss_max) {
				/* Success. */
				dss_max = (void *)((intptr_t)dss_prev + incr);
				malloc_mutex_unlock(&dss_mtx);
				return (ret);
			}
		} while (dss_prev != (void *)-1);
	}
	malloc_mutex_unlock(&dss_mtx);

	return (NULL);
}

static void *
chunk_recycle_dss(size_t size, bool zero)
{
	extent_node_t *node, key;

	key.addr = NULL;
	key.size = size;
	malloc_mutex_lock(&dss_mtx);
	node = extent_tree_szad_nsearch(&dss_chunks_szad, &key);
	if (node != NULL) {
		void *ret = node->addr;

		/* Remove node from the tree. */
		extent_tree_szad_remove(&dss_chunks_szad, node);
		if (node->size == size) {
			extent_tree_ad_remove(&dss_chunks_ad, node);
			base_node_dealloc(node);
		} else {
			/*
			 * Insert the remainder of node's address range as a
			 * smaller chunk.  Its position within dss_chunks_ad
			 * does not change.
			 */
			assert(node->size > size);
			node->addr = (void *)((uintptr_t)node->addr + size);
			node->size -= size;
			extent_tree_szad_insert(&dss_chunks_szad, node);
		}
		malloc_mutex_unlock(&dss_mtx);

		if (zero)
			memset(ret, 0, size);
		return (ret);
	}
	malloc_mutex_unlock(&dss_mtx);

	return (NULL);
}
#endif

static void *
chunk_alloc_mmap_slow(size_t size, bool unaligned)
{
	void *ret;
	size_t offset;

	/* Beware size_t wrap-around. */
	if (size + chunksize <= size)
		return (NULL);

	ret = pages_map(NULL, size + chunksize);
	if (ret == NULL)
		return (NULL);

	/* Clean up unneeded leading/trailing space. */
	offset = CHUNK_ADDR2OFFSET(ret);
	if (offset != 0) {
		/* Note that mmap() returned an unaligned mapping. */
		unaligned = true;

		/* Leading space. */
		pages_unmap(ret, chunksize - offset);

		ret = (void *)((uintptr_t)ret + (chunksize - offset));

		/* Trailing space. */
		pages_unmap((void *)((uintptr_t)ret + size), offset);
	} else {
		/* Trailing space only. */
		pages_unmap((void *)((uintptr_t)ret + size), chunksize);
	}

	/*
	 * If mmap() returned an aligned mapping, reset mmap_unaligned so that
	 * the next chunk_alloc_mmap() execution tries the fast allocation
	 * method.
	 */
	if (unaligned == false)
		mmap_unaligned = false;

	return (ret);
}
static void *
chunk_alloc_mmap(size_t size)
{
	void *ret;

	/*
	 * Ideally, there would be a way to specify alignment to mmap() (like
	 * NetBSD has), but in the absence of such a feature, we have to work
	 * hard to efficiently create aligned mappings.  The reliable, but
	 * slow method is to create a mapping that is over-sized, then trim the
	 * excess.  However, that always results in at least one call to
	 * pages_unmap().
	 *
	 * A more optimistic approach is to try mapping precisely the right
	 * amount, then try to append another mapping if alignment is off.  In
	 * practice, this works out well as long as the application is not
	 * interleaving mappings via direct mmap() calls.  If we do run into a
	 * situation where there is an interleaved mapping and we are unable to
	 * extend an unaligned mapping, our best option is to switch to the
	 * slow method until mmap() returns another aligned mapping.  This will
	 * tend to leave a gap in the memory map that is too small to cause
	 * later problems for the optimistic method.
	 *
	 * Another possible confounding factor is address space layout
	 * randomization (ASLR), which causes mmap(2) to disregard the
	 * requested address.  mmap_unaligned tracks whether the previous
	 * chunk_alloc_mmap() execution received any unaligned or relocated
	 * mappings, and if so, the current execution will immediately fall
	 * back to the slow method.  However, we keep track of whether the fast
	 * method would have succeeded, and if so, we make a note to try the
	 * fast method next time.
	 */

	if (mmap_unaligned == false) {
		size_t offset;

		ret = pages_map(NULL, size);
		if (ret == NULL)
			return (NULL);

		offset = CHUNK_ADDR2OFFSET(ret);
		if (offset != 0) {
			mmap_unaligned = true;
			/* Try to extend chunk boundary. */
			if (pages_map((void *)((uintptr_t)ret + size),
			    chunksize - offset) == NULL) {
				/*
				 * Extension failed.  Clean up, then revert to
				 * the reliable-but-expensive method.
				 */
				pages_unmap(ret, size);
				ret = chunk_alloc_mmap_slow(size, true);
			} else {
				/* Clean up unneeded leading space. */
				pages_unmap(ret, chunksize - offset);
				ret = (void *)((uintptr_t)ret + (chunksize -
				    offset));
			}
		}
	} else
		ret = chunk_alloc_mmap_slow(size, false);

	return (ret);
}

void *
chunk_alloc(size_t size, bool zero)
{
	void *ret;

	assert(size != 0);
	assert((size & chunksize_mask) == 0);

#ifdef JEMALLOC_DSS
	ret = chunk_recycle_dss(size, zero);
	if (ret != NULL)
		goto RETURN;

	ret = chunk_alloc_dss(size);
	if (ret != NULL)
		goto RETURN;
#endif
	ret = chunk_alloc_mmap(size);
	if (ret != NULL)
		goto RETURN;

	/* All strategies for allocation failed. */
	ret = NULL;
RETURN:
#ifdef JEMALLOC_STATS
	if (ret != NULL) {
		stats_chunks.nchunks += (size / chunksize);
		stats_chunks.curchunks += (size / chunksize);
	}
	if (stats_chunks.curchunks > stats_chunks.highchunks)
		stats_chunks.highchunks = stats_chunks.curchunks;
#endif

	assert(CHUNK_ADDR2BASE(ret) == ret);
	return (ret);
}

#ifdef JEMALLOC_DSS
static extent_node_t *
chunk_dealloc_dss_record(void *chunk, size_t size)
{
	extent_node_t *node, *prev, key;

	key.addr = (void *)((uintptr_t)chunk + size);
	node = extent_tree_ad_nsearch(&dss_chunks_ad, &key);
	/* Try to coalesce forward. */
	if (node != NULL && node->addr == key.addr) {
		/*
		 * Coalesce chunk with the following address range.  This does
		 * not change the position within dss_chunks_ad, so only
		 * remove/insert from/into dss_chunks_szad.
		 */
		extent_tree_szad_remove(&dss_chunks_szad, node);
		node->addr = chunk;
		node->size += size;
		extent_tree_szad_insert(&dss_chunks_szad, node);
	} else {
		/*
		 * Coalescing forward failed, so insert a new node.  Drop
		 * dss_mtx during node allocation, since it is possible that a
		 * new base chunk will be allocated.
		 */
		malloc_mutex_unlock(&dss_mtx);
		node = base_node_alloc();
		malloc_mutex_lock(&dss_mtx);
		if (node == NULL)
			return (NULL);
		node->addr = chunk;
		node->size = size;
		extent_tree_ad_insert(&dss_chunks_ad, node);
		extent_tree_szad_insert(&dss_chunks_szad, node);
	}

	/* Try to coalesce backward. */
	prev = extent_tree_ad_prev(&dss_chunks_ad, node);
	if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
	    chunk) {
		/*
		 * Coalesce chunk with the previous address range.  This does
		 * not change the position within dss_chunks_ad, so only
		 * remove/insert node from/into dss_chunks_szad.
		 */
		extent_tree_szad_remove(&dss_chunks_szad, prev);
		extent_tree_ad_remove(&dss_chunks_ad, prev);

		extent_tree_szad_remove(&dss_chunks_szad, node);
		node->addr = prev->addr;
		node->size += prev->size;
		extent_tree_szad_insert(&dss_chunks_szad, node);

		base_node_dealloc(prev);
	}

	return (node);
}

static bool
chunk_dealloc_dss(void *chunk, size_t size)
{

	malloc_mutex_lock(&dss_mtx);
	if ((uintptr_t)chunk >= (uintptr_t)dss_base
	    && (uintptr_t)chunk < (uintptr_t)dss_max) {
		extent_node_t *node;

		/* Try to coalesce with other unused chunks. */
		node = chunk_dealloc_dss_record(chunk, size);
		if (node != NULL) {
			chunk = node->addr;
			size = node->size;
		}

		/* Get the current end of the DSS. */
		dss_max = sbrk(0);

		/*
		 * Try to shrink the DSS if this chunk is at the end of the
		 * DSS.  The sbrk() call here is subject to a race condition
		 * with threads that use brk(2) or sbrk(2) directly, but the
		 * alternative would be to leak memory for the sake of poorly
		 * designed multi-threaded programs.
		 */
		if ((void *)((uintptr_t)chunk + size) == dss_max
		    && (dss_prev = sbrk(-(intptr_t)size)) == dss_max) {
			/* Success. */
			dss_max = (void *)((intptr_t)dss_prev - (intptr_t)size);

			if (node != NULL) {
				extent_tree_szad_remove(&dss_chunks_szad, node);
				extent_tree_ad_remove(&dss_chunks_ad, node);
				base_node_dealloc(node);
			}
			malloc_mutex_unlock(&dss_mtx);
		} else {
			malloc_mutex_unlock(&dss_mtx);
			madvise(chunk, size, MADV_DONTNEED);
		}

		return (false);
	}
	malloc_mutex_unlock(&dss_mtx);

	return (true);
}
#endif

static void
chunk_dealloc_mmap(void *chunk, size_t size)
{

	pages_unmap(chunk, size);
}

void
chunk_dealloc(void *chunk, size_t size)
{

	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
	assert(size != 0);
	assert((size & chunksize_mask) == 0);

#ifdef JEMALLOC_STATS
	stats_chunks.curchunks -= (size / chunksize);
#endif

#ifdef JEMALLOC_DSS
	if (chunk_dealloc_dss(chunk, size) == false)
		return;
#endif
	chunk_dealloc_mmap(chunk, size);
}

bool
chunk_boot(void)
{

	/* Set variables according to the value of opt_lg_chunk. */
	chunksize = (1LU << opt_lg_chunk);
	assert(chunksize >= PAGE_SIZE);
	chunksize_mask = chunksize - 1;
	chunk_npages = (chunksize >> PAGE_SHIFT);

#ifdef JEMALLOC_STATS
	memset(&stats_chunks, 0, sizeof(chunk_stats_t));
#endif

#ifdef JEMALLOC_DSS
	if (malloc_mutex_init(&dss_mtx))
		return (true);
	dss_base = sbrk(0);
	dss_prev = dss_base;
	dss_max = dss_base;
	extent_tree_szad_new(&dss_chunks_szad);
	extent_tree_ad_new(&dss_chunks_ad);
#endif

	return (false);
}
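The slow path's arithmetic is worth tracing once with hypothetical addresses, assuming the default 4 MiB chunks (opt_lg_chunk = 22): mapping size + chunksize bytes for size = 4 MiB might return ret = 0x7f3a00123000, so offset = CHUNK_ADDR2OFFSET(ret) = 0x123000. The leading chunksize - offset = 0x2dd000 bytes are unmapped, ret advances to the chunk-aligned 0x7f3a00400000, and the trailing offset = 0x123000 bytes past ret + size are unmapped, leaving exactly one aligned chunk.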
jemalloc/src/jemalloc_chunk.h (new file, 66 lines)
@@ -0,0 +1,66 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

/*
 * Size and alignment of memory chunks that are allocated by the OS's virtual
 * memory system.
 */
#define LG_CHUNK_DEFAULT 22

/* Return the chunk address for allocation address a. */
#define CHUNK_ADDR2BASE(a) \
	((void *)((uintptr_t)(a) & ~chunksize_mask))

/* Return the chunk offset of address a. */
#define CHUNK_ADDR2OFFSET(a) \
	((size_t)((uintptr_t)(a) & chunksize_mask))

/* Return the smallest chunk multiple that is >= s. */
#define CHUNK_CEILING(s) \
	(((s) + chunksize_mask) & ~chunksize_mask)

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

#ifdef JEMALLOC_STATS
/* Chunk statistics. */
extern chunk_stats_t stats_chunks;
#endif

extern size_t opt_lg_chunk;
extern size_t chunksize;
extern size_t chunksize_mask; /* (chunksize - 1). */
extern size_t chunk_npages;
extern size_t arena_chunk_header_npages;
extern size_t arena_maxclass; /* Max size class for arenas. */

#ifdef JEMALLOC_DSS
/*
 * Protects sbrk() calls.  This avoids malloc races among threads, though it
 * does not protect against races with threads that call sbrk() directly.
 */
extern malloc_mutex_t dss_mtx;
/* Base address of the DSS. */
extern void *dss_base;
/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */
extern void *dss_prev;
/* Current upper limit on DSS addresses. */
extern void *dss_max;
#endif

void *pages_map(void *addr, size_t size);
void *chunk_alloc(size_t size, bool zero);
void chunk_dealloc(void *chunk, size_t size);
bool chunk_boot(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
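With the default LG_CHUNK_DEFAULT of 22, chunksize is 4 MiB and chunksize_mask is 0x3fffff, so for a hypothetical pointer a = 0x140401234: CHUNK_ADDR2BASE(a) = 0x140400000, CHUNK_ADDR2OFFSET(a) = 0x1234, and CHUNK_CEILING(5 MiB) = 8 MiB.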
@@ -19,16 +19,7 @@
  */
 #undef JEMALLOC_PREFIX
 #if (defined(JEMALLOC_PREFIX) && defined(JEMALLOC_MANGLE))
-#undef malloc
-#undef calloc
-#undef posix_memalign
-#undef realloc
-#undef free
-#undef malloc_usable_size
-#undef malloc_tcache_flush
-#undef malloc_stats_print
-#undef malloc_options
-#undef malloc_message
+#undef JEMALLOC_P
 #endif
 
 /*
jemalloc/src/jemalloc_extent.c (new file, 41 lines)
@@ -0,0 +1,41 @@
#define JEMALLOC_EXTENT_C_
#include "jemalloc_internal.h"

/******************************************************************************/

#ifdef JEMALLOC_DSS
static inline int
extent_szad_comp(extent_node_t *a, extent_node_t *b)
{
	int ret;
	size_t a_size = a->size;
	size_t b_size = b->size;

	ret = (a_size > b_size) - (a_size < b_size);
	if (ret == 0) {
		uintptr_t a_addr = (uintptr_t)a->addr;
		uintptr_t b_addr = (uintptr_t)b->addr;

		ret = (a_addr > b_addr) - (a_addr < b_addr);
	}

	return (ret);
}

/* Wrap red-black tree macros in functions. */
rb_wrap(, extent_tree_szad_, extent_tree_t, extent_node_t, link_szad,
    extent_szad_comp)
#endif

static inline int
extent_ad_comp(extent_node_t *a, extent_node_t *b)
{
	uintptr_t a_addr = (uintptr_t)a->addr;
	uintptr_t b_addr = (uintptr_t)b->addr;

	return ((a_addr > b_addr) - (a_addr < b_addr));
}

/* Wrap red-black tree macros in functions. */
rb_wrap(, extent_tree_ad_, extent_tree_t, extent_node_t, link_ad,
    extent_ad_comp)
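Both comparators use the branch-free three-way idiom (a > b) - (a < b), which yields -1, 0, or 1 without the overflow risk of a naive subtraction such as (int)(a_size - b_size). A tiny self-contained check (illustrative, not from the commit):

#include <assert.h>
#include <stddef.h>

/* Three-way compare: -1 if a < b, 0 if equal, 1 if a > b. */
static int
cmp_size(size_t a, size_t b)
{
	return ((a > b) - (a < b));
}

int
main(void)
{
	assert(cmp_size(1, 2) == -1);
	assert(cmp_size(2, 2) == 0);
	/* A huge difference cannot overflow, unlike (int)(a - b). */
	assert(cmp_size((size_t)-1, 0) == 1);
	return (0);
}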
jemalloc/src/jemalloc_extent.h (new file, 44 lines)
@@ -0,0 +1,44 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef struct extent_node_s extent_node_t;

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

/* Tree of extents. */
struct extent_node_s {
#ifdef JEMALLOC_DSS
	/* Linkage for the size/address-ordered tree. */
	rb_node(extent_node_t) link_szad;
#endif

	/* Linkage for the address-ordered tree. */
	rb_node(extent_node_t) link_ad;

	/* Pointer to the extent that this tree node is responsible for. */
	void *addr;

	/* Total region size. */
	size_t size;
};
typedef rb_tree(extent_node_t) extent_tree_t;

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

#ifdef JEMALLOC_DSS
rb_proto(, extent_tree_szad_, extent_tree_t, extent_node_t)
#endif

rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
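Each DSS extent node is threaded into both trees at once because each ordering answers a different question: the size/address tree serves best-fit searches, and the address tree serves neighbor lookups for coalescing. A usage fragment (not compilable on its own; it simply echoes the calls made in jemalloc_chunk.c above):

extent_node_t key, *node, *prev;

/* Best fit, as in chunk_recycle_dss(): smallest extent with size >= size. */
key.addr = NULL; /* Address only breaks ties among equal sizes. */
key.size = size;
node = extent_tree_szad_nsearch(&dss_chunks_szad, &key);

/*
 * Coalescing, as in chunk_dealloc_dss_record(): find the range just after
 * the freed chunk, then its predecessor for backward coalescing.
 */
key.addr = (void *)((uintptr_t)chunk + size);
node = extent_tree_ad_nsearch(&dss_chunks_ad, &key);
prev = extent_tree_ad_prev(&dss_chunks_ad, node);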
jemalloc/src/jemalloc_huge.c (new file, 257 lines)
@@ -0,0 +1,257 @@
#define JEMALLOC_HUGE_C_
#include "jemalloc_internal.h"

/******************************************************************************/
/* Data. */

#ifdef JEMALLOC_STATS
uint64_t huge_nmalloc;
uint64_t huge_ndalloc;
size_t huge_allocated;
#endif

malloc_mutex_t huge_mtx;

/******************************************************************************/

/* Tree of chunks that are stand-alone huge allocations. */
static extent_tree_t huge;

void *
huge_malloc(size_t size, bool zero)
{
	void *ret;
	size_t csize;
	extent_node_t *node;

	/* Allocate one or more contiguous chunks for this request. */

	csize = CHUNK_CEILING(size);
	if (csize == 0) {
		/* size is large enough to cause size_t wrap-around. */
		return (NULL);
	}

	/* Allocate an extent node with which to track the chunk. */
	node = base_node_alloc();
	if (node == NULL)
		return (NULL);

	ret = chunk_alloc(csize, zero);
	if (ret == NULL) {
		base_node_dealloc(node);
		return (NULL);
	}

	/* Insert node into huge. */
	node->addr = ret;
	node->size = csize;

	malloc_mutex_lock(&huge_mtx);
	extent_tree_ad_insert(&huge, node);
#ifdef JEMALLOC_STATS
	huge_nmalloc++;
	huge_allocated += csize;
#endif
	malloc_mutex_unlock(&huge_mtx);

#ifdef JEMALLOC_FILL
	if (zero == false) {
		if (opt_junk)
			memset(ret, 0xa5, csize);
		else if (opt_zero)
			memset(ret, 0, csize);
	}
#endif

	return (ret);
}

/* Only handles large allocations that require more than chunk alignment. */
void *
huge_palloc(size_t alignment, size_t size)
{
	void *ret;
	size_t alloc_size, chunk_size, offset;
	extent_node_t *node;

	/*
	 * This allocation requires alignment that is even larger than chunk
	 * alignment.  This means that huge_malloc() isn't good enough.
	 *
	 * Allocate almost twice as many chunks as are demanded by the size or
	 * alignment, in order to assure the alignment can be achieved, then
	 * unmap leading and trailing chunks.
	 */
	assert(alignment >= chunksize);

	chunk_size = CHUNK_CEILING(size);

	if (size >= alignment)
		alloc_size = chunk_size + alignment - chunksize;
	else
		alloc_size = (alignment << 1) - chunksize;

	/* Allocate an extent node with which to track the chunk. */
	node = base_node_alloc();
	if (node == NULL)
		return (NULL);

	ret = chunk_alloc(alloc_size, false);
	if (ret == NULL) {
		base_node_dealloc(node);
		return (NULL);
	}

	offset = (uintptr_t)ret & (alignment - 1);
	assert((offset & chunksize_mask) == 0);
	assert(offset < alloc_size);
	if (offset == 0) {
		/* Trim trailing space. */
		chunk_dealloc((void *)((uintptr_t)ret + chunk_size), alloc_size
		    - chunk_size);
	} else {
		size_t trailsize;

		/* Trim leading space. */
		chunk_dealloc(ret, alignment - offset);

		ret = (void *)((uintptr_t)ret + (alignment - offset));

		trailsize = alloc_size - (alignment - offset) - chunk_size;
		if (trailsize != 0) {
			/* Trim trailing space. */
			assert(trailsize < alloc_size);
			chunk_dealloc((void *)((uintptr_t)ret + chunk_size),
			    trailsize);
		}
	}

	/* Insert node into huge. */
	node->addr = ret;
	node->size = chunk_size;

	malloc_mutex_lock(&huge_mtx);
	extent_tree_ad_insert(&huge, node);
#ifdef JEMALLOC_STATS
	huge_nmalloc++;
	huge_allocated += chunk_size;
#endif
	malloc_mutex_unlock(&huge_mtx);

#ifdef JEMALLOC_FILL
	if (opt_junk)
		memset(ret, 0xa5, chunk_size);
	else if (opt_zero)
		memset(ret, 0, chunk_size);
#endif

	return (ret);
}

void *
huge_ralloc(void *ptr, size_t size, size_t oldsize)
{
	void *ret;
	size_t copysize;

	/* Avoid moving the allocation if the size class would not change. */
	if (oldsize > arena_maxclass &&
	    CHUNK_CEILING(size) == CHUNK_CEILING(oldsize)) {
#ifdef JEMALLOC_FILL
		if (opt_junk && size < oldsize) {
			memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize
			    - size);
		} else if (opt_zero && size > oldsize) {
			memset((void *)((uintptr_t)ptr + oldsize), 0, size
			    - oldsize);
		}
#endif
		return (ptr);
	}

	/*
	 * If we get here, then size and oldsize are different enough that we
	 * need to use a different size class.  In that case, fall back to
	 * allocating new space and copying.
	 */
	ret = huge_malloc(size, false);
	if (ret == NULL)
		return (NULL);

	copysize = (size < oldsize) ? size : oldsize;
	memcpy(ret, ptr, copysize);
	idalloc(ptr);
	return (ret);
}

void
huge_dalloc(void *ptr)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = ptr;
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);
	assert(node->addr == ptr);
	extent_tree_ad_remove(&huge, node);

#ifdef JEMALLOC_STATS
	huge_ndalloc++;
	huge_allocated -= node->size;
#endif

	malloc_mutex_unlock(&huge_mtx);

	/* Unmap chunk. */
#ifdef JEMALLOC_FILL
#ifdef JEMALLOC_DSS
	if (opt_junk)
		memset(node->addr, 0x5a, node->size);
#endif
#endif
	chunk_dealloc(node->addr, node->size);

	base_node_dealloc(node);
}

size_t
huge_salloc(const void *ptr)
{
	size_t ret;
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	ret = node->size;

	malloc_mutex_unlock(&huge_mtx);

	return (ret);
}

bool
huge_boot(void)
{

	/* Initialize chunks data. */
	if (malloc_mutex_init(&huge_mtx))
		return (true);
	extent_tree_ad_new(&huge);

#ifdef JEMALLOC_STATS
	huge_nmalloc = 0;
	huge_ndalloc = 0;
	huge_allocated = 0;
#endif

	return (false);
}
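The over-allocation sizing in huge_palloc() is easiest to verify with numbers, assuming the default 4 MiB chunksize: for a hypothetical huge_palloc(alignment = 8 MiB, size = 4 MiB), size < alignment gives alloc_size = (8 MiB << 1) - 4 MiB = 12 MiB. The chunk-aligned ret then has offset = ret & (alignment - 1) equal to either 0 or 4 MiB. With offset 0, the trailing 8 MiB is trimmed; with offset 4 MiB, a 4 MiB lead (alignment - offset) is trimmed, the now 8 MiB-aligned chunk is kept, and trailsize = 12 - 4 - 4 = 4 MiB is trimmed. Either way exactly one suitably aligned chunk of chunk_size bytes survives.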
jemalloc/src/jemalloc_huge.h (new file, 34 lines)
@ -0,0 +1,34 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

#ifdef JEMALLOC_STATS
/* Huge allocation statistics. */
extern uint64_t huge_nmalloc;
extern uint64_t huge_ndalloc;
extern size_t huge_allocated;
#endif

/* Protects chunk-related data structures. */
extern malloc_mutex_t huge_mtx;

void *huge_malloc(size_t size, bool zero);
void *huge_palloc(size_t alignment, size_t size);
void *huge_ralloc(void *ptr, size_t size, size_t oldsize);
void huge_dalloc(void *ptr);
size_t huge_salloc(const void *ptr);
bool huge_boot(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/

jemalloc/src/jemalloc_internal.h (new file)
@ -0,0 +1,419 @@
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/uio.h>

#include <errno.h>
#include <limits.h>
#ifndef SIZE_T_MAX
# define SIZE_T_MAX SIZE_MAX
#endif
#include <pthread.h>
#include <sched.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <fcntl.h>
#include <pthread.h>

#define JEMALLOC_MANGLE
#include "jemalloc.h"

#ifdef JEMALLOC_LAZY_LOCK
#include <dlfcn.h>
#endif

#include "rb.h"
#if (defined(JEMALLOC_TCACHE) && defined(JEMALLOC_STATS))
#include "qr.h"
#include "ql.h"
#endif

extern void (*JEMALLOC_P(malloc_message))(const char *p1, const char *p2,
    const char *p3, const char *p4);

/*
 * Define a custom assert() in order to reduce the chances of deadlock during
 * assertion failure.
 */
#ifdef JEMALLOC_DEBUG
# define assert(e) do { \
    if (!(e)) { \
        char line_buf[UMAX2S_BUFSIZE]; \
        malloc_write4("<jemalloc>: ", __FILE__, ":", \
            umax2s(__LINE__, 10, line_buf)); \
        malloc_write4(": Failed assertion: ", "\"", #e, \
            "\"\n"); \
        abort(); \
    } \
} while (0)
#else
#define assert(e)
#endif

/*
 * jemalloc can conceptually be broken into components (arena, tcache, trace,
 * etc.), but there are circular dependencies that cannot be broken without
 * substantial performance degradation.  In order to reduce the effect on
 * visual code flow, read the header files in multiple passes, with one of the
 * following cpp variables defined during each pass:
 *
 *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque data
 *                        types.
 *   JEMALLOC_H_STRUCTS : Data structures.
 *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
 *   JEMALLOC_H_INLINES : Inline functions.
 */
/******************************************************************************/
#define JEMALLOC_H_TYPES

#ifndef __DECONST
# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
#endif

#ifdef JEMALLOC_DEBUG
   /* Disable inlining to make debugging easier. */
# define JEMALLOC_INLINE
# define inline
#else
# define JEMALLOC_ENABLE_INLINE
# define JEMALLOC_INLINE static inline
#endif

/* Size of stack-allocated buffer passed to strerror_r(). */
#define STRERROR_BUF 64

/* Minimum alignment of allocations is 2^LG_QUANTUM bytes. */
#ifdef __i386__
# define LG_QUANTUM 4
#endif
#ifdef __ia64__
# define LG_QUANTUM 4
#endif
#ifdef __alpha__
# define LG_QUANTUM 4
#endif
#ifdef __sparc__
# define LG_QUANTUM 4
#endif
#ifdef __amd64__
# define LG_QUANTUM 4
#endif
#ifdef __arm__
# define LG_QUANTUM 3
#endif
#ifdef __mips__
# define LG_QUANTUM 3
#endif
#ifdef __powerpc__
# define LG_QUANTUM 4
#endif
#ifdef __s390x__
# define LG_QUANTUM 4
#endif

#define QUANTUM ((size_t)(1U << LG_QUANTUM))
#define QUANTUM_MASK (QUANTUM - 1)

/* Return the smallest quantum multiple that is >= a. */
#define QUANTUM_CEILING(a) \
    (((a) + QUANTUM_MASK) & ~QUANTUM_MASK)

#define SIZEOF_PTR (1U << LG_SIZEOF_PTR)

/* We can't use TLS in non-PIC programs, since TLS relies on loader magic. */
#if (!defined(PIC) && !defined(NO_TLS))
# define NO_TLS
#endif

/*
 * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
 * In addition, this controls the spacing of cacheline-spaced size classes.
 */
#define LG_CACHELINE 6
#define CACHELINE ((size_t)(1U << LG_CACHELINE))
#define CACHELINE_MASK (CACHELINE - 1)

/* Return the smallest cacheline multiple that is >= s. */
#define CACHELINE_CEILING(s) \
    (((s) + CACHELINE_MASK) & ~CACHELINE_MASK)

/*
 * Page size.  STATIC_PAGE_SHIFT is determined by the configure script.  If
 * DYNAMIC_PAGE_SHIFT is enabled, only use the STATIC_PAGE_* macros where
 * compile-time values are required for the purposes of defining data
 * structures.
 */
#define STATIC_PAGE_SIZE ((size_t)(1U << STATIC_PAGE_SHIFT))
#define STATIC_PAGE_MASK ((size_t)(STATIC_PAGE_SIZE - 1))

#ifdef DYNAMIC_PAGE_SHIFT
# define PAGE_SHIFT lg_pagesize
# define PAGE_SIZE pagesize
# define PAGE_MASK pagesize_mask
#else
# define PAGE_SHIFT STATIC_PAGE_SHIFT
# define PAGE_SIZE STATIC_PAGE_SIZE
# define PAGE_MASK STATIC_PAGE_MASK
#endif

/* Return the smallest pagesize multiple that is >= s. */
#define PAGE_CEILING(s) \
    (((s) + PAGE_MASK) & ~PAGE_MASK)

#include "jemalloc_stats.h"
#include "jemalloc_mutex.h"
#include "jemalloc_extent.h"
#include "jemalloc_arena.h"
#include "jemalloc_base.h"
#include "jemalloc_chunk.h"
#include "jemalloc_huge.h"
#include "jemalloc_tcache.h"
#include "jemalloc_trace.h"

#undef JEMALLOC_H_TYPES
/******************************************************************************/
#define JEMALLOC_H_STRUCTS

#include "jemalloc_stats.h"
#include "jemalloc_mutex.h"
#include "jemalloc_extent.h"
#include "jemalloc_arena.h"
#include "jemalloc_base.h"
#include "jemalloc_chunk.h"
#include "jemalloc_huge.h"
#include "jemalloc_tcache.h"
#include "jemalloc_trace.h"

#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define JEMALLOC_H_EXTERNS

extern bool opt_abort;
#ifdef JEMALLOC_FILL
extern bool opt_junk;
#endif
#ifdef JEMALLOC_SYSV
extern bool opt_sysv;
#endif
#ifdef JEMALLOC_XMALLOC
extern bool opt_xmalloc;
#endif
#ifdef JEMALLOC_FILL
extern bool opt_zero;
#endif

#ifdef DYNAMIC_PAGE_SHIFT
extern size_t pagesize;
extern size_t pagesize_mask;
extern size_t lg_pagesize;
#endif

/* Number of CPUs. */
extern unsigned ncpus;

#ifndef NO_TLS
/*
 * Map of pthread_self() --> arenas[???], used for selecting an arena to use
 * for allocations.
 */
extern __thread arena_t *arenas_map JEMALLOC_ATTR(tls_model("initial-exec"));
#endif
/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 */
extern arena_t **arenas;
extern unsigned narenas;

arena_t *arenas_extend(unsigned ind);
#ifndef NO_TLS
arena_t *choose_arena_hard(void);
#endif

#include "jemalloc_stats.h"
#include "jemalloc_mutex.h"
#include "jemalloc_extent.h"
#include "jemalloc_arena.h"
#include "jemalloc_base.h"
#include "jemalloc_chunk.h"
#include "jemalloc_huge.h"
#include "jemalloc_tcache.h"
#include "jemalloc_trace.h"

#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define JEMALLOC_H_INLINES

#include "jemalloc_stats.h"
#include "jemalloc_mutex.h"
#include "jemalloc_extent.h"
#include "jemalloc_base.h"
#include "jemalloc_chunk.h"
#include "jemalloc_huge.h"

#ifndef JEMALLOC_ENABLE_INLINE
void malloc_write4(const char *p1, const char *p2, const char *p3,
    const char *p4);
arena_t *choose_arena(void);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
/*
 * Wrapper around malloc_message() that avoids the need for
 * JEMALLOC_P(malloc_message)(...) throughout the code.
 */
JEMALLOC_INLINE void
malloc_write4(const char *p1, const char *p2, const char *p3, const char *p4)
{

    JEMALLOC_P(malloc_message)(p1, p2, p3, p4);
}

/*
 * Choose an arena based on a per-thread value (fast-path code, calls slow-path
 * code if necessary).
 */
JEMALLOC_INLINE arena_t *
choose_arena(void)
{
    arena_t *ret;

    /*
     * We can only use TLS if this is a PIC library, since for the static
     * library version, libc's malloc is used by TLS allocation, which
     * introduces a bootstrapping issue.
     */
#ifndef NO_TLS
    ret = arenas_map;
    if (ret == NULL) {
        ret = choose_arena_hard();
        assert(ret != NULL);
    }
#else
    if (isthreaded && narenas > 1) {
        unsigned long ind;

        /*
         * Hash pthread_self() to one of the arenas.  There is a prime
         * number of arenas, so this has a reasonable chance of
         * working.  Even so, the hashing can be easily thwarted by
         * inconvenient pthread_self() values.  Without specific
         * knowledge of how pthread_self() calculates values, we can't
         * easily do much better than this.
         */
        ind = (unsigned long) pthread_self() % narenas;

        /*
         * Optimistically assume that arenas[ind] has been initialized.
         * At worst, we find out that some other thread has already
         * done so, after acquiring the lock in preparation.  Note that
         * this lazy locking also has the effect of lazily forcing
         * cache coherency; without the lock acquisition, there's no
         * guarantee that modification of arenas[ind] by another thread
         * would be seen on this CPU for an arbitrary amount of time.
         *
         * In general, this approach to modifying a synchronized value
         * isn't a good idea, but in this case we only ever modify the
         * value once, so things work out well.
         */
        ret = arenas[ind];
        if (ret == NULL) {
            /*
             * Avoid races with another thread that may have already
             * initialized arenas[ind].
             */
            malloc_mutex_lock(&arenas_lock);
            if (arenas[ind] == NULL)
                ret = arenas_extend((unsigned)ind);
            else
                ret = arenas[ind];
            malloc_mutex_unlock(&arenas_lock);
        }
    } else
        ret = arenas[0];
#endif

    assert(ret != NULL);
    return (ret);
}
#endif

#include "jemalloc_tcache.h"
#include "jemalloc_arena.h"
#include "jemalloc_trace.h"

#ifndef JEMALLOC_ENABLE_INLINE
void *imalloc(size_t size);
void *icalloc(size_t size);
void idalloc(void *ptr);
size_t isalloc(const void *ptr);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_INLINE void *
imalloc(size_t size)
{

    assert(size != 0);

    if (size <= arena_maxclass)
        return (arena_malloc(size, false));
    else
        return (huge_malloc(size, false));
}

JEMALLOC_INLINE void *
icalloc(size_t size)
{

    if (size <= arena_maxclass)
        return (arena_malloc(size, true));
    else
        return (huge_malloc(size, true));
}

JEMALLOC_INLINE void
idalloc(void *ptr)
{
    arena_chunk_t *chunk;

    assert(ptr != NULL);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    if (chunk != ptr)
        arena_dalloc(chunk->arena, chunk, ptr);
    else
        huge_dalloc(ptr);
}

JEMALLOC_INLINE size_t
isalloc(const void *ptr)
{
    size_t ret;
    arena_chunk_t *chunk;

    assert(ptr != NULL);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    if (chunk != ptr) {
        /* Region. */
        assert(chunk->arena->magic == ARENA_MAGIC);

        ret = arena_salloc(ptr);
    } else
        ret = huge_salloc(ptr);

    return (ret);
}
#endif

#undef JEMALLOC_H_INLINES
/******************************************************************************/
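The four-pass inclusion scheme described in the comment block near the top of this header is easier to see in miniature. The sketch below is a hypothetical pair of files (foo.h and its consumer are illustrations, not from the commit) showing how one header can expose its types, structs, externs, and inlines in dependency-safe waves.

/* foo.h -- hypothetical header following the multi-pass convention. */
#ifdef JEMALLOC_H_TYPES
typedef struct foo_s foo_t;     /* pass 1: forward-declared types only */
#endif
#ifdef JEMALLOC_H_STRUCTS
struct foo_s {                  /* pass 2: full layout; may use types */
    int refcount;               /* from any header read in pass 1 */
};
#endif
#ifdef JEMALLOC_H_EXTERNS
void foo_boot(void);            /* pass 3: prototypes and extern data */
#endif
#ifdef JEMALLOC_H_INLINES
static inline int               /* pass 4: inlines; may call anything */
foo_live(foo_t *foo)            /* declared in pass 3 of any header */
{
    return (foo->refcount > 0);
}
#endif

/* consumer.c -- reads foo.h once per pass, exactly as
 * jemalloc_internal.h above does for its component headers. */
#define JEMALLOC_H_TYPES
#include "foo.h"
#undef JEMALLOC_H_TYPES
#define JEMALLOC_H_STRUCTS
#include "foo.h"
#undef JEMALLOC_H_STRUCTS
#define JEMALLOC_H_EXTERNS
#include "foo.h"
#undef JEMALLOC_H_EXTERNS
#define JEMALLOC_H_INLINES
#include "foo.h"
#undef JEMALLOC_H_INLINES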

jemalloc/src/jemalloc_mutex.c (new file)
@ -0,0 +1,71 @@
#define JEMALLOC_MUTEX_C_
#include "jemalloc_internal.h"

/******************************************************************************/
/* Data. */

#ifdef JEMALLOC_LAZY_LOCK
bool isthreaded = false;
#endif

#ifdef JEMALLOC_LAZY_LOCK
static void pthread_create_once(void);
#endif

/******************************************************************************/
/*
 * We intercept pthread_create() calls in order to toggle isthreaded if the
 * process goes multi-threaded.
 */

#ifdef JEMALLOC_LAZY_LOCK
static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *,
    void *(*)(void *), void *__restrict);

static void
pthread_create_once(void)
{

    pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create");
    if (pthread_create_fptr == NULL) {
        malloc_write4("<jemalloc>",
            ": Error in dlsym(RTLD_NEXT, \"pthread_create\")\n", "",
            "");
        abort();
    }

    isthreaded = true;
}

JEMALLOC_ATTR(visibility("default"))
int
pthread_create(pthread_t *__restrict thread,
    const pthread_attr_t *__restrict attr, void *(*start_routine)(void *),
    void *__restrict arg)
{
    static pthread_once_t once_control = PTHREAD_ONCE_INIT;

    pthread_once(&once_control, pthread_create_once);

    return (pthread_create_fptr(thread, attr, start_routine, arg));
}
#endif

/******************************************************************************/

bool
malloc_mutex_init(malloc_mutex_t *mutex)
{
    pthread_mutexattr_t attr;

    if (pthread_mutexattr_init(&attr) != 0)
        return (true);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
    if (pthread_mutex_init(mutex, &attr) != 0) {
        pthread_mutexattr_destroy(&attr);
        return (true);
    }
    pthread_mutexattr_destroy(&attr);

    return (false);
}

jemalloc/src/jemalloc_mutex.h (new file)
@ -0,0 +1,50 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef pthread_mutex_t malloc_mutex_t;

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

#ifdef JEMALLOC_LAZY_LOCK
extern bool isthreaded;
#else
# define isthreaded true
#endif

bool malloc_mutex_init(malloc_mutex_t *mutex);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
void malloc_mutex_lock(malloc_mutex_t *mutex);
void malloc_mutex_unlock(malloc_mutex_t *mutex);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_))
JEMALLOC_INLINE void
malloc_mutex_lock(malloc_mutex_t *mutex)
{

    if (isthreaded)
        pthread_mutex_lock(mutex);
}

JEMALLOC_INLINE void
malloc_mutex_unlock(malloc_mutex_t *mutex)
{

    if (isthreaded)
        pthread_mutex_unlock(mutex);
}
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
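The point of the isthreaded gate in the inlines above is that a single-threaded process never pays for pthread mutex calls; the interposed pthread_create() in jemalloc_mutex.c flips the flag the first time a thread is created. A minimal standalone sketch of the same idea (the ex_* names are illustrative, not from the commit):

/* Sketch (not part of the commit): mutex operations stay no-ops until
 * the process is known to have created a second thread. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static bool ex_isthreaded = false;  /* flipped by a pthread_create hook */
static pthread_mutex_t ex_mtx = PTHREAD_MUTEX_INITIALIZER;

static inline void
ex_lock(void)
{
    if (ex_isthreaded)
        pthread_mutex_lock(&ex_mtx);
}

static inline void
ex_unlock(void)
{
    if (ex_isthreaded)
        pthread_mutex_unlock(&ex_mtx);
}

int
main(void)
{
    ex_lock();      /* no-op here: process is still single-threaded */
    printf("isthreaded=%d\n", ex_isthreaded);
    ex_unlock();
    return 0;
}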

jemalloc/src/jemalloc_stats.c (new file)
@ -0,0 +1,248 @@
#define JEMALLOC_STATS_C_
#include "jemalloc_internal.h"

/******************************************************************************/
/* Data. */

bool opt_stats_print = false;

/******************************************************************************/

/*
 * We don't want to depend on vsnprintf() for production builds, since that can
 * cause unnecessary bloat for static binaries.  umax2s() provides minimal
 * integer printing functionality, so that malloc_printf() use can be limited
 * to JEMALLOC_STATS code.
 */
char *
umax2s(uintmax_t x, unsigned base, char *s)
{
    unsigned i;

    i = UMAX2S_BUFSIZE - 1;
    s[i] = '\0';
    switch (base) {
    case 10:
        do {
            i--;
            s[i] = "0123456789"[x % 10];
            x /= 10;
        } while (x > 0);
        break;
    case 16:
        do {
            i--;
            s[i] = "0123456789abcdef"[x & 0xf];
            x >>= 4;
        } while (x > 0);
        break;
    default:
        do {
            i--;
            s[i] = "0123456789abcdefghijklmnopqrstuvwxyz"[x % base];
            x /= base;
        } while (x > 0);
    }

    return (&s[i]);
}

#ifdef JEMALLOC_STATS
/*
 * Print to stderr in such a way as to (hopefully) avoid memory allocation.
 */
void
malloc_printf(const char *format, ...)
{
    char buf[4096];
    va_list ap;

    va_start(ap, format);
    vsnprintf(buf, sizeof(buf), format, ap);
    va_end(ap);
    malloc_write4(buf, "", "", "");
}
#endif

JEMALLOC_ATTR(visibility("default"))
void
JEMALLOC_P(malloc_stats_print)(const char *opts)
{
    char s[UMAX2S_BUFSIZE];
    bool general = true;
    bool bins = true;
    bool large = true;

    if (opts != NULL) {
        unsigned i;

        for (i = 0; opts[i] != '\0'; i++) {
            switch (opts[i]) {
            case 'g':
                general = false;
                break;
            case 'b':
                bins = false;
                break;
            case 'l':
                large = false;
                break;
            default:;
            }
        }
    }

    malloc_write4("___ Begin jemalloc statistics ___\n", "", "", "");
    if (general) {
        malloc_write4("Assertions ",
#ifdef NDEBUG
            "disabled",
#else
            "enabled",
#endif
            "\n", "");
        malloc_write4("Boolean JEMALLOC_OPTIONS: ",
            opt_abort ? "A" : "a", "", "");
#ifdef JEMALLOC_FILL
        malloc_write4(opt_junk ? "J" : "j", "", "", "");
#endif
        malloc_write4("P", "", "", "");
#ifdef JEMALLOC_TCACHE
        malloc_write4(opt_tcache_sort ? "S" : "s", "", "", "");
#endif
#ifdef JEMALLOC_TRACE
        malloc_write4(opt_trace ? "T" : "t", "", "", "");
#endif
#ifdef JEMALLOC_SYSV
        malloc_write4(opt_sysv ? "V" : "v", "", "", "");
#endif
#ifdef JEMALLOC_XMALLOC
        malloc_write4(opt_xmalloc ? "X" : "x", "", "", "");
#endif
#ifdef JEMALLOC_FILL
        malloc_write4(opt_zero ? "Z" : "z", "", "", "");
#endif
        malloc_write4("\n", "", "", "");

        malloc_write4("CPUs: ", umax2s(ncpus, 10, s), "\n", "");
        malloc_write4("Max arenas: ", umax2s(narenas, 10, s), "\n", "");
        malloc_write4("Pointer size: ", umax2s(sizeof(void *), 10, s),
            "\n", "");
        malloc_write4("Quantum size: ", umax2s(QUANTUM, 10, s), "\n",
            "");
        malloc_write4("Cacheline size (assumed): ",
            umax2s(CACHELINE, 10, s), "\n", "");
        malloc_write4("Subpage spacing: ", umax2s(SUBPAGE, 10, s),
            "\n", "");
        malloc_write4("Medium spacing: ", umax2s((1U << lg_mspace), 10,
            s), "\n", "");
#ifdef JEMALLOC_TINY
        malloc_write4("Tiny 2^n-spaced sizes: [", umax2s((1U <<
            LG_TINY_MIN), 10, s), "..", "");
        malloc_write4(umax2s((qspace_min >> 1), 10, s), "]\n", "", "");
#endif
        malloc_write4("Quantum-spaced sizes: [", umax2s(qspace_min, 10,
            s), "..", "");
        malloc_write4(umax2s(qspace_max, 10, s), "]\n", "", "");
        malloc_write4("Cacheline-spaced sizes: [",
            umax2s(cspace_min, 10, s), "..", "");
        malloc_write4(umax2s(cspace_max, 10, s), "]\n", "", "");
        malloc_write4("Subpage-spaced sizes: [", umax2s(sspace_min, 10,
            s), "..", "");
        malloc_write4(umax2s(sspace_max, 10, s), "]\n", "", "");
        malloc_write4("Medium sizes: [", umax2s(medium_min, 10, s),
            "..", "");
        malloc_write4(umax2s(medium_max, 10, s), "]\n", "", "");
        if (opt_lg_dirty_mult >= 0) {
            malloc_write4(
                "Min active:dirty page ratio per arena: ",
                umax2s((1U << opt_lg_dirty_mult), 10, s), ":1\n",
                "");
        } else {
            malloc_write4(
                "Min active:dirty page ratio per arena: N/A\n",
                "", "", "");
        }
#ifdef JEMALLOC_TCACHE
        malloc_write4("Thread cache slots per size class: ",
            tcache_nslots ? umax2s(tcache_nslots, 10, s) : "N/A",
            "\n", "");
        malloc_write4("Thread cache GC sweep interval: ",
            (tcache_nslots && tcache_gc_incr > 0) ?
            umax2s((1U << opt_lg_tcache_gc_sweep), 10, s) : "N/A",
            "", "");
        malloc_write4(" (increment interval: ",
            (tcache_nslots && tcache_gc_incr > 0) ?
            umax2s(tcache_gc_incr, 10, s) : "N/A",
            ")\n", "");
#endif
        malloc_write4("Chunk size: ", umax2s(chunksize, 10, s), "", "");
        malloc_write4(" (2^", umax2s(opt_lg_chunk, 10, s), ")\n", "");
    }

#ifdef JEMALLOC_STATS
    {
        size_t allocated, mapped;
        unsigned i;
        arena_t *arena;

        /* Calculate and print allocated/mapped stats. */

        /* arenas. */
        for (i = 0, allocated = 0; i < narenas; i++) {
            if (arenas[i] != NULL) {
                malloc_mutex_lock(&arenas[i]->lock);
                allocated += arenas[i]->stats.allocated_small;
                allocated += arenas[i]->stats.allocated_large;
                malloc_mutex_unlock(&arenas[i]->lock);
            }
        }

        /* huge/base. */
        malloc_mutex_lock(&huge_mtx);
        allocated += huge_allocated;
        mapped = stats_chunks.curchunks * chunksize;
        malloc_mutex_unlock(&huge_mtx);

        malloc_mutex_lock(&base_mtx);
        mapped += base_mapped;
        malloc_mutex_unlock(&base_mtx);

        malloc_printf("Allocated: %zu, mapped: %zu\n", allocated,
            mapped);

        /* Print chunk stats. */
        {
            chunk_stats_t chunks_stats;

            malloc_mutex_lock(&huge_mtx);
            chunks_stats = stats_chunks;
            malloc_mutex_unlock(&huge_mtx);

            malloc_printf("chunks: nchunks "
                "highchunks curchunks\n");
            malloc_printf(" %13llu%13lu%13lu\n",
                chunks_stats.nchunks, chunks_stats.highchunks,
                chunks_stats.curchunks);
        }

        /* Print huge stats. */
        malloc_printf(
            "huge: nmalloc ndalloc allocated\n");
        malloc_printf(" %12llu %12llu %12zu\n", huge_nmalloc,
            huge_ndalloc, huge_allocated);

        /* Print stats for each arena. */
        for (i = 0; i < narenas; i++) {
            arena = arenas[i];
            if (arena != NULL) {
                malloc_printf("\narenas[%u]:\n", i);
                malloc_mutex_lock(&arena->lock);
                arena_stats_print(arena, bins, large);
                malloc_mutex_unlock(&arena->lock);
            }
        }
    }
#endif /* #ifdef JEMALLOC_STATS */
    malloc_write4("--- End jemalloc statistics ---\n", "", "", "");
}
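One subtlety of umax2s() above deserves a worked example: it fills the caller's buffer from the end and returns a pointer to the first digit, not to the buffer's start, so callers must use the return value. A standalone sketch with an assumed simplified copy of the function (ex_* names are illustrative):

/* Sketch (not from the commit): umax2s()-style conversion fills the
 * buffer backward; the return value -- not the buffer start -- is the
 * string.  This copy handles bases up to 16 only. */
#include <stdio.h>
#include <stdint.h>

#define EX_BUFSIZE 65                   /* mirrors UMAX2S_BUFSIZE */

static char *
ex_umax2s(uintmax_t x, unsigned base, char *s)
{
    unsigned i = EX_BUFSIZE - 1;

    s[i] = '\0';
    do {
        i--;
        s[i] = "0123456789abcdef"[x % base];
        x /= base;
    } while (x > 0);
    return (&s[i]);
}

int
main(void)
{
    char buf[EX_BUFSIZE];

    printf("%s\n", ex_umax2s(4096, 10, buf));   /* prints "4096" */
    printf("%s\n", ex_umax2s(4096, 16, buf));   /* prints "1000" */
    return 0;
}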

jemalloc/src/jemalloc_stats.h (new file)
@ -0,0 +1,142 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

#define UMAX2S_BUFSIZE 65

#ifdef JEMALLOC_STATS
typedef struct tcache_bin_stats_s tcache_bin_stats_t;
typedef struct malloc_bin_stats_s malloc_bin_stats_t;
typedef struct malloc_large_stats_s malloc_large_stats_t;
typedef struct arena_stats_s arena_stats_t;
typedef struct chunk_stats_s chunk_stats_t;
#endif

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_STATS
#ifdef JEMALLOC_H_STRUCTS

#ifdef JEMALLOC_TCACHE
struct tcache_bin_stats_s {
    /*
     * Number of allocation requests that corresponded to the size of this
     * bin.
     */
    uint64_t nrequests;
};
#endif

struct malloc_bin_stats_s {
    /*
     * Number of allocation requests that corresponded to the size of this
     * bin.
     */
    uint64_t nrequests;

#ifdef JEMALLOC_TCACHE
    /* Number of tcache fills from this bin. */
    uint64_t nfills;

    /* Number of tcache flushes to this bin. */
    uint64_t nflushes;
#endif

    /* Total number of runs created for this bin's size class. */
    uint64_t nruns;

    /*
     * Total number of runs reused by extracting them from the runs tree for
     * this bin's size class.
     */
    uint64_t reruns;

    /* High-water mark for this bin. */
    size_t highruns;

    /* Current number of runs in this bin. */
    size_t curruns;
};

struct malloc_large_stats_s {
    /*
     * Number of allocation requests that corresponded to this size class.
     */
    uint64_t nrequests;

    /* High-water mark for this size class. */
    size_t highruns;

    /* Current number of runs of this size class. */
    size_t curruns;
};

struct arena_stats_s {
    /* Number of bytes currently mapped. */
    size_t mapped;

    /*
     * Total number of purge sweeps, total number of madvise calls made,
     * and total pages purged in order to keep dirty unused memory under
     * control.
     */
    uint64_t npurge;
    uint64_t nmadvise;
    uint64_t purged;

    /* Per-size-category statistics. */
    size_t allocated_small;
    uint64_t nmalloc_small;
    uint64_t ndalloc_small;

    size_t allocated_medium;
    uint64_t nmalloc_medium;
    uint64_t ndalloc_medium;

    size_t allocated_large;
    uint64_t nmalloc_large;
    uint64_t ndalloc_large;

    /*
     * One element for each possible size class, including sizes that
     * overlap with bin size classes.  This is necessary because ipalloc()
     * sometimes has to use such large objects in order to assure proper
     * alignment.
     */
    malloc_large_stats_t *lstats;
};

struct chunk_stats_s {
    /* Number of chunks that were allocated. */
    uint64_t nchunks;

    /* High-water mark for number of chunks allocated. */
    unsigned long highchunks;

    /*
     * Current number of chunks allocated.  This value isn't maintained for
     * any other purpose, so keep track of it in order to be able to set
     * highchunks.
     */
    unsigned long curchunks;
};

#endif /* JEMALLOC_H_STRUCTS */
#endif /* JEMALLOC_STATS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

extern bool opt_stats_print;

char *umax2s(uintmax_t x, unsigned base, char *s);
#ifdef JEMALLOC_STATS
void malloc_printf(const char *format, ...);
#endif

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_STATS
#ifdef JEMALLOC_H_INLINES

#endif /* JEMALLOC_H_INLINES */
#endif /* JEMALLOC_STATS */
/******************************************************************************/

jemalloc/src/jemalloc_tcache.c (new file)
@ -0,0 +1,374 @@
#define JEMALLOC_TCACHE_C_
#include "jemalloc_internal.h"
#ifdef JEMALLOC_TCACHE
/******************************************************************************/
/* Data. */

size_t opt_lg_tcache_nslots = LG_TCACHE_NSLOTS_DEFAULT;
ssize_t opt_lg_tcache_gc_sweep = LG_TCACHE_GC_SWEEP_DEFAULT;
bool opt_tcache_sort = true;

/* Map of thread-specific caches. */
__thread tcache_t *tcache_tls JEMALLOC_ATTR(tls_model("initial-exec"));

/*
 * Same contents as tcache_tls, but initialized such that the TSD destructor is
 * called when a thread exits, so that the cache can be cleaned up.
 */
static pthread_key_t tcache_tsd;

size_t tcache_nslots;
unsigned tcache_gc_incr;

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void tcache_thread_cleanup(void *arg);

/******************************************************************************/

void *
tcache_alloc_hard(tcache_t *tcache, tcache_bin_t *tbin, size_t binind)
{
    void *ret;

    arena_tcache_fill(tcache->arena, tbin, binind);
    ret = tcache_bin_alloc(tbin);

    return (ret);
}

static inline void
tcache_bin_merge(void **to, void **fr, unsigned lcnt, unsigned rcnt)
{
    void **l, **r;
    unsigned li, ri, i;

    l = fr;
    r = &fr[lcnt];
    li = ri = i = 0;
    while (li < lcnt && ri < rcnt) {
        /* High pointers come first in sorted result. */
        if ((uintptr_t)l[li] > (uintptr_t)r[ri]) {
            to[i] = l[li];
            li++;
        } else {
            to[i] = r[ri];
            ri++;
        }
        i++;
    }

    if (li < lcnt)
        memcpy(&to[i], &l[li], sizeof(void *) * (lcnt - li));
    else if (ri < rcnt)
        memcpy(&to[i], &r[ri], sizeof(void *) * (rcnt - ri));
}

static inline void
tcache_bin_sort(tcache_bin_t *tbin)
{
    unsigned e, i;
    void **fr, **to;
    void *mslots[tcache_nslots];

    /*
     * Perform iterative merge sort, swapping source and destination arrays
     * during each iteration.
     */

    fr = mslots; to = tbin->slots;
    for (e = 1; e < tbin->ncached; e <<= 1) {
        void **tmp = fr; fr = to; to = tmp;
        for (i = 0; i + (e << 1) <= tbin->ncached; i += (e << 1))
            tcache_bin_merge(&to[i], &fr[i], e, e);
        if (i + e <= tbin->ncached) {
            tcache_bin_merge(&to[i], &fr[i],
                e, tbin->ncached - (i + e));
        } else if (i < tbin->ncached)
            tcache_bin_merge(&to[i], &fr[i], tbin->ncached - i, 0);
    }

    /* Copy the final result out of mslots, if necessary. */
    if (to == mslots)
        memcpy(tbin->slots, mslots, sizeof(void *) * tbin->ncached);

#ifdef JEMALLOC_DEBUG
    for (i = 1; i < tbin->ncached; i++)
        assert(tbin->slots[i-1] > tbin->slots[i]);
#endif
}

void
tcache_bin_flush(tcache_bin_t *tbin, size_t binind, unsigned rem)
{
    arena_chunk_t *chunk;
    arena_t *arena;
    void *ptr;
    unsigned i, ndeferred, ncached;

    if (opt_tcache_sort && rem > 0) {
        assert(rem < tbin->ncached);
        /* Sort pointers such that the highest objects will be freed. */
        tcache_bin_sort(tbin);
    }

    for (ndeferred = tbin->ncached - rem; ndeferred > 0;) {
        ncached = ndeferred;
        /* Lock the arena associated with the first object. */
        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(tbin->slots[0]);
        arena = chunk->arena;
        malloc_mutex_lock(&arena->lock);
        /* Deallocate every object that belongs to the locked arena. */
        for (i = ndeferred = 0; i < ncached; i++) {
            ptr = tbin->slots[i];
            chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
            if (chunk->arena == arena) {
                size_t pageind = (((uintptr_t)ptr -
                    (uintptr_t)chunk) >> PAGE_SHIFT);
                arena_chunk_map_t *mapelm =
                    &chunk->map[pageind];
                arena_dalloc_bin(arena, chunk, ptr, mapelm);
            } else {
                /*
                 * This object was allocated via a different
                 * arena than the one that is currently locked.
                 * Stash the object, so that it can be handled
                 * in a future pass.
                 */
                tbin->slots[ndeferred] = ptr;
                ndeferred++;
            }
        }
#ifdef JEMALLOC_STATS
        arena->bins[binind].stats.nflushes++;
        {
            arena_bin_t *bin = &arena->bins[binind];
            bin->stats.nrequests += tbin->tstats.nrequests;
            if (bin->reg_size <= small_maxclass) {
                arena->stats.nmalloc_small +=
                    tbin->tstats.nrequests;
            } else {
                arena->stats.nmalloc_medium +=
                    tbin->tstats.nrequests;
            }
            tbin->tstats.nrequests = 0;
        }
#endif
        malloc_mutex_unlock(&arena->lock);
    }

    if (rem > 0) {
        /*
         * Shift the remaining valid pointers to the base of the slots
         * array.
         */
        memmove(&tbin->slots[0], &tbin->slots[tbin->ncached - rem],
            rem * sizeof(void *));
    }
    tbin->ncached = rem;
}

tcache_bin_t *
tcache_bin_create(arena_t *arena)
{
    tcache_bin_t *ret;
    size_t tsize;

    tsize = sizeof(tcache_bin_t) + (sizeof(void *) * (tcache_nslots - 1));
    if (tsize <= small_maxclass)
        ret = (tcache_bin_t *)arena_malloc_small(arena, tsize, false);
    else if (tsize <= bin_maxclass)
        ret = (tcache_bin_t *)arena_malloc_medium(arena, tsize, false);
    else
        ret = (tcache_bin_t *)imalloc(tsize);
    if (ret == NULL)
        return (NULL);
#ifdef JEMALLOC_STATS
    memset(&ret->tstats, 0, sizeof(tcache_bin_stats_t));
#endif
    ret->low_water = 0;
    ret->high_water = 0;
    ret->ncached = 0;

    return (ret);
}

void
tcache_bin_destroy(tcache_t *tcache, tcache_bin_t *tbin, unsigned binind)
{
    arena_t *arena;
    arena_chunk_t *chunk;
    size_t pageind, tsize;
    arena_chunk_map_t *mapelm;

    chunk = CHUNK_ADDR2BASE(tbin);
    arena = chunk->arena;
    pageind = (((uintptr_t)tbin - (uintptr_t)chunk) >> PAGE_SHIFT);
    mapelm = &chunk->map[pageind];

#ifdef JEMALLOC_STATS
    if (tbin->tstats.nrequests != 0) {
        arena_t *arena = tcache->arena;
        arena_bin_t *bin = &arena->bins[binind];
        malloc_mutex_lock(&arena->lock);
        bin->stats.nrequests += tbin->tstats.nrequests;
        if (bin->reg_size <= small_maxclass)
            arena->stats.nmalloc_small += tbin->tstats.nrequests;
        else
            arena->stats.nmalloc_medium += tbin->tstats.nrequests;
        malloc_mutex_unlock(&arena->lock);
    }
#endif

    assert(tbin->ncached == 0);
    tsize = sizeof(tcache_bin_t) + (sizeof(void *) * (tcache_nslots - 1));
    if (tsize <= bin_maxclass) {
        malloc_mutex_lock(&arena->lock);
        arena_dalloc_bin(arena, chunk, tbin, mapelm);
        malloc_mutex_unlock(&arena->lock);
    } else
        idalloc(tbin);
}

tcache_t *
tcache_create(arena_t *arena)
{
    tcache_t *tcache;

    if (sizeof(tcache_t) + (sizeof(tcache_bin_t *) * (nbins - 1)) <=
        small_maxclass) {
        tcache = (tcache_t *)arena_malloc_small(arena, sizeof(tcache_t)
            + (sizeof(tcache_bin_t *) * (nbins - 1)), true);
    } else if (sizeof(tcache_t) + (sizeof(tcache_bin_t *) * (nbins - 1)) <=
        bin_maxclass) {
        tcache = (tcache_t *)arena_malloc_medium(arena, sizeof(tcache_t)
            + (sizeof(tcache_bin_t *) * (nbins - 1)), true);
    } else {
        tcache = (tcache_t *)icalloc(sizeof(tcache_t) +
            (sizeof(tcache_bin_t *) * (nbins - 1)));
    }

    if (tcache == NULL)
        return (NULL);

#ifdef JEMALLOC_STATS
    /* Link into list of extant tcaches. */
    malloc_mutex_lock(&arena->lock);
    ql_elm_new(tcache, link);
    ql_tail_insert(&arena->tcache_ql, tcache, link);
    malloc_mutex_unlock(&arena->lock);
#endif

    tcache->arena = arena;

    tcache_tls = tcache;
    pthread_setspecific(tcache_tsd, tcache);

    return (tcache);
}

void
tcache_destroy(tcache_t *tcache)
{
    unsigned i;

#ifdef JEMALLOC_STATS
    /* Unlink from list of extant tcaches. */
    malloc_mutex_lock(&tcache->arena->lock);
    ql_remove(&tcache->arena->tcache_ql, tcache, link);
    tcache_stats_merge(tcache, tcache->arena);
    malloc_mutex_unlock(&tcache->arena->lock);
#endif

    for (i = 0; i < nbins; i++) {
        tcache_bin_t *tbin = tcache->tbins[i];
        if (tbin != NULL) {
            tcache_bin_flush(tbin, i, 0);
            tcache_bin_destroy(tcache, tbin, i);
        }
    }

    if (arena_salloc(tcache) <= bin_maxclass) {
        arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
        arena_t *arena = chunk->arena;
        size_t pageind = (((uintptr_t)tcache - (uintptr_t)chunk) >>
            PAGE_SHIFT);
        arena_chunk_map_t *mapelm = &chunk->map[pageind];

        malloc_mutex_lock(&arena->lock);
        arena_dalloc_bin(arena, chunk, tcache, mapelm);
        malloc_mutex_unlock(&arena->lock);
    } else
        idalloc(tcache);
}

static void
tcache_thread_cleanup(void *arg)
{
    tcache_t *tcache = (tcache_t *)arg;

    assert(tcache == tcache_tls);
    if (tcache != NULL) {
        assert(tcache != (void *)(uintptr_t)1);
        tcache_destroy(tcache);
        tcache_tls = (void *)(uintptr_t)1;
    }
}

#ifdef JEMALLOC_STATS
void
tcache_stats_merge(tcache_t *tcache, arena_t *arena)
{
    unsigned i;

    /* Merge and reset tcache stats. */
    for (i = 0; i < mbin0; i++) {
        arena_bin_t *bin = &arena->bins[i];
        tcache_bin_t *tbin = tcache->tbins[i];
        if (tbin != NULL) {
            bin->stats.nrequests += tbin->tstats.nrequests;
            arena->stats.nmalloc_small += tbin->tstats.nrequests;
            tbin->tstats.nrequests = 0;
        }
    }
    for (; i < nbins; i++) {
        arena_bin_t *bin = &arena->bins[i];
        tcache_bin_t *tbin = tcache->tbins[i];
        if (tbin != NULL) {
            bin->stats.nrequests += tbin->tstats.nrequests;
            arena->stats.nmalloc_medium += tbin->tstats.nrequests;
            tbin->tstats.nrequests = 0;
        }
    }
}
#endif

void
tcache_boot(void)
{

    if (opt_lg_tcache_nslots > 0) {
        tcache_nslots = (1U << opt_lg_tcache_nslots);

        /* Compute incremental GC event threshold. */
        if (opt_lg_tcache_gc_sweep >= 0) {
            tcache_gc_incr = ((1U << opt_lg_tcache_gc_sweep) /
                nbins) + (((1U << opt_lg_tcache_gc_sweep) % nbins ==
                0) ? 0 : 1);
        } else
            tcache_gc_incr = 0;
    } else
        tcache_nslots = 0;

    if (tcache_nslots != 0) {
        if (pthread_key_create(&tcache_tsd, tcache_thread_cleanup) !=
            0) {
            malloc_write4("<jemalloc>",
                ": Error in pthread_key_create()\n", "", "");
            abort();
        }
    }
}
/******************************************************************************/
#endif /* JEMALLOC_TCACHE */
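tcache_bin_sort() above is a bottom-up (iterative) merge sort into descending pointer order, ping-ponging between the slots array and a stack-allocated scratch array so the final pass may land in either. A self-contained sketch of the same algorithm on plain integers (illustrative, not taken from the commit; the fixed-size scratch stands in for the tcache_nslots-sized VLA):

/* Sketch (not from the commit): the bottom-up merge sort structure
 * used by tcache_bin_sort(), applied to ints, descending order. */
#include <stdio.h>
#include <string.h>

/* Merge two adjacent descending runs of lengths lcnt and rcnt. */
static void
ex_merge(int *to, int *fr, unsigned lcnt, unsigned rcnt)
{
    int *l = fr, *r = &fr[lcnt];
    unsigned li = 0, ri = 0, i = 0;

    while (li < lcnt && ri < rcnt) {
        if (l[li] > r[ri])
            to[i++] = l[li++];
        else
            to[i++] = r[ri++];
    }
    if (li < lcnt)
        memcpy(&to[i], &l[li], sizeof(int) * (lcnt - li));
    else if (ri < rcnt)
        memcpy(&to[i], &r[ri], sizeof(int) * (rcnt - ri));
}

static void
ex_sort(int *a, unsigned n)
{
    int scratch[64];    /* fixed here; the real code sizes it to nslots */
    int *fr = scratch, *to = a;
    unsigned e, i;

    /* Double the run width each pass, swapping source and dest. */
    for (e = 1; e < n; e <<= 1) {
        int *tmp = fr; fr = to; to = tmp;
        for (i = 0; i + (e << 1) <= n; i += (e << 1))
            ex_merge(&to[i], &fr[i], e, e);
        if (i + e <= n)
            ex_merge(&to[i], &fr[i], e, n - (i + e));
        else if (i < n)
            ex_merge(&to[i], &fr[i], n - i, 0);
    }
    /* Copy the final result out of scratch, if necessary. */
    if (to == scratch)
        memcpy(a, scratch, sizeof(int) * n);
}

int
main(void)
{
    int a[] = {3, 9, 1, 7, 5};
    unsigned i;

    ex_sort(a, 5);
    for (i = 0; i < 5; i++)
        printf("%d ", a[i]);    /* prints: 9 7 5 3 1 */
    printf("\n");
    return 0;
}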

jemalloc/src/jemalloc_tcache.h (new file)
@ -0,0 +1,269 @@
#ifdef JEMALLOC_TCACHE
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef struct tcache_bin_s tcache_bin_t;
typedef struct tcache_s tcache_t;

/*
 * Default number of cache slots for each bin in the thread cache (0:
 * disabled).
 */
#define LG_TCACHE_NSLOTS_DEFAULT 7
/*
 * (1U << opt_lg_tcache_gc_sweep) is the approximate number of allocation
 * events between full GC sweeps (-1: disabled).  Integer rounding may cause
 * the actual number to be slightly higher, since GC is performed
 * incrementally.
 */
#define LG_TCACHE_GC_SWEEP_DEFAULT 13

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

struct tcache_bin_s {
# ifdef JEMALLOC_STATS
    tcache_bin_stats_t tstats;
# endif
    unsigned low_water;   /* Min # cached since last GC. */
    unsigned high_water;  /* Max # cached since last GC. */
    unsigned ncached;     /* # of cached objects. */
    void *slots[1];       /* Dynamically sized. */
};

struct tcache_s {
# ifdef JEMALLOC_STATS
    ql_elm(tcache_t) link;   /* Used for aggregating stats. */
# endif
    arena_t *arena;          /* This thread's arena. */
    unsigned ev_cnt;         /* Event count since incremental GC. */
    unsigned next_gc_bin;    /* Next bin to GC. */
    tcache_bin_t *tbins[1];  /* Dynamically sized. */
};

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

extern size_t opt_lg_tcache_nslots;
extern ssize_t opt_lg_tcache_gc_sweep;
extern bool opt_tcache_sort;

/* Map of thread-specific caches. */
extern __thread tcache_t *tcache_tls
    JEMALLOC_ATTR(tls_model("initial-exec"));

/*
 * Number of cache slots for each bin in the thread cache, or 0 if tcache is
 * disabled.
 */
extern size_t tcache_nslots;

/* Number of tcache allocation/deallocation events between incremental GCs. */
extern unsigned tcache_gc_incr;

void tcache_bin_flush(tcache_bin_t *tbin, size_t binind, unsigned rem);
tcache_t *tcache_create(arena_t *arena);
void tcache_bin_destroy(tcache_t *tcache, tcache_bin_t *tbin,
    unsigned binind);
void *tcache_alloc_hard(tcache_t *tcache, tcache_bin_t *tbin, size_t binind);
tcache_bin_t *tcache_bin_create(arena_t *arena);
void tcache_destroy(tcache_t *tcache);
#ifdef JEMALLOC_STATS
void tcache_stats_merge(tcache_t *tcache, arena_t *arena);
#endif
void tcache_boot(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
void tcache_event(tcache_t *tcache);
tcache_t *tcache_get(void);
void *tcache_bin_alloc(tcache_bin_t *tbin);
void *tcache_alloc(tcache_t *tcache, size_t size, bool zero);
void tcache_dalloc(tcache_t *tcache, void *ptr);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_))
JEMALLOC_INLINE tcache_t *
tcache_get(void)
{
    tcache_t *tcache;

    if (isthreaded == false || tcache_nslots == 0)
        return (NULL);

    tcache = tcache_tls;
    if ((uintptr_t)tcache <= (uintptr_t)1) {
        if (tcache == NULL) {
            tcache = tcache_create(choose_arena());
            if (tcache == NULL)
                return (NULL);
        } else
            return (NULL);
    }

    return (tcache);
}

JEMALLOC_INLINE void
tcache_event(tcache_t *tcache)
{

    if (tcache_gc_incr == 0)
        return;

    tcache->ev_cnt++;
    assert(tcache->ev_cnt <= tcache_gc_incr);
    if (tcache->ev_cnt >= tcache_gc_incr) {
        size_t binind = tcache->next_gc_bin;
        tcache_bin_t *tbin = tcache->tbins[binind];

        if (tbin != NULL) {
            if (tbin->high_water == 0) {
                /*
                 * This bin went completely unused for an
                 * entire GC cycle, so throw away the tbin.
                 */
                assert(tbin->ncached == 0);
                tcache_bin_destroy(tcache, tbin, binind);
                tcache->tbins[binind] = NULL;
            } else {
                if (tbin->low_water > 0) {
                    /*
                     * Flush (ceiling) half of the objects
                     * below the low water mark.
                     */
                    tcache_bin_flush(tbin, binind,
                        tbin->ncached - (tbin->low_water >>
                        1) - (tbin->low_water & 1));
                }
                tbin->low_water = tbin->ncached;
                tbin->high_water = tbin->ncached;
            }
        }

        tcache->next_gc_bin++;
        if (tcache->next_gc_bin == nbins)
            tcache->next_gc_bin = 0;
        tcache->ev_cnt = 0;
    }
}

JEMALLOC_INLINE void *
tcache_bin_alloc(tcache_bin_t *tbin)
{

    if (tbin->ncached == 0)
        return (NULL);
    tbin->ncached--;
    if (tbin->ncached < tbin->low_water)
        tbin->low_water = tbin->ncached;
    return (tbin->slots[tbin->ncached]);
}

JEMALLOC_INLINE void *
tcache_alloc(tcache_t *tcache, size_t size, bool zero)
{
    void *ret;
    tcache_bin_t *tbin;
    size_t binind;

    if (size <= small_maxclass)
        binind = small_size2bin[size];
    else {
        binind = mbin0 + ((MEDIUM_CEILING(size) - medium_min) >>
            lg_mspace);
    }
    assert(binind < nbins);
    tbin = tcache->tbins[binind];
    if (tbin == NULL) {
        tbin = tcache_bin_create(tcache->arena);
        if (tbin == NULL)
            return (NULL);
        tcache->tbins[binind] = tbin;
    }

    ret = tcache_bin_alloc(tbin);
    if (ret == NULL) {
        ret = tcache_alloc_hard(tcache, tbin, binind);
        if (ret == NULL)
            return (NULL);
    }

    if (zero == false) {
#ifdef JEMALLOC_FILL
        if (opt_junk)
            memset(ret, 0xa5, size);
        else if (opt_zero)
            memset(ret, 0, size);
#endif
    } else
        memset(ret, 0, size);

#ifdef JEMALLOC_STATS
    tbin->tstats.nrequests++;
#endif
    tcache_event(tcache);
    return (ret);
}

JEMALLOC_INLINE void
tcache_dalloc(tcache_t *tcache, void *ptr)
{
    arena_t *arena;
    arena_chunk_t *chunk;
    arena_run_t *run;
    arena_bin_t *bin;
    tcache_bin_t *tbin;
    size_t pageind, binind;
    arena_chunk_map_t *mapelm;

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    arena = chunk->arena;
    pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT);
    mapelm = &chunk->map[pageind];
    run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
        ((mapelm->bits & CHUNK_MAP_PG_MASK) >> CHUNK_MAP_PG_SHIFT)) <<
        PAGE_SHIFT));
    assert(run->magic == ARENA_RUN_MAGIC);
    bin = run->bin;
    binind = ((uintptr_t)bin - (uintptr_t)&arena->bins) /
        sizeof(arena_bin_t);
    assert(binind < nbins);

#ifdef JEMALLOC_FILL
    if (opt_junk)
        memset(ptr, 0x5a, arena->bins[binind].reg_size);
#endif

    tbin = tcache->tbins[binind];
    if (tbin == NULL) {
        tbin = tcache_bin_create(choose_arena());
        if (tbin == NULL) {
            malloc_mutex_lock(&arena->lock);
            arena_dalloc_bin(arena, chunk, ptr, mapelm);
            malloc_mutex_unlock(&arena->lock);
            return;
        }
        tcache->tbins[binind] = tbin;
    }

    if (tbin->ncached == tcache_nslots)
        tcache_bin_flush(tbin, binind, (tcache_nslots >> 1));
    assert(tbin->ncached < tcache_nslots);
    tbin->slots[tbin->ncached] = ptr;
    tbin->ncached++;
    if (tbin->ncached > tbin->high_water)
        tbin->high_water = tbin->ncached;

    tcache_event(tcache);
}
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
#endif /* JEMALLOC_TCACHE */
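The slots[1] member flagged "Dynamically sized" above uses the classic C89 trailing-array idiom: the struct is over-allocated so the one-element array extends to tcache_nslots entries, which is why tcache_bin_create() in jemalloc_tcache.c requests sizeof(tcache_bin_t) + sizeof(void *) * (tcache_nslots - 1) bytes. A standalone sketch (ex_bin_t is illustrative, not from the commit):

/* Sketch (not from the commit) of the trailing-array sizing used for
 * tcache_bin_t: allocate the struct plus nslots - 1 extra pointers so
 * slots[0..nslots-1] are all addressable. */
#include <stdio.h>
#include <stdlib.h>

typedef struct {
    unsigned ncached;
    void *slots[1];     /* dynamically sized, C89 "struct hack" */
} ex_bin_t;

int
main(void)
{
    size_t nslots = 128;    /* e.g. 1U << LG_TCACHE_NSLOTS_DEFAULT */
    ex_bin_t *bin = malloc(sizeof(ex_bin_t) +
        sizeof(void *) * (nslots - 1));

    if (bin == NULL)
        return 1;
    bin->ncached = 0;
    bin->slots[nslots - 1] = NULL;      /* last slot is valid */
    printf("allocated %zu bytes for %zu slots\n",
        sizeof(ex_bin_t) + sizeof(void *) * (nslots - 1), nslots);
    free(bin);
    return 0;
}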

jemalloc/src/jemalloc_trace.c (new file)
@ -0,0 +1,272 @@
|
||||
#define JEMALLOC_TRACE_C_
|
||||
#include "jemalloc_internal.h"
|
||||
#ifdef JEMALLOC_TRACE
|
||||
/******************************************************************************/
|
||||
/* Data. */
|
||||
|
||||
bool opt_trace = false;
|
||||
|
||||
static malloc_mutex_t trace_mtx;
|
||||
static unsigned trace_next_tid = 1;
|
||||
|
||||
static unsigned __thread trace_tid
|
||||
JEMALLOC_ATTR(tls_model("initial-exec"));
|
||||
/* Used to cause trace_cleanup() to be called. */
|
||||
static pthread_key_t trace_tsd;
|
||||
|
||||
/******************************************************************************/
|
||||
/* Function prototypes for non-inline static functions. */
|
||||
|
||||
static arena_t *trace_arena(const void *ptr);
|
||||
static void trace_flush(arena_t *arena);
|
||||
static void trace_write(arena_t *arena, const char *s);
|
||||
static unsigned trace_get_tid(void);
|
||||
static void trace_thread_cleanup(void *arg);
|
||||
static void malloc_trace_flush_all(void);
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
static arena_t *
|
||||
trace_arena(const void *ptr)
|
||||
{
|
||||
arena_t *arena;
|
||||
arena_chunk_t *chunk;
|
||||
|
||||
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
|
||||
if ((void *)chunk == ptr)
|
||||
arena = arenas[0];
|
||||
else
|
||||
arena = chunk->arena;
|
||||
|
||||
return (arena);
|
||||
}
|
||||
|
||||
static void
|
||||
trace_flush(arena_t *arena)
|
||||
{
|
||||
ssize_t err;
|
||||
|
||||
err = write(arena->trace_fd, arena->trace_buf, arena->trace_buf_end);
|
||||
if (err == -1) {
|
||||
malloc_write4("<jemalloc>",
|
||||
": write() failed during trace flush", "\n", "");
|
||||
abort();
|
||||
}
|
||||
arena->trace_buf_end = 0;
|
||||
}
|
||||
|
||||
static void
|
||||
trace_write(arena_t *arena, const char *s)
|
||||
{
|
||||
unsigned i, slen, n;
|
||||
|
||||
i = 0;
|
||||
slen = strlen(s);
|
||||
while (i < slen) {
|
||||
/* Flush the trace buffer if it is full. */
|
||||
if (arena->trace_buf_end == TRACE_BUF_SIZE)
|
||||
trace_flush(arena);
|
||||
|
||||
if (arena->trace_buf_end + slen <= TRACE_BUF_SIZE) {
|
||||
/* Finish writing. */
|
||||
n = slen - i;
|
||||
} else {
|
||||
/* Write as much of s as will fit. */
|
||||
n = TRACE_BUF_SIZE - arena->trace_buf_end;
|
||||
}
|
||||
memcpy(&arena->trace_buf[arena->trace_buf_end], &s[i], n);
|
||||
arena->trace_buf_end += n;
|
||||
i += n;
|
||||
}
|
||||
}
|
||||
|
||||
static unsigned
|
||||
trace_get_tid(void)
|
||||
{
|
||||
unsigned ret = trace_tid;
|
||||
|
||||
if (ret == 0) {
|
||||
malloc_mutex_lock(&trace_mtx);
|
||||
trace_tid = trace_next_tid;
|
||||
trace_next_tid++;
|
||||
malloc_mutex_unlock(&trace_mtx);
|
||||
ret = trace_tid;
|
||||
|
||||
/*
|
||||
* Set trace_tsd to non-zero so that the cleanup function will
|
||||
* be called upon thread exit.
|
||||
*/
|
||||
pthread_setspecific(trace_tsd, (void *)ret);
|
||||
}
|
||||
|
||||
return (ret);
|
||||
}

static void
malloc_trace_flush_all(void)
{
	unsigned i;

	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL) {
			malloc_mutex_lock(&arenas[i]->lock);
			trace_flush(arenas[i]);
			malloc_mutex_unlock(&arenas[i]->lock);
		}
	}
}

void
trace_malloc(const void *ptr, size_t size)
{
	char buf[UMAX2S_BUFSIZE];
	arena_t *arena = trace_arena(ptr);

	malloc_mutex_lock(&arena->lock);

	trace_write(arena, umax2s(trace_get_tid(), 10, buf));
	trace_write(arena, " m 0x");
	trace_write(arena, umax2s((uintptr_t)ptr, 16, buf));
	trace_write(arena, " ");
	trace_write(arena, umax2s(size, 10, buf));
	trace_write(arena, "\n");

	malloc_mutex_unlock(&arena->lock);
}

void
trace_calloc(const void *ptr, size_t number, size_t size)
{
	char buf[UMAX2S_BUFSIZE];
	arena_t *arena = trace_arena(ptr);

	malloc_mutex_lock(&arena->lock);

	trace_write(arena, umax2s(trace_get_tid(), 10, buf));
	trace_write(arena, " c 0x");
	trace_write(arena, umax2s((uintptr_t)ptr, 16, buf));
	trace_write(arena, " ");
	trace_write(arena, umax2s(number, 10, buf));
	trace_write(arena, " ");
	trace_write(arena, umax2s(size, 10, buf));
	trace_write(arena, "\n");

	malloc_mutex_unlock(&arena->lock);
}

void
trace_posix_memalign(const void *ptr, size_t alignment, size_t size)
{
	char buf[UMAX2S_BUFSIZE];
	arena_t *arena = trace_arena(ptr);

	malloc_mutex_lock(&arena->lock);

	trace_write(arena, umax2s(trace_get_tid(), 10, buf));
	trace_write(arena, " a 0x");
	trace_write(arena, umax2s((uintptr_t)ptr, 16, buf));
	trace_write(arena, " ");
	trace_write(arena, umax2s(alignment, 10, buf));
	trace_write(arena, " ");
	trace_write(arena, umax2s(size, 10, buf));
	trace_write(arena, "\n");

	malloc_mutex_unlock(&arena->lock);
}

void
trace_realloc(const void *ptr, const void *old_ptr, size_t size,
    size_t old_size)
{
	char buf[UMAX2S_BUFSIZE];
	arena_t *arena = trace_arena(ptr);

	malloc_mutex_lock(&arena->lock);

	trace_write(arena, umax2s(trace_get_tid(), 10, buf));
	trace_write(arena, " r 0x");
	trace_write(arena, umax2s((uintptr_t)ptr, 16, buf));
	trace_write(arena, " 0x");
	trace_write(arena, umax2s((uintptr_t)old_ptr, 16, buf));
	trace_write(arena, " ");
	trace_write(arena, umax2s(size, 10, buf));
	trace_write(arena, " ");
	trace_write(arena, umax2s(old_size, 10, buf));
	trace_write(arena, "\n");

	malloc_mutex_unlock(&arena->lock);
}

void
trace_free(const void *ptr, size_t size)
{
	char buf[UMAX2S_BUFSIZE];
	arena_t *arena = trace_arena(ptr);

	malloc_mutex_lock(&arena->lock);

	trace_write(arena, umax2s(trace_get_tid(), 10, buf));
	trace_write(arena, " f 0x");
	trace_write(arena, umax2s((uintptr_t)ptr, 16, buf));
	trace_write(arena, " ");
	trace_write(arena, umax2s(isalloc(ptr), 10, buf));
	trace_write(arena, "\n");

	malloc_mutex_unlock(&arena->lock);
}

void
trace_malloc_usable_size(size_t size, const void *ptr)
{
	char buf[UMAX2S_BUFSIZE];
	arena_t *arena = trace_arena(ptr);

	malloc_mutex_lock(&arena->lock);

	trace_write(arena, umax2s(trace_get_tid(), 10, buf));
	trace_write(arena, " s ");
	trace_write(arena, umax2s(size, 10, buf));
	trace_write(arena, " 0x");
	trace_write(arena, umax2s((uintptr_t)ptr, 16, buf));
	trace_write(arena, "\n");

	malloc_mutex_unlock(&arena->lock);
}
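Taken together, the hooks above emit one whitespace-delimited record per API call, prefixed by the caller's trace tid and a one-letter opcode: m (malloc), c (calloc), a (posix_memalign), r (realloc), f (free), s (malloc_usable_size), and x (thread exit, emitted by trace_thread_exit() below). A hypothetical excerpt, with an illustrative tid and addresses:

	/*
	 * Hypothetical trace excerpt (tid and addresses are made up):
	 *
	 *	1 m 0x7f80432000 4096
	 *	1 r 0x7f80433000 0x7f80432000 8192 4096
	 *	1 f 0x7f80433000 8192
	 *	1 x
	 */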

void
trace_thread_exit(void)
{
	char buf[UMAX2S_BUFSIZE];
	arena_t *arena = choose_arena();

	malloc_mutex_lock(&arena->lock);

	trace_write(arena, umax2s(trace_get_tid(), 10, buf));
	trace_write(arena, " x\n");

	malloc_mutex_unlock(&arena->lock);
}

static void
trace_thread_cleanup(void *arg)
{

	trace_thread_exit();
}

void
trace_boot(void)
{

	malloc_mutex_init(&trace_mtx);
	/* Flush trace buffers at exit. */
	atexit(malloc_trace_flush_all);
	/* Receive thread exit notifications. */
	if (pthread_key_create(&trace_tsd, trace_thread_cleanup) != 0) {
		malloc_write4("<jemalloc>",
		    ": Error in pthread_key_create()\n", "", "");
		abort();
	}
}
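A hedged sketch of the intended call site (assumed; the actual wiring lives in the jemalloc bootstrap code elsewhere in this commit). Tracing is gated on the opt_trace flag exported by jemalloc_trace.h below, so trace_boot() presumably runs once during malloc initialization:

	/* Illustrative bootstrap fragment (assumed, not from this hunk). */
	#ifdef JEMALLOC_TRACE
		if (opt_trace)
			trace_boot();
	#endif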
/******************************************************************************/
#endif /* JEMALLOC_TRACE */

jemalloc/src/jemalloc_trace.h (new file, 32 lines)
@ -0,0 +1,32 @@
#ifdef JEMALLOC_TRACE
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

extern bool opt_trace;

void	trace_malloc(const void *ptr, size_t size);
void	trace_calloc(const void *ptr, size_t number, size_t size);
void	trace_posix_memalign(const void *ptr, size_t alignment, size_t size);
void	trace_realloc(const void *ptr, const void *old_ptr, size_t size,
    size_t old_size);
void	trace_free(const void *ptr, size_t size);
void	trace_malloc_usable_size(size_t size, const void *ptr);
void	trace_thread_exit(void);

void	trace_boot(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
#endif /* JEMALLOC_TRACE */
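The JEMALLOC_H_TYPES / STRUCTS / EXTERNS / INLINES guards above suggest a phased-inclusion scheme: each module header is included once per section, so every module's types land before any structs, and so on. A minimal sketch of how an including file presumably drives this (assumed; the actual includer is elsewhere in this commit):

	/* Illustrative phased inclusion (assumed). */
	#define JEMALLOC_H_TYPES
	#include "jemalloc_trace.h"
	#undef JEMALLOC_H_TYPES

	#define JEMALLOC_H_STRUCTS
	#include "jemalloc_trace.h"
	#undef JEMALLOC_H_STRUCTS

	/* ...and likewise for JEMALLOC_H_EXTERNS and JEMALLOC_H_INLINES. */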

@ -646,6 +646,33 @@ struct { \
	(a_tree)->rbt_root = rbp_left_get(a_type, a_field, &rbp_r_s);	\
} while (0)

/*
 * The rb_proto() macro generates function prototypes that correspond to the
 * functions generated by an equivalently parameterized call to rb_wrap().
 */

#define	rb_proto(a_attr, a_prefix, a_tree_type, a_type)			\
a_attr void								\
a_prefix##new(a_tree_type *tree);					\
a_attr a_type *								\
a_prefix##first(a_tree_type *tree);					\
a_attr a_type *								\
a_prefix##last(a_tree_type *tree);					\
a_attr a_type *								\
a_prefix##next(a_tree_type *tree, a_type *node);			\
a_attr a_type *								\
a_prefix##prev(a_tree_type *tree, a_type *node);			\
a_attr a_type *								\
a_prefix##search(a_tree_type *tree, a_type *key);			\
a_attr a_type *								\
a_prefix##nsearch(a_tree_type *tree, a_type *key);			\
a_attr a_type *								\
a_prefix##psearch(a_tree_type *tree, a_type *key);			\
a_attr void								\
a_prefix##insert(a_tree_type *tree, a_type *node);			\
a_attr void								\
a_prefix##remove(a_tree_type *tree, a_type *node);
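As a usage illustration (hypothetical names throughout, not part of this commit): a module would place rb_proto() in its header and the matching rb_wrap() next to a comparison function in its .c file. This sketch assumes the rb_node()/rb_tree() linkage macros defined earlier in rb.h:

	/* Hypothetical example; node_t, tree_, and node_cmp are made up. */
	typedef struct node_s node_t;
	struct node_s {
		int		key;
		rb_node(node_t)	link;	/* Intrusive tree linkage. */
	};
	typedef rb_tree(node_t) tree_t;

	rb_proto(static, tree_, tree_t, node_t)	/* In a header: prototypes. */

	static int
	node_cmp(node_t *a, node_t *b)
	{
		return ((a->key > b->key) - (a->key < b->key));
	}
	rb_wrap(static, tree_, tree_t, node_t, link, node_cmp)	/* Definitions. */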

/*
 * The rb_wrap() macro provides a convenient way to wrap functions around the
 * cpp macros.  The main benefits of wrapping are that 1) repeated macro