Move centralized chunk management into arenas.
Migrate all centralized data structures related to huge allocations and recyclable chunks into arena_t, so that each arena can manage huge allocations and recyclable virtual memory completely independently of other arenas.

Add chunk node caching to arenas, in order to avoid contention on the base allocator.

Use chunks_rtree to look up huge allocations rather than a red-black tree. Maintain a per arena unsorted list of huge allocations (which will be needed to enumerate huge allocations during arena reset).

Remove the --enable-ivsalloc option, make ivsalloc() always available, and use it for size queries if --enable-debug is enabled. The only practical implications of this removal are that 1) ivsalloc() is now always available during live debugging (and the underlying radix tree is available during core-based debugging), and 2) size query validation can no longer be enabled independent of --enable-debug.

Remove the stats.chunks.{current,total,high} mallctls, and replace their underlying statistics with simpler atomically updated counters used exclusively for gdump triggering. These statistics are no longer very useful because each arena manages chunks independently, and per arena statistics provide similar information.

Simplify chunk synchronization code, now that base chunk allocation cannot cause recursive lock acquisition.
parent f30e261c5b
commit cbf3a6d703
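The gdump accounting that replaces the removed stats.chunks mallctls reduces to two counters that are only ever updated atomically: a count of currently mapped chunks and a CAS-maintained high-water mark (see chunk_register()/chunk_deregister() in the src/chunk.c hunk below). The following is a minimal, self-contained sketch of that pattern using C11 atomics rather than jemalloc's internal atomic_*_z wrappers; chunk_account_alloc(), chunk_account_dalloc(), and the empty gdump_trigger() hook are illustrative names, not part of the patch.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    static _Atomic size_t curchunks;   /* chunks currently mapped */
    static _Atomic size_t highchunks;  /* high-water mark that drives gdump */

    /* Illustrative stand-in for the real gdump machinery; does nothing here. */
    void
    gdump_trigger(void)
    {
    }

    void
    chunk_account_alloc(size_t nchunks, bool gdump_enabled)
    {
        /* atomic_fetch_add() returns the old value; cur is the new count. */
        size_t cur = atomic_fetch_add(&curchunks, nchunks) + nchunks;
        size_t high = atomic_load(&highchunks);

        /* Raise the high-water mark; a failed CAS refreshes 'high'. */
        while (cur > high &&
            !atomic_compare_exchange_weak(&highchunks, &high, cur))
            ;
        /* Trigger only if this thread actually set a new high-water mark. */
        if (cur > high && gdump_enabled)
            gdump_trigger();
    }

    void
    chunk_account_dalloc(size_t nchunks)
    {
        atomic_fetch_sub(&curchunks, nchunks);
    }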
INSTALL | 6

@@ -92,7 +92,6 @@ any of the following arguments (not a definitive list) to 'configure':
 --enable-debug
     Enable assertions and validation code. This incurs a substantial
     performance hit, but is very useful during application development.
-    Implies --enable-ivsalloc.
 
 --enable-code-coverage
     Enable code coverage support, for use during jemalloc test development.
@@ -107,11 +106,6 @@ any of the following arguments (not a definitive list) to 'configure':
     there are interactions between the various coverage targets, so it is
     usually advisable to run 'make clean' between repeated code coverage runs.
 
---enable-ivsalloc
-    Enable validation code, which verifies that pointers reside within
-    jemalloc-owned chunks before dereferencing them. This incurs a substantial
-    performance hit.
-
 --disable-stats
     Disable statistics gathering functionality. See the "opt.stats_print"
     option documentation for usage details.
configure.ac | 22

@@ -625,7 +625,7 @@ fi
 
 dnl Do not compile with debugging by default.
 AC_ARG_ENABLE([debug],
-  [AS_HELP_STRING([--enable-debug], [Build debugging code (implies --enable-ivsalloc)])],
+  [AS_HELP_STRING([--enable-debug], [Build debugging code])],
 [if test "x$enable_debug" = "xno" ; then
   enable_debug="0"
 else
@@ -634,27 +634,8 @@ fi
 ],
 [enable_debug="0"]
 )
-if test "x$enable_debug" = "x1" ; then
-  AC_DEFINE([JEMALLOC_DEBUG], [ ])
-  enable_ivsalloc="1"
-fi
 AC_SUBST([enable_debug])
-
-dnl Do not validate pointers by default.
-AC_ARG_ENABLE([ivsalloc],
-  [AS_HELP_STRING([--enable-ivsalloc], [Validate pointers passed through the public API])],
-[if test "x$enable_ivsalloc" = "xno" ; then
-  enable_ivsalloc="0"
-else
-  enable_ivsalloc="1"
-fi
-],
-[enable_ivsalloc="0"]
-)
-if test "x$enable_ivsalloc" = "x1" ; then
-  AC_DEFINE([JEMALLOC_IVSALLOC], [ ])
-fi
 
 dnl Only optimize if not debugging.
 if test "x$enable_debug" = "x0" -a "x$no_CFLAGS" = "xyes" ; then
   dnl Make sure that an optimization flag was not specified in EXTRA_CFLAGS.
@@ -1401,7 +1382,6 @@ if test "x${enable_zone_allocator}" = "x1" ; then
   if test "x${abi}" != "xmacho"; then
     AC_MSG_ERROR([--enable-zone-allocator is only supported on Darwin])
   fi
-  AC_DEFINE([JEMALLOC_IVSALLOC], [ ])
   AC_DEFINE([JEMALLOC_ZONE], [ ])
 
   dnl The szone version jumped from 3 to 6 between the OS X 10.5.x and 10.6
doc/jemalloc.xml.in

@@ -1847,7 +1847,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
         equal to <link
         linkend="stats.allocated"><mallctl>stats.allocated</mallctl></link>.
         This does not include <link linkend="stats.arenas.i.pdirty">
-        <mallctl>stats.arenas.<i>.pdirty</mallctl></link> and pages
+        <mallctl>stats.arenas.<i>.pdirty</mallctl></link>, nor pages
         entirely devoted to allocator metadata.</para></listitem>
       </varlistentry>
 
@@ -1880,39 +1880,6 @@ malloc_conf = "xmalloc:true";]]></programlisting>
         does not include inactive chunks.</para></listitem>
       </varlistentry>
 
-      <varlistentry id="stats.chunks.current">
-        <term>
-          <mallctl>stats.chunks.current</mallctl>
-          (<type>size_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>Total number of chunks actively mapped on behalf of the
-        application. This does not include inactive chunks.
-        </para></listitem>
-      </varlistentry>
-
-      <varlistentry id="stats.chunks.total">
-        <term>
-          <mallctl>stats.chunks.total</mallctl>
-          (<type>uint64_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>Cumulative number of chunks allocated.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="stats.chunks.high">
-        <term>
-          <mallctl>stats.chunks.high</mallctl>
-          (<type>size_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>Maximum number of active chunks at any time thus far.
-        </para></listitem>
-      </varlistentry>
-
       <varlistentry id="stats.arenas.i.dss">
         <term>
           <mallctl>stats.arenas.<i>.dss</mallctl>
include/jemalloc/internal/arena.h

@@ -151,8 +151,12 @@ typedef ql_head(arena_chunk_map_misc_t) arena_chunk_miscelms_t;
 
 /* Arena chunk header. */
 struct arena_chunk_s {
-    /* Arena that owns the chunk. */
-    arena_t *arena;
+    /*
+     * The arena that owns the chunk is node.arena. This field as a whole
+     * is used by chunks_rtree to support both ivsalloc() and core-based
+     * debugging.
+     */
+    extent_node_t node;
 
     /*
      * Map of pages within chunk that keeps track of free/large/small. The
@@ -313,6 +317,27 @@ struct arena_s {
     /* List of dirty runs this arena manages. */
    arena_chunk_miscelms_t runs_dirty;
 
+    /* Extant huge allocations. */
+    ql_head(extent_node_t) huge;
+    /* Synchronizes all huge allocation/update/deallocation. */
+    malloc_mutex_t huge_mtx;
+
+    /*
+     * Trees of chunks that were previously allocated (trees differ only in
+     * node ordering). These are used when allocating chunks, in an attempt
+     * to re-use address space. Depending on function, different tree
+     * orderings are needed, which is why there are two trees with the same
+     * contents.
+     */
+    extent_tree_t chunks_szad_mmap;
+    extent_tree_t chunks_ad_mmap;
+    extent_tree_t chunks_szad_dss;
+    extent_tree_t chunks_ad_dss;
+    malloc_mutex_t chunks_mtx;
+    /* Cache of nodes that were allocated via base_alloc(). */
+    ql_head(extent_node_t) node_cache;
+    malloc_mutex_t node_cache_mtx;
+
     /*
      * User-configurable chunk allocation and deallocation functions.
      */
@@ -338,6 +363,8 @@ extern size_t arena_maxclass; /* Max size class for arenas. */
 extern unsigned nlclasses; /* Number of large size classes. */
 extern unsigned nhclasses; /* Number of huge size classes. */
 
+extent_node_t *arena_node_alloc(arena_t *arena);
+void arena_node_dalloc(arena_t *arena, extent_node_t *node);
 void *arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
     bool *zero);
 void arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize);
@@ -453,8 +480,7 @@ void *arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
     tcache_t *tcache);
 arena_t *arena_aalloc(const void *ptr);
 size_t arena_salloc(const void *ptr, bool demote);
-void arena_dalloc(tsd_t *tsd, arena_chunk_t *chunk, void *ptr,
-    tcache_t *tcache);
+void arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache);
 void arena_sdalloc(tsd_t *tsd, arena_chunk_t *chunk, void *ptr, size_t size,
     tcache_t *tcache);
 #endif
@@ -792,7 +818,7 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
     assert(binind != BININD_INVALID);
     assert(binind < NBINS);
     chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-    arena = chunk->arena;
+    arena = chunk->node.arena;
     pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
     actual_mapbits = arena_mapbits_get(chunk, pageind);
     assert(mapbits == actual_mapbits);
@@ -980,7 +1006,7 @@ arena_aalloc(const void *ptr)
     arena_chunk_t *chunk;
 
     chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-    return (chunk->arena);
+    return (chunk->node.arena);
 }
 
 /* Return the size of the allocation pointed to by ptr. */
@@ -1024,11 +1050,18 @@ arena_salloc(const void *ptr, bool demote)
 }
 
 JEMALLOC_ALWAYS_INLINE void
-arena_dalloc(tsd_t *tsd, arena_chunk_t *chunk, void *ptr, tcache_t *tcache)
+arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
 {
+    arena_chunk_t *chunk;
     size_t pageind, mapbits;
 
     assert(ptr != NULL);
+
+    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+    if (unlikely(chunk == ptr)) {
+        huge_dalloc(tsd, ptr, tcache);
+        return;
+    }
     assert(CHUNK_ADDR2BASE(ptr) != ptr);
 
     pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
@@ -1040,8 +1073,10 @@ arena_dalloc(tsd_t *tsd, arena_chunk_t *chunk, void *ptr, tcache_t *tcache)
             index_t binind = arena_ptr_small_binind_get(ptr,
                 mapbits);
             tcache_dalloc_small(tsd, tcache, ptr, binind);
-        } else
-            arena_dalloc_small(chunk->arena, chunk, ptr, pageind);
+        } else {
+            arena_dalloc_small(chunk->node.arena, chunk, ptr,
+                pageind);
+        }
     } else {
         size_t size = arena_mapbits_large_size_get(chunk, pageind);
 
@@ -1050,7 +1085,7 @@ arena_dalloc(tsd_t *tsd, arena_chunk_t *chunk, void *ptr, tcache_t *tcache)
         if (likely(tcache != NULL) && size <= tcache_maxclass)
             tcache_dalloc_large(tsd, tcache, ptr, size);
         else
-            arena_dalloc_large(chunk->arena, chunk, ptr);
+            arena_dalloc_large(chunk->node.arena, chunk, ptr);
     }
 }
 
@@ -1081,7 +1116,8 @@ arena_sdalloc(tsd_t *tsd, arena_chunk_t *chunk, void *ptr, size_t size,
         } else {
             size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
                 LG_PAGE;
-            arena_dalloc_small(chunk->arena, chunk, ptr, pageind);
+            arena_dalloc_small(chunk->node.arena, chunk, ptr,
+                pageind);
         }
     } else {
         assert(((uintptr_t)ptr & PAGE_MASK) == 0);
@@ -1089,7 +1125,7 @@ arena_sdalloc(tsd_t *tsd, arena_chunk_t *chunk, void *ptr, size_t size,
         if (likely(tcache != NULL) && size <= tcache_maxclass)
             tcache_dalloc_large(tsd, tcache, ptr, size);
         else
-            arena_dalloc_large(chunk->arena, chunk, ptr);
+            arena_dalloc_large(chunk->node.arena, chunk, ptr);
     }
 }
 # endif /* JEMALLOC_ARENA_INLINE_B */
include/jemalloc/internal/atomic.h

@@ -52,7 +52,7 @@ void atomic_write_uint32(uint32_t *p, uint32_t x);
 void *atomic_add_p(void **p, void *x);
 void *atomic_sub_p(void **p, void *x);
 bool atomic_cas_p(void **p, void *c, void *s);
-void atomic_write_p(void **p, void *x);
+void atomic_write_p(void **p, const void *x);
 size_t atomic_add_z(size_t *p, size_t x);
 size_t atomic_sub_z(size_t *p, size_t x);
 bool atomic_cas_z(size_t *p, size_t c, size_t s);
@@ -538,7 +538,7 @@ atomic_cas_p(void **p, void *c, void *s)
 }
 
 JEMALLOC_INLINE void
-atomic_write_p(void **p, void *x)
+atomic_write_p(void **p, const void *x)
 {
 
 #if (LG_SIZEOF_PTR == 3)
include/jemalloc/internal/base.h

@@ -10,8 +10,6 @@
 #ifdef JEMALLOC_H_EXTERNS
 
 void *base_alloc(size_t size);
-extent_node_t *base_node_alloc(void);
-void base_node_dalloc(extent_node_t *node);
 size_t base_allocated_get(void);
 bool base_boot(void);
 void base_prefork(void);
include/jemalloc/internal/chunk.h

@@ -30,24 +30,21 @@
 extern size_t opt_lg_chunk;
 extern const char *opt_dss;
 
-/* Protects stats_chunks; currently not used for any other purpose. */
-extern malloc_mutex_t chunks_mtx;
-/* Chunk statistics. */
-extern chunk_stats_t stats_chunks;
-
 extern rtree_t chunks_rtree;
 
 extern size_t chunksize;
 extern size_t chunksize_mask; /* (chunksize - 1). */
 extern size_t chunk_npages;
 
+bool chunk_register(const void *chunk, const extent_node_t *node);
+void chunk_deregister(const void *chunk, const extent_node_t *node);
 void *chunk_alloc_base(size_t size);
 void *chunk_alloc_arena(chunk_alloc_t *chunk_alloc,
     chunk_dalloc_t *chunk_dalloc, unsigned arena_ind, void *new_addr,
     size_t size, size_t alignment, bool *zero);
 void *chunk_alloc_default(void *new_addr, size_t size, size_t alignment,
     bool *zero, unsigned arena_ind);
-void chunk_unmap(void *chunk, size_t size);
+void chunk_unmap(arena_t *arena, void *chunk, size_t size);
 bool chunk_dalloc_default(void *chunk, size_t size, unsigned arena_ind);
 bool chunk_boot(void);
 void chunk_prefork(void);
@@ -58,6 +55,19 @@ void chunk_postfork_child(void);
 /******************************************************************************/
 #ifdef JEMALLOC_H_INLINES
 
+#ifndef JEMALLOC_ENABLE_INLINE
+extent_node_t *chunk_lookup(const void *chunk);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_CHUNK_C_))
+JEMALLOC_INLINE extent_node_t *
+chunk_lookup(const void *chunk)
+{
+
+    return (rtree_get(&chunks_rtree, (uintptr_t)chunk));
+}
+#endif
+
 #endif /* JEMALLOC_H_INLINES */
 /******************************************************************************/
 
include/jemalloc/internal/chunk_dss.h

@@ -23,8 +23,8 @@ extern const char *dss_prec_names[];
 
 dss_prec_t chunk_dss_prec_get(void);
 bool chunk_dss_prec_set(dss_prec_t dss_prec);
-void *chunk_alloc_dss(void *new_addr, size_t size, size_t alignment,
-    bool *zero);
+void *chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size,
+    size_t alignment, bool *zero);
 bool chunk_in_dss(void *chunk);
 bool chunk_dss_boot(void);
 void chunk_dss_prefork(void);
include/jemalloc/internal/ctl.h

@@ -54,11 +54,6 @@ struct ctl_stats_s {
     size_t active;
     size_t metadata;
     size_t mapped;
-    struct {
-        size_t current;   /* stats_chunks.curchunks */
-        uint64_t total;   /* stats_chunks.nchunks */
-        size_t high;      /* stats_chunks.highchunks */
-    } chunks;
     unsigned narenas;
     ctl_arena_stats_t *arenas; /* (narenas + 1) elements. */
 };
include/jemalloc/internal/extent.h

@@ -9,21 +9,17 @@ typedef struct extent_node_s extent_node_t;
 
 /* Tree of extents. */
 struct extent_node_s {
-    /* Linkage for the size/address-ordered tree. */
-    rb_node(extent_node_t) link_szad;
-
-    /* Linkage for the address-ordered tree. */
-    rb_node(extent_node_t) link_ad;
+    /* Arena from which this extent came, if any. */
+    arena_t *arena;
 
     /* Pointer to the extent that this tree node is responsible for. */
     void *addr;
 
-    /* Total region size. */
+    /*
+     * Total region size, or 0 if this node corresponds to an arena chunk.
+     */
     size_t size;
 
-    /* Arena from which this extent came, if any. */
-    arena_t *arena;
-
     /*
      * 'prof_tctx' and 'zeroed' are never needed at the same time, so
      * overlay them in order to fit extent_node_t in one cache line.
@@ -35,6 +31,17 @@ struct extent_node_s {
         /* True if zero-filled; used by chunk recycling code. */
         bool zeroed;
     };
+
+    union {
+        /* Linkage for the size/address-ordered tree. */
+        rb_node(extent_node_t) link_szad;
+
+        /* Linkage for huge allocations and cached chunks nodes. */
+        ql_elm(extent_node_t) link_ql;
+    };
+
+    /* Linkage for the address-ordered tree. */
+    rb_node(extent_node_t) link_ad;
 };
 typedef rb_tree(extent_node_t) extent_tree_t;
 
include/jemalloc/internal/huge.h

@@ -27,10 +27,6 @@ arena_t *huge_aalloc(const void *ptr);
 size_t huge_salloc(const void *ptr);
 prof_tctx_t *huge_prof_tctx_get(const void *ptr);
 void huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx);
-bool huge_boot(void);
-void huge_prefork(void);
-void huge_postfork_parent(void);
-void huge_postfork_child(void);
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
include/jemalloc/internal/jemalloc_internal.h.in

@@ -119,13 +119,6 @@ static const bool config_xmalloc =
     false
 #endif
     ;
-static const bool config_ivsalloc =
-#ifdef JEMALLOC_IVSALLOC
-    true
-#else
-    false
-#endif
-    ;
 
 #ifdef JEMALLOC_C11ATOMICS
 #include <stdatomic.h>
@@ -352,9 +345,9 @@ typedef unsigned index_t;
 #include "jemalloc/internal/arena.h"
 #include "jemalloc/internal/bitmap.h"
 #include "jemalloc/internal/base.h"
+#include "jemalloc/internal/rtree.h"
 #include "jemalloc/internal/chunk.h"
 #include "jemalloc/internal/huge.h"
-#include "jemalloc/internal/rtree.h"
 #include "jemalloc/internal/tcache.h"
 #include "jemalloc/internal/hash.h"
 #include "jemalloc/internal/quarantine.h"
@@ -378,9 +371,9 @@ typedef unsigned index_t;
 #include "jemalloc/internal/extent.h"
 #include "jemalloc/internal/arena.h"
 #include "jemalloc/internal/base.h"
+#include "jemalloc/internal/rtree.h"
 #include "jemalloc/internal/chunk.h"
 #include "jemalloc/internal/huge.h"
-#include "jemalloc/internal/rtree.h"
 #include "jemalloc/internal/tcache.h"
 #include "jemalloc/internal/hash.h"
 #include "jemalloc/internal/quarantine.h"
@@ -457,9 +450,9 @@ void jemalloc_postfork_child(void);
 #include "jemalloc/internal/extent.h"
 #include "jemalloc/internal/arena.h"
 #include "jemalloc/internal/base.h"
+#include "jemalloc/internal/rtree.h"
 #include "jemalloc/internal/chunk.h"
 #include "jemalloc/internal/huge.h"
-#include "jemalloc/internal/rtree.h"
 #include "jemalloc/internal/tcache.h"
 #include "jemalloc/internal/hash.h"
 #include "jemalloc/internal/quarantine.h"
@@ -483,6 +476,7 @@ void jemalloc_postfork_child(void);
 #include "jemalloc/internal/mb.h"
 #include "jemalloc/internal/extent.h"
 #include "jemalloc/internal/base.h"
+#include "jemalloc/internal/rtree.h"
 #include "jemalloc/internal/chunk.h"
 #include "jemalloc/internal/huge.h"
 
@@ -777,7 +771,6 @@ arena_get(tsd_t *tsd, unsigned ind, bool init_if_missing,
 #endif
 
 #include "jemalloc/internal/bitmap.h"
-#include "jemalloc/internal/rtree.h"
 /*
  * Include portions of arena.h interleaved with tcache.h in order to resolve
  * circular dependencies.
@@ -966,10 +959,14 @@ ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero)
 JEMALLOC_ALWAYS_INLINE size_t
 ivsalloc(const void *ptr, bool demote)
 {
+    extent_node_t *node;
 
     /* Return 0 if ptr is not within a chunk managed by jemalloc. */
-    if (rtree_get(&chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == 0)
+    node = chunk_lookup(CHUNK_ADDR2BASE(ptr));
+    if (node == NULL)
         return (0);
+    /* Only arena chunks should be looked up via interior pointers. */
+    assert(node->addr == ptr || node->size == 0);
 
     return (isalloc(ptr, demote));
 }
@@ -999,7 +996,6 @@ p2rz(const void *ptr)
 JEMALLOC_ALWAYS_INLINE void
 idalloctm(tsd_t *tsd, void *ptr, tcache_t *tcache, bool is_metadata)
 {
-    arena_chunk_t *chunk;
 
     assert(ptr != NULL);
     if (config_stats && is_metadata) {
@@ -1007,11 +1003,7 @@ idalloctm(tsd_t *tsd, void *ptr, tcache_t *tcache, bool is_metadata)
         config_prof));
     }
 
-    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-    if (likely(chunk != ptr))
-        arena_dalloc(tsd, chunk, ptr, tcache);
-    else
-        huge_dalloc(tsd, ptr, tcache);
+    arena_dalloc(tsd, ptr, tcache);
 }
 
 JEMALLOC_ALWAYS_INLINE void
include/jemalloc/internal/jemalloc_internal_defs.h.in

@@ -186,12 +186,6 @@
 #undef JEMALLOC_INTERNAL_FFSL
 #undef JEMALLOC_INTERNAL_FFS
 
-/*
- * JEMALLOC_IVSALLOC enables ivsalloc(), which verifies that pointers reside
- * within jemalloc-owned chunks before dereferencing them.
- */
-#undef JEMALLOC_IVSALLOC
-
 /*
  * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
  */
include/jemalloc/internal/private_symbols.txt

@@ -60,6 +60,8 @@ arena_miscelm_to_pageind
 arena_miscelm_to_rpages
 arena_nbound
 arena_new
+arena_node_alloc
+arena_node_dalloc
 arena_palloc
 arena_postfork_child
 arena_postfork_parent
@@ -103,8 +105,6 @@ atomic_sub_z
 base_alloc
 base_allocated_get
 base_boot
-base_node_alloc
-base_node_dalloc
 base_postfork_child
 base_postfork_parent
 base_prefork
@@ -130,6 +130,7 @@ chunk_alloc_mmap
 chunk_boot
 chunk_dalloc_default
 chunk_dalloc_mmap
+chunk_deregister
 chunk_dss_boot
 chunk_dss_postfork_child
 chunk_dss_postfork_parent
@@ -137,12 +138,13 @@ chunk_dss_prec_get
 chunk_dss_prec_set
 chunk_dss_prefork
 chunk_in_dss
+chunk_lookup
 chunk_npages
 chunk_postfork_child
 chunk_postfork_parent
 chunk_prefork
+chunk_register
 chunk_unmap
-chunks_mtx
 chunks_rtree
 chunksize
 chunksize_mask
@@ -218,16 +220,12 @@ hash_x86_128
 hash_x86_32
 huge_aalloc
 huge_allocated
-huge_boot
 huge_dalloc
 huge_dalloc_junk
 huge_malloc
 huge_ndalloc
 huge_nmalloc
 huge_palloc
-huge_postfork_child
-huge_postfork_parent
-huge_prefork
 huge_prof_tctx_get
 huge_prof_tctx_set
 huge_ralloc
include/jemalloc/internal/rtree.h

@@ -37,7 +37,7 @@ typedef void (rtree_node_dalloc_t)(rtree_node_elm_t *);
 struct rtree_node_elm_s {
     union {
         rtree_node_elm_t *child;
-        void *val;
+        extent_node_t *val;
     };
 };
 
@@ -110,13 +110,14 @@ bool rtree_node_valid(rtree_node_elm_t *node);
 rtree_node_elm_t *rtree_child_tryread(rtree_node_elm_t *elm);
 rtree_node_elm_t *rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm,
     unsigned level);
-void *rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm);
-void rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, void *val);
+extent_node_t *rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm);
+void rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm,
+    const extent_node_t *val);
 rtree_node_elm_t *rtree_subtree_tryread(rtree_t *rtree, unsigned level);
 rtree_node_elm_t *rtree_subtree_read(rtree_t *rtree, unsigned level);
 
-void *rtree_get(rtree_t *rtree, uintptr_t key);
-bool rtree_set(rtree_t *rtree, uintptr_t key, void *val);
+extent_node_t *rtree_get(rtree_t *rtree, uintptr_t key);
+bool rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val);
 #endif
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
@@ -173,18 +174,18 @@ rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level)
     return (child);
 }
 
-JEMALLOC_INLINE void *
+JEMALLOC_INLINE extent_node_t *
 rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm)
 {
 
-    return (atomic_read_p(&elm->val));
+    return (atomic_read_p((void **)&elm->val));
 }
 
 JEMALLOC_INLINE void
-rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, void *val)
+rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, const extent_node_t *val)
 {
 
-    atomic_write_p(&elm->val, val);
+    atomic_write_p((void **)&elm->val, val);
 }
 
 JEMALLOC_INLINE rtree_node_elm_t *
@@ -210,7 +211,7 @@ rtree_subtree_read(rtree_t *rtree, unsigned level)
     return (subtree);
 }
 
-JEMALLOC_INLINE void *
+JEMALLOC_INLINE extent_node_t *
 rtree_get(rtree_t *rtree, uintptr_t key)
 {
     uintptr_t subkey;
@@ -238,7 +239,7 @@ rtree_get(rtree_t *rtree, uintptr_t key)
 }
 
 JEMALLOC_INLINE bool
-rtree_set(rtree_t *rtree, uintptr_t key, void *val)
+rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val)
 {
     uintptr_t subkey;
     unsigned i, start_level;
include/jemalloc/internal/stats.h

@@ -135,21 +135,6 @@ malloc_huge_stats_t *hstats;
     malloc_huge_stats_t *hstats;
 };
 
-struct chunk_stats_s {
-    /* Number of chunks that were allocated. */
-    uint64_t nchunks;
-
-    /* High-water mark for number of chunks allocated. */
-    size_t highchunks;
-
-    /*
-     * Current number of chunks allocated. This value isn't maintained for
-     * any other purpose, so keep track of it in order to be able to set
-     * highchunks.
-     */
-    size_t curchunks;
-};
-
 #endif /* JEMALLOC_H_STRUCTS */
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
src/arena.c | 74

@@ -20,6 +20,7 @@ unsigned nhclasses; /* Number of huge size classes. */
  * definition.
  */
 
+static void arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk);
 static void arena_purge(arena_t *arena, bool all);
 static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty,
     bool cleaned);
@@ -392,8 +393,7 @@ arena_chunk_init_spare(arena_t *arena)
 }
 
 static arena_chunk_t *
-arena_chunk_alloc_internal(arena_t *arena, size_t size, size_t alignment,
-    bool *zero)
+arena_chunk_alloc_internal(arena_t *arena, bool *zero)
 {
     arena_chunk_t *chunk;
     chunk_alloc_t *chunk_alloc;
@@ -403,7 +403,16 @@ arena_chunk_alloc_internal(arena_t *arena, size_t size, size_t alignment,
     chunk_dalloc = arena->chunk_dalloc;
     malloc_mutex_unlock(&arena->lock);
     chunk = (arena_chunk_t *)chunk_alloc_arena(chunk_alloc, chunk_dalloc,
-        arena->ind, NULL, size, alignment, zero);
+        arena->ind, NULL, chunksize, chunksize, zero);
+    if (chunk != NULL) {
+        chunk->node.arena = arena;
+        chunk->node.addr = chunk;
+        chunk->node.size = 0; /* Indicates this is an arena chunk. */
+        if (chunk_register(chunk, &chunk->node)) {
+            chunk_dalloc((void *)chunk, chunksize, arena->ind);
+            chunk = NULL;
+        }
+    }
     malloc_mutex_lock(&arena->lock);
     if (config_stats && chunk != NULL) {
         arena->stats.mapped += chunksize;
@@ -423,12 +432,10 @@ arena_chunk_init_hard(arena_t *arena)
     assert(arena->spare == NULL);
 
     zero = false;
-    chunk = arena_chunk_alloc_internal(arena, chunksize, chunksize, &zero);
+    chunk = arena_chunk_alloc_internal(arena, &zero);
     if (chunk == NULL)
         return (NULL);
 
-    chunk->arena = arena;
-
     /*
      * Initialize the map to contain one maximal free untouched run. Mark
      * the pages as zeroed iff chunk_alloc() returned a zeroed chunk.
@@ -514,6 +521,7 @@ arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk)
     }
     chunk_dalloc = arena->chunk_dalloc;
     malloc_mutex_unlock(&arena->lock);
+    chunk_deregister(spare, &spare->node);
     chunk_dalloc((void *)spare, chunksize, arena->ind);
     malloc_mutex_lock(&arena->lock);
     if (config_stats) {
@@ -593,6 +601,32 @@ arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize,
     arena_huge_malloc_stats_update_undo(arena, usize);
 }
 
+extent_node_t *
+arena_node_alloc(arena_t *arena)
+{
+    extent_node_t *node;
+
+    malloc_mutex_lock(&arena->node_cache_mtx);
+    node = ql_last(&arena->node_cache, link_ql);
+    if (node == NULL) {
+        malloc_mutex_unlock(&arena->node_cache_mtx);
+        return (base_alloc(sizeof(extent_node_t)));
+    }
+    ql_tail_remove(&arena->node_cache, extent_node_t, link_ql);
+    malloc_mutex_unlock(&arena->node_cache_mtx);
+    return (node);
+}
+
+void
+arena_node_dalloc(arena_t *arena, extent_node_t *node)
+{
+
+    malloc_mutex_lock(&arena->node_cache_mtx);
+    ql_elm_new(node, link_ql);
+    ql_tail_insert(&arena->node_cache, node, link_ql);
+    malloc_mutex_unlock(&arena->node_cache_mtx);
+}
+
 void *
 arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
     bool *zero)
@@ -1782,7 +1816,7 @@ arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
     if (run == bin->runcur)
         bin->runcur = NULL;
     else {
-        index_t binind = arena_bin_index(chunk->arena, bin);
+        index_t binind = arena_bin_index(chunk->node.arena, bin);
         arena_bin_info_t *bin_info = &arena_bin_info[binind];
 
         if (bin_info->nregs != 1) {
@@ -2123,7 +2157,7 @@ arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
     arena_t *arena;
 
     chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-    arena = chunk->arena;
+    arena = chunk->node.arena;
 
     if (usize < oldsize) {
         /* Fill before shrinking in order avoid a race. */
@@ -2338,10 +2372,21 @@ arena_new(unsigned ind)
 
     arena->ind = ind;
     arena->nthreads = 0;
-
+    if (malloc_mutex_init(&arena->lock))
+        return (NULL);
     arena->chunk_alloc = chunk_alloc_default;
     arena->chunk_dalloc = chunk_dalloc_default;
-    if (malloc_mutex_init(&arena->lock))
+    ql_new(&arena->huge);
+    if (malloc_mutex_init(&arena->huge_mtx))
+        return (NULL);
+    extent_tree_szad_new(&arena->chunks_szad_mmap);
+    extent_tree_ad_new(&arena->chunks_ad_mmap);
+    extent_tree_szad_new(&arena->chunks_szad_dss);
+    extent_tree_ad_new(&arena->chunks_ad_dss);
+    ql_new(&arena->node_cache);
+    if (malloc_mutex_init(&arena->chunks_mtx))
+        return (NULL);
+    if (malloc_mutex_init(&arena->node_cache_mtx))
         return (NULL);
 
     if (config_stats) {
@@ -2551,6 +2596,9 @@ arena_prefork(arena_t *arena)
     unsigned i;
 
     malloc_mutex_prefork(&arena->lock);
+    malloc_mutex_prefork(&arena->huge_mtx);
+    malloc_mutex_prefork(&arena->chunks_mtx);
+    malloc_mutex_prefork(&arena->node_cache_mtx);
     for (i = 0; i < NBINS; i++)
         malloc_mutex_prefork(&arena->bins[i].lock);
 }
@@ -2562,6 +2610,9 @@ arena_postfork_parent(arena_t *arena)
 
     for (i = 0; i < NBINS; i++)
         malloc_mutex_postfork_parent(&arena->bins[i].lock);
+    malloc_mutex_postfork_parent(&arena->node_cache_mtx);
+    malloc_mutex_postfork_parent(&arena->chunks_mtx);
+    malloc_mutex_postfork_parent(&arena->huge_mtx);
     malloc_mutex_postfork_parent(&arena->lock);
 }
 
@@ -2572,5 +2623,8 @@ arena_postfork_child(arena_t *arena)
 
     for (i = 0; i < NBINS; i++)
         malloc_mutex_postfork_child(&arena->bins[i].lock);
+    malloc_mutex_postfork_child(&arena->node_cache_mtx);
+    malloc_mutex_postfork_child(&arena->chunks_mtx);
+    malloc_mutex_postfork_child(&arena->huge_mtx);
     malloc_mutex_postfork_child(&arena->lock);
 }
src/base.c | 65

@@ -11,8 +11,9 @@ static size_t base_allocated;
 
 /******************************************************************************/
 
+/* base_mtx must be held. */
 static extent_node_t *
-base_node_try_alloc_locked(void)
+base_node_try_alloc(void)
 {
     extent_node_t *node;
 
@@ -24,8 +25,9 @@ base_node_try_alloc_locked(void)
     return (node);
 }
 
+/* base_mtx must be held. */
 static void
-base_node_dalloc_locked(extent_node_t *node)
+base_node_dalloc(extent_node_t *node)
 {
 
     JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
@@ -42,14 +44,14 @@ base_chunk_alloc(size_t minsize)
     void *addr;
 
     assert(minsize != 0);
-    node = base_node_try_alloc_locked();
+    node = base_node_try_alloc();
     /* Allocate enough space to also carve a node out if necessary. */
     nsize = (node == NULL) ? CACHELINE_CEILING(sizeof(extent_node_t)) : 0;
     csize = CHUNK_CEILING(minsize + nsize);
     addr = chunk_alloc_base(csize);
     if (addr == NULL) {
         if (node != NULL)
-            base_node_dalloc_locked(node);
+            base_node_dalloc(node);
         return (NULL);
     }
     if (node == NULL) {
@@ -63,8 +65,13 @@ base_chunk_alloc(size_t minsize)
     return (node);
 }
 
-static void *
-base_alloc_locked(size_t size)
+/*
+ * base_alloc() guarantees demand-zeroed memory, in order to make multi-page
+ * sparse data structures such as radix tree nodes efficient with respect to
+ * physical memory usage.
+ */
+void *
+base_alloc(size_t size)
 {
     void *ret;
     size_t csize;
@@ -79,6 +86,7 @@ base_alloc_locked(size_t size)
 
     key.addr = NULL;
     key.size = csize;
+    malloc_mutex_lock(&base_mtx);
     node = extent_tree_szad_nsearch(&base_avail_szad, &key);
     if (node != NULL) {
         /* Use existing space. */
@@ -87,8 +95,10 @@ base_alloc_locked(size_t size)
         /* Try to allocate more space. */
         node = base_chunk_alloc(csize);
     }
-    if (node == NULL)
-        return (NULL);
+    if (node == NULL) {
+        ret = NULL;
+        goto label_return;
+    }
 
     ret = node->addr;
     if (node->size > csize) {
@@ -96,50 +106,15 @@ base_alloc_locked(size_t size)
         node->size -= csize;
         extent_tree_szad_insert(&base_avail_szad, node);
     } else
-        base_node_dalloc_locked(node);
+        base_node_dalloc(node);
     if (config_stats)
         base_allocated += csize;
     JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, csize);
-    return (ret);
-}
-
-/*
- * base_alloc() guarantees demand-zeroed memory, in order to make multi-page
- * sparse data structures such as radix tree nodes efficient with respect to
- * physical memory usage.
- */
-void *
-base_alloc(size_t size)
-{
-    void *ret;
-
-    malloc_mutex_lock(&base_mtx);
-    ret = base_alloc_locked(size);
+label_return:
     malloc_mutex_unlock(&base_mtx);
     return (ret);
 }
-
-extent_node_t *
-base_node_alloc(void)
-{
-    extent_node_t *ret;
-
-    malloc_mutex_lock(&base_mtx);
-    if ((ret = base_node_try_alloc_locked()) == NULL)
-        ret = (extent_node_t *)base_alloc_locked(sizeof(extent_node_t));
-    malloc_mutex_unlock(&base_mtx);
-    return (ret);
-}
-
-void
-base_node_dalloc(extent_node_t *node)
-{
-
-    malloc_mutex_lock(&base_mtx);
-    base_node_dalloc_locked(node);
-    malloc_mutex_unlock(&base_mtx);
-}
 
 size_t
 base_allocated_get(void)
 {
275
src/chunk.c
275
src/chunk.c
@ -7,19 +7,9 @@
|
|||||||
const char *opt_dss = DSS_DEFAULT;
|
const char *opt_dss = DSS_DEFAULT;
|
||||||
size_t opt_lg_chunk = LG_CHUNK_DEFAULT;
|
size_t opt_lg_chunk = LG_CHUNK_DEFAULT;
|
||||||
|
|
||||||
malloc_mutex_t chunks_mtx;
|
/* Used exclusively for gdump triggering. */
|
||||||
chunk_stats_t stats_chunks;
|
static size_t curchunks;
|
||||||
|
static size_t highchunks;
|
||||||
/*
|
|
||||||
* Trees of chunks that were previously allocated (trees differ only in node
|
|
||||||
* ordering). These are used when allocating chunks, in an attempt to re-use
|
|
||||||
* address space. Depending on function, different tree orderings are needed,
|
|
||||||
* which is why there are two trees with the same contents.
|
|
||||||
*/
|
|
||||||
static extent_tree_t chunks_szad_mmap;
|
|
||||||
static extent_tree_t chunks_ad_mmap;
|
|
||||||
static extent_tree_t chunks_szad_dss;
|
|
||||||
static extent_tree_t chunks_ad_dss;
|
|
||||||
|
|
||||||
rtree_t chunks_rtree;
|
rtree_t chunks_rtree;
|
||||||
|
|
||||||
@ -29,18 +19,51 @@ size_t chunksize_mask; /* (chunksize - 1). */
|
|||||||
size_t chunk_npages;
|
size_t chunk_npages;
|
||||||
|
|
||||||
/******************************************************************************/
|
/******************************************************************************/
|
||||||
/*
|
|
||||||
* Function prototypes for static functions that are referenced prior to
|
|
||||||
* definition.
|
|
||||||
*/
|
|
||||||
|
|
||||||
static void chunk_dalloc_core(void *chunk, size_t size);
|
bool
|
||||||
|
chunk_register(const void *chunk, const extent_node_t *node)
|
||||||
|
{
|
||||||
|
|
||||||
/******************************************************************************/
|
assert(node->addr == chunk);
|
||||||
|
|
||||||
|
if (rtree_set(&chunks_rtree, (uintptr_t)chunk, node))
|
||||||
|
return (true);
|
||||||
|
if (config_prof && opt_prof) {
|
||||||
|
size_t nadd = (node->size == 0) ? 1 : node->size / chunksize;
|
||||||
|
size_t cur = atomic_add_z(&curchunks, nadd);
|
||||||
|
size_t high = atomic_read_z(&highchunks);
|
||||||
|
while (cur > high && atomic_cas_z(&highchunks, high, cur)) {
|
||||||
|
/*
|
||||||
|
* Don't refresh cur, because it may have decreased
|
||||||
|
* since this thread lost the highchunks update race.
|
||||||
|
*/
|
||||||
|
high = atomic_read_z(&highchunks);
|
||||||
|
}
|
||||||
|
if (cur > high && prof_gdump_get_unlocked())
|
||||||
|
prof_gdump();
|
||||||
|
}
|
||||||
|
|
||||||
|
return (false);
|
||||||
|
}
|
||||||
|
|
||||||
|
void
|
||||||
|
chunk_deregister(const void *chunk, const extent_node_t *node)
|
||||||
|
{
|
||||||
|
bool err;
|
||||||
|
|
||||||
|
err = rtree_set(&chunks_rtree, (uintptr_t)chunk, NULL);
|
||||||
|
assert(!err);
|
||||||
|
if (config_prof && opt_prof) {
|
||||||
|
size_t nsub = (node->size == 0) ? 1 : node->size / chunksize;
|
||||||
|
assert(atomic_read_z(&curchunks) >= nsub);
|
||||||
|
atomic_sub_z(&curchunks, nsub);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
 static void *
-chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad,
-    void *new_addr, size_t size, size_t alignment, bool base, bool *zero)
+chunk_recycle(arena_t *arena, extent_tree_t *chunks_szad,
+    extent_tree_t *chunks_ad, void *new_addr, size_t size, size_t alignment,
+    bool *zero)
 {
 	void *ret;
 	extent_node_t *node;
@@ -50,27 +73,17 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad,

 	assert(new_addr == NULL || alignment == chunksize);

-	if (base) {
-		/*
-		 * This function may need to call base_node_{,de}alloc(), but
-		 * the current chunk allocation request is on behalf of the
-		 * base allocator.  Avoid deadlock (and if that weren't an
-		 * issue, potential for infinite recursion) by returning NULL.
-		 */
-		return (NULL);
-	}
-
 	alloc_size = size + alignment - chunksize;
 	/* Beware size_t wrap-around. */
 	if (alloc_size < size)
 		return (NULL);
 	key.addr = new_addr;
 	key.size = alloc_size;
-	malloc_mutex_lock(&chunks_mtx);
+	malloc_mutex_lock(&arena->chunks_mtx);
 	node = (new_addr != NULL) ? extent_tree_ad_search(chunks_ad, &key) :
 	    extent_tree_szad_nsearch(chunks_szad, &key);
 	if (node == NULL) {
-		malloc_mutex_unlock(&chunks_mtx);
+		malloc_mutex_unlock(&arena->chunks_mtx);
 		return (NULL);
 	}
 	leadsize = ALIGNMENT_CEILING((uintptr_t)node->addr, alignment) -
@@ -95,20 +108,12 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad,
 	if (trailsize != 0) {
 		/* Insert the trailing space as a smaller chunk. */
 		if (node == NULL) {
-			/*
-			 * An additional node is required, but
-			 * base_node_alloc() can cause a new base chunk to be
-			 * allocated.  Drop chunks_mtx in order to avoid
-			 * deadlock, and if node allocation fails, deallocate
-			 * the result before returning an error.
-			 */
-			malloc_mutex_unlock(&chunks_mtx);
-			node = base_node_alloc();
+			node = arena_node_alloc(arena);
 			if (node == NULL) {
-				chunk_dalloc_core(ret, size);
+				malloc_mutex_unlock(&arena->chunks_mtx);
+				chunk_unmap(arena, ret, size);
 				return (NULL);
 			}
-			malloc_mutex_lock(&chunks_mtx);
 		}
 		node->addr = (void *)((uintptr_t)(ret) + size);
 		node->size = trailsize;
@@ -117,10 +122,10 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad,
 		extent_tree_ad_insert(chunks_ad, node);
 		node = NULL;
 	}
-	malloc_mutex_unlock(&chunks_mtx);
+	malloc_mutex_unlock(&arena->chunks_mtx);

 	if (node != NULL)
-		base_node_dalloc(node);
+		arena_node_dalloc(arena, node);
 	if (*zero) {
 		if (!zeroed)
 			memset(ret, 0, size);
@@ -137,15 +142,15 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad,
 	}

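chunk_recycle() carves an aligned region out of whatever cached extent the size/address-ordered search returns, re-inserting the leading and trailing remainders into the trees. A minimal, self-contained sketch of that arithmetic; the macro mirrors jemalloc's ALIGNMENT_CEILING(), and the concrete addresses and sizes are made up for illustration.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define ALIGNMENT_CEILING(s, alignment)					\
	(((s) + ((alignment) - 1)) & ~((uintptr_t)(alignment) - 1))

int
main(void)
{
	uintptr_t addr = 0x310000;	/* base of the cached extent */
	size_t node_size = 6 << 20;	/* size of the cached extent */
	size_t size = 4 << 20;		/* requested size */
	size_t alignment = 2 << 20;	/* requested alignment */

	uintptr_t ret = ALIGNMENT_CEILING(addr, alignment);
	size_t leadsize = ret - addr;
	size_t trailsize = node_size - leadsize - size;

	assert(leadsize + size + trailsize == node_size);
	/* The lead reuses the existing node; the trail needs a fresh one. */
	printf("lead=%zu ret=%#lx trail=%zu\n", leadsize,
	    (unsigned long)ret, trailsize);
	return (0);
}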
 static void *
-chunk_alloc_core_dss(void *new_addr, size_t size, size_t alignment, bool base,
-    bool *zero)
+chunk_alloc_core_dss(arena_t *arena, void *new_addr, size_t size,
+    size_t alignment, bool *zero)
 {
 	void *ret;

-	if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss,
-	    new_addr, size, alignment, base, zero)) != NULL)
+	if ((ret = chunk_recycle(arena, &arena->chunks_szad_dss,
+	    &arena->chunks_ad_dss, new_addr, size, alignment, zero)) != NULL)
 		return (ret);
-	ret = chunk_alloc_dss(new_addr, size, alignment, zero);
+	ret = chunk_alloc_dss(arena, new_addr, size, alignment, zero);
 	return (ret);
 }

@@ -156,7 +161,7 @@ chunk_alloc_core_dss(void *new_addr, size_t size, size_t alignment, bool base,
  * them if they are returned.
  */
 static void *
-chunk_alloc_core(void *new_addr, size_t size, size_t alignment, bool base,
+chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment,
     bool *zero, dss_prec_t dss_prec)
 {
 	void *ret;
@@ -168,12 +173,13 @@ chunk_alloc_core(void *new_addr, size_t size, size_t alignment, bool base,

 	/* "primary" dss. */
 	if (have_dss && dss_prec == dss_prec_primary && (ret =
-	    chunk_alloc_core_dss(new_addr, size, alignment, base, zero)) !=
+	    chunk_alloc_core_dss(arena, new_addr, size, alignment, zero)) !=
 	    NULL)
 		return (ret);
 	/* mmap. */
-	if (!config_munmap && (ret = chunk_recycle(&chunks_szad_mmap,
-	    &chunks_ad_mmap, new_addr, size, alignment, base, zero)) != NULL)
+	if (!config_munmap && (ret = chunk_recycle(arena,
+	    &arena->chunks_szad_mmap, &arena->chunks_ad_mmap, new_addr, size,
+	    alignment, zero)) != NULL)
 		return (ret);
 	/*
 	 * Requesting an address is not implemented for chunk_alloc_mmap(), so
@@ -184,7 +190,7 @@ chunk_alloc_core(void *new_addr, size_t size, size_t alignment, bool base,
 		return (ret);
 	/* "secondary" dss. */
 	if (have_dss && dss_prec == dss_prec_secondary && (ret =
-	    chunk_alloc_core_dss(new_addr, size, alignment, base, zero)) !=
+	    chunk_alloc_core_dss(arena, new_addr, size, alignment, zero)) !=
 	    NULL)
 		return (ret);

@@ -192,40 +198,6 @@ chunk_alloc_core(void *new_addr, size_t size, size_t alignment, bool base,
 	return (NULL);
 }

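chunk_alloc_core() keeps the same source ordering as before, just against the per-arena trees: primary dss, then mmap (including the mmap recycle trees when munmap is disabled), then secondary dss. A compilable sketch of that control flow; the stub allocators are placeholders, not jemalloc functions.

#include <stdbool.h>
#include <stddef.h>

typedef enum { dss_prec_disabled, dss_prec_primary, dss_prec_secondary } dss_prec_t;

static void *try_recycle_or_dss(void) { return NULL; }	/* stub */
static void *try_mmap(void) { return NULL; }		/* stub */

static void *
alloc_in_order(dss_prec_t prec, bool have_dss)
{
	void *ret;

	/* "primary" dss is consulted before mmap... */
	if (have_dss && prec == dss_prec_primary &&
	    (ret = try_recycle_or_dss()) != NULL)
		return (ret);
	/* ...then mmap (and its recycle trees)... */
	if ((ret = try_mmap()) != NULL)
		return (ret);
	/* ...and "secondary" dss only as a last resort. */
	if (have_dss && prec == dss_prec_secondary &&
	    (ret = try_recycle_or_dss()) != NULL)
		return (ret);
	return (NULL);
}

int
main(void)
{
	return (alloc_in_order(dss_prec_secondary, true) == NULL ? 0 : 1);
}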
-static bool
-chunk_register(void *chunk, size_t size, bool base)
-{
-
-	assert(chunk != NULL);
-	assert(CHUNK_ADDR2BASE(chunk) == chunk);
-
-	if (config_ivsalloc && !base) {
-		if (rtree_set(&chunks_rtree, (uintptr_t)chunk, chunk))
-			return (true);
-	}
-	if (config_stats || config_prof) {
-		bool gdump;
-		malloc_mutex_lock(&chunks_mtx);
-		if (config_stats)
-			stats_chunks.nchunks += (size / chunksize);
-		stats_chunks.curchunks += (size / chunksize);
-		if (stats_chunks.curchunks > stats_chunks.highchunks) {
-			stats_chunks.highchunks =
-			    stats_chunks.curchunks;
-			if (config_prof)
-				gdump = true;
-		} else if (config_prof)
-			gdump = false;
-		malloc_mutex_unlock(&chunks_mtx);
-		if (config_prof && opt_prof && prof_gdump_get_unlocked() &&
-		    gdump)
-			prof_gdump();
-	}
-	if (config_valgrind)
-		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(chunk, size);
-	return (false);
-}
-
 void *
 chunk_alloc_base(size_t size)
 {
@@ -239,10 +211,10 @@ chunk_alloc_base(size_t size)
 	 */
 	zero = true;
 	ret = chunk_alloc_mmap(size, chunksize, &zero);
-	if (ret != NULL && chunk_register(ret, size, true)) {
-		chunk_dalloc_core(ret, size);
-		ret = NULL;
-	}
+	if (ret == NULL)
+		return (NULL);
+	if (config_valgrind)
+		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);

 	return (ret);
 }
@@ -255,18 +227,16 @@ chunk_alloc_arena(chunk_alloc_t *chunk_alloc, chunk_dalloc_t *chunk_dalloc,
 	void *ret;

 	ret = chunk_alloc(new_addr, size, alignment, zero, arena_ind);
-	if (ret != NULL && chunk_register(ret, size, false)) {
-		chunk_dalloc(ret, size, arena_ind);
-		ret = NULL;
-	}
+	if (ret == NULL)
+		return (NULL);
+	if (config_valgrind)
+		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);

 	return (ret);
 }

-/* Default arena chunk allocation routine in the absence of user override. */
-void *
-chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
-    unsigned arena_ind)
+static arena_t *
+chunk_arena_get(unsigned arena_ind)
 {
 	arena_t *arena;

@@ -278,32 +248,32 @@ chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
 	 * already.
 	 */
 	assert(arena != NULL);
+	return (arena);
+}

-	return (chunk_alloc_core(new_addr, size, alignment, false, zero,
+/* Default arena chunk allocation routine in the absence of user override. */
+void *
+chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
+    unsigned arena_ind)
+{
+	arena_t *arena;
+
+	arena = chunk_arena_get(arena_ind);
+	return (chunk_alloc_core(arena, new_addr, size, alignment, zero,
 	    arena->dss_prec));
 }

 static void
-chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
-    size_t size)
+chunk_record(arena_t *arena, extent_tree_t *chunks_szad,
+    extent_tree_t *chunks_ad, void *chunk, size_t size)
 {
 	bool unzeroed;
-	extent_node_t *xnode, *node, *prev, *xprev, key;
+	extent_node_t *node, *prev, key;

 	unzeroed = pages_purge(chunk, size);
 	JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);

-	/*
-	 * Allocate a node before acquiring chunks_mtx even though it might not
-	 * be needed, because base_node_alloc() may cause a new base chunk to
-	 * be allocated, which could cause deadlock if chunks_mtx were already
-	 * held.
-	 */
-	xnode = base_node_alloc();
-	/* Use xprev to implement conditional deferred deallocation of prev. */
-	xprev = NULL;
-
-	malloc_mutex_lock(&chunks_mtx);
+	malloc_mutex_lock(&arena->chunks_mtx);
 	key.addr = (void *)((uintptr_t)chunk + size);
 	node = extent_tree_ad_nsearch(chunks_ad, &key);
 	/* Try to coalesce forward. */
@@ -320,17 +290,16 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
 		extent_tree_szad_insert(chunks_szad, node);
 	} else {
 		/* Coalescing forward failed, so insert a new node. */
-		if (xnode == NULL) {
+		node = arena_node_alloc(arena);
+		if (node == NULL) {
 			/*
-			 * base_node_alloc() failed, which is an exceedingly
+			 * Node allocation failed, which is an exceedingly
 			 * unlikely failure.  Leak chunk; its pages have
 			 * already been purged, so this is only a virtual
 			 * memory leak.
 			 */
 			goto label_return;
 		}
-		node = xnode;
-		xnode = NULL; /* Prevent deallocation below. */
 		node->addr = chunk;
 		node->size = size;
 		node->zeroed = !unzeroed;
@@ -356,37 +325,15 @@ chunk_record(...)
 		node->zeroed = (node->zeroed && prev->zeroed);
 		extent_tree_szad_insert(chunks_szad, node);

-		xprev = prev;
+		arena_node_dalloc(arena, prev);
 	}

 label_return:
-	malloc_mutex_unlock(&chunks_mtx);
-	/*
-	 * Deallocate xnode and/or xprev after unlocking chunks_mtx in order to
-	 * avoid potential deadlock.
-	 */
-	if (xnode != NULL)
-		base_node_dalloc(xnode);
-	if (xprev != NULL)
-		base_node_dalloc(xprev);
+	malloc_mutex_unlock(&arena->chunks_mtx);
 }

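chunk_record() coalesces the freed run with the address-adjacent cached extents on either side before re-inserting it; the arena_node_dalloc(arena, prev) call above frees the node that backward coalescing makes redundant. A standalone sketch of the coalescing check itself, using a plain struct instead of jemalloc's extent trees.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for extent_node_t. */
typedef struct {
	uintptr_t addr;
	size_t size;
	bool zeroed;
} extent;

/* Merge b into a if b starts exactly where a ends. */
static bool
coalesce_forward(extent *a, const extent *b)
{
	if (a->addr + a->size != b->addr)
		return (false);
	a->size += b->size;
	a->zeroed = a->zeroed && b->zeroed;
	return (true);
}

int
main(void)
{
	extent freed = { 0x400000, 0x200000, true };
	extent next = { 0x600000, 0x200000, false };
	extent prev = { 0x200000, 0x200000, true };

	/* Forward: freed absorbs next. */
	if (coalesce_forward(&freed, &next))
		printf("forward: [%#lx, +%zu)\n",
		    (unsigned long)freed.addr, freed.size);
	/* Backward: prev absorbs the (already grown) freed extent. */
	if (coalesce_forward(&prev, &freed))
		printf("backward: [%#lx, +%zu)\n",
		    (unsigned long)prev.addr, prev.size);
	return (0);
}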
 void
-chunk_unmap(void *chunk, size_t size)
-{
-	assert(chunk != NULL);
-	assert(CHUNK_ADDR2BASE(chunk) == chunk);
-	assert(size != 0);
-	assert((size & chunksize_mask) == 0);
-
-	if (have_dss && chunk_in_dss(chunk))
-		chunk_record(&chunks_szad_dss, &chunks_ad_dss, chunk, size);
-	else if (chunk_dalloc_mmap(chunk, size))
-		chunk_record(&chunks_szad_mmap, &chunks_ad_mmap, chunk, size);
-}
-
-static void
-chunk_dalloc_core(void *chunk, size_t size)
+chunk_unmap(arena_t *arena, void *chunk, size_t size)
 {

 	assert(chunk != NULL);
@@ -394,16 +341,13 @@ chunk_dalloc_core(void *chunk, size_t size)
 	assert(size != 0);
 	assert((size & chunksize_mask) == 0);

-	if (config_ivsalloc)
-		rtree_set(&chunks_rtree, (uintptr_t)chunk, NULL);
-	if (config_stats || config_prof) {
-		malloc_mutex_lock(&chunks_mtx);
-		assert(stats_chunks.curchunks >= (size / chunksize));
-		stats_chunks.curchunks -= (size / chunksize);
-		malloc_mutex_unlock(&chunks_mtx);
+	if (have_dss && chunk_in_dss(chunk)) {
+		chunk_record(arena, &arena->chunks_szad_dss,
+		    &arena->chunks_ad_dss, chunk, size);
+	} else if (chunk_dalloc_mmap(chunk, size)) {
+		chunk_record(arena, &arena->chunks_szad_mmap,
+		    &arena->chunks_ad_mmap, chunk, size);
 	}
-
-	chunk_unmap(chunk, size);
 }

 /* Default arena chunk deallocation routine in the absence of user override. */
@@ -411,7 +355,7 @@ bool
 chunk_dalloc_default(void *chunk, size_t size, unsigned arena_ind)
 {

-	chunk_dalloc_core(chunk, size);
+	chunk_unmap(chunk_arena_get(arena_ind), chunk, size);
 	return (false);
 }

@@ -433,21 +377,11 @@ chunk_boot(void)
 	chunksize_mask = chunksize - 1;
 	chunk_npages = (chunksize >> LG_PAGE);

-	if (malloc_mutex_init(&chunks_mtx))
-		return (true);
-	if (config_stats || config_prof)
-		memset(&stats_chunks, 0, sizeof(chunk_stats_t));
 	if (have_dss && chunk_dss_boot())
 		return (true);
-	extent_tree_szad_new(&chunks_szad_mmap);
-	extent_tree_ad_new(&chunks_ad_mmap);
-	extent_tree_szad_new(&chunks_szad_dss);
-	extent_tree_ad_new(&chunks_ad_dss);
-	if (config_ivsalloc) {
-		if (rtree_new(&chunks_rtree, (ZU(1) << (LG_SIZEOF_PTR+3)) -
-		    opt_lg_chunk, chunks_rtree_node_alloc, NULL))
-			return (true);
-	}
+	if (rtree_new(&chunks_rtree, (ZU(1) << (LG_SIZEOF_PTR+3)) -
+	    opt_lg_chunk, chunks_rtree_node_alloc, NULL))
+		return (true);

 	return (false);
 }
@@ -456,7 +390,6 @@ void
 chunk_prefork(void)
 {

-	malloc_mutex_prefork(&chunks_mtx);
 	chunk_dss_prefork();
 }

@@ -465,7 +398,6 @@ chunk_postfork_parent(void)
 {

 	chunk_dss_postfork_parent();
-	malloc_mutex_postfork_parent(&chunks_mtx);
 }

 void
@@ -473,5 +405,4 @@ chunk_postfork_child(void)
 {

 	chunk_dss_postfork_child();
-	malloc_mutex_postfork_child(&chunks_mtx);
 }
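chunk_boot() now always creates chunks_rtree, sized so that there is one key bit for every possible chunk-aligned address: pointer width in bits minus lg(chunksize). A tiny arithmetic sketch of that computation; the LG_SIZEOF_PTR and opt_lg_chunk values are illustrative defaults, not configure output.

#include <stdio.h>

int
main(void)
{
	unsigned lg_sizeof_ptr = 3;	/* 8-byte pointers */
	unsigned opt_lg_chunk = 21;	/* 2 MiB chunks, for illustration */

	/* (1 << (LG_SIZEOF_PTR+3)) is the pointer width in bits. */
	unsigned ptr_bits = 1U << (lg_sizeof_ptr + 3);
	/* The low lg_chunk bits are zero for chunk-aligned addresses. */
	unsigned rtree_bits = ptr_bits - opt_lg_chunk;

	printf("rtree keyed on %u bits (%u-bit pointers, 2^%u-byte chunks)\n",
	    rtree_bits, ptr_bits, opt_lg_chunk);
	return (0);
}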
src/chunk_dss.c
@@ -66,7 +66,8 @@ chunk_dss_prec_set(dss_prec_t dss_prec)
 }

 void *
-chunk_alloc_dss(void *new_addr, size_t size, size_t alignment, bool *zero)
+chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
+    bool *zero)
 {
 	void *ret;

@@ -133,7 +134,7 @@ chunk_alloc_dss(void *new_addr, size_t size, size_t alignment, bool *zero)
 			dss_max = dss_next;
 			malloc_mutex_unlock(&dss_mtx);
 			if (cpad_size != 0)
-				chunk_unmap(cpad, cpad_size);
+				chunk_unmap(arena, cpad, cpad_size);
 			if (*zero) {
 				JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
 				    ret, size);
26 src/ctl.c
@@ -144,9 +144,6 @@ CTL_PROTO(prof_gdump)
 CTL_PROTO(prof_reset)
 CTL_PROTO(prof_interval)
 CTL_PROTO(lg_prof_sample)
-CTL_PROTO(stats_chunks_current)
-CTL_PROTO(stats_chunks_total)
-CTL_PROTO(stats_chunks_high)
 CTL_PROTO(stats_arenas_i_small_allocated)
 CTL_PROTO(stats_arenas_i_small_nmalloc)
 CTL_PROTO(stats_arenas_i_small_ndalloc)
@@ -363,12 +360,6 @@ static const ctl_named_node_t prof_node[] = {
 	{NAME("lg_sample"),	CTL(lg_prof_sample)}
 };

-static const ctl_named_node_t stats_chunks_node[] = {
-	{NAME("current"),	CTL(stats_chunks_current)},
-	{NAME("total"),		CTL(stats_chunks_total)},
-	{NAME("high"),		CTL(stats_chunks_high)}
-};
-
 static const ctl_named_node_t stats_arenas_i_metadata_node[] = {
 	{NAME("mapped"),	CTL(stats_arenas_i_metadata_mapped)},
 	{NAME("allocated"),	CTL(stats_arenas_i_metadata_allocated)}
@@ -473,7 +464,6 @@ static const ctl_named_node_t stats_node[] = {
 	{NAME("active"),	CTL(stats_active)},
 	{NAME("metadata"),	CTL(stats_metadata)},
 	{NAME("mapped"),	CTL(stats_mapped)},
-	{NAME("chunks"),	CHILD(named, stats_chunks)},
 	{NAME("arenas"),	CHILD(indexed, stats_arenas)}
 };

@@ -688,14 +678,6 @@ ctl_refresh(void)
 	unsigned i;
 	VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);

-	if (config_stats) {
-		malloc_mutex_lock(&chunks_mtx);
-		ctl_stats.chunks.current = stats_chunks.curchunks;
-		ctl_stats.chunks.total = stats_chunks.nchunks;
-		ctl_stats.chunks.high = stats_chunks.highchunks;
-		malloc_mutex_unlock(&chunks_mtx);
-	}
-
 	/*
 	 * Clear sum stats, since they will be merged into by
 	 * ctl_arena_refresh().
@@ -733,7 +715,8 @@ ctl_refresh(void)
 		    + ctl_stats.arenas[ctl_stats.narenas].astats.metadata_mapped
 		    + ctl_stats.arenas[ctl_stats.narenas].astats
 		    .metadata_allocated;
-		ctl_stats.mapped = (ctl_stats.chunks.current << opt_lg_chunk);
+		ctl_stats.mapped =
+		    ctl_stats.arenas[ctl_stats.narenas].astats.mapped;
 	}

 	ctl_epoch++;
@@ -1950,11 +1933,6 @@ CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t)
 CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats.metadata, size_t)
 CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t)

-CTL_RO_CGEN(config_stats, stats_chunks_current, ctl_stats.chunks.current,
-    size_t)
-CTL_RO_CGEN(config_stats, stats_chunks_total, ctl_stats.chunks.total, uint64_t)
-CTL_RO_CGEN(config_stats, stats_chunks_high, ctl_stats.chunks.high, size_t)
-
 CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *)
 CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
 CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
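With the stats.chunks.* mallctls gone, total mapped memory is taken from the merged per-arena astats rather than chunks.current << lg_chunk, so callers should read stats.mapped (or the per-arena stats) instead. A minimal usage sketch against the public mallctl API, assuming jemalloc is installed with its standard <jemalloc/jemalloc.h> header; error handling is trimmed for brevity.

#include <stdint.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	uint64_t epoch = 1;
	size_t sz, mapped;

	/* Refresh the stats snapshot, then read the merged mapped total. */
	sz = sizeof(epoch);
	mallctl("epoch", &epoch, &sz, &epoch, sz);
	sz = sizeof(mapped);
	if (mallctl("stats.mapped", &mapped, &sz, NULL, 0) == 0)
		printf("mapped: %zu bytes\n", mapped);
	return (0);
}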
169 src/huge.c
@@ -2,15 +2,33 @@
 #include "jemalloc/internal/jemalloc_internal.h"

 /******************************************************************************/
-/* Data. */
-
-/* Protects chunk-related data structures. */
-static malloc_mutex_t huge_mtx;
-
-/******************************************************************************/
-
-/* Tree of chunks that are stand-alone huge allocations. */
-static extent_tree_t huge;
+
+static extent_node_t *
+huge_node_get(const void *ptr)
+{
+	extent_node_t *node;
+
+	node = chunk_lookup(ptr);
+	assert(node->size != 0);
+
+	return (node);
+}
+
+static bool
+huge_node_set(const void *ptr, extent_node_t *node)
+{
+
+	assert(node->addr == ptr);
+	assert(node->size != 0);
+	return (chunk_register(ptr, node));
+}
+
+static void
+huge_node_unset(const void *ptr, const extent_node_t *node)
+{
+
+	chunk_deregister(ptr, node);
+}

 void *
 huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
@@ -55,15 +73,22 @@ huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
 		return (NULL);
 	}

-	/* Insert node into huge. */
 	node->addr = ret;
 	node->size = usize;
 	node->zeroed = is_zeroed;
 	node->arena = arena;

-	malloc_mutex_lock(&huge_mtx);
-	extent_tree_ad_insert(&huge, node);
-	malloc_mutex_unlock(&huge_mtx);
+	if (huge_node_set(ret, node)) {
+		arena_chunk_dalloc_huge(arena, ret, usize);
+		idalloctm(tsd, node, tcache, true);
+		return (NULL);
+	}
+
+	/* Insert node into huge. */
+	malloc_mutex_lock(&arena->huge_mtx);
+	ql_elm_new(node, link_ql);
+	ql_tail_insert(&arena->huge, node, link_ql);
+	malloc_mutex_unlock(&arena->huge_mtx);

 	if (zero || (config_fill && unlikely(opt_zero))) {
 		if (!is_zeroed)
@@ -74,32 +99,6 @@ huge_palloc(...)
 	return (ret);
 }

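huge_palloc() now links the node into an unsorted per-arena list under arena->huge_mtx rather than a global tree; the list only needs insertion, removal, and full enumeration (for the planned arena reset). A self-contained sketch of that shape, using a plain doubly-linked list and a pthread mutex instead of jemalloc's ql_/malloc_mutex primitives; the types here are stand-ins, not jemalloc's.

#include <pthread.h>
#include <stddef.h>

typedef struct huge_node_s huge_node_t;
struct huge_node_s {
	void		*addr;
	size_t		size;
	huge_node_t	*prev, *next;
};

typedef struct {
	pthread_mutex_t	huge_mtx;
	huge_node_t	*huge_head;	/* unsorted; order is irrelevant */
} arena_lite_t;

static void
arena_huge_insert(arena_lite_t *arena, huge_node_t *node)
{
	pthread_mutex_lock(&arena->huge_mtx);
	node->prev = NULL;
	node->next = arena->huge_head;
	if (node->next != NULL)
		node->next->prev = node;
	arena->huge_head = node;
	pthread_mutex_unlock(&arena->huge_mtx);
}

static void
arena_huge_remove(arena_lite_t *arena, huge_node_t *node)
{
	pthread_mutex_lock(&arena->huge_mtx);
	if (node->prev != NULL)
		node->prev->next = node->next;
	else
		arena->huge_head = node->next;
	if (node->next != NULL)
		node->next->prev = node->prev;
	pthread_mutex_unlock(&arena->huge_mtx);
}

int
main(void)
{
	arena_lite_t arena = { PTHREAD_MUTEX_INITIALIZER, NULL };
	huge_node_t a = { (void *)0x1, 4096, NULL, NULL };

	arena_huge_insert(&arena, &a);
	arena_huge_remove(&arena, &a);
	return (0);
}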
-static extent_node_t *
-huge_node_locked(const void *ptr)
-{
-	extent_node_t *node, key;
-
-	/* Extract from tree of huge allocations. */
-	key.addr = __DECONST(void *, ptr);
-	node = extent_tree_ad_search(&huge, &key);
-	assert(node != NULL);
-	assert(node->addr == ptr);
-
-	return (node);
-}
-
-static extent_node_t *
-huge_node(const void *ptr)
-{
-	extent_node_t *node;
-
-	malloc_mutex_lock(&huge_mtx);
-	node = huge_node_locked(ptr);
-	malloc_mutex_unlock(&huge_mtx);
-
-	return (node);
-}
-
 #ifdef JEMALLOC_JET
 #undef huge_dalloc_junk
 #define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
@@ -152,15 +151,15 @@ huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize,
 	} else
 		zeroed = true;

-	malloc_mutex_lock(&huge_mtx);
-	node = huge_node_locked(ptr);
+	node = huge_node_get(ptr);
 	arena = node->arena;
+	malloc_mutex_lock(&arena->huge_mtx);
 	/* Update the size of the huge allocation. */
 	assert(node->size != usize);
 	node->size = usize;
 	/* Clear node->zeroed if zeroing failed above. */
 	node->zeroed = (node->zeroed && zeroed);
-	malloc_mutex_unlock(&huge_mtx);
+	malloc_mutex_unlock(&arena->huge_mtx);

 	arena_chunk_ralloc_huge_similar(arena, ptr, oldsize, usize);

@@ -195,14 +194,14 @@ huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
 		zeroed = false;
 	}

-	malloc_mutex_lock(&huge_mtx);
-	node = huge_node_locked(ptr);
+	node = huge_node_get(ptr);
 	arena = node->arena;
+	malloc_mutex_lock(&arena->huge_mtx);
 	/* Update the size of the huge allocation. */
 	node->size = usize;
 	/* Clear node->zeroed if zeroing failed above. */
 	node->zeroed = (node->zeroed && zeroed);
-	malloc_mutex_unlock(&huge_mtx);
+	malloc_mutex_unlock(&arena->huge_mtx);

 	/* Zap the excess chunks. */
 	arena_chunk_ralloc_huge_shrink(arena, ptr, oldsize, usize);
@@ -221,11 +220,11 @@ huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t size, bool zero) {
 		return (true);
 	}

-	malloc_mutex_lock(&huge_mtx);
-	node = huge_node_locked(ptr);
+	node = huge_node_get(ptr);
 	arena = node->arena;
+	malloc_mutex_lock(&arena->huge_mtx);
 	is_zeroed_subchunk = node->zeroed;
-	malloc_mutex_unlock(&huge_mtx);
+	malloc_mutex_unlock(&arena->huge_mtx);

 	/*
 	 * Copy zero into is_zeroed_chunk and pass the copy to chunk_alloc(), so
@@ -237,10 +236,10 @@ huge_ralloc_no_move_expand(...)
 	    &is_zeroed_chunk))
 		return (true);

-	malloc_mutex_lock(&huge_mtx);
+	malloc_mutex_lock(&arena->huge_mtx);
 	/* Update the size of the huge allocation. */
 	node->size = usize;
-	malloc_mutex_unlock(&huge_mtx);
+	malloc_mutex_unlock(&arena->huge_mtx);

 	if (zero || (config_fill && unlikely(opt_zero))) {
 		if (!is_zeroed_subchunk) {
@@ -356,11 +355,14 @@ void
 huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
 {
 	extent_node_t *node;
+	arena_t *arena;

-	malloc_mutex_lock(&huge_mtx);
-	node = huge_node_locked(ptr);
-	extent_tree_ad_remove(&huge, node);
-	malloc_mutex_unlock(&huge_mtx);
+	node = huge_node_get(ptr);
+	arena = node->arena;
+	huge_node_unset(ptr, node);
+	malloc_mutex_lock(&arena->huge_mtx);
+	ql_remove(&arena->huge, node, link_ql);
+	malloc_mutex_unlock(&arena->huge_mtx);

 	huge_dalloc_junk(node->addr, node->size);
 	arena_chunk_dalloc_huge(node->arena, node->addr, node->size);
@@ -371,59 +373,50 @@ arena_t *
 huge_aalloc(const void *ptr)
 {

-	return (huge_node(ptr)->arena);
+	return (huge_node_get(ptr)->arena);
 }

 size_t
 huge_salloc(const void *ptr)
 {
+	size_t size;
+	extent_node_t *node;
+	arena_t *arena;

-	return (huge_node(ptr)->size);
+	node = huge_node_get(ptr);
+	arena = node->arena;
+	malloc_mutex_lock(&arena->huge_mtx);
+	size = node->size;
+	malloc_mutex_unlock(&arena->huge_mtx);
+
+	return (size);
 }

 prof_tctx_t *
 huge_prof_tctx_get(const void *ptr)
 {
+	prof_tctx_t *tctx;
+	extent_node_t *node;
+	arena_t *arena;

-	return (huge_node(ptr)->prof_tctx);
+	node = huge_node_get(ptr);
+	arena = node->arena;
+	malloc_mutex_lock(&arena->huge_mtx);
+	tctx = node->prof_tctx;
+	malloc_mutex_unlock(&arena->huge_mtx);
+
+	return (tctx);
 }

 void
 huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
 {
+	extent_node_t *node;
+	arena_t *arena;

-	huge_node(ptr)->prof_tctx = tctx;
-}
-
-bool
-huge_boot(void)
-{
-
-	/* Initialize chunks data. */
-	if (malloc_mutex_init(&huge_mtx))
-		return (true);
-	extent_tree_ad_new(&huge);
-
-	return (false);
-}
-
-void
-huge_prefork(void)
-{
-
-	malloc_mutex_prefork(&huge_mtx);
-}
-
-void
-huge_postfork_parent(void)
-{
-
-	malloc_mutex_postfork_parent(&huge_mtx);
-}
-
-void
-huge_postfork_child(void)
-{
-
-	malloc_mutex_postfork_child(&huge_mtx);
+	node = huge_node_get(ptr);
+	arena = node->arena;
+	malloc_mutex_lock(&arena->huge_mtx);
+	node->prof_tctx = tctx;
+	malloc_mutex_unlock(&arena->huge_mtx);
 }

src/jemalloc.c
@@ -1195,8 +1195,6 @@ malloc_init_hard_a0_locked(void)
 		return (true);
 	if (config_tcache && tcache_boot())
 		malloc_mutex_unlock(&init_lock);
-	if (huge_boot())
-		return (true);
 	if (malloc_mutex_init(&arenas_lock))
 		return (true);
 	/*
@@ -2310,12 +2308,10 @@ je_sallocx(const void *ptr, int flags)
 	assert(malloc_initialized() || IS_INITIALIZER);
 	malloc_thread_init();

-	if (config_ivsalloc)
+	if (config_debug)
 		usize = ivsalloc(ptr, config_prof);
-	else {
-		assert(ptr != NULL);
+	else
 		usize = isalloc(ptr, config_prof);
-	}

 	return (usize);
 }
@@ -2440,10 +2436,10 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
 	assert(malloc_initialized() || IS_INITIALIZER);
 	malloc_thread_init();

-	if (config_ivsalloc)
+	if (config_debug)
 		ret = ivsalloc(ptr, config_prof);
 	else
-		ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;
+		ret = (ptr == NULL) ? 0 : isalloc(ptr, config_prof);

 	return (ret);
 }
@@ -2504,7 +2500,6 @@ _malloc_prefork(void)
 	}
 	chunk_prefork();
 	base_prefork();
-	huge_prefork();
 }

 #ifndef JEMALLOC_MUTEX_INIT_CB
@@ -2524,7 +2519,6 @@ _malloc_postfork(void)
 	assert(malloc_initialized());

 	/* Release all mutexes, now that fork() has completed. */
-	huge_postfork_parent();
 	base_postfork_parent();
 	chunk_postfork_parent();
 	for (i = 0; i < narenas_total; i++) {
@@ -2544,7 +2538,6 @@ jemalloc_postfork_child(void)
 	assert(malloc_initialized());

 	/* Release all mutexes, now that fork() has completed. */
-	huge_postfork_child();
 	base_postfork_child();
 	chunk_postfork_child();
 	for (i = 0; i < narenas_total; i++) {
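With config_ivsalloc gone, je_sallocx() and je_malloc_usable_size() route through ivsalloc() (and therefore the chunks_rtree validation) only when config_debug is set; release builds go straight to isalloc(). From the caller's side nothing changes; a small usage sketch against jemalloc's public API, assuming the standard header is available.

#include <stdio.h>
#include <stdlib.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	void *p = malloc(100);

	if (p == NULL)
		return (1);
	/* Both report the usable size of the backing allocation. */
	printf("malloc_usable_size: %zu\n", malloc_usable_size(p));
	printf("sallocx:            %zu\n", sallocx(p, 0));
	free(p);
	return (0);
}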
12 src/stats.c
@@ -547,8 +547,6 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 	if (config_stats) {
 		size_t *cactive;
 		size_t allocated, active, metadata, mapped;
-		size_t chunks_current, chunks_high;
-		uint64_t chunks_total;

 		CTL_GET("stats.cactive", &cactive, size_t *);
 		CTL_GET("stats.allocated", &allocated, size_t);
@@ -561,16 +559,6 @@ stats_print(...)
 		malloc_cprintf(write_cb, cbopaque,
 		    "Current active ceiling: %zu\n", atomic_read_z(cactive));

-		/* Print chunk stats. */
-		CTL_GET("stats.chunks.total", &chunks_total, uint64_t);
-		CTL_GET("stats.chunks.high", &chunks_high, size_t);
-		CTL_GET("stats.chunks.current", &chunks_current, size_t);
-		malloc_cprintf(write_cb, cbopaque, "chunks: nchunks "
-		    "highchunks curchunks\n");
-		malloc_cprintf(write_cb, cbopaque,
-		    "  %13"PRIu64" %12zu %12zu\n",
-		    chunks_total, chunks_high, chunks_current);
-
 	if (merged) {
 		unsigned narenas;

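stats_print() no longer emits a global chunks row; equivalent information appears in the merged and per-arena sections. For completeness, the usual way to dump those stats from an application (a sketch using the public API; passing NULL for the callback writes to stderr, and the opts string can omit sections):

#include <jemalloc/jemalloc.h>

int
main(void)
{
	/* Dump general, merged, and per-arena statistics to stderr. */
	malloc_stats_print(NULL, NULL, NULL);
	return (0);
}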
src/tcache.c
@@ -102,7 +102,7 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_bin_t *tbin, index_t binind,
 		/* Lock the arena bin associated with the first object. */
 		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
 		    tbin->avail[0]);
-		arena_t *bin_arena = chunk->arena;
+		arena_t *bin_arena = chunk->node.arena;
 		arena_bin_t *bin = &bin_arena->bins[binind];

 		if (config_prof && bin_arena == arena) {
@@ -124,7 +124,7 @@ tcache_bin_flush_small(...)
 			ptr = tbin->avail[i];
 			assert(ptr != NULL);
 			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-			if (chunk->arena == bin_arena) {
+			if (chunk->node.arena == bin_arena) {
 				size_t pageind = ((uintptr_t)ptr -
 				    (uintptr_t)chunk) >> LG_PAGE;
 				arena_chunk_map_bits_t *bitselm =
@@ -182,7 +182,7 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, index_t binind,
 		/* Lock the arena associated with the first object. */
 		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
 		    tbin->avail[0]);
-		arena_t *locked_arena = chunk->arena;
+		arena_t *locked_arena = chunk->node.arena;
 		UNUSED bool idump;

 		if (config_prof)
@@ -208,7 +208,7 @@ tcache_bin_flush_large(...)
 			ptr = tbin->avail[i];
 			assert(ptr != NULL);
 			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-			if (chunk->arena == locked_arena) {
+			if (chunk->node.arena == locked_arena) {
 				arena_dalloc_large_junked_locked(locked_arena,
 				    chunk, ptr);
 			} else {
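The tcache flush paths reach the owning arena through chunk->node.arena, where chunk is recovered by masking the object pointer down to its chunk base. A standalone sketch of that CHUNK_ADDR2BASE()-style masking; the chunk size here is a stand-in value, not the configured one.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define LG_CHUNK	21			/* illustrative: 2 MiB chunks */
#define CHUNKSIZE	((uintptr_t)1 << LG_CHUNK)
#define CHUNK_MASK	(CHUNKSIZE - 1)
#define CHUNK_ADDR2BASE(a)	((uintptr_t)(a) & ~CHUNK_MASK)

int
main(void)
{
	uintptr_t ptr = 0x7f3a12445678;		/* some interior object address */
	uintptr_t base = CHUNK_ADDR2BASE(ptr);

	assert((base & CHUNK_MASK) == 0);
	assert(ptr - base < CHUNKSIZE);
	/* base is where the chunk header (and its embedded node) lives. */
	printf("ptr=%#lx chunk=%#lx offset=%#lx\n", (unsigned long)ptr,
	    (unsigned long)base, (unsigned long)(ptr - base));
	return (0);
}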
test/unit/stats.c
@@ -29,32 +29,6 @@ TEST_BEGIN(test_stats_summary)
 }
 TEST_END

-TEST_BEGIN(test_stats_chunks)
-{
-	size_t current, high;
-	uint64_t total;
-	size_t sz;
-	int expected = config_stats ? 0 : ENOENT;
-
-	sz = sizeof(size_t);
-	assert_d_eq(mallctl("stats.chunks.current", &current, &sz, NULL, 0),
-	    expected, "Unexpected mallctl() result");
-	sz = sizeof(uint64_t);
-	assert_d_eq(mallctl("stats.chunks.total", &total, &sz, NULL, 0),
-	    expected, "Unexpected mallctl() result");
-	sz = sizeof(size_t);
-	assert_d_eq(mallctl("stats.chunks.high", &high, &sz, NULL, 0), expected,
-	    "Unexpected mallctl() result");
-
-	if (config_stats) {
-		assert_zu_le(current, high,
-		    "current should be no larger than high");
-		assert_u64_le((uint64_t)high, total,
-		    "high should be no larger than total");
-	}
-}
-TEST_END
-
 TEST_BEGIN(test_stats_huge)
 {
 	void *p;
@@ -458,7 +432,6 @@ main(void)

 	return (test(
 	    test_stats_summary,
-	    test_stats_chunks,
 	    test_stats_huge,
 	    test_stats_arenas_summary,
 	    test_stats_arenas_small,