commit ed2c2427a7
parent b46261d58b

    Use huge size class infrastructure for large size classes.
@@ -159,7 +159,6 @@ TESTS_UNIT := \
	$(srcroot)test/unit/qr.c \
	$(srcroot)test/unit/rb.c \
	$(srcroot)test/unit/rtree.c \
-	$(srcroot)test/unit/run_quantize.c \
	$(srcroot)test/unit/SFMT.c \
	$(srcroot)test/unit/size_classes.c \
	$(srcroot)test/unit/smoothstep.c \
@@ -417,22 +417,21 @@ for (i = 0; i < nbins; i++) {
        <parameter>write_cb</parameter>, or
        <function>malloc_message<parameter/></function> if
        <parameter>write_cb</parameter> is <constant>NULL</constant>. This
-       function can be called repeatedly. General information that never
-       changes during execution can be omitted by specifying "g" as a character
-       within the <parameter>opts</parameter> string. Note that
+       function can be called repeatedly. General information that never changes
+       during execution can be omitted by specifying "g" as a character within
+       the <parameter>opts</parameter> string. Note that
        <function>malloc_message<parameter/></function> uses the
        <function>mallctl*<parameter/></function> functions internally, so
        inconsistent statistics can be reported if multiple threads use these
-       functions simultaneously. If <option>--enable-stats</option> is
-       specified during configuration, “m” and “a” can
-       be specified to omit merged arena and per arena statistics, respectively;
-       “b”, “l”, and “h” can be specified to
-       omit per size class statistics for bins, large objects, and huge objects,
-       respectively. Unrecognized characters are silently ignored. Note that
-       thread caching may prevent some statistics from being completely up to
-       date, since extra locking would be required to merge counters that track
-       thread cache operations.
-       </para>
+       functions simultaneously. If <option>--enable-stats</option> is specified
+       during configuration, “m” and “a” can be specified
+       to omit merged arena and per arena statistics, respectively;
+       “b” and “l” can be specified to omit per size
+       class statistics for bins and large objects, respectively. Unrecognized
+       characters are silently ignored. Note that thread caching may prevent
+       some statistics from being completely up to date, since extra locking
+       would be required to merge counters that track thread cache
+       operations.</para>

        <para>The <function>malloc_usable_size<parameter/></function> function
        returns the usable size of the allocation pointed to by
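(Aside, not part of the patch: the hunk above documents the <parameter>opts</parameter> string accepted by malloc_stats_print(). A minimal sketch of a caller under the post-patch wording — passing NULL for write_cb/cbopaque routes output through malloc_message(), and "g", "b", and "l" are the option characters described above:)

    #include <jemalloc/jemalloc.h>

    int
    main(void)
    {
            /*
             * "g" omits general information that never changes; "b" and
             * "l" omit per size class statistics for bins and large
             * objects.  Merged and per arena statistics still print.
             */
            malloc_stats_print(NULL, NULL, "gbl");
            return (0);
    }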
@@ -1888,25 +1887,6 @@ typedef struct {
        <listitem><para>Number of bytes per page run.</para></listitem>
      </varlistentry>

-      <varlistentry id="arenas.nlruns">
-        <term>
-          <mallctl>arenas.nlruns</mallctl>
-          (<type>unsigned</type>)
-          <literal>r-</literal>
-        </term>
-        <listitem><para>Total number of large size classes.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="arenas.lrun.i.size">
-        <term>
-          <mallctl>arenas.lrun.<i>.size</mallctl>
-          (<type>size_t</type>)
-          <literal>r-</literal>
-        </term>
-        <listitem><para>Maximum size supported by this large size
-        class.</para></listitem>
-      </varlistentry>
-
      <varlistentry id="arenas.nhchunks">
        <term>
          <mallctl>arenas.nhchunks</mallctl>
@@ -2534,50 +2514,6 @@ typedef struct {
        <listitem><para>Current number of runs.</para></listitem>
      </varlistentry>

-      <varlistentry id="stats.arenas.i.lruns.j.nmalloc">
-        <term>
-          <mallctl>stats.arenas.<i>.lruns.<j>.nmalloc</mallctl>
-          (<type>uint64_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>Cumulative number of allocation requests for this size
-        class served directly by the arena.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="stats.arenas.i.lruns.j.ndalloc">
-        <term>
-          <mallctl>stats.arenas.<i>.lruns.<j>.ndalloc</mallctl>
-          (<type>uint64_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>Cumulative number of deallocation requests for this
-        size class served directly by the arena.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="stats.arenas.i.lruns.j.nrequests">
-        <term>
-          <mallctl>stats.arenas.<i>.lruns.<j>.nrequests</mallctl>
-          (<type>uint64_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>Cumulative number of allocation requests for this size
-        class.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="stats.arenas.i.lruns.j.curruns">
-        <term>
-          <mallctl>stats.arenas.<i>.lruns.<j>.curruns</mallctl>
-          (<type>size_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>Current number of runs for this size class.
-        </para></listitem>
-      </varlistentry>
-
      <varlistentry id="stats.arenas.i.hchunks.j.nmalloc">
        <term>
          <mallctl>stats.arenas.<i>.hchunks.<j>.nmalloc</mallctl>
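(Aside, not part of the patch: with the arenas.lrun.* and stats.arenas.<i>.lruns.* entries removed, the remaining huge-class keys shown above can be read through mallctl(). A minimal sketch, assuming a jemalloc build where these keys exist:)

    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int
    main(void)
    {
            unsigned nhchunks;
            size_t sz = sizeof(nhchunks);

            /* "arenas.nhchunks" is r- and of type unsigned (see above). */
            if (mallctl("arenas.nhchunks", &nhchunks, &sz, NULL, 0) == 0)
                    printf("huge size classes: %u\n", nhchunks);
            return (0);
    }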
@@ -294,7 +294,6 @@ struct arena_s {

	dss_prec_t		dss_prec;

-
	/* Extant arena chunks. */
	ql_head(extent_t)	achunks;

@@ -465,9 +464,6 @@ extern const arena_bin_info_t arena_bin_info[NBINS];
 extern size_t map_bias; /* Number of arena chunk header pages. */
 extern size_t map_misc_offset;
 extern size_t arena_maxrun; /* Max run size for arenas. */
-extern size_t large_maxclass; /* Max large size class. */
-extern unsigned nlclasses; /* Number of large size classes. */
-extern unsigned nhclasses; /* Number of huge size classes. */

 #ifdef JEMALLOC_JET
 typedef size_t (run_quantize_t)(size_t);
@@ -485,7 +481,8 @@ void arena_chunk_cache_maybe_remove(arena_t *arena, extent_t *extent,
     bool cache);
 extent_t *arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena,
     size_t usize, size_t alignment, bool *zero);
-void arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
+void arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
+    bool locked);
 void arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena,
     extent_t *extent, size_t oldsize);
 void arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena,
@@ -508,33 +505,19 @@ extern arena_dalloc_junk_small_t *arena_dalloc_junk_small;
 #else
 void arena_dalloc_junk_small(void *ptr, const arena_bin_info_t *bin_info);
 #endif
-void *arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t ind,
-    bool zero);
 void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size,
     szind_t ind, bool zero);
 void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
     size_t alignment, bool zero, tcache_t *tcache);
-void arena_prof_promoted(tsdn_t *tsdn, const extent_t *extent,
-    const void *ptr, size_t size);
+void arena_prof_promote(tsdn_t *tsdn, extent_t *extent, const void *ptr,
+    size_t usize);
+void arena_dalloc_promoted(tsdn_t *tsdn, extent_t *extent, void *ptr,
+    tcache_t *tcache, bool slow_path);
 void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena,
     arena_chunk_t *chunk, extent_t *extent, void *ptr,
     arena_chunk_map_bits_t *bitselm);
 void arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
     extent_t *extent, void *ptr, size_t pageind);
-#ifdef JEMALLOC_JET
-typedef void (arena_dalloc_junk_large_t)(void *, size_t);
-extern arena_dalloc_junk_large_t *arena_dalloc_junk_large;
-#else
-void arena_dalloc_junk_large(void *ptr, size_t usize);
-#endif
-void arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena,
-    arena_chunk_t *chunk, extent_t *extent, void *ptr);
-void arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
-    extent_t *extent, void *ptr);
-#ifdef JEMALLOC_JET
-typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t);
-extern arena_ralloc_junk_large_t *arena_ralloc_junk_large;
-#endif
 bool arena_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, void *ptr,
     size_t oldsize, size_t size, size_t extra, bool zero);
 void *arena_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr,
@@ -551,8 +534,7 @@ void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena,
 void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
     const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
     size_t *nactive, size_t *ndirty, arena_stats_t *astats,
-    malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
-    malloc_huge_stats_t *hstats);
+    malloc_bin_stats_t *bstats, malloc_huge_stats_t *hstats);
 unsigned arena_nthreads_get(arena_t *arena, bool internal);
 void arena_nthreads_inc(arena_t *arena, bool internal);
 void arena_nthreads_dec(arena_t *arena, bool internal);
@@ -639,8 +621,7 @@ void arena_decay_tick(tsdn_t *tsdn, arena_t *arena);
 void *arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
     bool zero, tcache_t *tcache, bool slow_path);
 arena_t *arena_aalloc(tsdn_t *tsdn, const void *ptr);
-size_t arena_salloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr,
-    bool demote);
+size_t arena_salloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr);
 void arena_dalloc(tsdn_t *tsdn, extent_t *extent, void *ptr,
     tcache_t *tcache, bool slow_path);
 void arena_sdalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t size,
@@ -1225,7 +1206,7 @@ arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
		    tcache, size, ind, zero, slow_path));
	}
	if (likely(size <= tcache_maxclass)) {
-		return (tcache_alloc_large(tsdn_tsd(tsdn), arena,
+		return (tcache_alloc_huge(tsdn_tsd(tsdn), arena,
		    tcache, size, ind, zero, slow_path));
	}
	/* (size > tcache_maxclass) case falls through. */
@@ -1244,49 +1225,25 @@ arena_aalloc(tsdn_t *tsdn, const void *ptr)

 /* Return the size of the allocation pointed to by ptr. */
 JEMALLOC_ALWAYS_INLINE size_t
-arena_salloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr, bool demote)
+arena_salloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr)
 {
	size_t ret;
-	size_t pageind;
-	szind_t binind;

	assert(ptr != NULL);

	if (likely(extent_slab_get(extent))) {
		const arena_chunk_t *chunk =
		    (const arena_chunk_t *)extent_base_get(extent);
+		size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+		szind_t binind;

-		pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
		assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
		binind = arena_mapbits_binind_get(chunk, pageind);
-		if (unlikely(binind == BININD_INVALID || (config_prof && !demote
-		    && arena_mapbits_large_get(chunk, pageind) != 0))) {
-			/*
-			 * Large allocation. In the common case (demote), and
-			 * as this is an inline function, most callers will only
-			 * end up looking at binind to determine that ptr is a
-			 * small allocation.
-			 */
-			assert(config_cache_oblivious || ((uintptr_t)ptr &
-			    PAGE_MASK) == 0);
-			ret = arena_mapbits_large_size_get(chunk, pageind) -
-			    large_pad;
-			assert(ret != 0);
-			assert(pageind + ((ret+large_pad)>>LG_PAGE) <=
-			    chunk_npages);
-			assert(arena_mapbits_dirty_get(chunk, pageind) ==
-			    arena_mapbits_dirty_get(chunk,
-			    pageind+((ret+large_pad)>>LG_PAGE)-1));
-		} else {
-			/*
-			 * Small allocation (possibly promoted to a large
-			 * object).
-			 */
+		/* Small allocation. */
		assert(arena_mapbits_large_get(chunk, pageind) != 0 ||
		    arena_ptr_small_binind_get(tsdn, ptr,
		    arena_mapbits_get(chunk, pageind)) == binind);
		ret = index2size(binind);
-		}
	} else
		ret = huge_salloc(tsdn, extent);

@@ -1297,50 +1254,41 @@ JEMALLOC_ALWAYS_INLINE void
 arena_dalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, tcache_t *tcache,
     bool slow_path)
 {
-	size_t pageind, mapbits;

	assert(!tsdn_null(tsdn) || tcache == NULL);
	assert(ptr != NULL);

	if (likely(extent_slab_get(extent))) {
+		/* Small allocation. */
		arena_chunk_t *chunk = (arena_chunk_t *)extent_base_get(extent);
-
-		pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
-		mapbits = arena_mapbits_get(chunk, pageind);
+		size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+		size_t mapbits = arena_mapbits_get(chunk, pageind);
		assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
-		if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) {
-			/* Small allocation. */
-			if (likely(tcache != NULL)) {
-				szind_t binind =
-				    arena_ptr_small_binind_get(tsdn, ptr,
-				    mapbits);
-				tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
-				    binind, slow_path);
-			} else {
-				arena_dalloc_small(tsdn,
-				    extent_arena_get(extent), chunk, extent,
-				    ptr, pageind);
-			}
+		assert((mapbits & CHUNK_MAP_LARGE) == 0);
+		if (likely(tcache != NULL)) {
+			szind_t binind = arena_ptr_small_binind_get(tsdn, ptr,
+			    mapbits);
+			tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, binind,
+			    slow_path);
		} else {
-			size_t size = arena_mapbits_large_size_get(chunk,
-			    pageind);
-
-			assert(config_cache_oblivious || ((uintptr_t)ptr &
-			    PAGE_MASK) == 0);
-
-			if (likely(tcache != NULL) && size - large_pad <=
-			    tcache_maxclass) {
-				tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
-				    size - large_pad, slow_path);
-			} else {
-				arena_dalloc_large(tsdn,
-				    extent_arena_get(extent), chunk, extent,
-				    ptr);
-			}
+			arena_dalloc_small(tsdn, extent_arena_get(extent),
+			    chunk, extent, ptr, pageind);
		}
-	} else
-		huge_dalloc(tsdn, extent);
+	} else {
+		size_t usize = extent_usize_get(extent);
+
+		if (likely(tcache != NULL) && usize <= tcache_maxclass) {
+			if (config_prof && unlikely(usize <= SMALL_MAXCLASS)) {
+				arena_dalloc_promoted(tsdn, extent, ptr,
+				    tcache, slow_path);
+			} else {
+				tcache_dalloc_huge(tsdn_tsd(tsdn), tcache, ptr,
+				    usize, slow_path);
+			}
+		} else
+			huge_dalloc(tsdn, extent);
+	}
 }

 JEMALLOC_ALWAYS_INLINE void
 arena_sdalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t size,
@@ -1348,56 +1296,35 @@ arena_sdalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t size,
 {

	assert(!tsdn_null(tsdn) || tcache == NULL);
+	assert(ptr != NULL);

	if (likely(extent_slab_get(extent))) {
-		arena_chunk_t *chunk = (arena_chunk_t *)extent_base_get(extent);
-
-		if (config_prof && opt_prof) {
-			size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
-			    LG_PAGE;
-			assert(arena_mapbits_allocated_get(chunk, pageind) !=
-			    0);
-			if (arena_mapbits_large_get(chunk, pageind) != 0) {
-				/*
-				 * Make sure to use promoted size, not request
-				 * size.
-				 */
-				size = arena_mapbits_large_size_get(chunk,
-				    pageind) - large_pad;
-			}
-		}
-		assert(s2u(size) == s2u(arena_salloc(tsdn, extent, ptr,
-		    false)));
-
-		if (likely(size <= SMALL_MAXCLASS)) {
-			/* Small allocation. */
-			if (likely(tcache != NULL)) {
-				szind_t binind = size2index(size);
-				tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
-				    binind, slow_path);
-			} else {
-				size_t pageind = ((uintptr_t)ptr -
-				    (uintptr_t)chunk) >> LG_PAGE;
-				arena_dalloc_small(tsdn,
-				    extent_arena_get(extent), chunk, extent,
-				    ptr, pageind);
-			}
+		/* Small allocation. */
+		if (likely(tcache != NULL)) {
+			szind_t binind = size2index(size);
+			tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, binind,
+			    slow_path);
		} else {
-			assert(config_cache_oblivious || ((uintptr_t)ptr &
-			    PAGE_MASK) == 0);
-
-			if (likely(tcache != NULL) && size <= tcache_maxclass) {
-				tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
-				    size, slow_path);
-			} else {
-				arena_dalloc_large(tsdn,
-				    extent_arena_get(extent), chunk, extent,
-				    ptr);
-			}
+			arena_chunk_t *chunk =
+			    (arena_chunk_t *)extent_base_get(extent);
+			size_t pageind = ((uintptr_t)ptr -
+			    (uintptr_t)chunk) >> LG_PAGE;
+			arena_dalloc_small(tsdn, extent_arena_get(extent),
+			    chunk, extent, ptr, pageind);
		}
-	} else
-		huge_dalloc(tsdn, extent);
+	} else {
+		if (likely(tcache != NULL) && size <= tcache_maxclass) {
+			if (config_prof && unlikely(size <= SMALL_MAXCLASS)) {
+				arena_dalloc_promoted(tsdn, extent, ptr,
+				    tcache, slow_path);
+			} else {
+				tcache_dalloc_huge(tsdn_tsd(tsdn), tcache, ptr,
+				    size, slow_path);
+			}
+		} else
+			huge_dalloc(tsdn, extent);
+	}
 }
 # endif /* JEMALLOC_ARENA_INLINE_B */
 #endif

@@ -61,7 +61,8 @@ bool chunk_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
 bool chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena,
     chunk_hooks_t *chunk_hooks, extent_t *extent, size_t offset, size_t length);
 extent_t *chunk_split_wrapper(tsdn_t *tsdn, arena_t *arena,
-    chunk_hooks_t *chunk_hooks, extent_t *extent, size_t size_a, size_t size_b);
+    chunk_hooks_t *chunk_hooks, extent_t *extent, size_t size_a, size_t usize_a,
+    size_t size_b, size_t usize_b);
 bool chunk_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
     chunk_hooks_t *chunk_hooks, extent_t *a, extent_t *b);
 bool chunk_boot(void);
@@ -51,8 +51,7 @@ struct ctl_arena_stats_s {
	uint64_t		nrequests_small;

	malloc_bin_stats_t	bstats[NBINS];
-	malloc_large_stats_t	*lstats;	/* nlclasses elements. */
-	malloc_huge_stats_t	*hstats;	/* nhclasses elements. */
+	malloc_huge_stats_t	hstats[NSIZES - NBINS];
 };

 struct ctl_stats_s {
@@ -15,9 +15,15 @@ struct extent_s {
	/* Pointer to the extent that this structure is responsible for. */
	void			*e_addr;

-	/* Total region size. */
+	/* Extent size. */
	size_t			e_size;

+	/*
+	 * Usable size, typically smaller than extent size due to large_pad or
+	 * promotion of sampled small regions.
+	 */
+	size_t			e_usize;
+
	/* True if extent is active (in use). */
	bool			e_active;

@@ -106,6 +112,7 @@ void extent_arena_set(extent_t *extent, arena_t *arena);
 void extent_addr_set(extent_t *extent, void *addr);
 void extent_addr_randomize(tsdn_t *tsdn, extent_t *extent, size_t alignment);
 void extent_size_set(extent_t *extent, size_t size);
+void extent_usize_set(extent_t *extent, size_t usize);
 void extent_active_set(extent_t *extent, bool active);
 void extent_dirty_set(extent_t *extent, bool dirty);
 void extent_zeroed_set(extent_t *extent, bool zeroed);
@@ -113,8 +120,8 @@ void extent_committed_set(extent_t *extent, bool committed);
 void extent_slab_set(extent_t *extent, bool slab);
 void extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx);
 void extent_init(extent_t *extent, arena_t *arena, void *addr,
-    size_t size, bool active, bool dirty, bool zeroed, bool committed,
-    bool slab);
+    size_t size, size_t usize, bool active, bool dirty, bool zeroed,
+    bool committed, bool slab);
 void extent_dirty_insert(extent_t *extent,
     arena_runs_dirty_link_t *runs_dirty, extent_t *chunks_dirty);
 void extent_dirty_remove(extent_t *extent);
@@ -158,7 +165,7 @@ extent_usize_get(const extent_t *extent)
 {

	assert(!extent->e_slab);
-	return (extent->e_size - large_pad);
+	return (extent->e_usize);
 }

 JEMALLOC_INLINE void *
@@ -172,14 +179,15 @@ JEMALLOC_INLINE void *
 extent_last_get(const extent_t *extent)
 {

-	return ((void *)(uintptr_t)extent->e_addr + extent->e_size - PAGE);
+	return ((void *)(uintptr_t)extent->e_addr + extent_size_get(extent) -
+	    PAGE);
 }

 JEMALLOC_INLINE void *
 extent_past_get(const extent_t *extent)
 {

-	return ((void *)(uintptr_t)extent->e_addr + extent->e_size);
+	return ((void *)(uintptr_t)extent->e_addr + extent_size_get(extent));
 }

 JEMALLOC_INLINE bool
@@ -258,9 +266,12 @@ extent_addr_randomize(tsdn_t *tsdn, extent_t *extent, size_t alignment)
		uint64_t r =
		    prng_lg_range(&extent_arena_get(extent)->offset_state,
		    lg_range, true);
-		uintptr_t random_offset = ((uintptr_t)r) << lg_range;
+		uintptr_t random_offset = ((uintptr_t)r) << (LG_PAGE -
+		    lg_range);
		extent->e_addr = (void *)((uintptr_t)extent->e_addr +
		    random_offset);
+		assert(ALIGNMENT_ADDR2BASE(extent->e_addr, alignment) ==
+		    extent->e_addr);
	}
 }

@@ -271,6 +282,13 @@ extent_size_set(extent_t *extent, size_t size)
	extent->e_size = size;
 }

+JEMALLOC_INLINE void
+extent_usize_set(extent_t *extent, size_t usize)
+{
+
+	extent->e_usize = usize;
+}
+
 JEMALLOC_INLINE void
 extent_active_set(extent_t *extent, bool active)
 {
@@ -315,7 +333,8 @@ extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx)

 JEMALLOC_INLINE void
 extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
-    bool active, bool dirty, bool zeroed, bool committed, bool slab)
+    size_t usize, bool active, bool dirty, bool zeroed, bool committed,
+    bool slab)
 {

	assert(addr == PAGE_ADDR2BASE(addr) || !slab);
@@ -323,6 +342,7 @@ extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
	extent_arena_set(extent, arena);
	extent_addr_set(extent, addr);
	extent_size_set(extent, size);
+	extent_usize_set(extent, usize);
	extent_active_set(extent, active);
	extent_dirty_set(extent, dirty);
	extent_zeroed_set(extent, zeroed);
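(Aside, not part of the patch: extent_init() now takes usize immediately after size, so e_usize is populated at initialization instead of being derived from e_size. A minimal sketch of a caller — the helper name is hypothetical; the argument order mirrors the chunk_alloc_wrapper_hard() call later in this diff:)

    /* Hypothetical wrapper around the new ten-argument extent_init(). */
    static void
    extent_init_huge(extent_t *extent, arena_t *arena, void *addr,
        size_t size, size_t usize, bool zero, bool commit)
    {
            extent_init(extent, arena, addr, size, usize,
                true,       /* active */
                false,      /* dirty */
                zero,       /* zeroed */
                commit,     /* committed */
                false);     /* slab: huge extents are not slabs */
            /* e_usize now tracks usable size independently of e_size. */
            assert(extent_usize_get(extent) == usize);
    }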
@@ -17,9 +17,12 @@ bool huge_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
 void *huge_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
     size_t usize, size_t alignment, bool zero, tcache_t *tcache);
 #ifdef JEMALLOC_JET
-typedef void (huge_dalloc_junk_t)(tsdn_t *, void *, size_t);
+typedef void (huge_dalloc_junk_t)(void *, size_t);
 extern huge_dalloc_junk_t *huge_dalloc_junk;
+#else
+void huge_dalloc_junk(void *ptr, size_t usize);
 #endif
+void huge_dalloc_junked_locked(tsdn_t *tsdn, extent_t *extent);
 void huge_dalloc(tsdn_t *tsdn, extent_t *extent);
 size_t huge_salloc(tsdn_t *tsdn, const extent_t *extent);
 prof_tctx_t *huge_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent);
@@ -797,33 +797,14 @@ sa2u(size_t size, size_t alignment)
		return (usize);
	}

-	/*
-	 * We can't achieve subpage alignment, so round up alignment to the
-	 * minimum that can actually be supported.
-	 */
-	alignment = PAGE_CEILING(alignment);
-
-	/* Try for a large size class. */
-	if (likely(size <= large_maxclass) && likely(alignment == PAGE)) {
-		/* Make sure result is a large size class. */
-		usize = (size <= LARGE_MINCLASS) ? LARGE_MINCLASS : s2u(size);
-
-		/*
-		 * Calculate the size of the over-size run that arena_palloc()
-		 * would need to allocate in order to guarantee the alignment.
-		 */
-		if (usize + large_pad + alignment <= arena_maxrun)
-			return (usize);
-	}
-
	/* Huge size class. Beware of overflow. */

	if (unlikely(alignment > HUGE_MAXCLASS))
		return (0);

-	/* Make sure result is a huge size class. */
-	if (size <= chunksize)
-		usize = chunksize;
+	/* Make sure result is a large size class. */
+	if (size <= LARGE_MINCLASS)
+		usize = LARGE_MINCLASS;
	else {
		usize = s2u(size);
		if (usize < size) {
@@ -836,7 +817,7 @@ sa2u(size_t size, size_t alignment)
	 * Calculate the multi-page mapping that huge_palloc() would need in
	 * order to guarantee the alignment.
	 */
-	if (usize + alignment < usize) {
+	if (usize + large_pad + PAGE_CEILING(alignment) < usize) {
		/* size_t overflow. */
		return (0);
	}
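(Aside, not part of the patch: the new overflow check relies on unsigned wrap-around — for size_t operands, a sum overflows exactly when the wrapped result is smaller than an operand. A stand-alone restatement with a hypothetical helper name:)

    #include <stdbool.h>
    #include <stddef.h>

    static bool
    size_add_overflows(size_t a, size_t b)
    {
            /* Unsigned addition wraps mod 2^N, so a + b < a detects it. */
            return (a + b < a);
    }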
@@ -960,8 +941,7 @@ iealloc(tsdn_t *tsdn, const void *ptr)

 #ifndef JEMALLOC_ENABLE_INLINE
 arena_t *iaalloc(tsdn_t *tsdn, const void *ptr);
-size_t isalloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr,
-    bool demote);
+size_t isalloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr);
 void *iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero,
     tcache_t *tcache, bool is_metadata, arena_t *arena, bool slow_path);
 void *ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero,
@@ -971,7 +951,7 @@ void *ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
 void *ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
     tcache_t *tcache, arena_t *arena);
 void *ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero);
-size_t ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote);
+size_t ivsalloc(tsdn_t *tsdn, const void *ptr);
 void idalloctm(tsdn_t *tsdn, extent_t *extent, void *ptr, tcache_t *tcache,
     bool is_metadata, bool slow_path);
 void idalloc(tsd_t *tsd, extent_t *extent, void *ptr);
@@ -1003,17 +983,15 @@ iaalloc(tsdn_t *tsdn, const void *ptr)
  *   tsdn_t *tsdn = [...]
  *   void *ptr = [...]
  *   extent_t *extent = iealloc(tsdn, ptr);
- *   size_t sz = isalloc(tsdn, extent, ptr, config_prof);
+ *   size_t sz = isalloc(tsdn, extent, ptr);
  */
 JEMALLOC_ALWAYS_INLINE size_t
-isalloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr, bool demote)
+isalloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr)
 {

	assert(ptr != NULL);
-	/* Demotion only makes sense if config_prof is true. */
-	assert(config_prof || !demote);

-	return (arena_salloc(tsdn, extent, ptr, demote));
+	return (arena_salloc(tsdn, extent, ptr));
 }

 JEMALLOC_ALWAYS_INLINE void *
@@ -1029,7 +1007,7 @@ iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
	ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path);
	if (config_stats && is_metadata && likely(ret != NULL)) {
		arena_metadata_allocated_add(iaalloc(tsdn, ret), isalloc(tsdn,
-		    iealloc(tsdn, ret), ret, config_prof));
+		    iealloc(tsdn, ret), ret));
	}
	return (ret);
 }
@@ -1057,7 +1035,7 @@ ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
	assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
	if (config_stats && is_metadata && likely(ret != NULL)) {
		arena_metadata_allocated_add(iaalloc(tsdn, ret), isalloc(tsdn,
-		    iealloc(tsdn, ret), ret, config_prof));
+		    iealloc(tsdn, ret), ret));
	}
	return (ret);
 }
@@ -1079,7 +1057,7 @@ ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero)
 }

 JEMALLOC_ALWAYS_INLINE size_t
-ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote)
+ivsalloc(tsdn_t *tsdn, const void *ptr)
 {
	extent_t *extent;

@@ -1091,7 +1069,7 @@ ivsalloc(tsdn_t *tsdn, const void *ptr)
	/* Only arena chunks should be looked up via interior pointers. */
	assert(extent_addr_get(extent) == ptr || extent_slab_get(extent));

-	return (isalloc(tsdn, extent, ptr, demote));
+	return (isalloc(tsdn, extent, ptr));
 }

 JEMALLOC_ALWAYS_INLINE void
@@ -1104,7 +1082,7 @@ idalloctm(tsdn_t *tsdn, extent_t *extent, void *ptr, tcache_t *tcache,
	assert(!is_metadata || iaalloc(tsdn, ptr)->ind < narenas_auto);
	if (config_stats && is_metadata) {
		arena_metadata_allocated_sub(iaalloc(tsdn, ptr), isalloc(tsdn,
-		    extent, ptr, config_prof));
+		    extent, ptr));
	}

	arena_dalloc(tsdn, extent, ptr, tcache, slow_path);
@@ -23,10 +23,8 @@ arena_cleanup
 arena_dalloc
 arena_dalloc_bin
 arena_dalloc_bin_junked_locked
-arena_dalloc_junk_large
 arena_dalloc_junk_small
-arena_dalloc_large
-arena_dalloc_large_junked_locked
+arena_dalloc_promoted
 arena_dalloc_small
 arena_decay_tick
 arena_decay_ticks
@@ -45,7 +43,6 @@ arena_lg_dirty_mult_get
 arena_lg_dirty_mult_set
 arena_malloc
 arena_malloc_hard
-arena_malloc_large
 arena_mapbits_allocated_get
 arena_mapbits_binind_get
 arena_mapbits_decommitted_get
@@ -92,7 +89,7 @@ arena_prefork3
 arena_prof_accum
 arena_prof_accum_impl
 arena_prof_accum_locked
-arena_prof_promoted
+arena_prof_promote
 arena_prof_tctx_get
 arena_prof_tctx_reset
 arena_prof_tctx_set
@@ -254,6 +251,7 @@ hash_x86_128
 hash_x86_32
 huge_dalloc
 huge_dalloc_junk
+huge_dalloc_junked_locked
 huge_malloc
 huge_palloc
 huge_prof_tctx_get
@@ -287,7 +285,6 @@ ixalloc
 jemalloc_postfork_child
 jemalloc_postfork_parent
 jemalloc_prefork
-large_maxclass
 lg_floor
 lg_prof_sample
 malloc_cprintf
|
|||||||
narenas_total_get
|
narenas_total_get
|
||||||
ncpus
|
ncpus
|
||||||
nhbins
|
nhbins
|
||||||
nhclasses
|
|
||||||
nlclasses
|
|
||||||
nstime_add
|
nstime_add
|
||||||
nstime_compare
|
nstime_compare
|
||||||
nstime_copy
|
nstime_copy
|
||||||
|
@@ -489,7 +489,7 @@ prof_malloc(tsdn_t *tsdn, extent_t *extent, const void *ptr, size_t usize,

	cassert(config_prof);
	assert(ptr != NULL);
-	assert(usize == isalloc(tsdn, extent, ptr, true));
+	assert(usize == isalloc(tsdn, extent, ptr));

	if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
		prof_malloc_sample_object(tsdn, extent, ptr, usize, tctx);
@@ -510,7 +510,7 @@ prof_realloc(tsd_t *tsd, extent_t *extent, const void *ptr, size_t usize,
	assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);

	if (prof_active && !updated && ptr != NULL) {
-		assert(usize == isalloc(tsd_tsdn(tsd), extent, ptr, true));
+		assert(usize == isalloc(tsd_tsdn(tsd), extent, ptr));
		if (prof_sample_accum_update(tsd, usize, true, NULL)) {
			/*
			 * Don't sample. The usize passed to prof_alloc_prep()
@@ -544,7 +544,7 @@ prof_free(tsd_t *tsd, const extent_t *extent, const void *ptr, size_t usize)
	prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), extent, ptr);

	cassert(config_prof);
-	assert(usize == isalloc(tsd_tsdn(tsd), extent, ptr, true));
+	assert(usize == isalloc(tsd_tsdn(tsd), extent, ptr));

	if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
		prof_free_sampled_object(tsd, usize, tctx);
@@ -3,7 +3,6 @@

 typedef struct tcache_bin_stats_s tcache_bin_stats_t;
 typedef struct malloc_bin_stats_s malloc_bin_stats_t;
-typedef struct malloc_large_stats_s malloc_large_stats_t;
 typedef struct malloc_huge_stats_s malloc_huge_stats_t;
 typedef struct arena_stats_s arena_stats_t;
 typedef struct chunk_stats_s chunk_stats_t;
@@ -62,12 +61,10 @@ struct malloc_bin_stats_s {
	size_t		curruns;
 };

-struct malloc_large_stats_s {
+struct malloc_huge_stats_s {
	/*
	 * Total number of allocation/deallocation requests served directly by
-	 * the arena. Note that tcache may allocate an object, then recycle it
-	 * many times, resulting many increments to nrequests, but only one
-	 * each to nmalloc and ndalloc.
+	 * the arena.
	 */
	uint64_t	nmalloc;
	uint64_t	ndalloc;
@@ -79,21 +76,6 @@ struct malloc_large_stats_s {
	 */
	uint64_t	nrequests;

-	/*
-	 * Current number of runs of this size class, including runs currently
-	 * cached by tcache.
-	 */
-	size_t		curruns;
-};
-
-struct malloc_huge_stats_s {
-	/*
-	 * Total number of allocation/deallocation requests served directly by
-	 * the arena.
-	 */
-	uint64_t	nmalloc;
-	uint64_t	ndalloc;
-
	/* Current number of (multi-)chunk allocations of this size class. */
	size_t		curhchunks;
 };
@@ -126,21 +108,13 @@ struct arena_stats_s {
	size_t		metadata_mapped;
	size_t		metadata_allocated; /* Protected via atomic_*_z(). */

-	/* Per-size-category statistics. */
-	size_t		allocated_large;
-	uint64_t	nmalloc_large;
-	uint64_t	ndalloc_large;
-	uint64_t	nrequests_large;
-
	size_t		allocated_huge;
	uint64_t	nmalloc_huge;
	uint64_t	ndalloc_huge;
+	uint64_t	nrequests_huge;

-	/* One element for each large size class. */
-	malloc_large_stats_t	*lstats;
-
	/* One element for each huge size class. */
-	malloc_huge_stats_t	*hstats;
+	malloc_huge_stats_t	hstats[NSIZES - NBINS];
 };

 #endif /* JEMALLOC_H_STRUCTS */
@@ -30,8 +30,8 @@ typedef struct tcaches_s tcaches_t;
 */
 #define	TCACHE_NSLOTS_SMALL_MAX		200

-/* Number of cache slots for large size classes. */
-#define	TCACHE_NSLOTS_LARGE		20
+/* Number of cache slots for huge size classes. */
+#define	TCACHE_NSLOTS_HUGE		20

 /* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */
 #define	LG_TCACHE_MAXCLASS_DEFAULT	15
@@ -113,7 +113,7 @@ extern tcache_bin_info_t *tcache_bin_info;

 /*
 * Number of tcache bins. There are NBINS small-object bins, plus 0 or more
- * large-object bins.
+ * huge-object bins.
 */
 extern unsigned nhbins;

@@ -136,7 +136,7 @@ void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
     tcache_bin_t *tbin, szind_t binind, bool *tcache_success);
 void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
     szind_t binind, unsigned rem);
-void tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
+void tcache_bin_flush_huge(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
     unsigned rem, tcache_t *tcache);
 void tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache,
     arena_t *oldarena, arena_t *newarena);
@@ -163,11 +163,11 @@ void tcache_enabled_set(bool enabled);
 void *tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success);
 void *tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
     size_t size, szind_t ind, bool zero, bool slow_path);
-void *tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
+void *tcache_alloc_huge(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
     size_t size, szind_t ind, bool zero, bool slow_path);
 void tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr,
     szind_t binind, bool slow_path);
-void tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr,
+void tcache_dalloc_huge(tsd_t *tsd, tcache_t *tcache, void *ptr,
     size_t size, bool slow_path);
 tcache_t *tcaches_get(tsd_t *tsd, unsigned ind);
 #endif
@@ -336,7 +336,7 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
 }

 JEMALLOC_ALWAYS_INLINE void *
-tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
+tcache_alloc_huge(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
     szind_t binind, bool zero, bool slow_path)
 {
	void *ret;
@@ -349,14 +349,14 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
	assert(tcache_success == (ret != NULL));
	if (unlikely(!tcache_success)) {
		/*
-		 * Only allocate one large object at a time, because it's quite
+		 * Only allocate one huge object at a time, because it's quite
		 * expensive to create one and not use it.
		 */
		arena = arena_choose(tsd, arena);
		if (unlikely(arena == NULL))
			return (NULL);

-		ret = arena_malloc_large(tsd_tsdn(tsd), arena, binind, zero);
+		ret = huge_malloc(tsd_tsdn(tsd), arena, s2u(size), zero);
		if (ret == NULL)
			return (NULL);
	} else {
@@ -369,14 +369,6 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
		assert(usize <= tcache_maxclass);
	}

-	if (config_prof && usize == LARGE_MINCLASS) {
-		arena_chunk_t *chunk =(arena_chunk_t *)extent_addr_get(
-		    iealloc(tsd_tsdn(tsd), ret));
-		size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
-		    LG_PAGE);
-		arena_mapbits_large_binind_set(chunk, pageind,
-		    BININD_INVALID);
-	}
	if (likely(!zero)) {
		if (slow_path && config_fill) {
			if (unlikely(opt_junk_alloc)) {
@@ -424,26 +416,25 @@ tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
 }

 JEMALLOC_ALWAYS_INLINE void
-tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size,
+tcache_dalloc_huge(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size,
     bool slow_path)
 {
	szind_t binind;
	tcache_bin_t *tbin;
	tcache_bin_info_t *tbin_info;

-	assert((size & PAGE_MASK) == 0);
	assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SMALL_MAXCLASS);
	assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass);

	binind = size2index(size);

	if (slow_path && config_fill && unlikely(opt_junk_free))
-		arena_dalloc_junk_large(ptr, size);
+		huge_dalloc_junk(ptr, size);

	tbin = &tcache->tbins[binind];
	tbin_info = &tcache_bin_info[binind];
	if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
-		tcache_bin_flush_large(tsd, tbin, binind,
+		tcache_bin_flush_huge(tsd, tbin, binind,
		    (tbin_info->ncached_max >> 1), tcache);
	}
	assert(tbin->ncached < tbin_info->ncached_max);
src/arena.c (856 changed lines; diff suppressed because it is too large)
@@ -74,7 +74,8 @@ base_chunk_alloc(tsdn_t *tsdn, size_t minsize)
			base_resident += PAGE_CEILING(nsize);
		}
	}
-	extent_init(extent, NULL, addr, csize, true, false, true, true, false);
+	extent_init(extent, NULL, addr, csize, 0, true, false, true, true,
+	    false);
	return (extent);
 }

src/chunk.c (27 changed lines)
@@ -369,7 +369,7 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
	if (leadsize != 0) {
		extent_t *lead = extent;
		extent = chunk_split_wrapper(tsdn, arena, chunk_hooks, lead,
-		    leadsize, size + trailsize);
+		    leadsize, leadsize, size + trailsize, usize + trailsize);
		if (extent == NULL) {
			chunk_leak(tsdn, arena, chunk_hooks, cache, lead);
			malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
@@ -382,7 +382,7 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
	/* Split the trail. */
	if (trailsize != 0) {
		extent_t *trail = chunk_split_wrapper(tsdn, arena, chunk_hooks,
-		    extent, size, trailsize);
+		    extent, size, usize, trailsize, trailsize);
		if (trail == NULL) {
			chunk_leak(tsdn, arena, chunk_hooks, cache, extent);
			malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
@@ -390,6 +390,12 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
		}
		extent_heaps_insert(extent_heaps, trail);
		arena_chunk_cache_maybe_insert(arena, trail, cache);
+	} else if (leadsize == 0) {
+		/*
+		 * Splitting causes usize to be set as a side effect, but no
+		 * splitting occurred.
+		 */
+		extent_usize_set(extent, usize);
	}

	if (!extent_committed_get(extent) &&
@ -552,7 +558,8 @@ chunk_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
|
|||||||
extent_dalloc(tsdn, arena, extent);
|
extent_dalloc(tsdn, arena, extent);
|
||||||
return (NULL);
|
return (NULL);
|
||||||
}
|
}
|
||||||
extent_init(extent, arena, addr, size, true, false, zero, commit, slab);
|
extent_init(extent, arena, addr, size, usize, true, false, zero, commit,
|
||||||
|
slab);
|
||||||
if (pad != 0)
|
if (pad != 0)
|
||||||
extent_addr_randomize(tsdn, extent, alignment);
|
extent_addr_randomize(tsdn, extent, alignment);
|
||||||
if (chunk_register(tsdn, extent)) {
|
if (chunk_register(tsdn, extent)) {
|
||||||
@ -635,6 +642,7 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
|
|||||||
malloc_mutex_lock(tsdn, &arena->chunks_mtx);
|
malloc_mutex_lock(tsdn, &arena->chunks_mtx);
|
||||||
chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
|
chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
|
||||||
|
|
||||||
|
extent_usize_set(extent, 0);
|
||||||
extent_active_set(extent, false);
|
extent_active_set(extent, false);
|
||||||
extent_zeroed_set(extent, !cache && extent_zeroed_get(extent));
|
extent_zeroed_set(extent, !cache && extent_zeroed_get(extent));
|
||||||
if (extent_slab_get(extent)) {
|
if (extent_slab_get(extent)) {
|
||||||
@ -801,7 +809,8 @@ chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b,
|
|||||||
|
|
||||||
extent_t *
|
extent_t *
|
||||||
chunk_split_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
|
chunk_split_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
|
||||||
extent_t *extent, size_t size_a, size_t size_b)
|
extent_t *extent, size_t size_a, size_t usize_a, size_t size_b,
|
||||||
|
size_t usize_b)
|
||||||
{
|
{
|
||||||
extent_t *trail;
|
extent_t *trail;
|
||||||
rtree_elm_t *lead_elm_a, *lead_elm_b, *trail_elm_a, *trail_elm_b;
|
rtree_elm_t *lead_elm_a, *lead_elm_b, *trail_elm_a, *trail_elm_b;
|
||||||
@ -818,9 +827,9 @@ chunk_split_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
|
|||||||
extent_t lead;
|
extent_t lead;
|
||||||
|
|
||||||
extent_init(&lead, arena, extent_addr_get(extent), size_a,
|
extent_init(&lead, arena, extent_addr_get(extent), size_a,
|
||||||
extent_active_get(extent), extent_dirty_get(extent),
|
usize_a, extent_active_get(extent),
|
||||||
extent_zeroed_get(extent), extent_committed_get(extent),
|
extent_dirty_get(extent), extent_zeroed_get(extent),
|
||||||
extent_slab_get(extent));
|
extent_committed_get(extent), extent_slab_get(extent));
|
||||||
|
|
||||||
if (extent_rtree_acquire(tsdn, &lead, false, true, &lead_elm_a,
|
if (extent_rtree_acquire(tsdn, &lead, false, true, &lead_elm_a,
|
||||||
&lead_elm_b))
|
&lead_elm_b))
|
||||||
@ -828,7 +837,7 @@ chunk_split_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
|
|||||||
}
|
}
|
||||||
|
|
||||||
extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) +
|
extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) +
|
||||||
size_a), size_b, extent_active_get(extent),
|
size_a), size_b, usize_b, extent_active_get(extent),
|
||||||
extent_dirty_get(extent), extent_zeroed_get(extent),
|
extent_dirty_get(extent), extent_zeroed_get(extent),
|
||||||
extent_committed_get(extent), extent_slab_get(extent));
|
extent_committed_get(extent), extent_slab_get(extent));
|
||||||
if (extent_rtree_acquire(tsdn, trail, false, true, &trail_elm_a,
|
if (extent_rtree_acquire(tsdn, trail, false, true, &trail_elm_a,
|
||||||
@ -840,6 +849,7 @@ chunk_split_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
|
|||||||
goto label_error_d;
|
goto label_error_d;
|
||||||
|
|
||||||
extent_size_set(extent, size_a);
|
extent_size_set(extent, size_a);
|
||||||
|
extent_usize_set(extent, usize_a);
|
||||||
|
|
||||||
extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent);
|
extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent);
|
||||||
extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail);
|
extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail);
|
||||||
@ -905,6 +915,7 @@ chunk_merge_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
|
|||||||
b_elm_b = b_elm_a;
|
b_elm_b = b_elm_a;
|
||||||
|
|
||||||
extent_size_set(a, extent_size_get(a) + extent_size_get(b));
|
extent_size_set(a, extent_size_get(a) + extent_size_get(b));
|
||||||
|
extent_usize_set(a, extent_usize_get(a) + extent_usize_get(b));
|
||||||
extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b));
|
extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b));
|
||||||
|
|
||||||
extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a);
|
extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a);
|
||||||
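chunk_split_wrapper() now threads a usable size through every split, so both halves of a split extent carry a consistent (size, usize) pair, and a merge adds the pairs back together. A simplified stand-in illustrating that invariant (toy types, not the real extent API):

/*
 * Toy extent bookkeeping: mapped size plus usable size, kept in sync
 * across split and merge. Illustrative only.
 */
#include <assert.h>
#include <stddef.h>

typedef struct {
    void   *addr;
    size_t  size;  /* mapped size, page-quantized */
    size_t  usize; /* usable size handed to the application */
} toy_extent_t;

/* Split ext into [size_a/usize_a][size_b/usize_b]; returns the trail. */
static toy_extent_t
toy_split(toy_extent_t *ext, size_t size_a, size_t usize_a, size_t size_b,
    size_t usize_b)
{
    toy_extent_t trail;

    assert(size_a + size_b == ext->size);
    trail.addr = (void *)((char *)ext->addr + size_a);
    trail.size = size_b;
    trail.usize = usize_b;
    ext->size = size_a;
    ext->usize = usize_a;
    return (trail);
}

/* Merge keeps both totals consistent, mirroring chunk_merge_wrapper(). */
static void
toy_merge(toy_extent_t *a, const toy_extent_t *b)
{
    a->size += b->size;
    a->usize += b->usize;
}

The `else if (leadsize == 0)` branch added to chunk_recycle() exists precisely because the usize update normally rides along with a split; when no split happens, it has to be set explicitly.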
@@ -121,7 +121,7 @@ chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
 			pad_size = (uintptr_t)ret - (uintptr_t)pad_addr;
 			if (pad_size != 0) {
 				extent_init(pad, arena, pad_addr, pad_size,
-				    false, true, false, true, false);
+				    pad_size, false, true, false, true, false);
 			}
 			dss_next = (void *)((uintptr_t)ret + size);
 			if ((uintptr_t)ret < (uintptr_t)dss_max ||
175	src/ctl.c
@@ -49,7 +49,6 @@ static int	n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
 static const ctl_named_node_t	*n##_index(tsdn_t *tsdn,
     const size_t *mib, size_t miblen, size_t i);
 
-static bool	ctl_arena_init(ctl_arena_stats_t *astats);
 static void	ctl_arena_clear(ctl_arena_stats_t *astats);
 static void	ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_stats_t *cstats,
     arena_t *arena);
@@ -127,8 +126,6 @@ CTL_PROTO(arenas_bin_i_size)
 CTL_PROTO(arenas_bin_i_nregs)
 CTL_PROTO(arenas_bin_i_run_size)
 INDEX_PROTO(arenas_bin_i)
-CTL_PROTO(arenas_lrun_i_size)
-INDEX_PROTO(arenas_lrun_i)
 CTL_PROTO(arenas_hchunk_i_size)
 INDEX_PROTO(arenas_hchunk_i)
 CTL_PROTO(arenas_narenas)
@@ -140,7 +137,6 @@ CTL_PROTO(arenas_page)
 CTL_PROTO(arenas_tcache_max)
 CTL_PROTO(arenas_nbins)
 CTL_PROTO(arenas_nhbins)
-CTL_PROTO(arenas_nlruns)
 CTL_PROTO(arenas_nhchunks)
 CTL_PROTO(arenas_extend)
 CTL_PROTO(prof_thread_active_init)
@@ -154,10 +150,6 @@ CTL_PROTO(stats_arenas_i_small_allocated)
 CTL_PROTO(stats_arenas_i_small_nmalloc)
 CTL_PROTO(stats_arenas_i_small_ndalloc)
 CTL_PROTO(stats_arenas_i_small_nrequests)
-CTL_PROTO(stats_arenas_i_large_allocated)
-CTL_PROTO(stats_arenas_i_large_nmalloc)
-CTL_PROTO(stats_arenas_i_large_ndalloc)
-CTL_PROTO(stats_arenas_i_large_nrequests)
 CTL_PROTO(stats_arenas_i_huge_allocated)
 CTL_PROTO(stats_arenas_i_huge_nmalloc)
 CTL_PROTO(stats_arenas_i_huge_ndalloc)
@@ -172,11 +164,6 @@ CTL_PROTO(stats_arenas_i_bins_j_nruns)
 CTL_PROTO(stats_arenas_i_bins_j_nreruns)
 CTL_PROTO(stats_arenas_i_bins_j_curruns)
 INDEX_PROTO(stats_arenas_i_bins_j)
-CTL_PROTO(stats_arenas_i_lruns_j_nmalloc)
-CTL_PROTO(stats_arenas_i_lruns_j_ndalloc)
-CTL_PROTO(stats_arenas_i_lruns_j_nrequests)
-CTL_PROTO(stats_arenas_i_lruns_j_curruns)
-INDEX_PROTO(stats_arenas_i_lruns_j)
 CTL_PROTO(stats_arenas_i_hchunks_j_nmalloc)
 CTL_PROTO(stats_arenas_i_hchunks_j_ndalloc)
 CTL_PROTO(stats_arenas_i_hchunks_j_nrequests)
@@ -323,17 +310,6 @@ static const ctl_indexed_node_t arenas_bin_node[] = {
 	{INDEX(arenas_bin_i)}
 };
 
-static const ctl_named_node_t arenas_lrun_i_node[] = {
-	{NAME("size"),		CTL(arenas_lrun_i_size)}
-};
-static const ctl_named_node_t super_arenas_lrun_i_node[] = {
-	{NAME(""),		CHILD(named, arenas_lrun_i)}
-};
-
-static const ctl_indexed_node_t arenas_lrun_node[] = {
-	{INDEX(arenas_lrun_i)}
-};
-
 static const ctl_named_node_t arenas_hchunk_i_node[] = {
 	{NAME("size"),		CTL(arenas_hchunk_i_size)}
 };
@@ -356,8 +332,6 @@ static const ctl_named_node_t arenas_node[] = {
 	{NAME("nbins"),		CTL(arenas_nbins)},
 	{NAME("nhbins"),	CTL(arenas_nhbins)},
 	{NAME("bin"),		CHILD(indexed, arenas_bin)},
-	{NAME("nlruns"),	CTL(arenas_nlruns)},
-	{NAME("lrun"),		CHILD(indexed, arenas_lrun)},
 	{NAME("nhchunks"),	CTL(arenas_nhchunks)},
 	{NAME("hchunk"),	CHILD(indexed, arenas_hchunk)},
 	{NAME("extend"),	CTL(arenas_extend)}
@@ -385,13 +359,6 @@ static const ctl_named_node_t stats_arenas_i_small_node[] = {
 	{NAME("nrequests"),	CTL(stats_arenas_i_small_nrequests)}
 };
 
-static const ctl_named_node_t stats_arenas_i_large_node[] = {
-	{NAME("allocated"),	CTL(stats_arenas_i_large_allocated)},
-	{NAME("nmalloc"),	CTL(stats_arenas_i_large_nmalloc)},
-	{NAME("ndalloc"),	CTL(stats_arenas_i_large_ndalloc)},
-	{NAME("nrequests"),	CTL(stats_arenas_i_large_nrequests)}
-};
-
 static const ctl_named_node_t stats_arenas_i_huge_node[] = {
 	{NAME("allocated"),	CTL(stats_arenas_i_huge_allocated)},
 	{NAME("nmalloc"),	CTL(stats_arenas_i_huge_nmalloc)},
@@ -418,20 +385,6 @@ static const ctl_indexed_node_t stats_arenas_i_bins_node[] = {
 	{INDEX(stats_arenas_i_bins_j)}
 };
 
-static const ctl_named_node_t stats_arenas_i_lruns_j_node[] = {
-	{NAME("nmalloc"),	CTL(stats_arenas_i_lruns_j_nmalloc)},
-	{NAME("ndalloc"),	CTL(stats_arenas_i_lruns_j_ndalloc)},
-	{NAME("nrequests"),	CTL(stats_arenas_i_lruns_j_nrequests)},
-	{NAME("curruns"),	CTL(stats_arenas_i_lruns_j_curruns)}
-};
-static const ctl_named_node_t super_stats_arenas_i_lruns_j_node[] = {
-	{NAME(""),		CHILD(named, stats_arenas_i_lruns_j)}
-};
-
-static const ctl_indexed_node_t stats_arenas_i_lruns_node[] = {
-	{INDEX(stats_arenas_i_lruns_j)}
-};
-
 static const ctl_named_node_t stats_arenas_i_hchunks_j_node[] = {
 	{NAME("nmalloc"),	CTL(stats_arenas_i_hchunks_j_nmalloc)},
 	{NAME("ndalloc"),	CTL(stats_arenas_i_hchunks_j_ndalloc)},
@@ -460,10 +413,8 @@ static const ctl_named_node_t stats_arenas_i_node[] = {
 	{NAME("purged"),	CTL(stats_arenas_i_purged)},
 	{NAME("metadata"),	CHILD(named, stats_arenas_i_metadata)},
 	{NAME("small"),		CHILD(named, stats_arenas_i_small)},
-	{NAME("large"),		CHILD(named, stats_arenas_i_large)},
 	{NAME("huge"),		CHILD(named, stats_arenas_i_huge)},
 	{NAME("bins"),		CHILD(indexed, stats_arenas_i_bins)},
-	{NAME("lruns"),		CHILD(indexed, stats_arenas_i_lruns)},
 	{NAME("hchunks"),	CHILD(indexed, stats_arenas_i_hchunks)}
 };
 static const ctl_named_node_t super_stats_arenas_i_node[] = {
@@ -508,27 +459,6 @@ static const ctl_named_node_t super_root_node[] = {
 
 /******************************************************************************/
 
-static bool
-ctl_arena_init(ctl_arena_stats_t *astats)
-{
-
-	if (astats->lstats == NULL) {
-		astats->lstats = (malloc_large_stats_t *)a0malloc(nlclasses *
-		    sizeof(malloc_large_stats_t));
-		if (astats->lstats == NULL)
-			return (true);
-	}
-
-	if (astats->hstats == NULL) {
-		astats->hstats = (malloc_huge_stats_t *)a0malloc(nhclasses *
-		    sizeof(malloc_huge_stats_t));
-		if (astats->hstats == NULL)
-			return (true);
-	}
-
-	return (false);
-}
-
 static void
 ctl_arena_clear(ctl_arena_stats_t *astats)
 {
@@ -546,9 +476,7 @@ ctl_arena_clear(ctl_arena_stats_t *astats)
 		astats->ndalloc_small = 0;
 		astats->nrequests_small = 0;
 		memset(astats->bstats, 0, NBINS * sizeof(malloc_bin_stats_t));
-		memset(astats->lstats, 0, nlclasses *
-		    sizeof(malloc_large_stats_t));
-		memset(astats->hstats, 0, nhclasses *
+		memset(astats->hstats, 0, (NSIZES - NBINS) *
 		    sizeof(malloc_huge_stats_t));
 	}
 }
@@ -562,7 +490,7 @@ ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_stats_t *cstats, arena_t *arena)
 	arena_stats_merge(tsdn, arena, &cstats->nthreads, &cstats->dss,
 	    &cstats->lg_dirty_mult, &cstats->decay_time,
 	    &cstats->pactive, &cstats->pdirty, &cstats->astats,
-	    cstats->bstats, cstats->lstats, cstats->hstats);
+	    cstats->bstats, cstats->hstats);
 
 	for (i = 0; i < NBINS; i++) {
 		cstats->allocated_small += cstats->bstats[i].curregs *
@@ -604,16 +532,10 @@ ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
 	sstats->ndalloc_small += astats->ndalloc_small;
 	sstats->nrequests_small += astats->nrequests_small;
 
-	sstats->astats.allocated_large +=
-	    astats->astats.allocated_large;
-	sstats->astats.nmalloc_large += astats->astats.nmalloc_large;
-	sstats->astats.ndalloc_large += astats->astats.ndalloc_large;
-	sstats->astats.nrequests_large +=
-	    astats->astats.nrequests_large;
-
 	sstats->astats.allocated_huge += astats->astats.allocated_huge;
 	sstats->astats.nmalloc_huge += astats->astats.nmalloc_huge;
 	sstats->astats.ndalloc_huge += astats->astats.ndalloc_huge;
+	sstats->astats.nrequests_huge += astats->astats.nrequests_huge;
 
 	for (i = 0; i < NBINS; i++) {
 		sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
@@ -632,17 +554,11 @@ ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
 		sstats->bstats[i].curruns += astats->bstats[i].curruns;
 	}
 
-	for (i = 0; i < nlclasses; i++) {
-		sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc;
-		sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc;
-		sstats->lstats[i].nrequests +=
-		    astats->lstats[i].nrequests;
-		sstats->lstats[i].curruns += astats->lstats[i].curruns;
-	}
-
-	for (i = 0; i < nhclasses; i++) {
+	for (i = 0; i < NSIZES - NBINS; i++) {
 		sstats->hstats[i].nmalloc += astats->hstats[i].nmalloc;
 		sstats->hstats[i].ndalloc += astats->hstats[i].ndalloc;
+		sstats->hstats[i].nrequests +=
+		    astats->hstats[i].nrequests;
 		sstats->hstats[i].curhchunks +=
 		    astats->hstats[i].curhchunks;
 	}
@@ -680,10 +596,6 @@ ctl_grow(tsdn_t *tsdn)
 	memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) *
 	    sizeof(ctl_arena_stats_t));
 	memset(&astats[ctl_stats.narenas + 1], 0, sizeof(ctl_arena_stats_t));
-	if (ctl_arena_init(&astats[ctl_stats.narenas + 1])) {
-		a0dalloc(astats);
-		return (true);
-	}
 	/* Swap merged stats to their new location. */
 	{
 		ctl_arena_stats_t tstats;
@@ -730,7 +642,6 @@ ctl_refresh(tsdn_t *tsdn)
 	    &base_mapped);
 	ctl_stats.allocated =
 	    ctl_stats.arenas[ctl_stats.narenas].allocated_small +
-	    ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large +
 	    ctl_stats.arenas[ctl_stats.narenas].astats.allocated_huge;
 	ctl_stats.active =
 	    (ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE);
@@ -771,30 +682,6 @@ ctl_init(tsdn_t *tsdn)
 		}
 		memset(ctl_stats.arenas, 0, (ctl_stats.narenas + 1) *
 		    sizeof(ctl_arena_stats_t));
-
-		/*
-		 * Initialize all stats structures, regardless of whether they
-		 * ever get used.  Lazy initialization would allow errors to
-		 * cause inconsistent state to be viewable by the application.
-		 */
-		if (config_stats) {
-			unsigned i;
-			for (i = 0; i <= ctl_stats.narenas; i++) {
-				if (ctl_arena_init(&ctl_stats.arenas[i])) {
-					unsigned j;
-					for (j = 0; j < i; j++) {
-						a0dalloc(
-						    ctl_stats.arenas[j].lstats);
-						a0dalloc(
-						    ctl_stats.arenas[j].hstats);
-					}
-					a0dalloc(ctl_stats.arenas);
-					ctl_stats.arenas = NULL;
-					ret = true;
-					goto label_return;
-				}
-			}
-		}
 		ctl_stats.arenas[ctl_stats.narenas].initialized = true;
 
 		ctl_epoch = 0;
@@ -1924,25 +1811,13 @@ arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
 	return (super_arenas_bin_i_node);
 }
 
-CTL_RO_NL_GEN(arenas_nlruns, nlclasses, unsigned)
-CTL_RO_NL_GEN(arenas_lrun_i_size, index2size(NBINS+(szind_t)mib[2]), size_t)
-static const ctl_named_node_t *
-arenas_lrun_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
-{
-
-	if (i > nlclasses)
-		return (NULL);
-	return (super_arenas_lrun_i_node);
-}
-
-CTL_RO_NL_GEN(arenas_nhchunks, nhclasses, unsigned)
-CTL_RO_NL_GEN(arenas_hchunk_i_size, index2size(NBINS+nlclasses+(szind_t)mib[2]),
-    size_t)
+CTL_RO_NL_GEN(arenas_nhchunks, NSIZES - NBINS, unsigned)
+CTL_RO_NL_GEN(arenas_hchunk_i_size, index2size(NBINS+(szind_t)mib[2]), size_t)
 static const ctl_named_node_t *
 arenas_hchunk_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
 {
 
-	if (i > nhclasses)
+	if (i > NSIZES - NBINS)
 		return (NULL);
 	return (super_arenas_hchunk_i_node);
 }
@@ -2136,14 +2011,6 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc,
 	ctl_stats.arenas[mib[2]].ndalloc_small, uint64_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests,
 	ctl_stats.arenas[mib[2]].nrequests_small, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated,
-	ctl_stats.arenas[mib[2]].astats.allocated_large, size_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc,
-	ctl_stats.arenas[mib[2]].astats.nmalloc_large, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
-	ctl_stats.arenas[mib[2]].astats.ndalloc_large, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
-	ctl_stats.arenas[mib[2]].astats.nrequests_large, uint64_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_huge_allocated,
 	ctl_stats.arenas[mib[2]].astats.allocated_huge, size_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nmalloc,
@@ -2182,32 +2049,12 @@ stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
 	return (super_stats_arenas_i_bins_j_node);
 }
 
-CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nmalloc,
-	ctl_stats.arenas[mib[2]].lstats[mib[4]].nmalloc, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_ndalloc,
-	ctl_stats.arenas[mib[2]].lstats[mib[4]].ndalloc, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nrequests,
-	ctl_stats.arenas[mib[2]].lstats[mib[4]].nrequests, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns,
-	ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t)
-
-static const ctl_named_node_t *
-stats_arenas_i_lruns_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
-    size_t j)
-{
-
-	if (j > nlclasses)
-		return (NULL);
-	return (super_stats_arenas_i_lruns_j_node);
-}
-
 CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_nmalloc,
 	ctl_stats.arenas[mib[2]].hstats[mib[4]].nmalloc, uint64_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_ndalloc,
 	ctl_stats.arenas[mib[2]].hstats[mib[4]].ndalloc, uint64_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_nrequests,
-	ctl_stats.arenas[mib[2]].hstats[mib[4]].nmalloc, /* Intentional. */
-	uint64_t)
+	ctl_stats.arenas[mib[2]].hstats[mib[4]].nrequests, uint64_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_curhchunks,
 	ctl_stats.arenas[mib[2]].hstats[mib[4]].curhchunks, size_t)
 
@@ -2216,7 +2063,7 @@ stats_arenas_i_hchunks_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
     size_t j)
 {
 
-	if (j > nhclasses)
+	if (j > NSIZES - NBINS)
 		return (NULL);
 	return (super_stats_arenas_i_hchunks_j_node);
 }
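With the lrun/hchunk distinction gone, every size class above the bins is reachable through a single mallctl subtree, indexed from NBINS upward. A usage sketch that walks it via the public mallctl API (this assumes the tree as laid out above; error handling is abbreviated):

/*
 * Print the size of each above-bin size class using the unified
 * "arenas.hchunk.<i>.size" namespace from the hunks above.
 */
#include <stdio.h>
#include <jemalloc/jemalloc.h>

static void
print_hchunk_sizes(void)
{
    unsigned nhchunks, i;
    size_t sz, miblen = 4;
    size_t mib[4];

    sz = sizeof(nhchunks);
    if (mallctl("arenas.nhchunks", &nhchunks, &sz, NULL, 0) != 0)
        return;
    if (mallctlnametomib("arenas.hchunk.0.size", mib, &miblen) != 0)
        return;
    for (i = 0; i < nhchunks; i++) {
        size_t class_size;

        mib[2] = i; /* the index component, as in arenas_hchunk_i_size */
        sz = sizeof(class_size);
        if (mallctlbymib(mib, miblen, &class_size, &sz, NULL, 0) == 0)
            printf("class %u: %zu bytes\n", i, class_size);
    }
}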
@@ -40,7 +40,7 @@ extent_size_quantize_floor(size_t size)
 	pszind_t pind;
 
 	assert(size > 0);
-	assert(size <= HUGE_MAXCLASS);
+	assert(size - large_pad <= HUGE_MAXCLASS);
 	assert((size & PAGE_MASK) == 0);
 
 	assert(size != 0);
@@ -77,7 +77,7 @@ extent_size_quantize_ceil(size_t size)
 	size_t ret;
 
 	assert(size > 0);
-	assert(size <= HUGE_MAXCLASS);
+	assert(size - large_pad <= HUGE_MAXCLASS);
 	assert((size & PAGE_MASK) == 0);
 
 	ret = extent_size_quantize_floor(size);
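The quantize functions bound any page-aligned size between the nearest size classes on either side. A toy power-of-two grid shows the floor(size) <= size <= ceil(size) relationship the asserts depend on; jemalloc's real class grid is finer, but the contract is the same:

/*
 * Toy quantization over power-of-two page multiples; illustrative only.
 */
#include <assert.h>
#include <stddef.h>

#define TOY_PAGE ((size_t)4096)

static size_t
toy_quantize_floor(size_t size)
{
    size_t q;

    assert(size >= TOY_PAGE && (size & (TOY_PAGE - 1)) == 0);
    for (q = TOY_PAGE; (q << 1) <= size; q <<= 1)
        ; /* largest class <= size */
    return (q);
}

static size_t
toy_quantize_ceil(size_t size)
{
    size_t q = toy_quantize_floor(size);

    return ((q == size) ? q : (q << 1)); /* smallest class >= size */
}

/* Example: a 3-page size has floor 2 pages and ceil 4 pages. */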
59	src/huge.c
@@ -19,6 +19,7 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
 	size_t ausize;
 	extent_t *extent;
 	bool is_zeroed;
+	UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);
 
 	assert(!tsdn_null(tsdn) || arena != NULL);
 
@@ -42,6 +43,8 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
 	ql_elm_new(extent, ql_link);
 	ql_tail_insert(&arena->huge, extent, ql_link);
 	malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+	if (config_prof && arena_prof_accum(tsdn, arena, usize))
+		prof_idump(tsdn);
 
 	if (zero || (config_fill && unlikely(opt_zero))) {
 		if (!is_zeroed) {
@@ -61,8 +64,20 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
 #undef huge_dalloc_junk
 #define	huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
 #endif
+void
+huge_dalloc_junk(void *ptr, size_t usize)
+{
+
+	memset(ptr, JEMALLOC_FREE_JUNK, usize);
+}
+#ifdef JEMALLOC_JET
+#undef huge_dalloc_junk
+#define	huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
+huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
+#endif
 
 static void
-huge_dalloc_junk(tsdn_t *tsdn, void *ptr, size_t usize)
+huge_dalloc_maybe_junk(tsdn_t *tsdn, void *ptr, size_t usize)
 {
 
 	if (config_fill && have_dss && unlikely(opt_junk_free)) {
@@ -71,14 +86,10 @@ huge_dalloc_junk(tsdn_t *tsdn, void *ptr, size_t usize)
 		 * unmapped.
 		 */
 		if (!config_munmap || (have_dss && chunk_in_dss(tsdn, ptr)))
-			memset(ptr, JEMALLOC_FREE_JUNK, usize);
+			huge_dalloc_junk(ptr, usize);
 	}
 }
-#ifdef JEMALLOC_JET
-#undef huge_dalloc_junk
-#define	huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
-huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
-#endif
 
 static bool
 huge_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize)
@@ -93,12 +104,12 @@ huge_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize)
 	/* Split excess pages. */
 	if (diff != 0) {
 		extent_t *trail = chunk_split_wrapper(tsdn, arena, &chunk_hooks,
-		    extent, usize + large_pad, diff);
+		    extent, usize + large_pad, usize, diff, diff);
 		if (trail == NULL)
 			return (true);
 
 		if (config_fill && unlikely(opt_junk_free)) {
-			huge_dalloc_junk(tsdn, extent_addr_get(trail),
+			huge_dalloc_maybe_junk(tsdn, extent_addr_get(trail),
 			    extent_usize_get(trail));
 		}
 
@@ -176,7 +187,8 @@ huge_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
 	/* The following should have been caught by callers. */
 	assert(usize_min > 0 && usize_max <= HUGE_MAXCLASS);
 	/* Both allocation sizes must be huge to avoid a move. */
-	assert(extent_usize_get(extent) >= chunksize && usize_max >= chunksize);
+	assert(extent_usize_get(extent) >= LARGE_MINCLASS && usize_max >=
+	    LARGE_MINCLASS);
 
 	if (usize_max > extent_usize_get(extent)) {
 		/* Attempt to expand the allocation in-place. */
@@ -234,7 +246,8 @@ huge_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize,
 	/* The following should have been caught by callers. */
 	assert(usize > 0 && usize <= HUGE_MAXCLASS);
 	/* Both allocation sizes must be huge to avoid a move. */
-	assert(extent_usize_get(extent) >= chunksize && usize >= chunksize);
+	assert(extent_usize_get(extent) >= LARGE_MINCLASS && usize >=
+	    LARGE_MINCLASS);
 
 	/* Try to avoid moving the allocation. */
 	if (!huge_ralloc_no_move(tsdn, extent, usize, usize, zero))
@@ -257,23 +270,41 @@ huge_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize,
 	return (ret);
 }
 
-void
-huge_dalloc(tsdn_t *tsdn, extent_t *extent)
+static void
+huge_dalloc_impl(tsdn_t *tsdn, extent_t *extent, bool junked_locked)
 {
 	arena_t *arena;
 
 	arena = extent_arena_get(extent);
-	malloc_mutex_lock(tsdn, &arena->huge_mtx);
+	if (!junked_locked)
+		malloc_mutex_lock(tsdn, &arena->huge_mtx);
 	ql_remove(&arena->huge, extent, ql_link);
-	malloc_mutex_unlock(tsdn, &arena->huge_mtx);
-
-	huge_dalloc_junk(tsdn, extent_addr_get(extent),
-	    extent_usize_get(extent));
-	arena_chunk_dalloc_huge(tsdn, extent_arena_get(extent), extent);
+	if (!junked_locked) {
+		malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+
+		huge_dalloc_maybe_junk(tsdn, extent_addr_get(extent),
+		    extent_usize_get(extent));
+	}
+	arena_chunk_dalloc_huge(tsdn, arena, extent, junked_locked);
 
-	arena_decay_tick(tsdn, arena);
+	if (!junked_locked)
+		arena_decay_tick(tsdn, arena);
+}
+
+void
+huge_dalloc_junked_locked(tsdn_t *tsdn, extent_t *extent)
+{
+
+	huge_dalloc_impl(tsdn, extent, true);
+}
+
+void
+huge_dalloc(tsdn_t *tsdn, extent_t *extent)
+{
+
+	huge_dalloc_impl(tsdn, extent, false);
+}
 
 size_t
 huge_salloc(tsdn_t *tsdn, const extent_t *extent)
 {
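huge_dalloc_impl() above adopts the junked_locked convention used elsewhere in jemalloc: one static implementation takes a flag saying whether the caller has already junk-filled the memory and holds the relevant lock, and two thin public wrappers pin the flag. A generic sketch of the shape (names here are illustrative, not jemalloc's):

/*
 * Sketch of the impl-plus-wrappers pattern; illustrative only.
 */
#include <stdbool.h>
#include <pthread.h>

typedef struct {
    pthread_mutex_t mtx;
    /* ... resource state ... */
} widget_t;

static void widget_lock(widget_t *w) { pthread_mutex_lock(&w->mtx); }
static void widget_unlock(widget_t *w) { pthread_mutex_unlock(&w->mtx); }

static void
widget_dalloc_impl(widget_t *w, bool junked_locked)
{
    if (!junked_locked)
        widget_lock(w);
    /* ... shared teardown work ... */
    if (!junked_locked)
        widget_unlock(w);
}

void
widget_dalloc_junked_locked(widget_t *w)
{
    widget_dalloc_impl(w, true); /* caller already junked and holds the lock */
}

void
widget_dalloc(widget_t *w)
{
    widget_dalloc_impl(w, false);
}

This is what lets the tcache flush path (see src/tcache.c below) release cached huge objects while it already holds the arena lock, without relocking or double junk-filling.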
@@ -1401,7 +1401,7 @@ ialloc_prof_sample(tsd_t *tsd, size_t usize, szind_t ind, bool zero,
 		p = ialloc(tsd, LARGE_MINCLASS, ind_large, zero, slow_path);
 		if (p == NULL)
 			return (NULL);
-		arena_prof_promoted(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), p), p,
+		arena_prof_promote(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), p), p,
 		    usize);
 	} else
 		p = ialloc(tsd, usize, ind, zero, slow_path);
@@ -1483,8 +1483,7 @@ ialloc_post_check(void *ret, tsdn_t *tsdn, size_t usize, const char *func,
 		set_errno(ENOMEM);
 	}
 	if (config_stats && likely(ret != NULL)) {
-		assert(usize == isalloc(tsdn, iealloc(tsdn, ret), ret,
-		    config_prof));
+		assert(usize == isalloc(tsdn, iealloc(tsdn, ret), ret));
 		*tsd_thread_allocatedp_get(tsdn_tsd(tsdn)) += usize;
 	}
 	witness_assert_lockless(tsdn);
@@ -1527,7 +1526,7 @@ imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize,
 		p = ipalloc(tsd, LARGE_MINCLASS, alignment, false);
 		if (p == NULL)
 			return (NULL);
-		arena_prof_promoted(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), p), p,
+		arena_prof_promote(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), p), p,
 		    usize);
 	} else
 		p = ipalloc(tsd, usize, alignment, false);
@@ -1608,7 +1607,7 @@ imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
 label_return:
 	if (config_stats && likely(result != NULL)) {
 		assert(usize == isalloc(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd),
-		    result), result, config_prof));
+		    result), result));
 		*tsd_thread_allocatedp_get(tsd) += usize;
 	}
 	UTRACE(0, size, result);
@@ -1699,7 +1698,7 @@ irealloc_prof_sample(tsd_t *tsd, extent_t *extent, void *old_ptr,
 		    false);
 		if (p == NULL)
 			return (NULL);
-		arena_prof_promoted(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), p), p,
+		arena_prof_promote(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), p), p,
 		    usize);
 	} else
 		p = iralloc(tsd, extent, old_ptr, old_usize, usize, 0, false);
@@ -1748,10 +1747,10 @@ ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
 
 	extent = iealloc(tsd_tsdn(tsd), ptr);
 	if (config_prof && opt_prof) {
-		usize = isalloc(tsd_tsdn(tsd), extent, ptr, config_prof);
+		usize = isalloc(tsd_tsdn(tsd), extent, ptr);
 		prof_free(tsd, extent, ptr, usize);
 	} else if (config_stats)
-		usize = isalloc(tsd_tsdn(tsd), extent, ptr, config_prof);
+		usize = isalloc(tsd_tsdn(tsd), extent, ptr);
 	if (config_stats)
 		*tsd_thread_deallocatedp_get(tsd) += usize;
 
@@ -1815,7 +1814,7 @@ je_realloc(void *ptr, size_t size)
 		witness_assert_lockless(tsd_tsdn(tsd));
 
 		extent = iealloc(tsd_tsdn(tsd), ptr);
-		old_usize = isalloc(tsd_tsdn(tsd), extent, ptr, config_prof);
+		old_usize = isalloc(tsd_tsdn(tsd), extent, ptr);
 		if (config_prof && opt_prof) {
 			usize = s2u(size);
 			ret = unlikely(usize == 0 || usize > HUGE_MAXCLASS) ?
@@ -1848,8 +1847,7 @@ je_realloc(void *ptr, size_t size)
 	if (config_stats && likely(ret != NULL)) {
 		tsd_t *tsd;
 
-		assert(usize == isalloc(tsdn, iealloc(tsdn, ret), ret,
-		    config_prof));
+		assert(usize == isalloc(tsdn, iealloc(tsdn, ret), ret));
 		tsd = tsdn_tsd(tsdn);
 		*tsd_thread_allocatedp_get(tsd) += usize;
 		*tsd_thread_deallocatedp_get(tsd) += old_usize;
@@ -2003,7 +2001,7 @@ imallocx_prof_sample(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
 		    tcache, arena, slow_path);
 		if (p == NULL)
 			return (NULL);
-		arena_prof_promoted(tsdn, iealloc(tsdn, p), p, usize);
+		arena_prof_promote(tsdn, iealloc(tsdn, p), p, usize);
 	} else
 		p = imallocx_flags(tsdn, usize, alignment, zero, tcache, arena,
 		    slow_path);
@@ -2138,7 +2136,7 @@ irallocx_prof_sample(tsdn_t *tsdn, extent_t *extent, void *old_ptr,
 		    alignment, zero, tcache, arena);
 		if (p == NULL)
 			return (NULL);
-		arena_prof_promoted(tsdn, iealloc(tsdn, p), p, usize);
+		arena_prof_promote(tsdn, iealloc(tsdn, p), p, usize);
 	} else {
 		p = iralloct(tsdn, extent, old_ptr, old_usize, usize, alignment,
 		    zero, tcache, arena);
@@ -2182,7 +2180,7 @@ irallocx_prof(tsd_t *tsd, extent_t *extent, void *old_ptr, size_t old_usize,
 		 * reallocation.  Therefore, query the actual value of usize.
 		 */
 		e = extent;
-		*usize = isalloc(tsd_tsdn(tsd), e, p, config_prof);
+		*usize = isalloc(tsd_tsdn(tsd), e, p);
 	} else
 		e = iealloc(tsd_tsdn(tsd), p);
 	prof_realloc(tsd, e, p, *usize, tctx, prof_active, true, old_ptr,
@@ -2229,7 +2227,7 @@ je_rallocx(void *ptr, size_t size, int flags)
 	} else
 		tcache = tcache_get(tsd, true);
 
-	old_usize = isalloc(tsd_tsdn(tsd), extent, ptr, config_prof);
+	old_usize = isalloc(tsd_tsdn(tsd), extent, ptr);
 
 	if (config_prof && opt_prof) {
 		usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
@@ -2246,7 +2244,7 @@ je_rallocx(void *ptr, size_t size, int flags)
 			goto label_oom;
 		if (config_stats) {
 			usize = isalloc(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd),
-			    p), p, config_prof);
+			    p), p);
 		}
 	}
 	assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
@@ -2276,7 +2274,7 @@ ixallocx_helper(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t old_usize,
 
 	if (ixalloc(tsdn, extent, ptr, old_usize, size, extra, alignment, zero))
 		return (old_usize);
-	usize = isalloc(tsdn, extent, ptr, config_prof);
+	usize = isalloc(tsdn, extent, ptr);
 
 	return (usize);
 }
@@ -2363,7 +2361,7 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
 	witness_assert_lockless(tsd_tsdn(tsd));
 	extent = iealloc(tsd_tsdn(tsd), ptr);
 
-	old_usize = isalloc(tsd_tsdn(tsd), extent, ptr, config_prof);
+	old_usize = isalloc(tsd_tsdn(tsd), extent, ptr);
 
 	/*
 	 * The API explicitly absolves itself of protecting against (size +
@@ -2414,9 +2412,9 @@ je_sallocx(const void *ptr, int flags)
 	witness_assert_lockless(tsdn);
 
 	if (config_ivsalloc)
-		usize = ivsalloc(tsdn, ptr, config_prof);
+		usize = ivsalloc(tsdn, ptr);
 	else
-		usize = isalloc(tsdn, iealloc(tsdn, ptr), ptr, config_prof);
+		usize = isalloc(tsdn, iealloc(tsdn, ptr), ptr);
 
 	witness_assert_lockless(tsdn);
 	return (usize);
@@ -2477,7 +2475,7 @@ je_sdallocx(void *ptr, size_t size, int flags)
 	tsd = tsd_fetch();
 	extent = iealloc(tsd_tsdn(tsd), ptr);
 	usize = inallocx(tsd_tsdn(tsd), size, flags);
-	assert(usize == isalloc(tsd_tsdn(tsd), extent, ptr, config_prof));
+	assert(usize == isalloc(tsd_tsdn(tsd), extent, ptr));
 
 	witness_assert_lockless(tsd_tsdn(tsd));
 	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
@@ -2593,10 +2591,10 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
 	witness_assert_lockless(tsdn);
 
 	if (config_ivsalloc)
-		ret = ivsalloc(tsdn, ptr, config_prof);
+		ret = ivsalloc(tsdn, ptr);
 	else {
-		ret = (ptr == NULL) ? 0 : isalloc(tsdn, iealloc(tsdn, ptr), ptr,
-		    config_prof);
+		ret = (ptr == NULL) ? 0 : isalloc(tsdn, iealloc(tsdn, ptr),
+		    ptr);
 	}
 
 	witness_assert_lockless(tsdn);
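None of the isalloc()/ivsalloc() signature churn above is visible to applications; usable-size queries still go through the same public API touched by the je_sallocx and je_malloc_usable_size hunks. A small usage example:

/* Usable-size queries through the stable public API. */
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
    void *p = mallocx(50 * 1024, 0); /* arbitrary request above the bins */

    if (p == NULL)
        return (1);
    /* Both report the size class actually backing the allocation. */
    printf("sallocx: %zu\n", sallocx(p, 0));
    printf("malloc_usable_size: %zu\n", malloc_usable_size(p));
    dallocx(p, 0);
    return (0);
}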
86	src/stats.c
@@ -37,12 +37,10 @@ size_t	stats_cactive = 0;
 
 static void	stats_arena_bins_print(void (*write_cb)(void *, const char *),
     void *cbopaque, unsigned i);
-static void	stats_arena_lruns_print(void (*write_cb)(void *, const char *),
-    void *cbopaque, unsigned i);
 static void	stats_arena_hchunks_print(
     void (*write_cb)(void *, const char *), void *cbopaque, unsigned i);
 static void	stats_arena_print(void (*write_cb)(void *, const char *),
-    void *cbopaque, unsigned i, bool bins, bool large, bool huge);
+    void *cbopaque, unsigned i, bool bins, bool huge);
 
 /******************************************************************************/
 
@@ -157,64 +155,17 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
 	}
 }
 
-static void
-stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque,
-    unsigned i)
-{
-	unsigned nbins, nlruns, j;
-	bool in_gap;
-
-	malloc_cprintf(write_cb, cbopaque,
-	    "large:          size ind    allocated      nmalloc      ndalloc"
-	    "    nrequests      curruns\n");
-	CTL_GET("arenas.nbins", &nbins, unsigned);
-	CTL_GET("arenas.nlruns", &nlruns, unsigned);
-	for (j = 0, in_gap = false; j < nlruns; j++) {
-		uint64_t nmalloc, ndalloc, nrequests;
-		size_t run_size, curruns;
-
-		CTL_M2_M4_GET("stats.arenas.0.lruns.0.nmalloc", i, j, &nmalloc,
-		    uint64_t);
-		CTL_M2_M4_GET("stats.arenas.0.lruns.0.ndalloc", i, j, &ndalloc,
-		    uint64_t);
-		CTL_M2_M4_GET("stats.arenas.0.lruns.0.nrequests", i, j,
-		    &nrequests, uint64_t);
-		if (nrequests == 0)
-			in_gap = true;
-		else {
-			CTL_M2_GET("arenas.lrun.0.size", j, &run_size, size_t);
-			CTL_M2_M4_GET("stats.arenas.0.lruns.0.curruns", i, j,
-			    &curruns, size_t);
-			if (in_gap) {
-				malloc_cprintf(write_cb, cbopaque,
-				    "                     ---\n");
-				in_gap = false;
-			}
-			malloc_cprintf(write_cb, cbopaque,
-			    "%20zu %3u %12zu %12"FMTu64" %12"FMTu64
-			    " %12"FMTu64" %12zu\n",
-			    run_size, nbins + j, curruns * run_size, nmalloc,
-			    ndalloc, nrequests, curruns);
-		}
-	}
-	if (in_gap) {
-		malloc_cprintf(write_cb, cbopaque,
-		    "                     ---\n");
-	}
-}
-
 static void
 stats_arena_hchunks_print(void (*write_cb)(void *, const char *),
     void *cbopaque, unsigned i)
 {
-	unsigned nbins, nlruns, nhchunks, j;
+	unsigned nbins, nhchunks, j;
 	bool in_gap;
 
 	malloc_cprintf(write_cb, cbopaque,
 	    "huge:           size ind    allocated      nmalloc      ndalloc"
 	    "    nrequests   curhchunks\n");
 	CTL_GET("arenas.nbins", &nbins, unsigned);
-	CTL_GET("arenas.nlruns", &nlruns, unsigned);
 	CTL_GET("arenas.nhchunks", &nhchunks, unsigned);
 	for (j = 0, in_gap = false; j < nhchunks; j++) {
 		uint64_t nmalloc, ndalloc, nrequests;
@@ -241,7 +192,7 @@ stats_arena_hchunks_print(void (*write_cb)(void *, const char *),
 			malloc_cprintf(write_cb, cbopaque,
 			    "%20zu %3u %12zu %12"FMTu64" %12"FMTu64
 			    " %12"FMTu64" %12zu\n",
-			    hchunk_size, nbins + nlruns + j,
+			    hchunk_size, nbins + j,
 			    curhchunks * hchunk_size, nmalloc, ndalloc,
 			    nrequests, curhchunks);
 		}
@@ -254,7 +205,7 @@ stats_arena_hchunks_print(void (*write_cb)(void *, const char *),
 
 static void
 stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
-    unsigned i, bool bins, bool large, bool huge)
+    unsigned i, bool bins, bool huge)
 {
 	unsigned nthreads;
 	const char *dss;
@@ -264,8 +215,6 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
 	uint64_t npurge, nmadvise, purged;
 	size_t small_allocated;
 	uint64_t small_nmalloc, small_ndalloc, small_nrequests;
-	size_t large_allocated;
-	uint64_t large_nmalloc, large_ndalloc, large_nrequests;
 	size_t huge_allocated;
 	uint64_t huge_nmalloc, huge_ndalloc, huge_nrequests;
 
@@ -318,16 +267,6 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
 	    "small:         %12zu %12"FMTu64" %12"FMTu64
 	    " %12"FMTu64"\n",
 	    small_allocated, small_nmalloc, small_ndalloc, small_nrequests);
-	CTL_M2_GET("stats.arenas.0.large.allocated", i, &large_allocated,
-	    size_t);
-	CTL_M2_GET("stats.arenas.0.large.nmalloc", i, &large_nmalloc, uint64_t);
-	CTL_M2_GET("stats.arenas.0.large.ndalloc", i, &large_ndalloc, uint64_t);
-	CTL_M2_GET("stats.arenas.0.large.nrequests", i, &large_nrequests,
-	    uint64_t);
-	malloc_cprintf(write_cb, cbopaque,
-	    "large:         %12zu %12"FMTu64" %12"FMTu64
-	    " %12"FMTu64"\n",
-	    large_allocated, large_nmalloc, large_ndalloc, large_nrequests);
 	CTL_M2_GET("stats.arenas.0.huge.allocated", i, &huge_allocated, size_t);
 	CTL_M2_GET("stats.arenas.0.huge.nmalloc", i, &huge_nmalloc, uint64_t);
 	CTL_M2_GET("stats.arenas.0.huge.ndalloc", i, &huge_ndalloc, uint64_t);
@@ -340,10 +279,8 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
 	malloc_cprintf(write_cb, cbopaque,
 	    "total:         %12zu %12"FMTu64" %12"FMTu64
 	    " %12"FMTu64"\n",
-	    small_allocated + large_allocated + huge_allocated,
-	    small_nmalloc + large_nmalloc + huge_nmalloc,
-	    small_ndalloc + large_ndalloc + huge_ndalloc,
-	    small_nrequests + large_nrequests + huge_nrequests);
+	    small_allocated + huge_allocated, small_nmalloc + huge_nmalloc,
+	    small_ndalloc + huge_ndalloc, small_nrequests + huge_nrequests);
 	malloc_cprintf(write_cb, cbopaque,
 	    "active:        %12zu\n", pactive * page);
 	CTL_M2_GET("stats.arenas.0.mapped", i, &mapped, size_t);
@@ -362,8 +299,6 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
 
 	if (bins)
 		stats_arena_bins_print(write_cb, cbopaque, i);
-	if (large)
-		stats_arena_lruns_print(write_cb, cbopaque, i);
 	if (huge)
 		stats_arena_hchunks_print(write_cb, cbopaque, i);
 }
@@ -379,7 +314,6 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 	bool merged = true;
 	bool unmerged = true;
 	bool bins = true;
-	bool large = true;
 	bool huge = true;
 
 	/*
@@ -421,9 +355,6 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 			bins = false;
 			break;
 		case 'l':
-			large = false;
-			break;
-		case 'h':
 			huge = false;
 			break;
 		default:;
@@ -636,7 +567,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 				malloc_cprintf(write_cb, cbopaque,
 				    "\nMerged arenas stats:\n");
 				stats_arena_print(write_cb, cbopaque,
-				    narenas, bins, large, huge);
+				    narenas, bins, huge);
 			}
 		}
 	}
@@ -662,8 +593,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 					    cbopaque,
 					    "\narenas[%u]:\n", i);
 					stats_arena_print(write_cb,
-					    cbopaque, i, bins, large,
-					    huge);
+					    cbopaque, i, bins, huge);
 				}
 			}
 		}
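With the lruns table gone, the stats printer keeps one per size class table besides the bins, and the option characters shrink to match: the old 'h' case folds into 'l' above. Example invocation of the public entry point:

/* Print allocator stats without per size class detail. */
#include <jemalloc/jemalloc.h>

int
main(void)
{
    /* Per the parser above, "b" skips per-bin detail and "l" skips the
     * unified large (huge) class table. */
    malloc_stats_print(NULL, NULL, "bl");
    return (0);
}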
41	src/tcache.c
@@ -27,7 +27,7 @@ size_t
 tcache_salloc(tsdn_t *tsdn, const void *ptr)
 {
 
-	return (arena_salloc(tsdn, iealloc(tsdn, ptr), ptr, false));
+	return (arena_salloc(tsdn, iealloc(tsdn, ptr), ptr));
 }
 
 void
@@ -46,7 +46,7 @@ tcache_event_hard(tsd_t *tsd, tcache_t *tcache)
 		    tbin->ncached - tbin->low_water + (tbin->low_water
 		    >> 2));
 	} else {
-		tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached
+		tcache_bin_flush_huge(tsd, tbin, binind, tbin->ncached
 		    - tbin->low_water + (tbin->low_water >> 2), tcache);
 	}
 	/*
@@ -170,7 +170,7 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
 }
 
 void
-tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
+tcache_bin_flush_huge(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
     unsigned rem, tcache_t *tcache)
 {
 	arena_t *arena;
@@ -200,9 +200,9 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
 		}
 		if (config_stats) {
 			merged_stats = true;
-			arena->stats.nrequests_large +=
+			arena->stats.nrequests_huge +=
 			    tbin->tstats.nrequests;
-			arena->stats.lstats[binind - NBINS].nrequests +=
+			arena->stats.hstats[binind - NBINS].nrequests +=
 			    tbin->tstats.nrequests;
 			tbin->tstats.nrequests = 0;
 		}
@@ -213,10 +213,8 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
 			assert(ptr != NULL);
 			extent = iealloc(tsd_tsdn(tsd), ptr);
 			if (extent_arena_get(extent) == locked_arena) {
-				arena_chunk_t *chunk =
-				    (arena_chunk_t *)extent_base_get(extent);
-				arena_dalloc_large_junked_locked(tsd_tsdn(tsd),
-				    locked_arena, chunk, extent, ptr);
+				huge_dalloc_junked_locked(tsd_tsdn(tsd),
+				    extent);
 			} else {
 				/*
 				 * This object was allocated via a different
@@ -240,8 +238,8 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
 		 * arena, so the stats didn't get merged.  Manually do so now.
 		 */
 		malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
-		arena->stats.nrequests_large += tbin->tstats.nrequests;
-		arena->stats.lstats[binind - NBINS].nrequests +=
+		arena->stats.nrequests_huge += tbin->tstats.nrequests;
+		arena->stats.hstats[binind - NBINS].nrequests +=
 		    tbin->tstats.nrequests;
 		tbin->tstats.nrequests = 0;
 		malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
@@ -379,12 +377,12 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
 
 	for (; i < nhbins; i++) {
 		tcache_bin_t *tbin = &tcache->tbins[i];
-		tcache_bin_flush_large(tsd, tbin, i, 0, tcache);
+		tcache_bin_flush_huge(tsd, tbin, i, 0, tcache);
 
 		if (config_stats && tbin->tstats.nrequests != 0) {
 			malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
-			arena->stats.nrequests_large += tbin->tstats.nrequests;
+			arena->stats.nrequests_huge += tbin->tstats.nrequests;
|
||||||
arena->stats.lstats[i - NBINS].nrequests +=
|
arena->stats.hstats[i - NBINS].nrequests +=
|
||||||
tbin->tstats.nrequests;
|
tbin->tstats.nrequests;
|
||||||
malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
|
malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
|
||||||
}
|
}
|
||||||
@ -439,10 +437,10 @@ tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
|
|||||||
}
|
}
|
||||||
|
|
||||||
for (; i < nhbins; i++) {
|
for (; i < nhbins; i++) {
|
||||||
malloc_large_stats_t *lstats = &arena->stats.lstats[i - NBINS];
|
malloc_huge_stats_t *hstats = &arena->stats.hstats[i - NBINS];
|
||||||
tcache_bin_t *tbin = &tcache->tbins[i];
|
tcache_bin_t *tbin = &tcache->tbins[i];
|
||||||
arena->stats.nrequests_large += tbin->tstats.nrequests;
|
arena->stats.nrequests_huge += tbin->tstats.nrequests;
|
||||||
lstats->nrequests += tbin->tstats.nrequests;
|
hstats->nrequests += tbin->tstats.nrequests;
|
||||||
tbin->tstats.nrequests = 0;
|
tbin->tstats.nrequests = 0;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -516,14 +514,9 @@ tcache_boot(tsdn_t *tsdn)
|
|||||||
{
|
{
|
||||||
unsigned i;
|
unsigned i;
|
||||||
|
|
||||||
/*
|
/* If necessary, clamp opt_lg_tcache_max. */
|
||||||
* If necessary, clamp opt_lg_tcache_max, now that large_maxclass is
|
|
||||||
* known.
|
|
||||||
*/
|
|
||||||
if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS)
|
if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS)
|
||||||
tcache_maxclass = SMALL_MAXCLASS;
|
tcache_maxclass = SMALL_MAXCLASS;
|
||||||
else if ((1U << opt_lg_tcache_max) > large_maxclass)
|
|
||||||
tcache_maxclass = large_maxclass;
|
|
||||||
else
|
else
|
||||||
tcache_maxclass = (1U << opt_lg_tcache_max);
|
tcache_maxclass = (1U << opt_lg_tcache_max);
|
||||||
|
|
||||||
@ -550,7 +543,7 @@ tcache_boot(tsdn_t *tsdn)
|
|||||||
stack_nelms += tcache_bin_info[i].ncached_max;
|
stack_nelms += tcache_bin_info[i].ncached_max;
|
||||||
}
|
}
|
||||||
for (; i < nhbins; i++) {
|
for (; i < nhbins; i++) {
|
||||||
tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE;
|
tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_HUGE;
|
||||||
stack_nelms += tcache_bin_info[i].ncached_max;
|
stack_nelms += tcache_bin_info[i].ncached_max;
|
||||||
}
|
}
|
||||||
|
|
||||||
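Note: tcache_boot() now clamps opt_lg_tcache_max only against SMALL_MAXCLASS at the low end, since there is no separate large_maxclass ceiling anymore. The effective cap is observable through the public API; a minimal sketch (assumes an unprefixed build with tcache enabled, e.g. run with MALLOC_CONF=lg_tcache_max:16):

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	size_t tcache_max, sz = sizeof(tcache_max);

	/* Query the cap tcache_boot() derived from opt.lg_tcache_max. */
	if (mallctl("arenas.tcache_max", &tcache_max, &sz, NULL, 0) != 0) {
		fputs("arenas.tcache_max unavailable (tcache disabled?)\n",
		    stderr);
		return (1);
	}
	printf("tcache_max: %zu bytes\n", tcache_max);
	return (0);
}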
src/zone.c

@@ -56,7 +56,7 @@ zone_size(malloc_zone_t *zone, void *ptr)
 	 * not work in practice, we must check all pointers to assure that they
 	 * reside within a mapped chunk before determining size.
 	 */
-	return (ivsalloc(tsdn_fetch(), ptr, config_prof));
+	return (ivsalloc(tsdn_fetch(), ptr));
 }

 static void *

@@ -87,7 +87,7 @@ static void
 zone_free(malloc_zone_t *zone, void *ptr)
 {

-	if (ivsalloc(tsdn_fetch(), ptr, config_prof) != 0) {
+	if (ivsalloc(tsdn_fetch(), ptr) != 0) {
 		je_free(ptr);
 		return;
 	}

@@ -99,7 +99,7 @@ static void *
 zone_realloc(malloc_zone_t *zone, void *ptr, size_t size)
 {

-	if (ivsalloc(tsdn_fetch(), ptr, config_prof) != 0)
+	if (ivsalloc(tsdn_fetch(), ptr) != 0)
 		return (je_realloc(ptr, size));

 	return (realloc(ptr, size));

@@ -123,7 +123,7 @@ zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size)
 {
 	size_t alloc_size;

-	alloc_size = ivsalloc(tsdn_fetch(), ptr, config_prof);
+	alloc_size = ivsalloc(tsdn_fetch(), ptr);
 	if (alloc_size != 0) {
 		assert(alloc_size == size);
 		je_free(ptr);
test/integration/chunk.c

@@ -120,7 +120,7 @@ chunk_merge(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
 TEST_BEGIN(test_chunk)
 {
 	void *p;
-	size_t old_size, new_size, large0, large1, huge0, huge1, huge2, sz;
+	size_t old_size, new_size, huge0, huge1, huge2, sz;
 	unsigned arena_ind;
 	int flags;
 	size_t hooks_mib[3], purge_mib[3];

@@ -162,14 +162,8 @@ TEST_BEGIN(test_chunk)
 	assert_ptr_ne(old_hooks.split, chunk_split, "Unexpected split error");
 	assert_ptr_ne(old_hooks.merge, chunk_merge, "Unexpected merge error");

-	/* Get large size classes. */
-	sz = sizeof(size_t);
-	assert_d_eq(mallctl("arenas.lrun.0.size", &large0, &sz, NULL, 0), 0,
-	    "Unexpected arenas.lrun.0.size failure");
-	assert_d_eq(mallctl("arenas.lrun.1.size", &large1, &sz, NULL, 0), 0,
-	    "Unexpected arenas.lrun.1.size failure");
-
 	/* Get huge size classes. */
+	sz = sizeof(size_t);
 	assert_d_eq(mallctl("arenas.hchunk.0.size", &huge0, &sz, NULL, 0), 0,
 	    "Unexpected arenas.hchunk.0.size failure");
 	assert_d_eq(mallctl("arenas.hchunk.1.size", &huge1, &sz, NULL, 0), 0,

@@ -224,24 +218,6 @@ TEST_BEGIN(test_chunk)
 	do_dalloc = true;
 	do_decommit = false;

-	/* Test decommit for large allocations. */
-	do_decommit = true;
-	p = mallocx(large1, flags);
-	assert_ptr_not_null(p, "Unexpected mallocx() error");
-	assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0),
-	    0, "Unexpected arena.%u.purge error", arena_ind);
-	did_decommit = false;
-	assert_zu_eq(xallocx(p, large0, 0, flags), large0,
-	    "Unexpected xallocx() failure");
-	assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0),
-	    0, "Unexpected arena.%u.purge error", arena_ind);
-	did_commit = false;
-	assert_zu_eq(xallocx(p, large1, 0, flags), large1,
-	    "Unexpected xallocx() failure");
-	assert_b_eq(did_decommit, did_commit, "Expected decommit/commit match");
-	dallocx(p, flags);
-	do_decommit = false;
-
 	/* Make sure non-huge allocation succeeds. */
 	p = mallocx(42, flags);
 	assert_ptr_not_null(p, "Unexpected mallocx() error");
test/integration/xallocx.c

@@ -91,13 +91,6 @@ get_nsmall(void)
 	return (get_nsizes_impl("arenas.nbins"));
 }

-static unsigned
-get_nlarge(void)
-{
-
-	return (get_nsizes_impl("arenas.nlruns"));
-}
-
 static unsigned
 get_nhuge(void)
 {

@@ -131,13 +124,6 @@ get_small_size(size_t ind)
 	return (get_size_impl("arenas.bin.0.size", ind));
 }

-static size_t
-get_large_size(size_t ind)
-{
-
-	return (get_size_impl("arenas.lrun.0.size", ind));
-}
-
 static size_t
 get_huge_size(size_t ind)
 {

@@ -239,81 +225,14 @@ TEST_BEGIN(test_extra_small)
 }
 TEST_END

-TEST_BEGIN(test_extra_large)
+TEST_BEGIN(test_extra_huge)
 {
 	int flags = MALLOCX_ARENA(arena_ind());
-	size_t smallmax, large0, large1, large2, huge0, hugemax;
+	size_t smallmax, huge1, huge2, huge3, hugemax;
 	void *p;

 	/* Get size classes. */
 	smallmax = get_small_size(get_nsmall()-1);
-	large0 = get_large_size(0);
-	large1 = get_large_size(1);
-	large2 = get_large_size(2);
-	huge0 = get_huge_size(0);
-	hugemax = get_huge_size(get_nhuge()-1);
-
-	p = mallocx(large2, flags);
-	assert_ptr_not_null(p, "Unexpected mallocx() error");
-
-	assert_zu_eq(xallocx(p, large2, 0, flags), large2,
-	    "Unexpected xallocx() behavior");
-	/* Test size decrease with zero extra. */
-	assert_zu_eq(xallocx(p, large0, 0, flags), large0,
-	    "Unexpected xallocx() behavior");
-	assert_zu_eq(xallocx(p, smallmax, 0, flags), large0,
-	    "Unexpected xallocx() behavior");
-
-	assert_zu_eq(xallocx(p, large2, 0, flags), large2,
-	    "Unexpected xallocx() behavior");
-	/* Test size decrease with non-zero extra. */
-	assert_zu_eq(xallocx(p, large0, large2 - large0, flags), large2,
-	    "Unexpected xallocx() behavior");
-	assert_zu_eq(xallocx(p, large1, large2 - large1, flags), large2,
-	    "Unexpected xallocx() behavior");
-	assert_zu_eq(xallocx(p, large0, large1 - large0, flags), large1,
-	    "Unexpected xallocx() behavior");
-	assert_zu_eq(xallocx(p, smallmax, large0 - smallmax, flags), large0,
-	    "Unexpected xallocx() behavior");
-
-	assert_zu_eq(xallocx(p, large0, 0, flags), large0,
-	    "Unexpected xallocx() behavior");
-	/* Test size increase with zero extra. */
-	assert_zu_eq(xallocx(p, large2, 0, flags), large2,
-	    "Unexpected xallocx() behavior");
-	assert_zu_eq(xallocx(p, huge0, 0, flags), large2,
-	    "Unexpected xallocx() behavior");
-
-	assert_zu_eq(xallocx(p, large0, 0, flags), large0,
-	    "Unexpected xallocx() behavior");
-	/* Test size increase with non-zero extra. */
-	assert_zu_lt(xallocx(p, large0, huge0 - large0, flags), huge0,
-	    "Unexpected xallocx() behavior");
-
-	assert_zu_eq(xallocx(p, large0, 0, flags), large0,
-	    "Unexpected xallocx() behavior");
-	/* Test size increase with non-zero extra. */
-	assert_zu_eq(xallocx(p, large0, large2 - large0, flags), large2,
-	    "Unexpected xallocx() behavior");
-
-	assert_zu_eq(xallocx(p, large2, 0, flags), large2,
-	    "Unexpected xallocx() behavior");
-	/* Test size+extra overflow. */
-	assert_zu_lt(xallocx(p, large2, hugemax - large2 + 1, flags), huge0,
-	    "Unexpected xallocx() behavior");
-
-	dallocx(p, flags);
-}
-TEST_END
-
-TEST_BEGIN(test_extra_huge)
-{
-	int flags = MALLOCX_ARENA(arena_ind());
-	size_t largemax, huge1, huge2, huge3, hugemax;
-	void *p;
-
-	/* Get size classes. */
-	largemax = get_large_size(get_nlarge()-1);
 	huge1 = get_huge_size(1);
 	huge2 = get_huge_size(2);
 	huge3 = get_huge_size(3);

@@ -327,7 +246,7 @@ TEST_BEGIN(test_extra_huge)
 	/* Test size decrease with zero extra. */
 	assert_zu_ge(xallocx(p, huge1, 0, flags), huge1,
 	    "Unexpected xallocx() behavior");
-	assert_zu_ge(xallocx(p, largemax, 0, flags), huge1,
+	assert_zu_ge(xallocx(p, smallmax, 0, flags), huge1,
 	    "Unexpected xallocx() behavior");

 	assert_zu_eq(xallocx(p, huge3, 0, flags), huge3,

@@ -339,7 +258,7 @@ TEST_BEGIN(test_extra_huge)
 	    "Unexpected xallocx() behavior");
 	assert_zu_eq(xallocx(p, huge1, huge2 - huge1, flags), huge2,
 	    "Unexpected xallocx() behavior");
-	assert_zu_ge(xallocx(p, largemax, huge1 - largemax, flags), huge1,
+	assert_zu_ge(xallocx(p, smallmax, huge1 - smallmax, flags), huge1,
 	    "Unexpected xallocx() behavior");

 	assert_zu_ge(xallocx(p, huge1, 0, flags), huge1,

@@ -455,18 +374,6 @@ test_zero(size_t szmin, size_t szmax)
 	dallocx(p, flags);
 }

-TEST_BEGIN(test_zero_large)
-{
-	size_t large0, largemax;
-
-	/* Get size classes. */
-	large0 = get_large_size(0);
-	largemax = get_large_size(get_nlarge()-1);
-
-	test_zero(large0, largemax);
-}
-TEST_END
-
 TEST_BEGIN(test_zero_huge)
 {
 	size_t huge0, huge1;

@@ -490,8 +397,6 @@ main(void)
 	    test_size,
 	    test_size_extra_overflow,
 	    test_extra_small,
-	    test_extra_large,
 	    test_extra_huge,
-	    test_zero_large,
 	    test_zero_huge));
 }
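Note: the assertions above all rest on xallocx()'s contract: it only ever resizes in place and returns the resulting usable size, which may fall short of the request. A standalone sketch of that contract (public API only; the 4096/8192 sizes are illustrative, assumes an unprefixed build):

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	void *p = mallocx(4096, 0);
	size_t usize;

	if (p == NULL)
		return (1);
	/*
	 * Attempt an in-place grow; the return value is the new usable
	 * size, which equals sallocx(p, 0) whether or not the grow
	 * succeeded.  The pointer itself never changes.
	 */
	usize = xallocx(p, 8192, 0, 0);
	printf("requested 8192, usable %zu (sallocx agrees: %zu)\n",
	    usize, sallocx(p, 0));
	dallocx(p, 0);
	return (0);
}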
test/unit/arena_reset.c

@@ -24,13 +24,6 @@ get_nsmall(void)
 	return (get_nsizes_impl("arenas.nbins"));
 }

-static unsigned
-get_nlarge(void)
-{
-
-	return (get_nsizes_impl("arenas.nlruns"));
-}
-
 static unsigned
 get_nhuge(void)
 {

@@ -64,13 +57,6 @@ get_small_size(size_t ind)
 	return (get_size_impl("arenas.bin.0.size", ind));
 }

-static size_t
-get_large_size(size_t ind)
-{
-
-	return (get_size_impl("arenas.lrun.0.size", ind));
-}
-
 static size_t
 get_huge_size(size_t ind)
 {

@@ -90,13 +76,13 @@ vsalloc(tsdn_t *tsdn, const void *ptr)
 	if (!extent_active_get(extent))
 		return (0);

-	return (isalloc(tsdn, extent, ptr, false));
+	return (isalloc(tsdn, extent, ptr));
 }

 TEST_BEGIN(test_arena_reset)
 {
-#define NHUGE 4
+#define NHUGE 32
-	unsigned arena_ind, nsmall, nlarge, nhuge, nptrs, i;
+	unsigned arena_ind, nsmall, nhuge, nptrs, i;
 	size_t sz, miblen;
 	void **ptrs;
 	int flags;

@@ -110,9 +96,8 @@ TEST_BEGIN(test_arena_reset)
 	flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;

 	nsmall = get_nsmall();
-	nlarge = get_nlarge();
 	nhuge = get_nhuge() > NHUGE ? NHUGE : get_nhuge();
-	nptrs = nsmall + nlarge + nhuge;
+	nptrs = nsmall + nhuge;
 	ptrs = (void **)malloc(nptrs * sizeof(void *));
 	assert_ptr_not_null(ptrs, "Unexpected malloc() failure");

@@ -123,15 +108,9 @@ TEST_BEGIN(test_arena_reset)
 		assert_ptr_not_null(ptrs[i],
 		    "Unexpected mallocx(%zu, %#x) failure", sz, flags);
 	}
-	for (i = 0; i < nlarge; i++) {
-		sz = get_large_size(i);
-		ptrs[nsmall + i] = mallocx(sz, flags);
-		assert_ptr_not_null(ptrs[i],
-		    "Unexpected mallocx(%zu, %#x) failure", sz, flags);
-	}
 	for (i = 0; i < nhuge; i++) {
 		sz = get_huge_size(i);
-		ptrs[nsmall + nlarge + i] = mallocx(sz, flags);
+		ptrs[nsmall + i] = mallocx(sz, flags);
 		assert_ptr_not_null(ptrs[i],
 		    "Unexpected mallocx(%zu, %#x) failure", sz, flags);
 	}

@@ -140,7 +119,7 @@ TEST_BEGIN(test_arena_reset)

 	/* Verify allocations. */
 	for (i = 0; i < nptrs; i++) {
-		assert_zu_gt(ivsalloc(tsdn, ptrs[i], false), 0,
+		assert_zu_gt(ivsalloc(tsdn, ptrs[i]), 0,
 		    "Allocation should have queryable size");
 	}
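Note: this test drives a private arena created through mallctl. A minimal sketch of that setup pattern (public API only; assumes the 4.x-era "arenas.extend" control exercised elsewhere in this diff and an unprefixed build):

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	unsigned arena_ind;
	size_t sz = sizeof(arena_ind);
	void *p;

	/* Create a fresh arena and allocate from it, bypassing tcache. */
	if (mallctl("arenas.extend", &arena_ind, &sz, NULL, 0) != 0)
		return (1);
	p = mallocx(4096, MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE);
	if (p == NULL)
		return (1);
	printf("allocated %zu usable bytes from arena %u\n",
	    sallocx(p, 0), arena_ind);
	dallocx(p, MALLOCX_TCACHE_NONE);
	return (0);
}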
test/unit/decay.c

@@ -1,6 +1,6 @@
 #include "test/jemalloc_test.h"

-const char *malloc_conf = "purge:decay,decay_time:1";
+const char *malloc_conf = "purge:decay,decay_time:1,lg_tcache_max:0";

 static nstime_update_t *nstime_update_orig;

@@ -22,7 +22,7 @@ TEST_BEGIN(test_decay_ticks)
 {
 	ticker_t *decay_ticker;
 	unsigned tick0, tick1;
-	size_t sz, huge0, large0;
+	size_t sz, huge0;
 	void *p;

 	test_skip_if(opt_purge != purge_mode_decay);

@@ -34,13 +34,11 @@ TEST_BEGIN(test_decay_ticks)
 	sz = sizeof(size_t);
 	assert_d_eq(mallctl("arenas.hchunk.0.size", &huge0, &sz, NULL, 0), 0,
 	    "Unexpected mallctl failure");
-	assert_d_eq(mallctl("arenas.lrun.0.size", &large0, &sz, NULL, 0), 0,
-	    "Unexpected mallctl failure");

 	/*
 	 * Test the standard APIs using a huge size class, since we can't
-	 * control tcache interactions (except by completely disabling tcache
-	 * for the entire test program).
+	 * control tcache interactions for small size classes (except by
+	 * completely disabling tcache for the entire test program).
 	 */

 	/* malloc(). */

@@ -101,15 +99,14 @@ TEST_BEGIN(test_decay_ticks)
 	assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");

 	/*
-	 * Test the *allocx() APIs using huge, large, and small size classes,
-	 * with tcache explicitly disabled.
+	 * Test the *allocx() APIs using huge and small size classes, with
+	 * tcache explicitly disabled.
 	 */
 	{
 		unsigned i;
-		size_t allocx_sizes[3];
+		size_t allocx_sizes[2];
 		allocx_sizes[0] = huge0;
-		allocx_sizes[1] = large0;
-		allocx_sizes[2] = 1;
+		allocx_sizes[1] = 1;

 		for (i = 0; i < sizeof(allocx_sizes) / sizeof(size_t); i++) {
 			sz = allocx_sizes[i];

@@ -157,13 +154,13 @@ TEST_BEGIN(test_decay_ticks)
 	}

 	/*
-	 * Test tcache fill/flush interactions for large and small size classes,
+	 * Test tcache fill/flush interactions for huge and small size classes,
 	 * using an explicit tcache.
 	 */
 	if (config_tcache) {
 		unsigned tcache_ind, i;
 		size_t tcache_sizes[2];
-		tcache_sizes[0] = large0;
+		tcache_sizes[0] = huge0;
 		tcache_sizes[1] = 1;

 		sz = sizeof(unsigned);

@@ -204,14 +201,14 @@ TEST_BEGIN(test_decay_ticker)
 	uint64_t epoch;
 	uint64_t npurge0 = 0;
 	uint64_t npurge1 = 0;
-	size_t sz, large;
+	size_t sz, huge;
 	unsigned i, nupdates0;
 	nstime_t time, decay_time, deadline;

 	test_skip_if(opt_purge != purge_mode_decay);

 	/*
-	 * Allocate a bunch of large objects, pause the clock, deallocate the
+	 * Allocate a bunch of huge objects, pause the clock, deallocate the
 	 * objects, restore the clock, then [md]allocx() in a tight loop to
 	 * verify the ticker triggers purging.
 	 */

@@ -222,11 +219,11 @@ TEST_BEGIN(test_decay_ticker)
 		sz = sizeof(size_t);
 		assert_d_eq(mallctl("arenas.tcache_max", &tcache_max, &sz, NULL,
 		    0), 0, "Unexpected mallctl failure");
-		large = nallocx(tcache_max + 1, flags);
+		huge = nallocx(tcache_max + 1, flags);
 	} else {
 		sz = sizeof(size_t);
-		assert_d_eq(mallctl("arenas.lrun.0.size", &large, &sz, NULL, 0),
-		    0, "Unexpected mallctl failure");
+		assert_d_eq(mallctl("arenas.hchunk.0.size", &huge, &sz, NULL,
+		    0), 0, "Unexpected mallctl failure");
 	}

 	assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,

@@ -238,7 +235,7 @@ TEST_BEGIN(test_decay_ticker)
 	    config_stats ? 0 : ENOENT, "Unexpected mallctl result");

 	for (i = 0; i < NPS; i++) {
-		ps[i] = mallocx(large, flags);
+		ps[i] = mallocx(huge, flags);
 		assert_ptr_not_null(ps[i], "Unexpected mallocx() failure");
 	}

@@ -296,13 +293,13 @@ TEST_BEGIN(test_decay_nonmonotonic)
 	uint64_t epoch;
 	uint64_t npurge0 = 0;
 	uint64_t npurge1 = 0;
-	size_t sz, large0;
+	size_t sz, huge0;
 	unsigned i, nupdates0;

 	test_skip_if(opt_purge != purge_mode_decay);

 	sz = sizeof(size_t);
-	assert_d_eq(mallctl("arenas.lrun.0.size", &large0, &sz, NULL, 0), 0,
+	assert_d_eq(mallctl("arenas.hchunk.0.size", &huge0, &sz, NULL, 0), 0,
 	    "Unexpected mallctl failure");

 	assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,

@@ -322,7 +319,7 @@ TEST_BEGIN(test_decay_nonmonotonic)
 	nstime_update = nstime_update_mock;

 	for (i = 0; i < NPS; i++) {
-		ps[i] = mallocx(large0, flags);
+		ps[i] = mallocx(huge0, flags);
 		assert_ptr_not_null(ps[i], "Unexpected mallocx() failure");
 	}
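Note: the decay tests force purging through the "arena.<i>.purge" control and then read purge counters. A minimal sketch of that read path (hedged: "stats.arenas.0.npurge" is only meaningful in a --enable-stats build; assumes an unprefixed jemalloc):

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	uint64_t epoch = 1, npurge;
	size_t sz;

	if (mallctl("arena.0.purge", NULL, NULL, NULL, 0) != 0)
		return (1);
	/* Refresh the stats snapshot before reading it. */
	mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));
	sz = sizeof(npurge);
	if (mallctl("stats.arenas.0.npurge", &npurge, &sz, NULL, 0) == 0)
		printf("arena 0 npurge: %llu\n", (unsigned long long)npurge);
	return (0);
}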
test/unit/extent_quantize.c

@@ -35,16 +35,16 @@ TEST_BEGIN(test_small_extent_size)
 }
 TEST_END

-TEST_BEGIN(test_large_extent_size)
+TEST_BEGIN(test_huge_extent_size)
 {
 	bool cache_oblivious;
-	unsigned nlruns, i;
+	unsigned nhchunks, i;
 	size_t sz, extent_size_prev, ceil_prev;
 	size_t mib[4];
 	size_t miblen = sizeof(mib) / sizeof(size_t);

 	/*
-	 * Iterate over all large size classes, get their extent sizes, and
+	 * Iterate over all huge size classes, get their extent sizes, and
 	 * verify that the quantized size is the same as the extent size.
 	 */

@@ -53,12 +53,12 @@ TEST_BEGIN(test_large_extent_size)
 	    NULL, 0), 0, "Unexpected mallctl failure");

 	sz = sizeof(unsigned);
-	assert_d_eq(mallctl("arenas.nlruns", &nlruns, &sz, NULL, 0), 0,
+	assert_d_eq(mallctl("arenas.nhchunks", &nhchunks, &sz, NULL, 0), 0,
 	    "Unexpected mallctl failure");

-	assert_d_eq(mallctlnametomib("arenas.lrun.0.size", mib, &miblen), 0,
+	assert_d_eq(mallctlnametomib("arenas.hchunk.0.size", mib, &miblen), 0,
 	    "Unexpected mallctlnametomib failure");
-	for (i = 0; i < nlruns; i++) {
+	for (i = 0; i < nhchunks; i++) {
 		size_t lextent_size, extent_size, floor, ceil;

 		mib[2] = i;

@@ -91,33 +91,24 @@ TEST_BEGIN(test_large_extent_size)
 			    ceil_prev, extent_size);
 			}
 		}
+		if (i + 1 < nhchunks) {
 			extent_size_prev = floor;
-			ceil_prev = extent_size_quantize_ceil(extent_size + PAGE);
+			ceil_prev = extent_size_quantize_ceil(extent_size +
+			    PAGE);
+		}
 	}
 }
 TEST_END

 TEST_BEGIN(test_monotonic)
 {
-	unsigned nbins, nlruns, i;
-	size_t sz, floor_prev, ceil_prev;
-
-	/*
-	 * Iterate over all extent sizes and verify that
-	 * extent_size_quantize_{floor,ceil}() are monotonic.
-	 */
-
-	sz = sizeof(unsigned);
-	assert_d_eq(mallctl("arenas.nbins", &nbins, &sz, NULL, 0), 0,
-	    "Unexpected mallctl failure");
-
-	sz = sizeof(unsigned);
-	assert_d_eq(mallctl("arenas.nlruns", &nlruns, &sz, NULL, 0), 0,
-	    "Unexpected mallctl failure");
+#define SZ_MAX ZU(4 * 1024 * 1024)
+	unsigned i;
+	size_t floor_prev, ceil_prev;

 	floor_prev = 0;
 	ceil_prev = 0;
-	for (i = 1; i <= large_maxclass >> LG_PAGE; i++) {
+	for (i = 1; i <= SZ_MAX >> LG_PAGE; i++) {
 		size_t extent_size, floor, ceil;

 		extent_size = i << LG_PAGE;

@@ -150,6 +141,6 @@ main(void)

 	return (test(
 	    test_small_extent_size,
-	    test_large_extent_size,
+	    test_huge_extent_size,
 	    test_monotonic));
 }
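Note: extent_size_quantize_{floor,ceil}() are internal to jemalloc, but the invariants test_monotonic checks are easy to state in isolation: floor and ceil bracket their input, and both are monotonically non-decreasing. A toy page-granularity model of those invariants (not jemalloc's real size-class-aware quantization; PAGE is a stand-in constant):

#include <assert.h>
#include <stddef.h>

#define PAGE ((size_t)4096)

/* Toy floor/ceil quantization to page multiples. */
static size_t
quantize_floor(size_t size)
{
	return (size & ~(PAGE - 1));
}

static size_t
quantize_ceil(size_t size)
{
	return (quantize_floor(size + PAGE - 1));
}

int
main(void)
{
	size_t i, floor_prev = 0, ceil_prev = 0;

	for (i = 1; i <= 1024; i++) {
		size_t sz = i * PAGE / 4;	/* Sub-page steps. */
		size_t floor = quantize_floor(sz);
		size_t ceil = quantize_ceil(sz);

		assert(floor <= sz && sz <= ceil);	/* Bracketing. */
		assert(floor_prev <= floor);		/* Monotonic floor. */
		assert(ceil_prev <= ceil);		/* Monotonic ceiling. */
		floor_prev = floor;
		ceil_prev = ceil;
	}
	return (0);
}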
test/unit/junk.c

@@ -9,7 +9,6 @@ const char *malloc_conf =
 #endif

 static arena_dalloc_junk_small_t *arena_dalloc_junk_small_orig;
-static arena_dalloc_junk_large_t *arena_dalloc_junk_large_orig;
 static huge_dalloc_junk_t *huge_dalloc_junk_orig;
 static void *watch_for_junking;
 static bool saw_junking;

@@ -38,25 +37,10 @@ arena_dalloc_junk_small_intercept(void *ptr, const arena_bin_info_t *bin_info)
 }

 static void
-arena_dalloc_junk_large_intercept(void *ptr, size_t usize)
-{
-	size_t i;
-
-	arena_dalloc_junk_large_orig(ptr, usize);
-	for (i = 0; i < usize; i++) {
-		assert_u_eq(((uint8_t *)ptr)[i], JEMALLOC_FREE_JUNK,
-		    "Missing junk fill for byte %zu/%zu of deallocated region",
-		    i, usize);
-	}
-	if (ptr == watch_for_junking)
-		saw_junking = true;
-}
-
-static void
-huge_dalloc_junk_intercept(tsdn_t *tsdn, void *ptr, size_t usize)
+huge_dalloc_junk_intercept(void *ptr, size_t usize)
 {

-	huge_dalloc_junk_orig(tsdn, ptr, usize);
+	huge_dalloc_junk_orig(ptr, usize);
 	/*
 	 * The conditions under which junk filling actually occurs are nuanced
 	 * enough that it doesn't make sense to duplicate the decision logic in

@@ -75,8 +59,6 @@ test_junk(size_t sz_min, size_t sz_max)
 	if (opt_junk_free) {
 		arena_dalloc_junk_small_orig = arena_dalloc_junk_small;
 		arena_dalloc_junk_small = arena_dalloc_junk_small_intercept;
-		arena_dalloc_junk_large_orig = arena_dalloc_junk_large;
-		arena_dalloc_junk_large = arena_dalloc_junk_large_intercept;
 		huge_dalloc_junk_orig = huge_dalloc_junk;
 		huge_dalloc_junk = huge_dalloc_junk_intercept;
 	}

@@ -106,13 +88,18 @@ test_junk(size_t sz_min, size_t sz_max)
 		}

 		if (xallocx(s, sz+1, 0, 0) == sz) {
+			uint8_t *t;
 			watch_junking(s);
-			s = (uint8_t *)rallocx(s, sz+1, 0);
-			assert_ptr_not_null((void *)s,
+			t = (uint8_t *)rallocx(s, sz+1, 0);
+			assert_ptr_not_null((void *)t,
 			    "Unexpected rallocx() failure");
+			assert_ptr_ne(s, t, "Unexpected in-place rallocx()");
+			assert_zu_ge(sallocx(t, 0), sz+1,
+			    "Unexpectedly small rallocx() result");
 			assert_true(!opt_junk_free || saw_junking,
 			    "Expected region of size %zu to be junk-filled",
 			    sz);
+			s = t;
 		}
 	}

@@ -123,7 +110,6 @@ test_junk(size_t sz_min, size_t sz_max)

 	if (opt_junk_free) {
 		arena_dalloc_junk_small = arena_dalloc_junk_small_orig;
-		arena_dalloc_junk_large = arena_dalloc_junk_large_orig;
 		huge_dalloc_junk = huge_dalloc_junk_orig;
 	}
 }

@@ -136,64 +122,11 @@ TEST_BEGIN(test_junk_small)
 }
 TEST_END

-TEST_BEGIN(test_junk_large)
-{
-
-	test_skip_if(!config_fill);
-	test_junk(SMALL_MAXCLASS+1, large_maxclass);
-}
-TEST_END
-
 TEST_BEGIN(test_junk_huge)
 {

 	test_skip_if(!config_fill);
-	test_junk(large_maxclass+1, chunksize*2);
-}
-TEST_END
-
-arena_ralloc_junk_large_t *arena_ralloc_junk_large_orig;
-static void *most_recently_trimmed;
-
-static size_t
-shrink_size(size_t size)
-{
-	size_t shrink_size;
-
-	for (shrink_size = size - 1; nallocx(shrink_size, 0) == size;
-	    shrink_size--)
-		; /* Do nothing. */
-
-	return (shrink_size);
-}
-
-static void
-arena_ralloc_junk_large_intercept(void *ptr, size_t old_usize, size_t usize)
-{
-
-	arena_ralloc_junk_large_orig(ptr, old_usize, usize);
-	assert_zu_eq(old_usize, large_maxclass, "Unexpected old_usize");
-	assert_zu_eq(usize, shrink_size(large_maxclass), "Unexpected usize");
-	most_recently_trimmed = ptr;
-}
-
-TEST_BEGIN(test_junk_large_ralloc_shrink)
-{
-	void *p1, *p2;
-
-	p1 = mallocx(large_maxclass, 0);
-	assert_ptr_not_null(p1, "Unexpected mallocx() failure");
-
-	arena_ralloc_junk_large_orig = arena_ralloc_junk_large;
-	arena_ralloc_junk_large = arena_ralloc_junk_large_intercept;
-
-	p2 = rallocx(p1, shrink_size(large_maxclass), 0);
-	assert_ptr_eq(p1, p2, "Unexpected move during shrink");
-
-	arena_ralloc_junk_large = arena_ralloc_junk_large_orig;
-
-	assert_ptr_eq(most_recently_trimmed, p1,
-	    "Expected trimmed portion of region to be junk-filled");
+	test_junk(SMALL_MAXCLASS+1, chunksize*2);
 }
 TEST_END

@@ -203,7 +136,5 @@ main(void)

 	return (test(
 	    test_junk_small,
-	    test_junk_large,
-	    test_junk_huge,
-	    test_junk_large_ralloc_shrink));
+	    test_junk_huge));
 }
test/unit/mallctl.c

@@ -596,8 +596,7 @@ TEST_BEGIN(test_arenas_constants)
 	TEST_ARENAS_CONSTANT(size_t, quantum, QUANTUM);
 	TEST_ARENAS_CONSTANT(size_t, page, PAGE);
 	TEST_ARENAS_CONSTANT(unsigned, nbins, NBINS);
-	TEST_ARENAS_CONSTANT(unsigned, nlruns, nlclasses);
-	TEST_ARENAS_CONSTANT(unsigned, nhchunks, nhclasses);
+	TEST_ARENAS_CONSTANT(unsigned, nhchunks, NSIZES - NBINS);

 #undef TEST_ARENAS_CONSTANT
 }

@@ -622,23 +621,6 @@ TEST_BEGIN(test_arenas_bin_constants)
 }
 TEST_END

-TEST_BEGIN(test_arenas_lrun_constants)
-{
-
-#define TEST_ARENAS_LRUN_CONSTANT(t, name, expected) do {		\
-	t name;								\
-	size_t sz = sizeof(t);						\
-	assert_d_eq(mallctl("arenas.lrun.0."#name, &name, &sz, NULL,	\
-	    0), 0, "Unexpected mallctl() failure");			\
-	assert_zu_eq(name, expected, "Incorrect "#name" size");		\
-} while (0)
-
-	TEST_ARENAS_LRUN_CONSTANT(size_t, size, LARGE_MINCLASS);
-
-#undef TEST_ARENAS_LRUN_CONSTANT
-}
-TEST_END
-
 TEST_BEGIN(test_arenas_hchunk_constants)
 {

@@ -650,7 +632,7 @@ TEST_BEGIN(test_arenas_hchunk_constants)
 	assert_zu_eq(name, expected, "Incorrect "#name" size");	\
 } while (0)

-	TEST_ARENAS_HCHUNK_CONSTANT(size_t, size, chunksize);
+	TEST_ARENAS_HCHUNK_CONSTANT(size_t, size, LARGE_MINCLASS);

 #undef TEST_ARENAS_HCHUNK_CONSTANT
 }

@@ -721,7 +703,6 @@ main(void)
 	    test_arenas_decay_time,
 	    test_arenas_constants,
 	    test_arenas_bin_constants,
-	    test_arenas_lrun_constants,
 	    test_arenas_hchunk_constants,
 	    test_arenas_extend,
 	    test_stats_arenas));
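Note: with the "arenas.lrun.*" namespace gone, per-size-class queries go through "arenas.nhchunks" and "arenas.hchunk.<i>.size". A minimal sketch of the MIB-based iteration the tests use (assumes an unprefixed build of this jemalloc vintage):

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	unsigned nhchunks, i;
	size_t mib[4], miblen = sizeof(mib) / sizeof(size_t);
	size_t sz = sizeof(nhchunks);

	if (mallctl("arenas.nhchunks", &nhchunks, &sz, NULL, 0) != 0)
		return (1);
	if (mallctlnametomib("arenas.hchunk.0.size", mib, &miblen) != 0)
		return (1);
	for (i = 0; i < nhchunks; i++) {
		size_t hsize;

		mib[2] = i;	/* Select the i'th huge size class. */
		sz = sizeof(hsize);
		if (mallctlbymib(mib, miblen, &hsize, &sz, NULL, 0) == 0)
			printf("hchunk %u: %zu bytes\n", i, hsize);
	}
	return (0);
}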
test/unit/prof_idump.c

@@ -1,10 +1,17 @@
 #include "test/jemalloc_test.h"

+const char *malloc_conf = ""
 #ifdef JEMALLOC_PROF
-const char *malloc_conf =
-    "prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0,"
-    "lg_prof_interval:0";
+    "prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0"
+    ",lg_prof_interval:0"
+# ifdef JEMALLOC_TCACHE
+    ","
+# endif
 #endif
+#ifdef JEMALLOC_TCACHE
+    "tcache:false"
+#endif
+    ;

 static bool did_prof_dump_open;
test/unit/run_quantize.c (deleted)

@@ -1,149 +0,0 @@
-#include "test/jemalloc_test.h"
-
-TEST_BEGIN(test_small_run_size)
-{
-	unsigned nbins, i;
-	size_t sz, run_size;
-	size_t mib[4];
-	size_t miblen = sizeof(mib) / sizeof(size_t);
-
-	/*
-	 * Iterate over all small size classes, get their run sizes, and verify
-	 * that the quantized size is the same as the run size.
-	 */
-
-	sz = sizeof(unsigned);
-	assert_d_eq(mallctl("arenas.nbins", &nbins, &sz, NULL, 0), 0,
-	    "Unexpected mallctl failure");
-
-	assert_d_eq(mallctlnametomib("arenas.bin.0.run_size", mib, &miblen), 0,
-	    "Unexpected mallctlnametomib failure");
-	for (i = 0; i < nbins; i++) {
-		mib[2] = i;
-		sz = sizeof(size_t);
-		assert_d_eq(mallctlbymib(mib, miblen, &run_size, &sz, NULL, 0),
-		    0, "Unexpected mallctlbymib failure");
-		assert_zu_eq(run_size, run_quantize_floor(run_size),
-		    "Small run quantization should be a no-op (run_size=%zu)",
-		    run_size);
-		assert_zu_eq(run_size, run_quantize_ceil(run_size),
-		    "Small run quantization should be a no-op (run_size=%zu)",
-		    run_size);
-	}
-}
-TEST_END
-
-TEST_BEGIN(test_large_run_size)
-{
-	bool cache_oblivious;
-	unsigned nlruns, i;
-	size_t sz, run_size_prev, ceil_prev;
-	size_t mib[4];
-	size_t miblen = sizeof(mib) / sizeof(size_t);
-
-	/*
-	 * Iterate over all large size classes, get their run sizes, and verify
-	 * that the quantized size is the same as the run size.
-	 */
-
-	sz = sizeof(bool);
-	assert_d_eq(mallctl("config.cache_oblivious", &cache_oblivious, &sz,
-	    NULL, 0), 0, "Unexpected mallctl failure");
-
-	sz = sizeof(unsigned);
-	assert_d_eq(mallctl("arenas.nlruns", &nlruns, &sz, NULL, 0), 0,
-	    "Unexpected mallctl failure");
-
-	assert_d_eq(mallctlnametomib("arenas.lrun.0.size", mib, &miblen), 0,
-	    "Unexpected mallctlnametomib failure");
-	for (i = 0; i < nlruns; i++) {
-		size_t lrun_size, run_size, floor, ceil;
-
-		mib[2] = i;
-		sz = sizeof(size_t);
-		assert_d_eq(mallctlbymib(mib, miblen, &lrun_size, &sz, NULL, 0),
-		    0, "Unexpected mallctlbymib failure");
-		run_size = cache_oblivious ? lrun_size + PAGE : lrun_size;
-		floor = run_quantize_floor(run_size);
-		ceil = run_quantize_ceil(run_size);
-
-		assert_zu_eq(run_size, floor,
-		    "Large run quantization should be a no-op for precise "
-		    "size (lrun_size=%zu, run_size=%zu)", lrun_size, run_size);
-		assert_zu_eq(run_size, ceil,
-		    "Large run quantization should be a no-op for precise "
-		    "size (lrun_size=%zu, run_size=%zu)", lrun_size, run_size);
-
-		if (i > 0) {
-			assert_zu_eq(run_size_prev, run_quantize_floor(run_size
-			    - PAGE), "Floor should be a precise size");
-			if (run_size_prev < ceil_prev) {
-				assert_zu_eq(ceil_prev, run_size,
-				    "Ceiling should be a precise size "
-				    "(run_size_prev=%zu, ceil_prev=%zu, "
-				    "run_size=%zu)", run_size_prev, ceil_prev,
-				    run_size);
-			}
-		}
-		run_size_prev = floor;
-		ceil_prev = run_quantize_ceil(run_size + PAGE);
-	}
-}
-TEST_END
-
-TEST_BEGIN(test_monotonic)
-{
-	unsigned nbins, nlruns, i;
-	size_t sz, floor_prev, ceil_prev;
-
-	/*
-	 * Iterate over all run sizes and verify that
-	 * run_quantize_{floor,ceil}() are monotonic.
-	 */
-
-	sz = sizeof(unsigned);
-	assert_d_eq(mallctl("arenas.nbins", &nbins, &sz, NULL, 0), 0,
-	    "Unexpected mallctl failure");
-
-	sz = sizeof(unsigned);
-	assert_d_eq(mallctl("arenas.nlruns", &nlruns, &sz, NULL, 0), 0,
-	    "Unexpected mallctl failure");
-
-	floor_prev = 0;
-	ceil_prev = 0;
-	for (i = 1; i <= large_maxclass >> LG_PAGE; i++) {
-		size_t run_size, floor, ceil;
-
-		run_size = i << LG_PAGE;
-		floor = run_quantize_floor(run_size);
-		ceil = run_quantize_ceil(run_size);
-
-		assert_zu_le(floor, run_size,
-		    "Floor should be <= (floor=%zu, run_size=%zu, ceil=%zu)",
-		    floor, run_size, ceil);
-		assert_zu_ge(ceil, run_size,
-		    "Ceiling should be >= (floor=%zu, run_size=%zu, ceil=%zu)",
-		    floor, run_size, ceil);
-
-		assert_zu_le(floor_prev, floor, "Floor should be monotonic "
-		    "(floor_prev=%zu, floor=%zu, run_size=%zu, ceil=%zu)",
-		    floor_prev, floor, run_size, ceil);
-		assert_zu_le(ceil_prev, ceil, "Ceiling should be monotonic "
-		    "(floor=%zu, run_size=%zu, ceil_prev=%zu, ceil=%zu)",
-		    floor, run_size, ceil_prev, ceil);
-
-		floor_prev = floor;
-		ceil_prev = ceil;
-	}
-}
-TEST_END
-
-int
-main(void)
-{
-
-	return (test(
-	    test_small_run_size,
-	    test_large_run_size,
-	    test_monotonic));
-}
test/unit/stats.c

@@ -42,7 +42,7 @@ TEST_BEGIN(test_stats_huge)
 	size_t sz;
 	int expected = config_stats ? 0 : ENOENT;

-	p = mallocx(large_maxclass+1, 0);
+	p = mallocx(SMALL_MAXCLASS+1, 0);
 	assert_ptr_not_null(p, "Unexpected mallocx() failure");

 	assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,

@@ -75,7 +75,7 @@ TEST_END
 TEST_BEGIN(test_stats_arenas_summary)
 {
 	unsigned arena;
-	void *little, *large, *huge;
+	void *little, *huge;
 	uint64_t epoch;
 	size_t sz;
 	int expected = config_stats ? 0 : ENOENT;

@@ -88,13 +88,10 @@ TEST_BEGIN(test_stats_arenas_summary)

 	little = mallocx(SMALL_MAXCLASS, 0);
 	assert_ptr_not_null(little, "Unexpected mallocx() failure");
-	large = mallocx(large_maxclass, 0);
-	assert_ptr_not_null(large, "Unexpected mallocx() failure");
 	huge = mallocx(chunksize, 0);
 	assert_ptr_not_null(huge, "Unexpected mallocx() failure");

 	dallocx(little, 0);
-	dallocx(large, 0);
 	dallocx(huge, 0);

 	assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,

@@ -188,50 +185,6 @@ TEST_BEGIN(test_stats_arenas_small)
 }
 TEST_END

-TEST_BEGIN(test_stats_arenas_large)
-{
-	unsigned arena;
-	void *p;
-	size_t sz, allocated;
-	uint64_t epoch, nmalloc, ndalloc, nrequests;
-	int expected = config_stats ? 0 : ENOENT;
-
-	arena = 0;
-	assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
-	    0, "Unexpected mallctl() failure");
-
-	p = mallocx(large_maxclass, 0);
-	assert_ptr_not_null(p, "Unexpected mallocx() failure");
-
-	assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
-	    "Unexpected mallctl() failure");
-
-	sz = sizeof(size_t);
-	assert_d_eq(mallctl("stats.arenas.0.large.allocated", &allocated, &sz,
-	    NULL, 0), expected, "Unexpected mallctl() result");
-	sz = sizeof(uint64_t);
-	assert_d_eq(mallctl("stats.arenas.0.large.nmalloc", &nmalloc, &sz,
-	    NULL, 0), expected, "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.arenas.0.large.ndalloc", &ndalloc, &sz,
-	    NULL, 0), expected, "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.arenas.0.large.nrequests", &nrequests, &sz,
-	    NULL, 0), expected, "Unexpected mallctl() result");
-
-	if (config_stats) {
-		assert_zu_gt(allocated, 0,
-		    "allocated should be greater than zero");
-		assert_u64_gt(nmalloc, 0,
-		    "nmalloc should be greater than zero");
-		assert_u64_ge(nmalloc, ndalloc,
-		    "nmalloc should be at least as large as ndalloc");
-		assert_u64_gt(nrequests, 0,
-		    "nrequests should be greater than zero");
-	}
-
-	dallocx(p, 0);
-}
-TEST_END
-
 TEST_BEGIN(test_stats_arenas_huge)
 {
 	unsigned arena;

@@ -346,63 +299,23 @@ TEST_BEGIN(test_stats_arenas_bins)
 }
 TEST_END

-TEST_BEGIN(test_stats_arenas_lruns)
-{
-	unsigned arena;
-	void *p;
-	uint64_t epoch, nmalloc, ndalloc, nrequests;
-	size_t curruns, sz;
-	int expected = config_stats ? 0 : ENOENT;
-
-	arena = 0;
-	assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
-	    0, "Unexpected mallctl() failure");
-
-	p = mallocx(LARGE_MINCLASS, 0);
-	assert_ptr_not_null(p, "Unexpected mallocx() failure");
-
-	assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
-	    "Unexpected mallctl() failure");
-
-	sz = sizeof(uint64_t);
-	assert_d_eq(mallctl("stats.arenas.0.lruns.0.nmalloc", &nmalloc, &sz,
-	    NULL, 0), expected, "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.arenas.0.lruns.0.ndalloc", &ndalloc, &sz,
-	    NULL, 0), expected, "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.arenas.0.lruns.0.nrequests", &nrequests, &sz,
-	    NULL, 0), expected, "Unexpected mallctl() result");
-	sz = sizeof(size_t);
-	assert_d_eq(mallctl("stats.arenas.0.lruns.0.curruns", &curruns, &sz,
-	    NULL, 0), expected, "Unexpected mallctl() result");
-
-	if (config_stats) {
-		assert_u64_gt(nmalloc, 0,
-		    "nmalloc should be greater than zero");
-		assert_u64_ge(nmalloc, ndalloc,
-		    "nmalloc should be at least as large as ndalloc");
-		assert_u64_gt(nrequests, 0,
-		    "nrequests should be greater than zero");
-		assert_u64_gt(curruns, 0,
-		    "At least one run should be currently allocated");
-	}
-
-	dallocx(p, 0);
-}
-TEST_END
-
 TEST_BEGIN(test_stats_arenas_hchunks)
 {
 	unsigned arena;
 	void *p;
 	uint64_t epoch, nmalloc, ndalloc;
-	size_t curhchunks, sz;
+	size_t curhchunks, sz, hsize;
 	int expected = config_stats ? 0 : ENOENT;

 	arena = 0;
 	assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
 	    0, "Unexpected mallctl() failure");

-	p = mallocx(chunksize, 0);
+	sz = sizeof(size_t);
+	assert_d_eq(mallctl("arenas.hchunk.0.size", &hsize, &sz, NULL, 0), 0,
+	    "Unexpected mallctl() failure");
+
+	p = mallocx(hsize, 0);
 	assert_ptr_not_null(p, "Unexpected mallocx() failure");

 	assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,

@@ -439,9 +352,7 @@ main(void)
 	    test_stats_huge,
 	    test_stats_arenas_summary,
 	    test_stats_arenas_small,
-	    test_stats_arenas_large,
 	    test_stats_arenas_huge,
 	    test_stats_arenas_bins,
-	    test_stats_arenas_lruns,
 	    test_stats_arenas_hchunks));
 }
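Note: every stats test above follows the same pattern: allocate, write "epoch" to refresh the stats snapshot, then read counters. A minimal sketch of that pattern (assumes an unprefixed build; "stats.allocated" returns ENOENT without --enable-stats):

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	void *p = mallocx(1 << 20, 0);
	uint64_t epoch = 1;
	size_t allocated, sz = sizeof(allocated);

	if (p == NULL)
		return (1);
	/* Advance the epoch so subsequent reads see current counters. */
	mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));
	if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0)
		printf("stats.allocated: %zu\n", allocated);
	dallocx(p, 0);
	return (0);
}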
test/unit/zero.c

@@ -53,19 +53,11 @@ TEST_BEGIN(test_zero_small)
 }
 TEST_END

-TEST_BEGIN(test_zero_large)
-{
-
-	test_skip_if(!config_fill);
-	test_zero(SMALL_MAXCLASS+1, large_maxclass);
-}
-TEST_END
-
 TEST_BEGIN(test_zero_huge)
 {

 	test_skip_if(!config_fill);
-	test_zero(large_maxclass+1, chunksize*2);
+	test_zero(SMALL_MAXCLASS+1, chunksize*2);
 }
 TEST_END

@@ -75,6 +67,5 @@ main(void)

 	return (test(
 	    test_zero_small,
-	    test_zero_large,
 	    test_zero_huge));
 }