Add per size class huge allocation statistics.

Add per size class huge allocation statistics, and normalize various
stats:
- Change the arenas.nlruns type from size_t to unsigned.
- Add the arenas.nhchunks and arenas.hchunks.<i>.size mallctls.
- Replace the stats.arenas.<i>.bins.<j>.allocated mallctl with
  stats.arenas.<i>.bins.<j>.curregs.
- Add the stats.arenas.<i>.hchunks.<j>.nmalloc,
  stats.arenas.<i>.hchunks.<j>.ndalloc,
  stats.arenas.<i>.hchunks.<j>.nrequests, and
  stats.arenas.<i>.hchunks.<j>.curhchunks mallctls.
This commit is contained in:
Jason Evans 2014-10-12 22:53:59 -07:00
parent 44c97b712e
commit 3c4d92e82a
10 changed files with 724 additions and 338 deletions

View File

@ -406,11 +406,12 @@ for (i = 0; i < nbins; i++) {
functions simultaneously. If <option>--enable-stats</option> is functions simultaneously. If <option>--enable-stats</option> is
specified during configuration, &ldquo;m&rdquo; and &ldquo;a&rdquo; can specified during configuration, &ldquo;m&rdquo; and &ldquo;a&rdquo; can
be specified to omit merged arena and per arena statistics, respectively; be specified to omit merged arena and per arena statistics, respectively;
&ldquo;b&rdquo; and &ldquo;l&rdquo; can be specified to omit per size &ldquo;b&rdquo;, &ldquo;l&rdquo;, and &ldquo;h&rdquo; can be specified to
class statistics for bins and large objects, respectively. Unrecognized omit per size class statistics for bins, large objects, and huge objects,
characters are silently ignored. Note that thread caching may prevent respectively. Unrecognized characters are silently ignored. Note that
some statistics from being completely up to date, since extra locking thread caching may prevent some statistics from being completely up to
would be required to merge counters that track thread cache operations. date, since extra locking would be required to merge counters that track
thread cache operations.
</para> </para>
<para>The <function>malloc_usable_size<parameter/></function> function <para>The <function>malloc_usable_size<parameter/></function> function
@ -1520,7 +1521,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<varlistentry id="arenas.nlruns"> <varlistentry id="arenas.nlruns">
<term> <term>
<mallctl>arenas.nlruns</mallctl> <mallctl>arenas.nlruns</mallctl>
(<type>size_t</type>) (<type>unsigned</type>)
<literal>r-</literal> <literal>r-</literal>
</term> </term>
<listitem><para>Total number of large size classes.</para></listitem> <listitem><para>Total number of large size classes.</para></listitem>
@ -1536,6 +1537,25 @@ malloc_conf = "xmalloc:true";]]></programlisting>
class.</para></listitem> class.</para></listitem>
</varlistentry> </varlistentry>
<varlistentry id="arenas.nhchunks">
<term>
<mallctl>arenas.nhchunks</mallctl>
(<type>unsigned</type>)
<literal>r-</literal>
</term>
<listitem><para>Total number of huge size classes.</para></listitem>
</varlistentry>
<varlistentry id="arenas.hchunks.i.size">
<term>
<mallctl>arenas.hchunks.&lt;i&gt;.size</mallctl>
(<type>size_t</type>)
<literal>r-</literal>
</term>
<listitem><para>Maximum size supported by this huge size
class.</para></listitem>
</varlistentry>
<varlistentry id="arenas.extend"> <varlistentry id="arenas.extend">
<term> <term>
<mallctl>arenas.extend</mallctl> <mallctl>arenas.extend</mallctl>
@ -1945,17 +1965,6 @@ malloc_conf = "xmalloc:true";]]></programlisting>
</para></listitem> </para></listitem>
</varlistentry> </varlistentry>
<varlistentry id="stats.arenas.i.bins.j.allocated">
<term>
<mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.allocated</mallctl>
(<type>size_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Current number of bytes allocated by
bin.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.bins.j.nmalloc"> <varlistentry id="stats.arenas.i.bins.j.nmalloc">
<term> <term>
<mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nmalloc</mallctl> <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nmalloc</mallctl>
@ -1989,6 +1998,17 @@ malloc_conf = "xmalloc:true";]]></programlisting>
requests.</para></listitem> requests.</para></listitem>
</varlistentry> </varlistentry>
<varlistentry id="stats.arenas.i.bins.j.curregs">
<term>
<mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.curregs</mallctl>
(<type>size_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Current number of regions for this size
class.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.bins.j.nfills"> <varlistentry id="stats.arenas.i.bins.j.nfills">
<term> <term>
<mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nfills</mallctl> <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nfills</mallctl>
@ -2083,6 +2103,50 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<listitem><para>Current number of runs for this size class. <listitem><para>Current number of runs for this size class.
</para></listitem> </para></listitem>
</varlistentry> </varlistentry>
<varlistentry id="stats.arenas.i.hchunks.j.nmalloc">
<term>
<mallctl>stats.arenas.&lt;i&gt;.hchunks.&lt;j&gt;.nmalloc</mallctl>
(<type>uint64_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Cumulative number of allocation requests for this size
class served directly by the arena.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.hchunks.j.ndalloc">
<term>
<mallctl>stats.arenas.&lt;i&gt;.hchunks.&lt;j&gt;.ndalloc</mallctl>
(<type>uint64_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Cumulative number of deallocation requests for this
size class served directly by the arena.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.hchunks.j.nrequests">
<term>
<mallctl>stats.arenas.&lt;i&gt;.hchunks.&lt;j&gt;.nrequests</mallctl>
(<type>uint64_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Cumulative number of allocation requests for this size
class.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.hchunks.j.curhchunks">
<term>
<mallctl>stats.arenas.&lt;i&gt;.hchunks.&lt;j&gt;.curhchunks</mallctl>
(<type>size_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Current number of huge allocations for this size class.
</para></listitem>
</varlistentry>
</variablelist> </variablelist>
</refsect1> </refsect1>
<refsect1 id="debugging_malloc_problems"> <refsect1 id="debugging_malloc_problems">

View File

@ -335,11 +335,12 @@ extern size_t map_bias; /* Number of arena chunk header pages. */
extern size_t map_misc_offset; extern size_t map_misc_offset;
extern size_t arena_maxrun; /* Max run size for arenas. */ extern size_t arena_maxrun; /* Max run size for arenas. */
extern size_t arena_maxclass; /* Max size class for arenas. */ extern size_t arena_maxclass; /* Max size class for arenas. */
extern size_t nlclasses; /* Number of large size classes. */ extern unsigned nlclasses; /* Number of large size classes. */
extern unsigned nhclasses; /* Number of huge size classes. */
void *arena_chunk_alloc_huge(arena_t *arena, void *new_addr, size_t size, void *arena_chunk_alloc_huge(arena_t *arena, void *new_addr, size_t usize,
size_t alignment, bool *zero); size_t alignment, bool *zero);
void arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t size); void arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize);
void arena_purge_all(arena_t *arena); void arena_purge_all(arena_t *arena);
void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin,
index_t binind, uint64_t prof_accumbytes); index_t binind, uint64_t prof_accumbytes);
@ -387,7 +388,7 @@ dss_prec_t arena_dss_prec_get(arena_t *arena);
bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec); bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
void arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive, void arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats, size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
malloc_large_stats_t *lstats); malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats);
arena_t *arena_new(unsigned ind); arena_t *arena_new(unsigned ind);
void arena_boot(void); void arena_boot(void);
void arena_prefork(arena_t *arena); void arena_prefork(arena_t *arena);

View File

@ -46,6 +46,7 @@ struct ctl_arena_stats_s {
malloc_bin_stats_t bstats[NBINS]; malloc_bin_stats_t bstats[NBINS];
malloc_large_stats_t *lstats; /* nlclasses elements. */ malloc_large_stats_t *lstats; /* nlclasses elements. */
malloc_huge_stats_t *hstats; /* nhclasses elements. */
}; };
struct ctl_stats_s { struct ctl_stats_s {

View File

@ -4,6 +4,7 @@
typedef struct tcache_bin_stats_s tcache_bin_stats_t; typedef struct tcache_bin_stats_s tcache_bin_stats_t;
typedef struct malloc_bin_stats_s malloc_bin_stats_t; typedef struct malloc_bin_stats_s malloc_bin_stats_t;
typedef struct malloc_large_stats_s malloc_large_stats_t; typedef struct malloc_large_stats_s malloc_large_stats_t;
typedef struct malloc_huge_stats_s malloc_huge_stats_t;
typedef struct arena_stats_s arena_stats_t; typedef struct arena_stats_s arena_stats_t;
typedef struct chunk_stats_s chunk_stats_t; typedef struct chunk_stats_s chunk_stats_t;
@ -20,12 +21,6 @@ struct tcache_bin_stats_s {
}; };
struct malloc_bin_stats_s { struct malloc_bin_stats_s {
/*
* Current number of bytes allocated, including objects currently
* cached by tcache.
*/
size_t allocated;
/* /*
* Total number of allocation/deallocation requests served directly by * Total number of allocation/deallocation requests served directly by
* the bin. Note that tcache may allocate an object, then recycle it * the bin. Note that tcache may allocate an object, then recycle it
@ -42,6 +37,12 @@ struct malloc_bin_stats_s {
*/ */
uint64_t nrequests; uint64_t nrequests;
/*
* Current number of regions of this size class, including regions
* currently cached by tcache.
*/
size_t curregs;
/* Number of tcache fills from this bin. */ /* Number of tcache fills from this bin. */
uint64_t nfills; uint64_t nfills;
@ -78,10 +79,25 @@ struct malloc_large_stats_s {
*/ */
uint64_t nrequests; uint64_t nrequests;
/* Current number of runs of this size class. */ /*
* Current number of runs of this size class, including runs currently
* cached by tcache.
*/
size_t curruns; size_t curruns;
}; };
struct malloc_huge_stats_s {
/*
* Total number of allocation/deallocation requests served directly by
* the arena.
*/
uint64_t nmalloc;
uint64_t ndalloc;
/* Current number of (multi-)chunk allocations of this size class. */
size_t curhchunks;
};
struct arena_stats_s { struct arena_stats_s {
/* Number of bytes currently mapped. */ /* Number of bytes currently mapped. */
size_t mapped; size_t mapped;
@ -104,10 +120,12 @@ struct arena_stats_s {
size_t allocated_huge; size_t allocated_huge;
uint64_t nmalloc_huge; uint64_t nmalloc_huge;
uint64_t ndalloc_huge; uint64_t ndalloc_huge;
uint64_t nrequests_huge;
/* One element for each large size class. */ /* One element for each large size class. */
malloc_large_stats_t *lstats; malloc_large_stats_t *lstats;
/* One element for each huge size class. */
malloc_huge_stats_t *hstats;
}; };
struct chunk_stats_s { struct chunk_stats_s {

View File

@ -11,7 +11,8 @@ size_t map_bias;
size_t map_misc_offset; size_t map_misc_offset;
size_t arena_maxrun; /* Max run size for arenas. */ size_t arena_maxrun; /* Max run size for arenas. */
size_t arena_maxclass; /* Max size class for arenas. */ size_t arena_maxclass; /* Max size class for arenas. */
size_t nlclasses; /* Number of large size classes. */ unsigned nlclasses; /* Number of large size classes. */
unsigned nhclasses; /* Number of huge size classes. */
/******************************************************************************/ /******************************************************************************/
/* /*
@ -411,7 +412,7 @@ arena_chunk_alloc_internal(arena_t *arena, size_t size, size_t alignment,
} }
void * void *
arena_chunk_alloc_huge(arena_t *arena, void *new_addr, size_t size, arena_chunk_alloc_huge(arena_t *arena, void *new_addr, size_t usize,
size_t alignment, bool *zero) size_t alignment, bool *zero)
{ {
void *ret; void *ret;
@ -422,26 +423,33 @@ arena_chunk_alloc_huge(arena_t *arena, void *new_addr, size_t size,
chunk_alloc = arena->chunk_alloc; chunk_alloc = arena->chunk_alloc;
chunk_dalloc = arena->chunk_dalloc; chunk_dalloc = arena->chunk_dalloc;
if (config_stats) { if (config_stats) {
index_t index = size2index(usize) - nlclasses - NBINS;
/* Optimistically update stats prior to unlocking. */ /* Optimistically update stats prior to unlocking. */
arena->stats.mapped += size; arena->stats.allocated_huge += usize;
arena->stats.allocated_huge += size;
arena->stats.nmalloc_huge++; arena->stats.nmalloc_huge++;
arena->stats.nrequests_huge++; arena->stats.hstats[index].nmalloc++;
arena->stats.hstats[index].curhchunks++;
arena->stats.mapped += usize;
} }
arena->nactive += (size >> LG_PAGE); arena->nactive += (usize >> LG_PAGE);
malloc_mutex_unlock(&arena->lock); malloc_mutex_unlock(&arena->lock);
ret = chunk_alloc_arena(chunk_alloc, chunk_dalloc, arena->ind, ret = chunk_alloc_arena(chunk_alloc, chunk_dalloc, arena->ind,
new_addr, size, alignment, zero); new_addr, usize, alignment, zero);
if (config_stats) { if (config_stats) {
if (ret != NULL) if (ret != NULL)
stats_cactive_add(size); stats_cactive_add(usize);
else { else {
/* Revert optimistic stats updates. */ index_t index = size2index(usize) - nlclasses - NBINS;
malloc_mutex_lock(&arena->lock); malloc_mutex_lock(&arena->lock);
arena->stats.mapped -= size; /* Revert optimistic stats updates. */
arena->stats.allocated_huge -= size; arena->stats.allocated_huge -= usize;
arena->stats.nmalloc_huge--; arena->stats.nmalloc_huge--;
arena->stats.hstats[index].nmalloc--;
arena->stats.hstats[index].curhchunks--;
arena->stats.mapped -= usize;
malloc_mutex_unlock(&arena->lock); malloc_mutex_unlock(&arena->lock);
} }
} }
@ -534,21 +542,25 @@ arena_chunk_dalloc_internal(arena_t *arena, arena_chunk_t *chunk)
} }
void void
arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t size) arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize)
{ {
chunk_dalloc_t *chunk_dalloc; chunk_dalloc_t *chunk_dalloc;
malloc_mutex_lock(&arena->lock); malloc_mutex_lock(&arena->lock);
chunk_dalloc = arena->chunk_dalloc; chunk_dalloc = arena->chunk_dalloc;
if (config_stats) { if (config_stats) {
arena->stats.mapped -= size; index_t index = size2index(usize) - nlclasses - NBINS;
arena->stats.allocated_huge -= size;
arena->stats.ndalloc_huge++; arena->stats.ndalloc_huge++;
stats_cactive_sub(size); arena->stats.allocated_huge -= usize;
arena->stats.hstats[index].ndalloc++;
arena->stats.hstats[index].curhchunks--;
arena->stats.mapped -= usize;
stats_cactive_sub(usize);
} }
arena->nactive -= (size >> LG_PAGE); arena->nactive -= (usize >> LG_PAGE);
malloc_mutex_unlock(&arena->lock); malloc_mutex_unlock(&arena->lock);
chunk_dalloc(chunk, size, arena->ind); chunk_dalloc(chunk, usize, arena->ind);
} }
static void static void
@ -1300,9 +1312,9 @@ arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, index_t binind,
tbin->avail[nfill - 1 - i] = ptr; tbin->avail[nfill - 1 - i] = ptr;
} }
if (config_stats) { if (config_stats) {
bin->stats.allocated += i * arena_bin_info[binind].reg_size;
bin->stats.nmalloc += i; bin->stats.nmalloc += i;
bin->stats.nrequests += tbin->tstats.nrequests; bin->stats.nrequests += tbin->tstats.nrequests;
bin->stats.curregs += i;
bin->stats.nfills++; bin->stats.nfills++;
tbin->tstats.nrequests = 0; tbin->tstats.nrequests = 0;
} }
@ -1436,9 +1448,9 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero)
} }
if (config_stats) { if (config_stats) {
bin->stats.allocated += size;
bin->stats.nmalloc++; bin->stats.nmalloc++;
bin->stats.nrequests++; bin->stats.nrequests++;
bin->stats.curregs++;
} }
malloc_mutex_unlock(&bin->lock); malloc_mutex_unlock(&bin->lock);
if (config_prof && !isthreaded && arena_prof_accum(arena, size)) if (config_prof && !isthreaded && arena_prof_accum(arena, size))
@ -1678,7 +1690,6 @@ arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr,
arena_run_t *run; arena_run_t *run;
arena_bin_t *bin; arena_bin_t *bin;
arena_bin_info_t *bin_info; arena_bin_info_t *bin_info;
size_t size;
index_t binind; index_t binind;
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
@ -1687,8 +1698,6 @@ arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr,
binind = run->binind; binind = run->binind;
bin = &arena->bins[binind]; bin = &arena->bins[binind];
bin_info = &arena_bin_info[binind]; bin_info = &arena_bin_info[binind];
if (config_fill || config_stats)
size = bin_info->reg_size;
if (!junked && config_fill && unlikely(opt_junk)) if (!junked && config_fill && unlikely(opt_junk))
arena_dalloc_junk_small(ptr, bin_info); arena_dalloc_junk_small(ptr, bin_info);
@ -1701,8 +1710,8 @@ arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr,
arena_bin_lower_run(arena, chunk, run, bin); arena_bin_lower_run(arena, chunk, run, bin);
if (config_stats) { if (config_stats) {
bin->stats.allocated -= size;
bin->stats.ndalloc++; bin->stats.ndalloc++;
bin->stats.curregs--;
} }
} }
@ -2102,7 +2111,7 @@ arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec)
void void
arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive, arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats, size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
malloc_large_stats_t *lstats) malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats)
{ {
unsigned i; unsigned i;
@ -2122,7 +2131,6 @@ arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
astats->allocated_huge += arena->stats.allocated_huge; astats->allocated_huge += arena->stats.allocated_huge;
astats->nmalloc_huge += arena->stats.nmalloc_huge; astats->nmalloc_huge += arena->stats.nmalloc_huge;
astats->ndalloc_huge += arena->stats.ndalloc_huge; astats->ndalloc_huge += arena->stats.ndalloc_huge;
astats->nrequests_huge += arena->stats.nrequests_huge;
for (i = 0; i < nlclasses; i++) { for (i = 0; i < nlclasses; i++) {
lstats[i].nmalloc += arena->stats.lstats[i].nmalloc; lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
@ -2130,16 +2138,22 @@ arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
lstats[i].nrequests += arena->stats.lstats[i].nrequests; lstats[i].nrequests += arena->stats.lstats[i].nrequests;
lstats[i].curruns += arena->stats.lstats[i].curruns; lstats[i].curruns += arena->stats.lstats[i].curruns;
} }
for (i = 0; i < nhclasses; i++) {
hstats[i].nmalloc += arena->stats.hstats[i].nmalloc;
hstats[i].ndalloc += arena->stats.hstats[i].ndalloc;
hstats[i].curhchunks += arena->stats.hstats[i].curhchunks;
}
malloc_mutex_unlock(&arena->lock); malloc_mutex_unlock(&arena->lock);
for (i = 0; i < NBINS; i++) { for (i = 0; i < NBINS; i++) {
arena_bin_t *bin = &arena->bins[i]; arena_bin_t *bin = &arena->bins[i];
malloc_mutex_lock(&bin->lock); malloc_mutex_lock(&bin->lock);
bstats[i].allocated += bin->stats.allocated;
bstats[i].nmalloc += bin->stats.nmalloc; bstats[i].nmalloc += bin->stats.nmalloc;
bstats[i].ndalloc += bin->stats.ndalloc; bstats[i].ndalloc += bin->stats.ndalloc;
bstats[i].nrequests += bin->stats.nrequests; bstats[i].nrequests += bin->stats.nrequests;
bstats[i].curregs += bin->stats.curregs;
if (config_tcache) { if (config_tcache) {
bstats[i].nfills += bin->stats.nfills; bstats[i].nfills += bin->stats.nfills;
bstats[i].nflushes += bin->stats.nflushes; bstats[i].nflushes += bin->stats.nflushes;
@ -2159,12 +2173,13 @@ arena_new(unsigned ind)
arena_bin_t *bin; arena_bin_t *bin;
/* /*
* Allocate arena and arena->lstats contiguously, mainly because there * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly
* is no way to clean up if base_alloc() OOMs. * because there is no way to clean up if base_alloc() OOMs.
*/ */
if (config_stats) { if (config_stats) {
arena = (arena_t *)base_alloc(CACHELINE_CEILING(sizeof(arena_t)) arena = (arena_t *)base_alloc(CACHELINE_CEILING(sizeof(arena_t))
+ nlclasses * sizeof(malloc_large_stats_t)); + QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t) +
nhclasses) * sizeof(malloc_huge_stats_t));
} else } else
arena = (arena_t *)base_alloc(sizeof(arena_t)); arena = (arena_t *)base_alloc(sizeof(arena_t));
if (arena == NULL) if (arena == NULL)
@ -2184,6 +2199,11 @@ arena_new(unsigned ind)
CACHELINE_CEILING(sizeof(arena_t))); CACHELINE_CEILING(sizeof(arena_t)));
memset(arena->stats.lstats, 0, nlclasses * memset(arena->stats.lstats, 0, nlclasses *
sizeof(malloc_large_stats_t)); sizeof(malloc_large_stats_t));
arena->stats.hstats = (malloc_huge_stats_t *)(((void *)arena) +
CACHELINE_CEILING(sizeof(arena_t)) +
QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t)));
memset(arena->stats.hstats, 0, nhclasses *
sizeof(malloc_huge_stats_t));
if (config_tcache) if (config_tcache)
ql_new(&arena->tcache_ql); ql_new(&arena->tcache_ql);
} }
@ -2369,6 +2389,7 @@ arena_boot(void)
} }
assert(arena_maxclass > 0); assert(arena_maxclass > 0);
nlclasses = size2index(arena_maxclass) - size2index(SMALL_MAXCLASS); nlclasses = size2index(arena_maxclass) - size2index(SMALL_MAXCLASS);
nhclasses = NSIZES - nlclasses - NBINS;
bin_info_init(); bin_info_init();
} }

122
src/ctl.c
View File

@ -122,6 +122,8 @@ CTL_PROTO(arenas_bin_i_run_size)
INDEX_PROTO(arenas_bin_i) INDEX_PROTO(arenas_bin_i)
CTL_PROTO(arenas_lrun_i_size) CTL_PROTO(arenas_lrun_i_size)
INDEX_PROTO(arenas_lrun_i) INDEX_PROTO(arenas_lrun_i)
CTL_PROTO(arenas_hchunk_i_size)
INDEX_PROTO(arenas_hchunk_i)
CTL_PROTO(arenas_narenas) CTL_PROTO(arenas_narenas)
CTL_PROTO(arenas_initialized) CTL_PROTO(arenas_initialized)
CTL_PROTO(arenas_quantum) CTL_PROTO(arenas_quantum)
@ -130,6 +132,7 @@ CTL_PROTO(arenas_tcache_max)
CTL_PROTO(arenas_nbins) CTL_PROTO(arenas_nbins)
CTL_PROTO(arenas_nhbins) CTL_PROTO(arenas_nhbins)
CTL_PROTO(arenas_nlruns) CTL_PROTO(arenas_nlruns)
CTL_PROTO(arenas_nhchunks)
CTL_PROTO(arenas_extend) CTL_PROTO(arenas_extend)
CTL_PROTO(prof_thread_active_init) CTL_PROTO(prof_thread_active_init)
CTL_PROTO(prof_active) CTL_PROTO(prof_active)
@ -152,10 +155,10 @@ CTL_PROTO(stats_arenas_i_huge_allocated)
CTL_PROTO(stats_arenas_i_huge_nmalloc) CTL_PROTO(stats_arenas_i_huge_nmalloc)
CTL_PROTO(stats_arenas_i_huge_ndalloc) CTL_PROTO(stats_arenas_i_huge_ndalloc)
CTL_PROTO(stats_arenas_i_huge_nrequests) CTL_PROTO(stats_arenas_i_huge_nrequests)
CTL_PROTO(stats_arenas_i_bins_j_allocated)
CTL_PROTO(stats_arenas_i_bins_j_nmalloc) CTL_PROTO(stats_arenas_i_bins_j_nmalloc)
CTL_PROTO(stats_arenas_i_bins_j_ndalloc) CTL_PROTO(stats_arenas_i_bins_j_ndalloc)
CTL_PROTO(stats_arenas_i_bins_j_nrequests) CTL_PROTO(stats_arenas_i_bins_j_nrequests)
CTL_PROTO(stats_arenas_i_bins_j_curregs)
CTL_PROTO(stats_arenas_i_bins_j_nfills) CTL_PROTO(stats_arenas_i_bins_j_nfills)
CTL_PROTO(stats_arenas_i_bins_j_nflushes) CTL_PROTO(stats_arenas_i_bins_j_nflushes)
CTL_PROTO(stats_arenas_i_bins_j_nruns) CTL_PROTO(stats_arenas_i_bins_j_nruns)
@ -167,6 +170,11 @@ CTL_PROTO(stats_arenas_i_lruns_j_ndalloc)
CTL_PROTO(stats_arenas_i_lruns_j_nrequests) CTL_PROTO(stats_arenas_i_lruns_j_nrequests)
CTL_PROTO(stats_arenas_i_lruns_j_curruns) CTL_PROTO(stats_arenas_i_lruns_j_curruns)
INDEX_PROTO(stats_arenas_i_lruns_j) INDEX_PROTO(stats_arenas_i_lruns_j)
CTL_PROTO(stats_arenas_i_hchunks_j_nmalloc)
CTL_PROTO(stats_arenas_i_hchunks_j_ndalloc)
CTL_PROTO(stats_arenas_i_hchunks_j_nrequests)
CTL_PROTO(stats_arenas_i_hchunks_j_curhchunks)
INDEX_PROTO(stats_arenas_i_hchunks_j)
CTL_PROTO(stats_arenas_i_nthreads) CTL_PROTO(stats_arenas_i_nthreads)
CTL_PROTO(stats_arenas_i_dss) CTL_PROTO(stats_arenas_i_dss)
CTL_PROTO(stats_arenas_i_pactive) CTL_PROTO(stats_arenas_i_pactive)
@ -305,6 +313,17 @@ static const ctl_indexed_node_t arenas_lrun_node[] = {
{INDEX(arenas_lrun_i)} {INDEX(arenas_lrun_i)}
}; };
static const ctl_named_node_t arenas_hchunk_i_node[] = {
{NAME("size"), CTL(arenas_hchunk_i_size)}
};
static const ctl_named_node_t super_arenas_hchunk_i_node[] = {
{NAME(""), CHILD(named, arenas_hchunk_i)}
};
static const ctl_indexed_node_t arenas_hchunk_node[] = {
{INDEX(arenas_hchunk_i)}
};
static const ctl_named_node_t arenas_node[] = { static const ctl_named_node_t arenas_node[] = {
{NAME("narenas"), CTL(arenas_narenas)}, {NAME("narenas"), CTL(arenas_narenas)},
{NAME("initialized"), CTL(arenas_initialized)}, {NAME("initialized"), CTL(arenas_initialized)},
@ -316,6 +335,8 @@ static const ctl_named_node_t arenas_node[] = {
{NAME("bin"), CHILD(indexed, arenas_bin)}, {NAME("bin"), CHILD(indexed, arenas_bin)},
{NAME("nlruns"), CTL(arenas_nlruns)}, {NAME("nlruns"), CTL(arenas_nlruns)},
{NAME("lrun"), CHILD(indexed, arenas_lrun)}, {NAME("lrun"), CHILD(indexed, arenas_lrun)},
{NAME("nhchunks"), CTL(arenas_nhchunks)},
{NAME("hchunk"), CHILD(indexed, arenas_hchunk)},
{NAME("extend"), CTL(arenas_extend)} {NAME("extend"), CTL(arenas_extend)}
}; };
@ -352,14 +373,14 @@ static const ctl_named_node_t stats_arenas_i_huge_node[] = {
{NAME("allocated"), CTL(stats_arenas_i_huge_allocated)}, {NAME("allocated"), CTL(stats_arenas_i_huge_allocated)},
{NAME("nmalloc"), CTL(stats_arenas_i_huge_nmalloc)}, {NAME("nmalloc"), CTL(stats_arenas_i_huge_nmalloc)},
{NAME("ndalloc"), CTL(stats_arenas_i_huge_ndalloc)}, {NAME("ndalloc"), CTL(stats_arenas_i_huge_ndalloc)},
{NAME("nrequests"), CTL(stats_arenas_i_huge_nrequests)}, {NAME("nrequests"), CTL(stats_arenas_i_huge_nrequests)}
}; };
static const ctl_named_node_t stats_arenas_i_bins_j_node[] = { static const ctl_named_node_t stats_arenas_i_bins_j_node[] = {
{NAME("allocated"), CTL(stats_arenas_i_bins_j_allocated)},
{NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)}, {NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)},
{NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)}, {NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)},
{NAME("nrequests"), CTL(stats_arenas_i_bins_j_nrequests)}, {NAME("nrequests"), CTL(stats_arenas_i_bins_j_nrequests)},
{NAME("curregs"), CTL(stats_arenas_i_bins_j_curregs)},
{NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)}, {NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)},
{NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)}, {NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)},
{NAME("nruns"), CTL(stats_arenas_i_bins_j_nruns)}, {NAME("nruns"), CTL(stats_arenas_i_bins_j_nruns)},
@ -388,6 +409,20 @@ static const ctl_indexed_node_t stats_arenas_i_lruns_node[] = {
{INDEX(stats_arenas_i_lruns_j)} {INDEX(stats_arenas_i_lruns_j)}
}; };
static const ctl_named_node_t stats_arenas_i_hchunks_j_node[] = {
{NAME("nmalloc"), CTL(stats_arenas_i_hchunks_j_nmalloc)},
{NAME("ndalloc"), CTL(stats_arenas_i_hchunks_j_ndalloc)},
{NAME("nrequests"), CTL(stats_arenas_i_hchunks_j_nrequests)},
{NAME("curhchunks"), CTL(stats_arenas_i_hchunks_j_curhchunks)}
};
static const ctl_named_node_t super_stats_arenas_i_hchunks_j_node[] = {
{NAME(""), CHILD(named, stats_arenas_i_hchunks_j)}
};
static const ctl_indexed_node_t stats_arenas_i_hchunks_node[] = {
{INDEX(stats_arenas_i_hchunks_j)}
};
static const ctl_named_node_t stats_arenas_i_node[] = { static const ctl_named_node_t stats_arenas_i_node[] = {
{NAME("nthreads"), CTL(stats_arenas_i_nthreads)}, {NAME("nthreads"), CTL(stats_arenas_i_nthreads)},
{NAME("dss"), CTL(stats_arenas_i_dss)}, {NAME("dss"), CTL(stats_arenas_i_dss)},
@ -401,7 +436,8 @@ static const ctl_named_node_t stats_arenas_i_node[] = {
{NAME("large"), CHILD(named, stats_arenas_i_large)}, {NAME("large"), CHILD(named, stats_arenas_i_large)},
{NAME("huge"), CHILD(named, stats_arenas_i_huge)}, {NAME("huge"), CHILD(named, stats_arenas_i_huge)},
{NAME("bins"), CHILD(indexed, stats_arenas_i_bins)}, {NAME("bins"), CHILD(indexed, stats_arenas_i_bins)},
{NAME("lruns"), CHILD(indexed, stats_arenas_i_lruns)} {NAME("lruns"), CHILD(indexed, stats_arenas_i_lruns)},
{NAME("hchunks"), CHILD(indexed, stats_arenas_i_hchunks)}
}; };
static const ctl_named_node_t super_stats_arenas_i_node[] = { static const ctl_named_node_t super_stats_arenas_i_node[] = {
{NAME(""), CHILD(named, stats_arenas_i)} {NAME(""), CHILD(named, stats_arenas_i)}
@ -453,6 +489,13 @@ ctl_arena_init(ctl_arena_stats_t *astats)
return (true); return (true);
} }
if (astats->hstats == NULL) {
astats->hstats = (malloc_huge_stats_t *)a0malloc(nhclasses *
sizeof(malloc_huge_stats_t));
if (astats->hstats == NULL)
return (true);
}
return (false); return (false);
} }
@ -472,6 +515,8 @@ ctl_arena_clear(ctl_arena_stats_t *astats)
memset(astats->bstats, 0, NBINS * sizeof(malloc_bin_stats_t)); memset(astats->bstats, 0, NBINS * sizeof(malloc_bin_stats_t));
memset(astats->lstats, 0, nlclasses * memset(astats->lstats, 0, nlclasses *
sizeof(malloc_large_stats_t)); sizeof(malloc_large_stats_t));
memset(astats->hstats, 0, nhclasses *
sizeof(malloc_huge_stats_t));
} }
} }
@ -481,10 +526,12 @@ ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena)
unsigned i; unsigned i;
arena_stats_merge(arena, &cstats->dss, &cstats->pactive, arena_stats_merge(arena, &cstats->dss, &cstats->pactive,
&cstats->pdirty, &cstats->astats, cstats->bstats, cstats->lstats); &cstats->pdirty, &cstats->astats, cstats->bstats, cstats->lstats,
cstats->hstats);
for (i = 0; i < NBINS; i++) { for (i = 0; i < NBINS; i++) {
cstats->allocated_small += cstats->bstats[i].allocated; cstats->allocated_small += cstats->bstats[i].curregs *
index2size(i);
cstats->nmalloc_small += cstats->bstats[i].nmalloc; cstats->nmalloc_small += cstats->bstats[i].nmalloc;
cstats->ndalloc_small += cstats->bstats[i].ndalloc; cstats->ndalloc_small += cstats->bstats[i].ndalloc;
cstats->nrequests_small += cstats->bstats[i].nrequests; cstats->nrequests_small += cstats->bstats[i].nrequests;
@ -517,20 +564,12 @@ ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
sstats->astats.allocated_huge += astats->astats.allocated_huge; sstats->astats.allocated_huge += astats->astats.allocated_huge;
sstats->astats.nmalloc_huge += astats->astats.nmalloc_huge; sstats->astats.nmalloc_huge += astats->astats.nmalloc_huge;
sstats->astats.ndalloc_huge += astats->astats.ndalloc_huge; sstats->astats.ndalloc_huge += astats->astats.ndalloc_huge;
sstats->astats.nrequests_huge += astats->astats.nrequests_huge;
for (i = 0; i < nlclasses; i++) {
sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc;
sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc;
sstats->lstats[i].nrequests += astats->lstats[i].nrequests;
sstats->lstats[i].curruns += astats->lstats[i].curruns;
}
for (i = 0; i < NBINS; i++) { for (i = 0; i < NBINS; i++) {
sstats->bstats[i].allocated += astats->bstats[i].allocated;
sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc; sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc; sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
sstats->bstats[i].nrequests += astats->bstats[i].nrequests; sstats->bstats[i].nrequests += astats->bstats[i].nrequests;
sstats->bstats[i].curregs += astats->bstats[i].curregs;
if (config_tcache) { if (config_tcache) {
sstats->bstats[i].nfills += astats->bstats[i].nfills; sstats->bstats[i].nfills += astats->bstats[i].nfills;
sstats->bstats[i].nflushes += sstats->bstats[i].nflushes +=
@ -540,6 +579,19 @@ ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
sstats->bstats[i].reruns += astats->bstats[i].reruns; sstats->bstats[i].reruns += astats->bstats[i].reruns;
sstats->bstats[i].curruns += astats->bstats[i].curruns; sstats->bstats[i].curruns += astats->bstats[i].curruns;
} }
for (i = 0; i < nlclasses; i++) {
sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc;
sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc;
sstats->lstats[i].nrequests += astats->lstats[i].nrequests;
sstats->lstats[i].curruns += astats->lstats[i].curruns;
}
for (i = 0; i < nhclasses; i++) {
sstats->hstats[i].nmalloc += astats->hstats[i].nmalloc;
sstats->hstats[i].ndalloc += astats->hstats[i].ndalloc;
sstats->hstats[i].curhchunks += astats->hstats[i].curhchunks;
}
} }
static void static void
@ -692,6 +744,8 @@ ctl_init(void)
for (j = 0; j < i; j++) { for (j = 0; j < i; j++) {
a0free( a0free(
ctl_stats.arenas[j].lstats); ctl_stats.arenas[j].lstats);
a0free(
ctl_stats.arenas[j].hstats);
} }
a0free(ctl_stats.arenas); a0free(ctl_stats.arenas);
ctl_stats.arenas = NULL; ctl_stats.arenas = NULL;
@ -1600,7 +1654,7 @@ arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
return (super_arenas_bin_i_node); return (super_arenas_bin_i_node);
} }
CTL_RO_NL_GEN(arenas_nlruns, nlclasses, size_t) CTL_RO_NL_GEN(arenas_nlruns, nlclasses, unsigned)
CTL_RO_NL_GEN(arenas_lrun_i_size, index2size(NBINS+mib[2]), size_t) CTL_RO_NL_GEN(arenas_lrun_i_size, index2size(NBINS+mib[2]), size_t)
static const ctl_named_node_t * static const ctl_named_node_t *
arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i) arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
@ -1611,6 +1665,17 @@ arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
return (super_arenas_lrun_i_node); return (super_arenas_lrun_i_node);
} }
CTL_RO_NL_GEN(arenas_nhchunks, nhclasses, unsigned)
CTL_RO_NL_GEN(arenas_hchunk_i_size, index2size(NBINS+nlclasses+mib[2]), size_t)
static const ctl_named_node_t *
arenas_hchunk_i_index(const size_t *mib, size_t miblen, size_t i)
{
if (i > nhclasses)
return (NULL);
return (super_arenas_hchunk_i_node);
}
static int static int
arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen) void *newp, size_t newlen)
@ -1784,16 +1849,16 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nmalloc,
CTL_RO_CGEN(config_stats, stats_arenas_i_huge_ndalloc, CTL_RO_CGEN(config_stats, stats_arenas_i_huge_ndalloc,
ctl_stats.arenas[mib[2]].astats.ndalloc_huge, uint64_t) ctl_stats.arenas[mib[2]].astats.ndalloc_huge, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nrequests, CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nrequests,
ctl_stats.arenas[mib[2]].astats.nrequests_huge, uint64_t) ctl_stats.arenas[mib[2]].astats.nmalloc_huge, uint64_t) /* Intentional. */
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_allocated,
ctl_stats.arenas[mib[2]].bstats[mib[4]].allocated, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc, CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc,
ctl_stats.arenas[mib[2]].bstats[mib[4]].nmalloc, uint64_t) ctl_stats.arenas[mib[2]].bstats[mib[4]].nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc, CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc,
ctl_stats.arenas[mib[2]].bstats[mib[4]].ndalloc, uint64_t) ctl_stats.arenas[mib[2]].bstats[mib[4]].ndalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests, CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests,
ctl_stats.arenas[mib[2]].bstats[mib[4]].nrequests, uint64_t) ctl_stats.arenas[mib[2]].bstats[mib[4]].nrequests, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curregs,
ctl_stats.arenas[mib[2]].bstats[mib[4]].curregs, size_t)
CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nfills, CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nfills,
ctl_stats.arenas[mib[2]].bstats[mib[4]].nfills, uint64_t) ctl_stats.arenas[mib[2]].bstats[mib[4]].nfills, uint64_t)
CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nflushes, CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nflushes,
@ -1832,6 +1897,25 @@ stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j)
return (super_stats_arenas_i_lruns_j_node); return (super_stats_arenas_i_lruns_j_node);
} }
CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_nmalloc,
ctl_stats.arenas[mib[2]].hstats[mib[4]].nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_ndalloc,
ctl_stats.arenas[mib[2]].hstats[mib[4]].ndalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_nrequests,
ctl_stats.arenas[mib[2]].hstats[mib[4]].nmalloc, /* Intentional. */
uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_curhchunks,
ctl_stats.arenas[mib[2]].hstats[mib[4]].curhchunks, size_t)
static const ctl_named_node_t *
stats_arenas_i_hchunks_j_index(const size_t *mib, size_t miblen, size_t j)
{
if (j > nhclasses)
return (NULL);
return (super_stats_arenas_i_hchunks_j_node);
}
static const ctl_named_node_t * static const ctl_named_node_t *
stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i) stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i)
{ {

View File

@ -104,6 +104,101 @@ huge_dalloc_junk(void *ptr, size_t usize)
huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl); huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
#endif #endif
static void
huge_ralloc_no_move_stats_update(arena_t *arena, size_t oldsize, size_t usize)
{
index_t oldindex = size2index(oldsize) - nlclasses - NBINS;
index_t index = size2index(usize) - nlclasses - NBINS;
cassert(config_stats);
arena->stats.ndalloc_huge++;
arena->stats.allocated_huge -= oldsize;
arena->stats.hstats[oldindex].ndalloc++;
arena->stats.hstats[oldindex].curhchunks--;
arena->stats.nmalloc_huge++;
arena->stats.allocated_huge += usize;
arena->stats.hstats[index].nmalloc++;
arena->stats.hstats[index].curhchunks++;
}
static void
huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize,
size_t size, size_t extra, bool zero)
{
size_t usize_next;
extent_node_t *node, key;
arena_t *arena;
/* Increase usize to incorporate extra. */
while (usize < s2u(size+extra) && (usize_next = s2u(usize+1)) < oldsize)
usize = usize_next;
malloc_mutex_lock(&huge_mtx);
key.addr = ptr;
node = extent_tree_ad_search(&huge, &key);
assert(node != NULL);
assert(node->addr == ptr);
arena = node->arena;
/* Update the size of the huge allocation if it changed. */
if (oldsize != usize) {
assert(node->size != usize);
node->size = usize;
}
malloc_mutex_unlock(&huge_mtx);
/* Fill if necessary. */
if (oldsize < usize) {
if (zero || (config_fill && unlikely(opt_zero)))
memset(ptr + oldsize, 0, usize - oldsize);
else if (config_fill && unlikely(opt_junk))
memset(ptr + oldsize, 0xa5, usize - oldsize);
} else if (config_fill && unlikely(opt_junk) && oldsize > usize)
memset(ptr + usize, 0x5a, oldsize - usize);
if (config_stats)
huge_ralloc_no_move_stats_update(arena, oldsize, usize);
}
static void
huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
{
extent_node_t *node, key;
arena_t *arena;
void *excess_addr;
size_t excess_size;
malloc_mutex_lock(&huge_mtx);
key.addr = ptr;
node = extent_tree_ad_search(&huge, &key);
assert(node != NULL);
assert(node->addr == ptr);
arena = node->arena;
/* Update the size of the huge allocation. */
node->size = usize;
malloc_mutex_unlock(&huge_mtx);
excess_addr = node->addr + CHUNK_CEILING(usize);
excess_size = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
/* Zap the excess chunks. */
huge_dalloc_junk(ptr + usize, oldsize - usize);
if (excess_size > 0)
arena_chunk_dalloc_huge(arena, excess_addr, excess_size);
if (config_stats)
huge_ralloc_no_move_stats_update(arena, oldsize, usize);
}
static bool static bool
huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t size, bool zero) { huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t size, bool zero) {
size_t usize; size_t usize;
@ -131,7 +226,6 @@ huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t size, bool zero) {
assert(node != NULL); assert(node != NULL);
assert(node->addr == ptr); assert(node->addr == ptr);
/* Find the current arena. */
arena = node->arena; arena = node->arena;
malloc_mutex_unlock(&huge_mtx); malloc_mutex_unlock(&huge_mtx);
@ -159,6 +253,10 @@ huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t size, bool zero) {
else if (unlikely(opt_zero) && !is_zeroed) else if (unlikely(opt_zero) && !is_zeroed)
memset(ptr + oldsize, 0, usize - oldsize); memset(ptr + oldsize, 0, usize - oldsize);
} }
if (config_stats)
huge_ralloc_no_move_stats_update(arena, oldsize, usize);
return (false); return (false);
} }
@ -185,78 +283,20 @@ huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
*/ */
if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize) if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize)
&& CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) { && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
size_t usize_next; huge_ralloc_no_move_similar(ptr, oldsize, usize, size, extra,
zero);
/* Increase usize to incorporate extra. */
while (usize < s2u(size+extra) && (usize_next = s2u(usize+1)) <
oldsize)
usize = usize_next;
/* Update the size of the huge allocation if it changed. */
if (oldsize != usize) {
extent_node_t *node, key;
malloc_mutex_lock(&huge_mtx);
key.addr = ptr;
node = extent_tree_ad_search(&huge, &key);
assert(node != NULL);
assert(node->addr == ptr);
assert(node->size != usize);
node->size = usize;
malloc_mutex_unlock(&huge_mtx);
if (oldsize < usize) {
if (zero || (config_fill &&
unlikely(opt_zero))) {
memset(ptr + oldsize, 0, usize -
oldsize);
} else if (config_fill && unlikely(opt_junk)) {
memset(ptr + oldsize, 0xa5, usize -
oldsize);
}
} else if (config_fill && unlikely(opt_junk) && oldsize
> usize)
memset(ptr + usize, 0x5a, oldsize - usize);
}
return (false); return (false);
} }
/* Shrink the allocation in-place. */ /* Shrink the allocation in-place. */
if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize)) { if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize)) {
extent_node_t *node, key; huge_ralloc_no_move_shrink(ptr, oldsize, usize);
void *excess_addr;
size_t excess_size;
malloc_mutex_lock(&huge_mtx);
key.addr = ptr;
node = extent_tree_ad_search(&huge, &key);
assert(node != NULL);
assert(node->addr == ptr);
/* Update the size of the huge allocation. */
node->size = usize;
malloc_mutex_unlock(&huge_mtx);
excess_addr = node->addr + CHUNK_CEILING(usize);
excess_size = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
/* Zap the excess chunks. */
huge_dalloc_junk(ptr + usize, oldsize - usize);
if (excess_size > 0) {
arena_chunk_dalloc_huge(node->arena, excess_addr,
excess_size);
}
return (false); return (false);
} }
/* Attempt to expand the allocation in-place. */ /* Attempt to expand the allocation in-place. */
if (huge_ralloc_no_move_expand(ptr, oldsize, size + extra, zero)) { if (huge_ralloc_no_move_expand(ptr, oldsize, size + extra,
zero)) {
if (extra == 0) if (extra == 0)
return (true); return (true);

View File

@ -48,8 +48,10 @@ static void stats_arena_bins_print(void (*write_cb)(void *, const char *),
void *cbopaque, unsigned i); void *cbopaque, unsigned i);
static void stats_arena_lruns_print(void (*write_cb)(void *, const char *), static void stats_arena_lruns_print(void (*write_cb)(void *, const char *),
void *cbopaque, unsigned i); void *cbopaque, unsigned i);
static void stats_arena_hchunks_print(
void (*write_cb)(void *, const char *), void *cbopaque, unsigned i);
static void stats_arena_print(void (*write_cb)(void *, const char *), static void stats_arena_print(void (*write_cb)(void *, const char *),
void *cbopaque, unsigned i, bool bins, bool large); void *cbopaque, unsigned i, bool bins, bool large, bool huge);
/******************************************************************************/ /******************************************************************************/
@ -58,62 +60,55 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
unsigned i) unsigned i)
{ {
size_t page; size_t page;
bool config_tcache; bool config_tcache, in_gap;
unsigned nbins, j, gap_start; unsigned nbins, j;
CTL_GET("arenas.page", &page, size_t); CTL_GET("arenas.page", &page, size_t);
CTL_GET("config.tcache", &config_tcache, bool); CTL_GET("config.tcache", &config_tcache, bool);
if (config_tcache) { if (config_tcache) {
malloc_cprintf(write_cb, cbopaque, malloc_cprintf(write_cb, cbopaque,
"bins: bin size regs pgs allocated nmalloc" "bins: size ind allocated nmalloc"
" ndalloc nrequests nfills nflushes" " ndalloc nrequests curregs regs pgs"
" newruns reruns curruns\n"); " nfills nflushes newruns reruns"
" curruns\n");
} else { } else {
malloc_cprintf(write_cb, cbopaque, malloc_cprintf(write_cb, cbopaque,
"bins: bin size regs pgs allocated nmalloc" "bins: size ind allocated nmalloc"
" ndalloc newruns reruns curruns\n"); " ndalloc nrequests curregs regs pgs"
" newruns reruns curruns\n");
} }
CTL_GET("arenas.nbins", &nbins, unsigned); CTL_GET("arenas.nbins", &nbins, unsigned);
for (j = 0, gap_start = UINT_MAX; j < nbins; j++) { for (j = 0, in_gap = false; j < nbins; j++) {
uint64_t nruns; uint64_t nruns;
CTL_IJ_GET("stats.arenas.0.bins.0.nruns", &nruns, uint64_t); CTL_IJ_GET("stats.arenas.0.bins.0.nruns", &nruns, uint64_t);
if (nruns == 0) { if (nruns == 0)
if (gap_start == UINT_MAX) in_gap = true;
gap_start = j; else {
} else { size_t reg_size, run_size, curregs;
size_t reg_size, run_size, allocated;
uint32_t nregs; uint32_t nregs;
uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes; uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes;
uint64_t reruns; uint64_t reruns;
size_t curruns; size_t curruns;
if (gap_start != UINT_MAX) { if (in_gap) {
if (j > gap_start + 1) {
/* Gap of more than one size class. */
malloc_cprintf(write_cb, cbopaque, malloc_cprintf(write_cb, cbopaque,
"[%u..%u]\n", gap_start, " ---\n");
j - 1); in_gap = false;
} else {
/* Gap of one size class. */
malloc_cprintf(write_cb, cbopaque,
"[%u]\n", gap_start);
}
gap_start = UINT_MAX;
} }
CTL_J_GET("arenas.bin.0.size", &reg_size, size_t); CTL_J_GET("arenas.bin.0.size", &reg_size, size_t);
CTL_J_GET("arenas.bin.0.nregs", &nregs, uint32_t); CTL_J_GET("arenas.bin.0.nregs", &nregs, uint32_t);
CTL_J_GET("arenas.bin.0.run_size", &run_size, size_t); CTL_J_GET("arenas.bin.0.run_size", &run_size, size_t);
CTL_IJ_GET("stats.arenas.0.bins.0.allocated",
&allocated, size_t);
CTL_IJ_GET("stats.arenas.0.bins.0.nmalloc", CTL_IJ_GET("stats.arenas.0.bins.0.nmalloc",
&nmalloc, uint64_t); &nmalloc, uint64_t);
CTL_IJ_GET("stats.arenas.0.bins.0.ndalloc", CTL_IJ_GET("stats.arenas.0.bins.0.ndalloc",
&ndalloc, uint64_t); &ndalloc, uint64_t);
if (config_tcache) { CTL_IJ_GET("stats.arenas.0.bins.0.curregs",
&curregs, size_t);
CTL_IJ_GET("stats.arenas.0.bins.0.nrequests", CTL_IJ_GET("stats.arenas.0.bins.0.nrequests",
&nrequests, uint64_t); &nrequests, uint64_t);
if (config_tcache) {
CTL_IJ_GET("stats.arenas.0.bins.0.nfills", CTL_IJ_GET("stats.arenas.0.bins.0.nfills",
&nfills, uint64_t); &nfills, uint64_t);
CTL_IJ_GET("stats.arenas.0.bins.0.nflushes", CTL_IJ_GET("stats.arenas.0.bins.0.nflushes",
@ -125,33 +120,28 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
size_t); size_t);
if (config_tcache) { if (config_tcache) {
malloc_cprintf(write_cb, cbopaque, malloc_cprintf(write_cb, cbopaque,
"%13u %5zu %4u %3zu %12zu %12"PRIu64 "%20zu %3u %12zu %12"PRIu64" %12"PRIu64
" %12"PRIu64" %12"PRIu64" %12"PRIu64 " %12"PRIu64" %12zu %4u %3zu %12"PRIu64
" %12"PRIu64" %12"PRIu64" %12"PRIu64 " %12"PRIu64" %12"PRIu64" %12"PRIu64
" %12zu\n", " %12zu\n",
j, reg_size, nregs, run_size / page, reg_size, j, curregs * reg_size, nmalloc,
allocated, nmalloc, ndalloc, nrequests, ndalloc, nrequests, curregs, nregs, run_size
nfills, nflushes, nruns, reruns, curruns); / page, nfills, nflushes, nruns, reruns,
curruns);
} else { } else {
malloc_cprintf(write_cb, cbopaque, malloc_cprintf(write_cb, cbopaque,
"%13u %5zu %4u %3zu %12zu %12"PRIu64 "%20zu %3u %12zu %12"PRIu64" %12"PRIu64
" %12"PRIu64" %12"PRIu64" %12"PRIu64 " %12"PRIu64" %12zu %4u %3zu %12"PRIu64
" %12zu\n", " %12"PRIu64" %12zu\n",
j, reg_size, nregs, run_size / page, reg_size, j, curregs * reg_size, nmalloc,
allocated, nmalloc, ndalloc, nruns, reruns, ndalloc, nrequests, curregs, nregs,
curruns); run_size / page, nruns, reruns, curruns);
} }
} }
} }
if (gap_start != UINT_MAX) { if (in_gap) {
if (j > gap_start + 1) { malloc_cprintf(write_cb, cbopaque,
/* Gap of more than one size class. */ " ---\n");
malloc_cprintf(write_cb, cbopaque, "[%u..%u]\n",
gap_start, j - 1);
} else {
/* Gap of one size class. */
malloc_cprintf(write_cb, cbopaque, "[%u]\n", gap_start);
}
} }
} }
@ -159,16 +149,15 @@ static void
stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque, stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque,
unsigned i) unsigned i)
{ {
size_t page, nlruns, j; unsigned nbins, nlruns, j;
ssize_t gap_start; bool in_gap;
CTL_GET("arenas.page", &page, size_t);
malloc_cprintf(write_cb, cbopaque, malloc_cprintf(write_cb, cbopaque,
"large: size pages nmalloc ndalloc nrequests" "large: size ind allocated nmalloc ndalloc"
" curruns\n"); " nrequests curruns\n");
CTL_GET("arenas.nlruns", &nlruns, size_t); CTL_GET("arenas.nbins", &nbins, unsigned);
for (j = 0, gap_start = -1; j < nlruns; j++) { CTL_GET("arenas.nlruns", &nlruns, unsigned);
for (j = 0, in_gap = false; j < nlruns; j++) {
uint64_t nmalloc, ndalloc, nrequests; uint64_t nmalloc, ndalloc, nrequests;
size_t run_size, curruns; size_t run_size, curruns;
@ -178,32 +167,82 @@ stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque,
uint64_t); uint64_t);
CTL_IJ_GET("stats.arenas.0.lruns.0.nrequests", &nrequests, CTL_IJ_GET("stats.arenas.0.lruns.0.nrequests", &nrequests,
uint64_t); uint64_t);
if (nrequests == 0) { if (nrequests == 0)
if (gap_start == -1) in_gap = true;
gap_start = j; else {
} else {
CTL_J_GET("arenas.lrun.0.size", &run_size, size_t); CTL_J_GET("arenas.lrun.0.size", &run_size, size_t);
CTL_IJ_GET("stats.arenas.0.lruns.0.curruns", &curruns, CTL_IJ_GET("stats.arenas.0.lruns.0.curruns", &curruns,
size_t); size_t);
if (gap_start != -1) { if (in_gap) {
malloc_cprintf(write_cb, cbopaque, "[%zu]\n", malloc_cprintf(write_cb, cbopaque,
j - gap_start); " ---\n");
gap_start = -1; in_gap = false;
} }
malloc_cprintf(write_cb, cbopaque, malloc_cprintf(write_cb, cbopaque,
"%13zu %5zu %12"PRIu64" %12"PRIu64" %12"PRIu64 "%20zu %3u %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64
" %12zu\n", " %12zu\n",
run_size, run_size / page, nmalloc, ndalloc, run_size, nbins + j, curruns * run_size, nmalloc,
nrequests, curruns); ndalloc, nrequests, curruns);
} }
} }
if (gap_start != -1) if (in_gap) {
malloc_cprintf(write_cb, cbopaque, "[%zu]\n", j - gap_start); malloc_cprintf(write_cb, cbopaque,
" ---\n");
}
}
static void
stats_arena_hchunks_print(void (*write_cb)(void *, const char *),
void *cbopaque, unsigned i)
{
unsigned nbins, nlruns, nhchunks, j;
bool in_gap;
malloc_cprintf(write_cb, cbopaque,
"huge: size ind allocated nmalloc ndalloc"
" nrequests curhchunks\n");
CTL_GET("arenas.nbins", &nbins, unsigned);
CTL_GET("arenas.nlruns", &nlruns, unsigned);
CTL_GET("arenas.nhchunks", &nhchunks, unsigned);
for (j = 0, in_gap = false; j < nhchunks; j++) {
uint64_t nmalloc, ndalloc, nrequests;
size_t hchunk_size, curhchunks;
CTL_IJ_GET("stats.arenas.0.hchunks.0.nmalloc", &nmalloc,
uint64_t);
CTL_IJ_GET("stats.arenas.0.hchunks.0.ndalloc", &ndalloc,
uint64_t);
CTL_IJ_GET("stats.arenas.0.hchunks.0.nrequests", &nrequests,
uint64_t);
if (nrequests == 0)
in_gap = true;
else {
CTL_J_GET("arenas.hchunk.0.size", &hchunk_size,
size_t);
CTL_IJ_GET("stats.arenas.0.hchunks.0.curhchunks",
&curhchunks, size_t);
if (in_gap) {
malloc_cprintf(write_cb, cbopaque,
" ---\n");
in_gap = false;
}
malloc_cprintf(write_cb, cbopaque,
"%20zu %3u %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64
" %12zu\n",
hchunk_size, nbins + nlruns + j,
curhchunks * hchunk_size, nmalloc, ndalloc,
nrequests, curhchunks);
}
}
if (in_gap) {
malloc_cprintf(write_cb, cbopaque,
" ---\n");
}
} }
static void static void
stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque, stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
unsigned i, bool bins, bool large) unsigned i, bool bins, bool large, bool huge)
{ {
unsigned nthreads; unsigned nthreads;
const char *dss; const char *dss;
@ -236,42 +275,51 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
nmadvise, nmadvise == 1 ? "" : "s", purged); nmadvise, nmadvise == 1 ? "" : "s", purged);
malloc_cprintf(write_cb, cbopaque, malloc_cprintf(write_cb, cbopaque,
" allocated nmalloc ndalloc nrequests\n"); " allocated nmalloc ndalloc"
" nrequests\n");
CTL_I_GET("stats.arenas.0.small.allocated", &small_allocated, size_t); CTL_I_GET("stats.arenas.0.small.allocated", &small_allocated, size_t);
CTL_I_GET("stats.arenas.0.small.nmalloc", &small_nmalloc, uint64_t); CTL_I_GET("stats.arenas.0.small.nmalloc", &small_nmalloc, uint64_t);
CTL_I_GET("stats.arenas.0.small.ndalloc", &small_ndalloc, uint64_t); CTL_I_GET("stats.arenas.0.small.ndalloc", &small_ndalloc, uint64_t);
CTL_I_GET("stats.arenas.0.small.nrequests", &small_nrequests, uint64_t); CTL_I_GET("stats.arenas.0.small.nrequests", &small_nrequests, uint64_t);
malloc_cprintf(write_cb, cbopaque, malloc_cprintf(write_cb, cbopaque,
"small: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n", "small: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64
"\n",
small_allocated, small_nmalloc, small_ndalloc, small_nrequests); small_allocated, small_nmalloc, small_ndalloc, small_nrequests);
CTL_I_GET("stats.arenas.0.large.allocated", &large_allocated, size_t); CTL_I_GET("stats.arenas.0.large.allocated", &large_allocated, size_t);
CTL_I_GET("stats.arenas.0.large.nmalloc", &large_nmalloc, uint64_t); CTL_I_GET("stats.arenas.0.large.nmalloc", &large_nmalloc, uint64_t);
CTL_I_GET("stats.arenas.0.large.ndalloc", &large_ndalloc, uint64_t); CTL_I_GET("stats.arenas.0.large.ndalloc", &large_ndalloc, uint64_t);
CTL_I_GET("stats.arenas.0.large.nrequests", &large_nrequests, uint64_t); CTL_I_GET("stats.arenas.0.large.nrequests", &large_nrequests, uint64_t);
malloc_cprintf(write_cb, cbopaque, malloc_cprintf(write_cb, cbopaque,
"large: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n", "large: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64
"\n",
large_allocated, large_nmalloc, large_ndalloc, large_nrequests); large_allocated, large_nmalloc, large_ndalloc, large_nrequests);
CTL_I_GET("stats.arenas.0.huge.allocated", &huge_allocated, size_t); CTL_I_GET("stats.arenas.0.huge.allocated", &huge_allocated, size_t);
CTL_I_GET("stats.arenas.0.huge.nmalloc", &huge_nmalloc, uint64_t); CTL_I_GET("stats.arenas.0.huge.nmalloc", &huge_nmalloc, uint64_t);
CTL_I_GET("stats.arenas.0.huge.ndalloc", &huge_ndalloc, uint64_t); CTL_I_GET("stats.arenas.0.huge.ndalloc", &huge_ndalloc, uint64_t);
CTL_I_GET("stats.arenas.0.huge.nrequests", &huge_nrequests, uint64_t); CTL_I_GET("stats.arenas.0.huge.nrequests", &huge_nrequests, uint64_t);
malloc_cprintf(write_cb, cbopaque, malloc_cprintf(write_cb, cbopaque,
"huge: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n", "huge: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64
"\n",
huge_allocated, huge_nmalloc, huge_ndalloc, huge_nrequests); huge_allocated, huge_nmalloc, huge_ndalloc, huge_nrequests);
malloc_cprintf(write_cb, cbopaque, malloc_cprintf(write_cb, cbopaque,
"total: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n", "total: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64
"\n",
small_allocated + large_allocated + huge_allocated, small_allocated + large_allocated + huge_allocated,
small_nmalloc + large_nmalloc + huge_nmalloc, small_nmalloc + large_nmalloc + huge_nmalloc,
small_ndalloc + large_ndalloc + huge_ndalloc, small_ndalloc + large_ndalloc + huge_ndalloc,
small_nrequests + large_nrequests + huge_nrequests); small_nrequests + large_nrequests + huge_nrequests);
malloc_cprintf(write_cb, cbopaque, "active: %12zu\n", pactive * page); malloc_cprintf(write_cb, cbopaque, "active: %12zu\n",
pactive * page);
CTL_I_GET("stats.arenas.0.mapped", &mapped, size_t); CTL_I_GET("stats.arenas.0.mapped", &mapped, size_t);
malloc_cprintf(write_cb, cbopaque, "mapped: %12zu\n", mapped); malloc_cprintf(write_cb, cbopaque, "mapped: %12zu\n",
mapped);
if (bins) if (bins)
stats_arena_bins_print(write_cb, cbopaque, i); stats_arena_bins_print(write_cb, cbopaque, i);
if (large) if (large)
stats_arena_lruns_print(write_cb, cbopaque, i); stats_arena_lruns_print(write_cb, cbopaque, i);
if (huge)
stats_arena_hchunks_print(write_cb, cbopaque, i);
} }
void void
@ -286,6 +334,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
bool unmerged = true; bool unmerged = true;
bool bins = true; bool bins = true;
bool large = true; bool large = true;
bool huge = true;
/* /*
* Refresh stats, in case mallctl() was called by the application. * Refresh stats, in case mallctl() was called by the application.
@ -328,6 +377,9 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
case 'l': case 'l':
large = false; large = false;
break; break;
case 'h':
huge = false;
break;
default:; default:;
} }
} }
@ -515,7 +567,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
malloc_cprintf(write_cb, cbopaque, malloc_cprintf(write_cb, cbopaque,
"\nMerged arenas stats:\n"); "\nMerged arenas stats:\n");
stats_arena_print(write_cb, cbopaque, stats_arena_print(write_cb, cbopaque,
narenas, bins, large); narenas, bins, large, huge);
} }
} }
} }
@ -541,7 +593,8 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
cbopaque, cbopaque,
"\narenas[%u]:\n", i); "\narenas[%u]:\n", i);
stats_arena_print(write_cb, stats_arena_print(write_cb,
cbopaque, i, bins, large); cbopaque, i, bins, large,
huge);
} }
} }
} }

View File

@ -321,7 +321,8 @@ TEST_BEGIN(test_arenas_constants)
TEST_ARENAS_CONSTANT(size_t, quantum, QUANTUM); TEST_ARENAS_CONSTANT(size_t, quantum, QUANTUM);
TEST_ARENAS_CONSTANT(size_t, page, PAGE); TEST_ARENAS_CONSTANT(size_t, page, PAGE);
TEST_ARENAS_CONSTANT(unsigned, nbins, NBINS); TEST_ARENAS_CONSTANT(unsigned, nbins, NBINS);
TEST_ARENAS_CONSTANT(size_t, nlruns, nlclasses); TEST_ARENAS_CONSTANT(unsigned, nlruns, nlclasses);
TEST_ARENAS_CONSTANT(unsigned, nhchunks, nhclasses);
#undef TEST_ARENAS_CONSTANT #undef TEST_ARENAS_CONSTANT
} }
@ -363,6 +364,23 @@ TEST_BEGIN(test_arenas_lrun_constants)
} }
TEST_END TEST_END
TEST_BEGIN(test_arenas_hchunk_constants)
{
#define TEST_ARENAS_HCHUNK_CONSTANT(t, name, expected) do { \
t name; \
size_t sz = sizeof(t); \
assert_d_eq(mallctl("arenas.hchunk.0."#name, &name, &sz, NULL, \
0), 0, "Unexpected mallctl() failure"); \
assert_zu_eq(name, expected, "Incorrect "#name" size"); \
} while (0)
TEST_ARENAS_HCHUNK_CONSTANT(size_t, size, chunksize);
#undef TEST_ARENAS_HCHUNK_CONSTANT
}
TEST_END
TEST_BEGIN(test_arenas_extend) TEST_BEGIN(test_arenas_extend)
{ {
unsigned narenas_before, arena, narenas_after; unsigned narenas_before, arena, narenas_after;
@ -420,6 +438,7 @@ main(void)
test_arenas_constants, test_arenas_constants,
test_arenas_bin_constants, test_arenas_bin_constants,
test_arenas_lrun_constants, test_arenas_lrun_constants,
test_arenas_hchunk_constants,
test_arenas_extend, test_arenas_extend,
test_stats_arenas)); test_stats_arenas));
} }

View File

@ -97,7 +97,7 @@ TEST_END
TEST_BEGIN(test_stats_arenas_summary) TEST_BEGIN(test_stats_arenas_summary)
{ {
unsigned arena; unsigned arena;
void *little, *large; void *little, *large, *huge;
uint64_t epoch; uint64_t epoch;
size_t sz; size_t sz;
int expected = config_stats ? 0 : ENOENT; int expected = config_stats ? 0 : ENOENT;
@ -112,6 +112,8 @@ TEST_BEGIN(test_stats_arenas_summary)
assert_ptr_not_null(little, "Unexpected mallocx() failure"); assert_ptr_not_null(little, "Unexpected mallocx() failure");
large = mallocx(arena_maxclass, 0); large = mallocx(arena_maxclass, 0);
assert_ptr_not_null(large, "Unexpected mallocx() failure"); assert_ptr_not_null(large, "Unexpected mallocx() failure");
huge = mallocx(chunksize, 0);
assert_ptr_not_null(huge, "Unexpected mallocx() failure");
assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl() failure"); "Unexpected mallctl() failure");
@ -139,6 +141,7 @@ TEST_BEGIN(test_stats_arenas_summary)
dallocx(little, 0); dallocx(little, 0);
dallocx(large, 0); dallocx(large, 0);
dallocx(huge, 0);
} }
TEST_END TEST_END
@ -251,11 +254,51 @@ TEST_BEGIN(test_stats_arenas_large)
} }
TEST_END TEST_END
TEST_BEGIN(test_stats_arenas_huge)
{
unsigned arena;
void *p;
size_t sz, allocated;
uint64_t epoch, nmalloc, ndalloc;
int expected = config_stats ? 0 : ENOENT;
arena = 0;
assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
0, "Unexpected mallctl() failure");
p = mallocx(chunksize, 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
"Unexpected mallctl() failure");
sz = sizeof(size_t);
assert_d_eq(mallctl("stats.arenas.0.huge.allocated", &allocated, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
sz = sizeof(uint64_t);
assert_d_eq(mallctl("stats.arenas.0.huge.nmalloc", &nmalloc, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("stats.arenas.0.huge.ndalloc", &ndalloc, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
if (config_stats) {
assert_zu_gt(allocated, 0,
"allocated should be greater than zero");
assert_zu_gt(nmalloc, 0,
"nmalloc should be greater than zero");
assert_zu_ge(nmalloc, ndalloc,
"nmalloc should be at least as large as ndalloc");
}
dallocx(p, 0);
}
TEST_END
TEST_BEGIN(test_stats_arenas_bins) TEST_BEGIN(test_stats_arenas_bins)
{ {
unsigned arena; unsigned arena;
void *p; void *p;
size_t sz, allocated, curruns; size_t sz, curruns, curregs;
uint64_t epoch, nmalloc, ndalloc, nrequests, nfills, nflushes; uint64_t epoch, nmalloc, ndalloc, nrequests, nfills, nflushes;
uint64_t nruns, nreruns; uint64_t nruns, nreruns;
int expected = config_stats ? 0 : ENOENT; int expected = config_stats ? 0 : ENOENT;
@ -273,9 +316,6 @@ TEST_BEGIN(test_stats_arenas_bins)
assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0, assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
"Unexpected mallctl() failure"); "Unexpected mallctl() failure");
sz = sizeof(size_t);
assert_d_eq(mallctl("stats.arenas.0.bins.0.allocated", &allocated, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
sz = sizeof(uint64_t); sz = sizeof(uint64_t);
assert_d_eq(mallctl("stats.arenas.0.bins.0.nmalloc", &nmalloc, &sz, assert_d_eq(mallctl("stats.arenas.0.bins.0.nmalloc", &nmalloc, &sz,
NULL, 0), expected, "Unexpected mallctl() result"); NULL, 0), expected, "Unexpected mallctl() result");
@ -283,6 +323,9 @@ TEST_BEGIN(test_stats_arenas_bins)
NULL, 0), expected, "Unexpected mallctl() result"); NULL, 0), expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("stats.arenas.0.bins.0.nrequests", &nrequests, &sz, assert_d_eq(mallctl("stats.arenas.0.bins.0.nrequests", &nrequests, &sz,
NULL, 0), expected, "Unexpected mallctl() result"); NULL, 0), expected, "Unexpected mallctl() result");
sz = sizeof(size_t);
assert_d_eq(mallctl("stats.arenas.0.bins.0.curregs", &curregs, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("stats.arenas.0.bins.0.nfills", &nfills, &sz, assert_d_eq(mallctl("stats.arenas.0.bins.0.nfills", &nfills, &sz,
NULL, 0), config_tcache ? expected : ENOENT, NULL, 0), config_tcache ? expected : ENOENT,
@ -300,14 +343,14 @@ TEST_BEGIN(test_stats_arenas_bins)
NULL, 0), expected, "Unexpected mallctl() result"); NULL, 0), expected, "Unexpected mallctl() result");
if (config_stats) { if (config_stats) {
assert_zu_gt(allocated, 0,
"allocated should be greater than zero");
assert_u64_gt(nmalloc, 0, assert_u64_gt(nmalloc, 0,
"nmalloc should be greater than zero"); "nmalloc should be greater than zero");
assert_u64_ge(nmalloc, ndalloc, assert_u64_ge(nmalloc, ndalloc,
"nmalloc should be at least as large as ndalloc"); "nmalloc should be at least as large as ndalloc");
assert_u64_gt(nrequests, 0, assert_u64_gt(nrequests, 0,
"nrequests should be greater than zero"); "nrequests should be greater than zero");
assert_zu_gt(curregs, 0,
"allocated should be greater than zero");
if (config_tcache) { if (config_tcache) {
assert_u64_gt(nfills, 0, assert_u64_gt(nfills, 0,
"At least one fill should have occurred"); "At least one fill should have occurred");
@ -336,7 +379,7 @@ TEST_BEGIN(test_stats_arenas_lruns)
assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)), assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
0, "Unexpected mallctl() failure"); 0, "Unexpected mallctl() failure");
p = mallocx(SMALL_MAXCLASS+1, 0); p = mallocx(LARGE_MINCLASS, 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure"); assert_ptr_not_null(p, "Unexpected mallocx() failure");
assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0, assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
@ -368,6 +411,46 @@ TEST_BEGIN(test_stats_arenas_lruns)
} }
TEST_END TEST_END
/*
 * Verify the per-size-class huge-allocation statistics: after making one
 * chunk-sized (huge) allocation, the stats.arenas.<i>.hchunks.<j>.nmalloc,
 * .ndalloc, and .curhchunks mallctls should exist (when stats are compiled
 * in) and report internally consistent values.
 */
TEST_BEGIN(test_stats_arenas_hchunks)
{
	unsigned arena;
	void *p;
	uint64_t epoch, nmalloc, ndalloc;
	size_t curhchunks, sz;
	/* The stats mallctls only exist if --enable-stats was configured. */
	int expected = config_stats ? 0 : ENOENT;

	/* Pin this thread to arena 0 so "stats.arenas.0..." is the arena used. */
	arena = 0;
	assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
	    0, "Unexpected mallctl() failure");

	/* A chunksize request is served from the huge size classes. */
	p = mallocx(chunksize, 0);
	assert_ptr_not_null(p, "Unexpected mallocx() failure");

	/* Advance the epoch so cached stats reflect the allocation above. */
	assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
	    "Unexpected mallctl() failure");

	sz = sizeof(uint64_t);
	assert_d_eq(mallctl("stats.arenas.0.hchunks.0.nmalloc", &nmalloc, &sz,
	    NULL, 0), expected, "Unexpected mallctl() result");
	assert_d_eq(mallctl("stats.arenas.0.hchunks.0.ndalloc", &ndalloc, &sz,
	    NULL, 0), expected, "Unexpected mallctl() result");
	sz = sizeof(size_t);
	assert_d_eq(mallctl("stats.arenas.0.hchunks.0.curhchunks", &curhchunks,
	    &sz, NULL, 0), expected, "Unexpected mallctl() result");

	if (config_stats) {
		assert_u64_gt(nmalloc, 0,
		    "nmalloc should be greater than zero");
		assert_u64_ge(nmalloc, ndalloc,
		    "nmalloc should be at least as large as ndalloc");
		/*
		 * curhchunks is a size_t, so use the zu assertion variant
		 * (assert_u64_gt would read the wrong width on ILP32), in
		 * line with how the other tests treat size_t stats.
		 */
		assert_zu_gt(curhchunks, 0,
		    "At least one chunk should be currently allocated");
	}

	dallocx(p, 0);
}
TEST_END
int int
main(void) main(void)
{ {
@ -379,6 +462,8 @@ main(void)
test_stats_arenas_summary, test_stats_arenas_summary,
test_stats_arenas_small, test_stats_arenas_small,
test_stats_arenas_large, test_stats_arenas_large,
test_stats_arenas_huge,
test_stats_arenas_bins, test_stats_arenas_bins,
test_stats_arenas_lruns)); test_stats_arenas_lruns,
test_stats_arenas_hchunks));
} }