Add the "stats.arenas.<i>.lg_dirty_mult" mallctl.

Jason Evans 2015-03-24 16:36:12 -07:00
parent bd16ea49c3
commit 562d266511
7 changed files with 35 additions and 19 deletions
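As a usage sketch (not part of this commit), an application could read the new statistic through jemalloc's public mallctl() interface; the "epoch" write refreshes the cached stats before the read:

    #include <stdint.h>
    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int
    main(void)
    {
    	uint64_t epoch = 1;
    	ssize_t lg_dirty_mult;
    	size_t sz;

    	/* Writing to "epoch" refreshes jemalloc's cached statistics. */
    	sz = sizeof(epoch);
    	mallctl("epoch", &epoch, &sz, &epoch, sz);

    	/* Read the new per-arena statistic for arena 0. */
    	sz = sizeof(lg_dirty_mult);
    	if (mallctl("stats.arenas.0.lg_dirty_mult", &lg_dirty_mult, &sz,
    	    NULL, 0) == 0)
    		printf("arena 0 lg_dirty_mult: %zd\n", lg_dirty_mult);
    	return (0);
    }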

ChangeLog

@@ -38,7 +38,8 @@ found in the git revision history:
   "opt.prof_thread_active_init", "prof.thread_active_init", and
   "thread.prof.active" mallctls.
 - Add support for per arena application-specified chunk allocators, configured
-  via the "arena<i>.chunk.alloc" and "arena<i>.chunk.dalloc" mallctls.
+  via the "arena.<i>.chunk.alloc", "arena.<i>.chunk.dalloc", and
+  "arena.<i>.chunk.purge" mallctls.
 - Refactor huge allocation to be managed by arenas, so that arenas now
   function as general purpose independent allocators. This is important in
   the context of user-specified chunk allocators, aside from the scalability
@@ -65,6 +66,9 @@ found in the git revision history:
   "stats.arenas.<i>.metadata.allocated" mallctls.
 - Add the "stats.resident" mallctl, which reports the upper limit of
   physically resident memory mapped by the allocator.
+- Add per arena control over unused dirty page purging, via the
+  "arenas.lg_dirty_mult", "arena.<i>.lg_dirty_mult", and
+  "stats.arenas.<i>.lg_dirty_mult" mallctls.
 - Add the "prof.gdump" mallctl, which makes it possible to toggle the gdump
   feature on/off during program execution.
 - Add sdallocx(), which implements sized deallocation. The primary
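How the three knobs named in the entry above relate, as a hedged sketch against the public API: "arenas.lg_dirty_mult" seeds arenas created afterward, "arena.<i>.lg_dirty_mult" (rw) overrides one arena, and "stats.arenas.<i>.lg_dirty_mult" (r-, added in this commit) reads the per-arena value back. tune_purging is a hypothetical helper name:

    #include <jemalloc/jemalloc.h>

    static void
    tune_purging(void)
    {
    	ssize_t lg = 4, disable = -1;

    	/* Default for arenas created after this point: at least a
    	 * 16:1 (lg 4) active:dirty page ratio. */
    	mallctl("arenas.lg_dirty_mult", NULL, NULL, &lg, sizeof(lg));

    	/* Override arena 0; -1 disables purging for that arena, as
    	 * with opt.lg_dirty_mult. */
    	mallctl("arena.0.lg_dirty_mult", NULL, NULL, &disable,
    	    sizeof(disable));
    }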

doc/jemalloc.xml.in

@@ -1983,6 +1983,18 @@ malloc_conf = "xmalloc:true";]]></programlisting>
         </para></listitem>
       </varlistentry>
 
+      <varlistentry id="stats.arenas.i.lg_dirty_mult">
+        <term>
+          <mallctl>stats.arenas.&lt;i&gt;.lg_dirty_mult</mallctl>
+          (<type>ssize_t</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>Minimum ratio (log base 2) of active to dirty pages.
+        See <link
+        linkend="opt.lg_dirty_mult"><mallctl>opt.lg_dirty_mult</mallctl></link>
+        for details.</para></listitem>
+      </varlistentry>
+
       <varlistentry id="stats.arenas.i.nthreads">
         <term>
           <mallctl>stats.arenas.&lt;i&gt;.nthreads</mallctl>
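Concretely, a value of 3 requires at least an 8:1 (2^3:1) active:dirty page ratio, and -1 disables purging, which is why the type is the signed ssize_t. Since the entry is r- (readable, not writable), a by-mib read could look like the following sketch; arena_lg_dirty_mult_stat is a hypothetical helper, and the arena index sits at mib[2]:

    #include <jemalloc/jemalloc.h>

    static ssize_t
    arena_lg_dirty_mult_stat(unsigned arena_ind)
    {
    	/* "stats.arenas.<i>.lg_dirty_mult" has four name components;
    	 * the arena index occupies mib[2]. */
    	size_t mib[4];
    	size_t miblen = sizeof(mib) / sizeof(size_t);
    	ssize_t lg_dirty_mult;
    	size_t sz = sizeof(lg_dirty_mult);

    	mallctlnametomib("stats.arenas.0.lg_dirty_mult", mib, &miblen);
    	mib[2] = arena_ind;	/* re-point without a second name lookup */
    	mallctlbymib(mib, miblen, &lg_dirty_mult, &sz, NULL, 0);
    	return (lg_dirty_mult);
    }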

include/jemalloc/internal/arena.h

@@ -470,8 +470,9 @@ dss_prec_t arena_dss_prec_get(arena_t *arena);
 bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
 ssize_t arena_lg_dirty_mult_default_get(void);
 bool arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult);
-void arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
-    size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
+void arena_stats_merge(arena_t *arena, const char **dss,
+    ssize_t *lg_dirty_mult, size_t *nactive, size_t *ndirty,
+    arena_stats_t *astats, malloc_bin_stats_t *bstats,
     malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats);
 arena_t *arena_new(unsigned ind);
 void arena_boot(void);

include/jemalloc/internal/ctl.h

@@ -34,6 +34,7 @@ struct ctl_arena_stats_s {
 	bool			initialized;
 	unsigned		nthreads;
 	const char		*dss;
+	ssize_t			lg_dirty_mult;
 	size_t			pactive;
 	size_t			pdirty;
 	arena_stats_t		astats;

src/arena.c

@@ -2657,14 +2657,16 @@ arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult)
 }
 
 void
-arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
-    size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
-    malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats)
+arena_stats_merge(arena_t *arena, const char **dss, ssize_t *lg_dirty_mult,
+    size_t *nactive, size_t *ndirty, arena_stats_t *astats,
+    malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
+    malloc_huge_stats_t *hstats)
 {
 	unsigned i;
 
 	malloc_mutex_lock(&arena->lock);
 	*dss = dss_prec_names[arena->dss_prec];
+	*lg_dirty_mult = arena->lg_dirty_mult;
 	*nactive += arena->nactive;
 	*ndirty += arena->ndirty;
 

src/ctl.c

@@ -181,6 +181,7 @@ CTL_PROTO(stats_arenas_i_hchunks_j_curhchunks)
 INDEX_PROTO(stats_arenas_i_hchunks_j)
 CTL_PROTO(stats_arenas_i_nthreads)
 CTL_PROTO(stats_arenas_i_dss)
+CTL_PROTO(stats_arenas_i_lg_dirty_mult)
 CTL_PROTO(stats_arenas_i_pactive)
 CTL_PROTO(stats_arenas_i_pdirty)
 CTL_PROTO(stats_arenas_i_mapped)
@@ -443,6 +444,7 @@ static const ctl_indexed_node_t stats_arenas_i_hchunks_node[] = {
 static const ctl_named_node_t stats_arenas_i_node[] = {
 	{NAME("nthreads"),	CTL(stats_arenas_i_nthreads)},
 	{NAME("dss"),		CTL(stats_arenas_i_dss)},
+	{NAME("lg_dirty_mult"),	CTL(stats_arenas_i_lg_dirty_mult)},
 	{NAME("pactive"),	CTL(stats_arenas_i_pactive)},
 	{NAME("pdirty"),	CTL(stats_arenas_i_pdirty)},
 	{NAME("mapped"),	CTL(stats_arenas_i_mapped)},
@@ -524,6 +526,7 @@ ctl_arena_clear(ctl_arena_stats_t *astats)
 {
 
 	astats->dss = dss_prec_names[dss_prec_limit];
+	astats->lg_dirty_mult = -1;
 	astats->pactive = 0;
 	astats->pdirty = 0;
 	if (config_stats) {
@@ -545,9 +548,9 @@ ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena)
 {
 	unsigned i;
 
-	arena_stats_merge(arena, &cstats->dss, &cstats->pactive,
-	    &cstats->pdirty, &cstats->astats, cstats->bstats, cstats->lstats,
-	    cstats->hstats);
+	arena_stats_merge(arena, &cstats->dss, &cstats->lg_dirty_mult,
+	    &cstats->pactive, &cstats->pdirty, &cstats->astats, cstats->bstats,
+	    cstats->lstats, cstats->hstats);
 
 	for (i = 0; i < NBINS; i++) {
 		cstats->allocated_small += cstats->bstats[i].curregs *
@@ -2000,6 +2003,8 @@ CTL_RO_CGEN(config_stats, stats_resident, ctl_stats.resident, size_t)
 CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t)
 
 CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *)
+CTL_RO_GEN(stats_arenas_i_lg_dirty_mult, ctl_stats.arenas[mib[2]].lg_dirty_mult,
+    ssize_t)
 CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
 CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
 CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t)
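A plausible smoke test for this wiring, written against the public API rather than jemalloc's test harness (check_lg_dirty_mult_stat is a hypothetical name): set the rw control, bump the epoch so the ctl_stats snapshot is re-merged via ctl_arena_stats_amerge(), then read the new r- stat back:

    #include <assert.h>
    #include <stdint.h>
    #include <jemalloc/jemalloc.h>

    static void
    check_lg_dirty_mult_stat(void)
    {
    	ssize_t set = 4, got = 0;
    	uint64_t epoch = 1;
    	size_t sz;

    	/* Write through the rw per-arena control... */
    	mallctl("arena.0.lg_dirty_mult", NULL, NULL, &set, sizeof(set));

    	/* ...refresh the cached stats snapshot... */
    	sz = sizeof(epoch);
    	mallctl("epoch", &epoch, &sz, &epoch, sz);

    	/* ...and read the value back through the new r- stat. */
    	sz = sizeof(got);
    	mallctl("stats.arenas.0.lg_dirty_mult", &got, &sz, NULL, 0);
    	assert(got == set);
    }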

src/stats.c

@@ -6,15 +6,6 @@
 		xmallctl(n, v, &sz, NULL, 0);			\
 } while (0)
 
-#define	CTL_M1_GET(n, i, v, t) do {				\
-	size_t mib[6];						\
-	size_t miblen = sizeof(mib) / sizeof(size_t);		\
-	size_t sz = sizeof(t);					\
-	xmallctlnametomib(n, mib, &miblen);			\
-	mib[1] = (i);						\
-	xmallctlbymib(mib, miblen, v, &sz, NULL, 0);		\
-} while (0)
-
 #define	CTL_M2_GET(n, i, v, t) do {				\
 	size_t mib[6];						\
 	size_t miblen = sizeof(mib) / sizeof(size_t);		\
@@ -285,7 +276,7 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
 	CTL_M2_GET("stats.arenas.0.dss", i, &dss, const char *);
 	malloc_cprintf(write_cb, cbopaque, "dss allocation precedence: %s\n",
 	    dss);
-	CTL_M1_GET("arena.0.lg_dirty_mult", i, &lg_dirty_mult, ssize_t);
+	CTL_M2_GET("stats.arenas.0.lg_dirty_mult", i, &lg_dirty_mult, ssize_t);
 	if (lg_dirty_mult >= 0) {
 		malloc_cprintf(write_cb, cbopaque,
 		    "min active:dirty page ratio: %u:1\n",