Add nonfull_slabs to bin_stats_t.

When config_stats is enabled, track the size of bin->slabs_nonfull in
the new nonfull_slabs counter in bin_stats_t. This metric should be
useful for establishing an upper bound on the savings possible by
meshing.
Author: Doron Roberts-Kedes, 2019-04-12 07:08:50 -04:00 (committed by Qi Wang)
Parent: ae124b8684
Commit: 7fc4f2a32c
7 changed files with 42 additions and 1 deletion

@@ -2947,6 +2947,17 @@ struct extent_hooks_s {
<listitem><para>Current number of slabs.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.bins.j.nonfull_slabs">
<term>
<mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nonfull_slabs</mallctl>
(<type>size_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Current number of nonfull slabs.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.bins.mutex">
<term>
<mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.mutex.{counter}</mallctl>
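For reference, a minimal sketch of reading the newly documented mallctl by name. It assumes a stats-enabled build; arena index 0 and bin index 0 are purely illustrative.

```c
#include <stdio.h>
#include <stdlib.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
	free(malloc(1)); /* Touch the allocator so arena 0 exists. */

	/* Refresh the stats snapshot before reading (standard epoch idiom). */
	uint64_t epoch = 1;
	size_t sz = sizeof(epoch);
	mallctl("epoch", &epoch, &sz, &epoch, sz);

	/* Arena 0, bin 0 are illustrative; requires --enable-stats. */
	size_t nonfull_slabs;
	sz = sizeof(nonfull_slabs);
	if (mallctl("stats.arenas.0.bins.0.nonfull_slabs", &nonfull_slabs,
	    &sz, NULL, 0) == 0) {
		printf("nonfull slabs: %zu\n", nonfull_slabs);
	}
	return 0;
}
```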

@@ -116,6 +116,7 @@ bin_stats_merge(tsdn_t *tsdn, bin_stats_t *dst_bin_stats, bin_t *bin) {
dst_bin_stats->nslabs += bin->stats.nslabs;
dst_bin_stats->reslabs += bin->stats.reslabs;
dst_bin_stats->curslabs += bin->stats.curslabs;
dst_bin_stats->nonfull_slabs += bin->stats.nonfull_slabs;
malloc_mutex_unlock(tsdn, &bin->lock);
}

@@ -45,6 +45,9 @@ struct bin_stats_s {
/* Current number of slabs in this bin. */
size_t curslabs;
/* Current size of nonfull slabs heap in this bin. */
size_t nonfull_slabs;
mutex_prof_data_t mutex_data;
};

@@ -1002,11 +1002,17 @@ static void
arena_bin_slabs_nonfull_insert(bin_t *bin, extent_t *slab) {
assert(extent_nfree_get(slab) > 0);
extent_heap_insert(&bin->slabs_nonfull, slab);
if (config_stats) {
bin->stats.nonfull_slabs++;
}
}
static void
arena_bin_slabs_nonfull_remove(bin_t *bin, extent_t *slab) {
extent_heap_remove(&bin->slabs_nonfull, slab);
if (config_stats) {
bin->stats.nonfull_slabs--;
}
}
static extent_t *
@@ -1017,6 +1023,7 @@ arena_bin_slabs_nonfull_tryget(bin_t *bin) {
}
if (config_stats) {
bin->stats.reslabs++;
bin->stats.nonfull_slabs--;
}
return slab;
}
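The direct decrement in the tryget path is needed because that function pops the heap itself rather than going through arena_bin_slabs_nonfull_remove(). A sketch of how the function reads with the change applied, reconstructed from the context lines above; the heap-pop call is an assumption about the surrounding code, not part of this hunk.

```c
/* Reconstruction for illustration only; details outside the hunk may differ. */
static extent_t *
arena_bin_slabs_nonfull_tryget(bin_t *bin) {
	extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull);
	if (slab == NULL) {
		return NULL;
	}
	if (config_stats) {
		bin->stats.reslabs++;
		bin->stats.nonfull_slabs--;
	}
	return slab;
}
```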

@@ -169,6 +169,7 @@ CTL_PROTO(stats_arenas_i_bins_j_nflushes)
CTL_PROTO(stats_arenas_i_bins_j_nslabs)
CTL_PROTO(stats_arenas_i_bins_j_nreslabs)
CTL_PROTO(stats_arenas_i_bins_j_curslabs)
CTL_PROTO(stats_arenas_i_bins_j_nonfull_slabs)
INDEX_PROTO(stats_arenas_i_bins_j)
CTL_PROTO(stats_arenas_i_lextents_j_nmalloc)
CTL_PROTO(stats_arenas_i_lextents_j_ndalloc)
@@ -454,6 +455,7 @@ static const ctl_named_node_t stats_arenas_i_bins_j_node[] = {
{NAME("nslabs"), CTL(stats_arenas_i_bins_j_nslabs)},
{NAME("nreslabs"), CTL(stats_arenas_i_bins_j_nreslabs)},
{NAME("curslabs"), CTL(stats_arenas_i_bins_j_curslabs)},
{NAME("nonfull_slabs"), CTL(stats_arenas_i_bins_j_nonfull_slabs)},
{NAME("mutex"), CHILD(named, stats_arenas_i_bins_j_mutex)} {NAME("mutex"), CHILD(named, stats_arenas_i_bins_j_mutex)}
}; };
@ -907,8 +909,11 @@ MUTEX_PROF_ARENA_MUTEXES
if (!destroyed) { if (!destroyed) {
sdstats->bstats[i].curslabs += sdstats->bstats[i].curslabs +=
astats->bstats[i].curslabs; astats->bstats[i].curslabs;
sdstats->bstats[i].nonfull_slabs +=
astats->bstats[i].nonfull_slabs;
} else {
assert(astats->bstats[i].curslabs == 0);
assert(astats->bstats[i].nonfull_slabs == 0);
}
malloc_mutex_prof_merge(&sdstats->bstats[i].mutex_data,
&astats->bstats[i].mutex_data);
@@ -2966,6 +2971,8 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreslabs,
arenas_i(mib[2])->astats->bstats[mib[4]].reslabs, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curslabs,
arenas_i(mib[2])->astats->bstats[mib[4]].curslabs, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nonfull_slabs,
arenas_i(mib[2])->astats->bstats[mib[4]].nonfull_slabs, size_t)
static const ctl_named_node_t *
stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib,
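Since the ctl tree now exposes the counter, repeated queries can avoid string lookups by caching a MIB; a sketch assuming the standard mallctlnametomib()/mallctlbymib() interface, with the mib[2]/mib[4] slots holding the arena and bin indices just as the CTL_RO_CGEN accessor above reads them. The concrete indices and error handling are illustrative.

```c
#include <stdio.h>
#include <stdlib.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
	free(malloc(1)); /* Make sure an arena has been initialized. */

	/* Resolve the path once; the "0" components mark the index slots. */
	size_t mib[6];
	size_t miblen = sizeof(mib) / sizeof(mib[0]);
	if (mallctlnametomib("stats.arenas.0.bins.0.nonfull_slabs", mib,
	    &miblen) != 0) {
		return 1;
	}

	uint64_t epoch = 1;
	size_t sz = sizeof(epoch);
	mallctl("epoch", &epoch, &sz, &epoch, sz);

	mib[2] = 0; /* arena index */
	mib[4] = 0; /* bin (size-class) index */
	size_t nonfull_slabs;
	sz = sizeof(nonfull_slabs);
	if (mallctlbymib(mib, miblen, &nonfull_slabs, &sz, NULL, 0) == 0) {
		printf("arena 0, bin 0: %zu nonfull slabs\n", nonfull_slabs);
	}
	return 0;
}
```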

@@ -294,6 +294,7 @@ stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, uint64_t upti
COL_HDR(row, nshards, NULL, right, 9, unsigned)
COL_HDR(row, curregs, NULL, right, 13, size)
COL_HDR(row, curslabs, NULL, right, 13, size)
COL_HDR(row, nonfull_slabs, NULL, right, 15, size)
COL_HDR(row, regs, NULL, right, 5, unsigned)
COL_HDR(row, pgs, NULL, right, 4, size)
/* To buffer a right- and left-justified column. */
@@ -337,6 +338,7 @@ stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, uint64_t upti
uint64_t nslabs;
size_t reg_size, slab_size, curregs;
size_t curslabs;
size_t nonfull_slabs;
uint32_t nregs, nshards;
uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes;
uint64_t nreslabs;
@@ -372,6 +374,8 @@ stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, uint64_t upti
uint64_t);
CTL_M2_M4_GET("stats.arenas.0.bins.0.curslabs", i, j, &curslabs,
size_t);
CTL_M2_M4_GET("stats.arenas.0.bins.0.nonfull_slabs", i, j, &nonfull_slabs,
size_t);
if (mutex) {
mutex_stats_read_arena_bin(i, j, col_mutex64,
@@ -395,6 +399,8 @@ stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, uint64_t upti
&nreslabs);
emitter_json_kv(emitter, "curslabs", emitter_type_size,
&curslabs);
emitter_json_kv(emitter, "nonfull_slabs", emitter_type_size,
&nonfull_slabs);
if (mutex) {
emitter_json_object_kv_begin(emitter, "mutex");
mutex_stats_emit(emitter, NULL, col_mutex64,
@@ -434,6 +440,7 @@ stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, uint64_t upti
col_nshards.unsigned_val = nshards;
col_curregs.size_val = curregs;
col_curslabs.size_val = curslabs;
col_nonfull_slabs.size_val = nonfull_slabs;
col_regs.unsigned_val = nregs;
col_pgs.size_val = slab_size / page;
col_util.str_val = util;

@@ -228,7 +228,7 @@ gen_mallctl_str(char *cmd, char *name, unsigned arena_ind) {
TEST_BEGIN(test_stats_arenas_bins) {
void *p;
size_t sz, curslabs, curregs, nonfull_slabs;
uint64_t epoch, nmalloc, ndalloc, nrequests, nfills, nflushes;
uint64_t nslabs, nreslabs;
int expected = config_stats ? 0 : ENOENT;
@@ -289,6 +289,9 @@ TEST_BEGIN(test_stats_arenas_bins) {
gen_mallctl_str(cmd, "curslabs", arena_ind);
assert_d_eq(mallctl(cmd, (void *)&curslabs, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
gen_mallctl_str(cmd, "nonfull_slabs", arena_ind);
assert_d_eq(mallctl(cmd, (void *)&nonfull_slabs, &sz, NULL, 0),
expected, "Unexpected mallctl() result");
if (config_stats) {
assert_u64_gt(nmalloc, 0,
@@ -309,6 +312,8 @@ TEST_BEGIN(test_stats_arenas_bins) {
"At least one slab should have been allocated");
assert_zu_gt(curslabs, 0,
"At least one slab should be currently allocated");
assert_zu_eq(nonfull_slabs, 0,
"slabs_nonfull should be empty");
}
dallocx(p, 0);
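The test only asserts that the counter is zero, since its single allocation leaves the bin's one slab as the current slab rather than on the nonfull heap. Below is a sketch of a scenario that should drive the counter above zero on a stats-enabled build; the object count, the assumption that bin 0 is the 8-byte size class, and the use of MALLCTL_ARENAS_ALL merged stats are illustrative, and exact values depend on configuration and tcache behavior.

```c
#include <stdio.h>
#include <stdlib.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
	enum { NOBJ = 10000 };
	static void *ptrs[NOBJ];

	/* Fill several slabs of the smallest size class (assumed 8 bytes),
	 * then free every other object so earlier slabs end up partially
	 * full rather than full or empty. */
	for (int i = 0; i < NOBJ; i++) {
		ptrs[i] = malloc(8);
	}
	for (int i = 0; i < NOBJ; i += 2) {
		free(ptrs[i]);
	}

	/* Return tcached frees to the bins, then refresh the stats. */
	mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);
	uint64_t epoch = 1;
	size_t sz = sizeof(epoch);
	mallctl("epoch", &epoch, &sz, &epoch, sz);

	/* Merged view across all arenas for bin 0. */
	char name[128];
	snprintf(name, sizeof(name), "stats.arenas.%u.bins.0.nonfull_slabs",
	    (unsigned)MALLCTL_ARENAS_ALL);
	size_t nonfull_slabs;
	sz = sizeof(nonfull_slabs);
	if (mallctl(name, &nonfull_slabs, &sz, NULL, 0) == 0) {
		printf("nonfull slabs in bin 0: %zu\n", nonfull_slabs);
	}
	return 0;
}
```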