Add MALLOC_CONF parsing for dynamic slab sizes.

This actually enables us to change the slab-size values: the new "slab_sizes" option is parsed from MALLOC_CONF and applied to the size-class data.
Author: David T. Goldblatt (2018-04-20 21:11:03 -07:00), committed by David Goldblatt
Parent: 4610ffa942
Commit: 5112d9e5fd
4 changed files with 153 additions and 0 deletions
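For reference, the value accepted by the new "slab_sizes" option is a '|'-separated list of segments of the form <lo>-<hi>:<pages>: every size class falling in [lo, hi] gets a slab of that many pages, and (as the new test exercises) later segments override earlier ones where they overlap. A minimal usage sketch, using the same value as the new test script below; the program name is a placeholder:

    MALLOC_CONF="slab_sizes:1-4096:17|100-200:1|128-128:2" ./your_program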

Makefile.in

@@ -230,6 +230,7 @@ TESTS_INTEGRATION := $(srcroot)test/integration/aligned_alloc.c \
	$(srcroot)test/integration/posix_memalign.c \
	$(srcroot)test/integration/rallocx.c \
	$(srcroot)test/integration/sdallocx.c \
	$(srcroot)test/integration/slab_sizes.c \
	$(srcroot)test/integration/thread_arena.c \
	$(srcroot)test/integration/thread_tcache_enabled.c \
	$(srcroot)test/integration/xallocx.c
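With the test registered in TESTS_INTEGRATION, the usual harness should build and run it. A rough sketch of the commands (target names are assumed from jemalloc's Makefile and are not part of this diff):

    make tests_integration      # build the integration test binaries
    make check_integration      # run them through the generated test/test.sh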

src/jemalloc.c

@@ -764,6 +764,49 @@ init_opt_stats_print_opts(const char *v, size_t vlen) {
	assert(opts_len == strlen(opt_stats_print_opts));
}

static bool
malloc_conf_slab_sizes_next(const char **slab_size_segment_cur,
    size_t *vlen_left, size_t *slab_start, size_t *slab_end, size_t *pgs) {
	const char *cur = *slab_size_segment_cur;
	char *end;
	uintmax_t um;

	set_errno(0);

	/* First number, then '-' */
	um = malloc_strtoumax(cur, &end, 0);
	if (get_errno() != 0 || *end != '-') {
		return true;
	}
	*slab_start = (size_t)um;
	cur = end + 1;

	/* Second number, then ':' */
	um = malloc_strtoumax(cur, &end, 0);
	if (get_errno() != 0 || *end != ':') {
		return true;
	}
	*slab_end = (size_t)um;
	cur = end + 1;

	/* Last number */
	um = malloc_strtoumax(cur, &end, 0);
	if (get_errno() != 0) {
		return true;
	}
	*pgs = (size_t)um;

	/* Consume the separator if there is one. */
	if (*end == '|') {
		end++;
	}

	*vlen_left -= end - *slab_size_segment_cur;
	*slab_size_segment_cur = end;
	return false;
}

static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p) {
@@ -1192,6 +1235,31 @@ malloc_conf_init(void) {
			    "max_background_threads", 1,
			    opt_max_background_threads, yes, yes,
			    true);
			if (CONF_MATCH("slab_sizes")) {
				bool err;
				const char *slab_size_segment_cur = v;
				size_t vlen_left = vlen;
				do {
					size_t slab_start;
					size_t slab_end;
					size_t pgs;
					err = malloc_conf_slab_sizes_next(
					    &slab_size_segment_cur,
					    &vlen_left, &slab_start, &slab_end,
					    &pgs);
					if (!err) {
						sc_data_update_slab_size(
						    &sc_data_global, slab_start,
						    slab_end, (int)pgs);
					} else {
						malloc_conf_error(
						    "Invalid settings for "
						    "slab_sizes", k, klen, v,
						    vlen);
					}
				} while (!err && vlen_left > 0);
				continue;
			}
			if (config_prof) {
				CONF_HANDLE_BOOL(opt_prof, "prof")
				CONF_HANDLE_CHAR_P(opt_prof_prefix,

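To make the parsing loop concrete, here is a hand trace of the value exported by slab_sizes.sh below (illustrative only; the names match the code above):

    v = "1-4096:17|100-200:1|128-128:2", vlen_left = 29
    call 1: slab_start = 1,   slab_end = 4096, pgs = 17; '|' consumed, vlen_left = 19
    call 2: slab_start = 100, slab_end = 200,  pgs = 1;  '|' consumed, vlen_left = 9
    call 3: slab_start = 128, slab_end = 128,  pgs = 2;  no separator, vlen_left = 0

Each successful call is followed by sc_data_update_slab_size(), and the loop stops once vlen_left reaches 0.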
test/integration/slab_sizes.c (new file)

@@ -0,0 +1,80 @@
#include "test/jemalloc_test.h"

/* Note that this test relies on the unusual slab sizes set in slab_sizes.sh. */

TEST_BEGIN(test_slab_sizes) {
	unsigned nbins;
	size_t page;
	size_t sizemib[4];
	size_t slabmib[4];
	size_t len;

	len = sizeof(nbins);
	assert_d_eq(mallctl("arenas.nbins", &nbins, &len, NULL, 0), 0,
	    "nbins mallctl failure");

	len = sizeof(page);
	assert_d_eq(mallctl("arenas.page", &page, &len, NULL, 0), 0,
	    "page mallctl failure");

	len = 4;
	assert_d_eq(mallctlnametomib("arenas.bin.0.size", sizemib, &len), 0,
	    "bin size mallctlnametomib failure");

	len = 4;
	assert_d_eq(mallctlnametomib("arenas.bin.0.slab_size", slabmib, &len),
	    0, "slab size mallctlnametomib failure");

	size_t biggest_slab_seen = 0;

	for (unsigned i = 0; i < nbins; i++) {
		size_t bin_size;
		size_t slab_size;
		len = sizeof(size_t);

		sizemib[2] = i;
		slabmib[2] = i;
		assert_d_eq(mallctlbymib(sizemib, 4, (void *)&bin_size, &len,
		    NULL, 0), 0, "bin size mallctlbymib failure");

		len = sizeof(size_t);
		assert_d_eq(mallctlbymib(slabmib, 4, (void *)&slab_size, &len,
		    NULL, 0), 0, "slab size mallctlbymib failure");

		if (bin_size < 100) {
			/*
			 * Then we should be as close to 17 as possible.  Since
			 * not all page sizes are valid (because of bitmap
			 * limitations on the number of items in a slab), we
			 * should at least make sure that the number of pages
			 * goes up.
			 */
			assert_zu_ge(slab_size, biggest_slab_seen,
			    "Slab sizes should go up");
			biggest_slab_seen = slab_size;
		} else if (
		    (100 <= bin_size && bin_size < 128)
		    || (128 < bin_size && bin_size <= 200)) {
			assert_zu_eq(slab_size, page,
			    "Forced-small slabs should be small");
		} else if (bin_size == 128) {
			assert_zu_eq(slab_size, 2 * page,
			    "Forced-2-page slab should be 2 pages");
		} else if (200 < bin_size && bin_size <= 4096) {
			assert_zu_ge(slab_size, biggest_slab_seen,
			    "Slab sizes should go up");
			biggest_slab_seen = slab_size;
		}
	}

	/*
	 * For any reasonable configuration, 17 pages should be a valid slab
	 * size for 4096-byte items.
	 */
	assert_zu_eq(biggest_slab_seen, 17 * page, "Didn't hit page target");
}
TEST_END

int
main(void) {
	return test(
	    test_slab_sizes);
}

test/integration/slab_sizes.sh (new file)

@@ -0,0 +1,4 @@
#!/bin/sh

# Some screwy-looking slab sizes.
export MALLOC_CONF="slab_sizes:1-4096:17|100-200:1|128-128:2"
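For context, an assumption about the harness rather than part of this diff: jemalloc's generated test runner (test/test.sh) sources a same-named .sh file next to each test binary, so the MALLOC_CONF exported above should be in the environment before test/integration/slab_sizes runs.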