Hide size class computation behind a layer of indirection.

This change removes almost all the dependencies on size_classes.h, accessing the
data there only via the new module sc.h, which does not depend on any
configuration options.

In a subsequent commit, we'll remove the configure-time size class computations
and do them at boot time instead.
Author:       David Goldblatt
Date:         2017-12-14 12:46:39 -08:00
Committed by: David Goldblatt
Parent:       fb924dd7bf
Commit:       e904f813b4

46 changed files with 886 additions and 459 deletions
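
Every hunk below makes the same substitution: configure-time macros from size_classes.h (NSIZES, NBINS, NPSIZES, SMALL_MAXCLASS, LARGE_MINCLASS, LG_LARGE_MINCLASS) become either namespaced SC_* constants or fields read from the new runtime structure sc_data_global declared in sc.h. The following is only a sketch of that indirection, limited to the members these hunks actually touch; the in-tree include/jemalloc/internal/sc.h is the authoritative definition and carries more fields.

#include <stddef.h>

/*
 * Hypothetical sketch of the sc.h indirection.  Only the members read by the
 * hunks in this commit are shown; names and types are inferred from those
 * call sites rather than copied from the real header.
 */
typedef struct sc_data_s {
	unsigned npsizes;		/* Runtime count of page-multiple size classes. */
	size_t small_maxclass;		/* Largest small size class. */
	unsigned lg_large_minclass;	/* lg2 of the smallest large size class. */
	size_t large_minclass;		/* Smallest large class, i.e. 1 << lg_large_minclass. */
} sc_data_t;

/* Filled in once at startup rather than computed at configure time. */
extern sc_data_t sc_data_global;

Values that the tests still treat as exact compile-time constants keep macro form under the SC_ prefix (SC_NBINS, SC_NSIZES), while SC_NPSIZES_MAX is only a static upper bound on the runtime npsizes, which is why the size_classes test below gains an assert_u_le(sc_data_global.npsizes, SC_NPSIZES_MAX, ...) check.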

@@ -77,7 +77,7 @@ vsalloc(tsdn_t *tsdn, const void *ptr) {
 		return 0;
 	}
-	if (szind == NSIZES) {
+	if (szind == SC_NSIZES) {
 		return 0;
 	}

@@ -123,13 +123,14 @@ test_junk(size_t sz_min, size_t sz_max) {
 TEST_BEGIN(test_junk_small) {
 	test_skip_if(!config_fill);
-	test_junk(1, SMALL_MAXCLASS-1);
+	test_junk(1, sc_data_global.small_maxclass - 1);
 }
 TEST_END
 TEST_BEGIN(test_junk_large) {
 	test_skip_if(!config_fill);
-	test_junk(SMALL_MAXCLASS+1, (1U << (LG_LARGE_MINCLASS+1)));
+	test_junk(sc_data_global.small_maxclass + 1,
+	    (1U << (sc_data_global.lg_large_minclass + 1)));
 }
 TEST_END

@@ -581,7 +581,7 @@ TEST_BEGIN(test_arena_i_retain_grow_limit) {
 	assert_d_eq(mallctlbymib(mib, miblen, &default_limit, &sz, NULL, 0), 0,
 	    "Unexpected mallctl() failure");
-	assert_zu_eq(default_limit, sz_pind2sz(EXTENT_GROW_MAX_PIND),
+	assert_zu_eq(default_limit, sz_pind2sz(sc_data_global.npsizes - 1),
 	    "Unexpected default for retain_grow_limit");
 	new_limit = PAGE - 1;
@@ -686,8 +686,8 @@ TEST_BEGIN(test_arenas_constants) {
 	TEST_ARENAS_CONSTANT(size_t, quantum, QUANTUM);
 	TEST_ARENAS_CONSTANT(size_t, page, PAGE);
-	TEST_ARENAS_CONSTANT(unsigned, nbins, NBINS);
-	TEST_ARENAS_CONSTANT(unsigned, nlextents, NSIZES - NBINS);
+	TEST_ARENAS_CONSTANT(unsigned, nbins, SC_NBINS);
+	TEST_ARENAS_CONSTANT(unsigned, nlextents, SC_NSIZES - SC_NBINS);
 #undef TEST_ARENAS_CONSTANT
 }
@@ -720,7 +720,8 @@ TEST_BEGIN(test_arenas_lextent_constants) {
 	assert_zu_eq(name, expected, "Incorrect "#name" size"); \
 } while (0)
-	TEST_ARENAS_LEXTENT_CONSTANT(size_t, size, LARGE_MINCLASS);
+	TEST_ARENAS_LEXTENT_CONSTANT(size_t, size,
+	    sc_data_global.large_minclass);
 #undef TEST_ARENAS_LEXTENT_CONSTANT
 }

@@ -29,12 +29,12 @@ TEST_BEGIN(test_gdump) {
 	prof_dump_open = prof_dump_open_intercept;
 	did_prof_dump_open = false;
-	p = mallocx((1U << LG_LARGE_MINCLASS), 0);
+	p = mallocx((1U << sc_data_global.lg_large_minclass), 0);
 	assert_ptr_not_null(p, "Unexpected mallocx() failure");
 	assert_true(did_prof_dump_open, "Expected a profile dump");
 	did_prof_dump_open = false;
-	q = mallocx((1U << LG_LARGE_MINCLASS), 0);
+	q = mallocx((1U << sc_data_global.lg_large_minclass), 0);
 	assert_ptr_not_null(q, "Unexpected mallocx() failure");
 	assert_true(did_prof_dump_open, "Expected a profile dump");
@@ -45,7 +45,7 @@ TEST_BEGIN(test_gdump) {
 	    "Unexpected mallctl failure while disabling prof.gdump");
 	assert(gdump_old);
 	did_prof_dump_open = false;
-	r = mallocx((1U << LG_LARGE_MINCLASS), 0);
+	r = mallocx((1U << sc_data_global.lg_large_minclass), 0);
 	assert_ptr_not_null(q, "Unexpected mallocx() failure");
 	assert_false(did_prof_dump_open, "Unexpected profile dump");
@@ -56,7 +56,7 @@ TEST_BEGIN(test_gdump) {
 	    "Unexpected mallctl failure while enabling prof.gdump");
 	assert(!gdump_old);
 	did_prof_dump_open = false;
-	s = mallocx((1U << LG_LARGE_MINCLASS), 0);
+	s = mallocx((1U << sc_data_global.lg_large_minclass), 0);
 	assert_ptr_not_null(q, "Unexpected mallocx() failure");
 	assert_true(did_prof_dump_open, "Expected a profile dump");

@@ -85,10 +85,10 @@ TEST_END
 TEST_BEGIN(test_rtree_extrema) {
 	extent_t extent_a, extent_b;
-	extent_init(&extent_a, NULL, NULL, LARGE_MINCLASS, false,
-	    sz_size2index(LARGE_MINCLASS), 0, extent_state_active, false,
-	    false, true);
-	extent_init(&extent_b, NULL, NULL, 0, false, NSIZES, 0,
+	extent_init(&extent_a, NULL, NULL, sc_data_global.large_minclass, false,
+	    sz_size2index(sc_data_global.large_minclass), 0,
+	    extent_state_active, false, false, true);
+	extent_init(&extent_b, NULL, NULL, 0, false, SC_NSIZES, 0,
 	    extent_state_active, false, false, true);
 	tsdn_t *tsdn = tsdn_fetch();
@@ -125,7 +125,7 @@ TEST_BEGIN(test_rtree_bits) {
 	    PAGE + (((uintptr_t)1) << LG_PAGE) - 1};
 	extent_t extent;
-	extent_init(&extent, NULL, NULL, 0, false, NSIZES, 0,
+	extent_init(&extent, NULL, NULL, 0, false, SC_NSIZES, 0,
 	    extent_state_active, false, false, true);
 	rtree_t *rtree = &test_rtree;
@@ -135,7 +135,7 @@ TEST_BEGIN(test_rtree_bits) {
 	for (unsigned i = 0; i < sizeof(keys)/sizeof(uintptr_t); i++) {
 		assert_false(rtree_write(tsdn, rtree, &rtree_ctx, keys[i],
-		    &extent, NSIZES, false),
+		    &extent, SC_NSIZES, false),
 		    "Unexpected rtree_write() failure");
 		for (unsigned j = 0; j < sizeof(keys)/sizeof(uintptr_t); j++) {
 			assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx,
@@ -166,7 +166,7 @@ TEST_BEGIN(test_rtree_random) {
 	rtree_ctx_data_init(&rtree_ctx);
 	extent_t extent;
-	extent_init(&extent, NULL, NULL, 0, false, NSIZES, 0,
+	extent_init(&extent, NULL, NULL, 0, false, SC_NSIZES, 0,
 	    extent_state_active, false, false, true);
 	assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure");
@@ -177,7 +177,8 @@ TEST_BEGIN(test_rtree_random) {
 		    &rtree_ctx, keys[i], false, true);
 		assert_ptr_not_null(elm,
 		    "Unexpected rtree_leaf_elm_lookup() failure");
-		rtree_leaf_elm_write(tsdn, rtree, elm, &extent, NSIZES, false);
+		rtree_leaf_elm_write(tsdn, rtree, elm, &extent, SC_NSIZES,
+		    false);
 		assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx,
 		    keys[i], true), &extent,
 		    "rtree_extent_read() should return previously set value");

@@ -142,11 +142,11 @@ TEST_BEGIN(test_overflow) {
 	max_size_class = get_max_size_class();
 	max_psz = max_size_class + PAGE;
-	assert_u_eq(sz_size2index(max_size_class+1), NSIZES,
+	assert_u_eq(sz_size2index(max_size_class+1), SC_NSIZES,
 	    "sz_size2index() should return NSIZES on overflow");
-	assert_u_eq(sz_size2index(ZU(PTRDIFF_MAX)+1), NSIZES,
+	assert_u_eq(sz_size2index(ZU(PTRDIFF_MAX)+1), SC_NSIZES,
 	    "sz_size2index() should return NSIZES on overflow");
-	assert_u_eq(sz_size2index(SIZE_T_MAX), NSIZES,
+	assert_u_eq(sz_size2index(SIZE_T_MAX), SC_NSIZES,
 	    "sz_size2index() should return NSIZES on overflow");
 	assert_zu_eq(sz_s2u(max_size_class+1), 0,
@@ -156,13 +156,16 @@ TEST_BEGIN(test_overflow) {
 	assert_zu_eq(sz_s2u(SIZE_T_MAX), 0,
 	    "sz_s2u() should return 0 on overflow");
-	assert_u_eq(sz_psz2ind(max_size_class+1), NPSIZES,
+	assert_u_eq(sz_psz2ind(max_size_class+1), sc_data_global.npsizes,
 	    "sz_psz2ind() should return NPSIZES on overflow");
-	assert_u_eq(sz_psz2ind(ZU(PTRDIFF_MAX)+1), NPSIZES,
+	assert_u_eq(sz_psz2ind(ZU(PTRDIFF_MAX)+1), sc_data_global.npsizes,
 	    "sz_psz2ind() should return NPSIZES on overflow");
-	assert_u_eq(sz_psz2ind(SIZE_T_MAX), NPSIZES,
+	assert_u_eq(sz_psz2ind(SIZE_T_MAX), sc_data_global.npsizes,
 	    "sz_psz2ind() should return NPSIZES on overflow");
+	assert_u_le(sc_data_global.npsizes, SC_NPSIZES_MAX,
+	    "Dynamic value of npsizes is higher than static bound.");
 	assert_zu_eq(sz_psz2u(max_size_class+1), max_psz,
 	    "sz_psz2u() should return (LARGE_MAXCLASS + PAGE) for unsupported"
 	    " size");

@@ -3,7 +3,7 @@
 TEST_BEGIN(test_arena_slab_regind) {
 	szind_t binind;
-	for (binind = 0; binind < NBINS; binind++) {
+	for (binind = 0; binind < SC_NBINS; binind++) {
 		size_t regind;
 		extent_t slab;
 		const bin_info_t *bin_info = &bin_infos[binind];

@@ -33,7 +33,7 @@ TEST_BEGIN(test_stats_large) {
 	size_t sz;
 	int expected = config_stats ? 0 : ENOENT;
-	p = mallocx(SMALL_MAXCLASS+1, MALLOCX_ARENA(0));
+	p = mallocx(sc_data_global.small_maxclass + 1, MALLOCX_ARENA(0));
 	assert_ptr_not_null(p, "Unexpected mallocx() failure");
 	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
@@ -74,9 +74,10 @@ TEST_BEGIN(test_stats_arenas_summary) {
 	uint64_t dirty_npurge, dirty_nmadvise, dirty_purged;
 	uint64_t muzzy_npurge, muzzy_nmadvise, muzzy_purged;
-	little = mallocx(SMALL_MAXCLASS, MALLOCX_ARENA(0));
+	little = mallocx(sc_data_global.small_maxclass, MALLOCX_ARENA(0));
 	assert_ptr_not_null(little, "Unexpected mallocx() failure");
-	large = mallocx((1U << LG_LARGE_MINCLASS), MALLOCX_ARENA(0));
+	large = mallocx((1U << sc_data_global.lg_large_minclass),
+	    MALLOCX_ARENA(0));
 	assert_ptr_not_null(large, "Unexpected mallocx() failure");
 	dallocx(little, 0);
@@ -148,7 +149,7 @@ TEST_BEGIN(test_stats_arenas_small) {
 	no_lazy_lock();	/* Lazy locking would dodge tcache testing. */
-	p = mallocx(SMALL_MAXCLASS, MALLOCX_ARENA(0));
+	p = mallocx(sc_data_global.small_maxclass, MALLOCX_ARENA(0));
 	assert_ptr_not_null(p, "Unexpected mallocx() failure");
 	assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
@@ -191,7 +192,7 @@ TEST_BEGIN(test_stats_arenas_large) {
 	uint64_t epoch, nmalloc, ndalloc;
 	int expected = config_stats ? 0 : ENOENT;
-	p = mallocx((1U << LG_LARGE_MINCLASS), MALLOCX_ARENA(0));
+	p = mallocx((1U << sc_data_global.lg_large_minclass), MALLOCX_ARENA(0));
 	assert_ptr_not_null(p, "Unexpected mallocx() failure");
 	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),

@@ -41,13 +41,14 @@ test_zero(size_t sz_min, size_t sz_max) {
 TEST_BEGIN(test_zero_small) {
 	test_skip_if(!config_fill);
-	test_zero(1, SMALL_MAXCLASS-1);
+	test_zero(1, sc_data_global.small_maxclass - 1);
 }
 TEST_END
 TEST_BEGIN(test_zero_large) {
 	test_skip_if(!config_fill);
-	test_zero(SMALL_MAXCLASS+1, (1U << (LG_LARGE_MINCLASS+1)));
+	test_zero(sc_data_global.small_maxclass + 1,
+	    1U << (sc_data_global.lg_large_minclass + 1));
 }
 TEST_END