Replace extent_tree_szad_* with extent_heap_*.

Jason Evans, 2016-05-17 14:58:56 -07:00
parent ffa45a5331, commit fc0372a15e
9 changed files with 333 additions and 104 deletions


@@ -136,6 +136,7 @@ TESTS_UNIT := \
$(srcroot)test/unit/bitmap.c \
$(srcroot)test/unit/ckh.c \
$(srcroot)test/unit/decay.c \
$(srcroot)test/unit/extent_quantize.c \
$(srcroot)test/unit/fork.c \
$(srcroot)test/unit/hash.c \
$(srcroot)test/unit/junk.c \


@@ -418,11 +418,11 @@ struct arena_s {
malloc_mutex_t huge_mtx;
/*
* Trees of chunks that were previously allocated. These are used when
* Heaps of chunks that were previously allocated. These are used when
* allocating chunks, in an attempt to re-use address space.
*/
extent_tree_t chunks_szad_cached;
extent_tree_t chunks_szad_retained;
extent_heap_t chunks_cached[NPSIZES];
extent_heap_t chunks_retained[NPSIZES];
malloc_mutex_t chunks_mtx;
/* Cache of extent structures that were allocated via base_alloc(). */
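This arena hunk is the core of the commit: the two size/address-ordered trees become arrays of NPSIZES pairing heaps, one per page size class, so size ordering moves into the array index and each heap only has to order extents by address. A minimal standalone sketch of that shape (invented demo_* names, a plain linked list standing in for each pairing heap, and a fake 4 KiB class spacing):

#include <stddef.h>
#include <stdio.h>

#define DEMO_NPSIZES 8 /* stand-in for NPSIZES */

typedef struct demo_extent_s demo_extent_t;
struct demo_extent_s {
	size_t size;
	demo_extent_t *next; /* stand-in for the intrusive ph_link */
};

/*
 * One slot per size class (the commit keeps two such arrays, cached and
 * retained). The class index encodes the size, so ordering within a slot
 * needs no size key.
 */
static demo_extent_t *demo_cached[DEMO_NPSIZES];

int
main(void)
{
	demo_extent_t e = {8192, NULL};
	size_t ind = e.size / 4096 - 1; /* pretend classes are 4 KiB apart */

	e.next = demo_cached[ind];
	demo_cached[ind] = &e; /* O(1) slot lookup; no size-keyed search */
	printf("8192-byte chunk cached in slot %zu\n", ind);
	return (0);
}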


@@ -49,20 +49,29 @@ struct extent_s {
qr(extent_t) cc_link;
union {
/* Linkage for the size/address-ordered tree. */
rb_node(extent_t) szad_link;
/* Linkage for per size class address-ordered heaps. */
phn(extent_t) ph_link;
/* Linkage for arena's achunks, huge, and node_cache lists. */
ql_elm(extent_t) ql_link;
};
};
typedef rb_tree(extent_t) extent_tree_t;
typedef ph(extent_t) extent_heap_t;
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
rb_proto(, extent_tree_szad_, extent_tree_t, extent_t)
#ifdef JEMALLOC_JET
typedef size_t (extent_size_quantize_t)(size_t);
extern extent_size_quantize_t *extent_size_quantize_floor;
extern extent_size_quantize_t *extent_size_quantize_ceil;
#else
size_t extent_size_quantize_floor(size_t size);
size_t extent_size_quantize_ceil(size_t size);
#endif
ph_proto(, extent_heap_, extent_heap_t, extent_t)
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
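This header hunk does two things: the rb_node linkage in the union becomes a phn() pairing-heap linkage, and the quantize functions are exported through the JEMALLOC_JET indirection, which in testing builds routes calls through writable function pointers so the new unit test can call (or intercept) them. A rough standalone rendition of that pattern, with invented demo_* names and a hard-coded 4 KiB page:

#include <stdio.h>
#include <stddef.h>

typedef size_t (demo_quantize_t)(size_t);

static size_t
n_demo_quantize_floor(size_t size)
{
	return (size & ~((size_t)4096 - 1)); /* round down to a 4 KiB page */
}

/* Tests may read or repoint this; regular code calls through it. */
demo_quantize_t *demo_quantize_floor = n_demo_quantize_floor;

int
main(void)
{
	printf("%zu\n", demo_quantize_floor(10000)); /* prints 8192 */
	return (0);
}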


@@ -222,27 +222,10 @@ extent_prof_tctx_set
extent_retained_get
extent_size_get
extent_size_set
extent_size_quantize_ceil
extent_size_quantize_floor
extent_slab_get
extent_slab_set
extent_tree_szad_destroy
extent_tree_szad_destroy_recurse
extent_tree_szad_empty
extent_tree_szad_first
extent_tree_szad_insert
extent_tree_szad_iter
extent_tree_szad_iter_recurse
extent_tree_szad_iter_start
extent_tree_szad_last
extent_tree_szad_new
extent_tree_szad_next
extent_tree_szad_nsearch
extent_tree_szad_prev
extent_tree_szad_psearch
extent_tree_szad_remove
extent_tree_szad_reverse_iter
extent_tree_szad_reverse_iter_recurse
extent_tree_szad_reverse_iter_start
extent_tree_szad_search
extent_zeroed_get
extent_zeroed_set
ffs_llu


@@ -3411,10 +3411,6 @@ arena_new(tsdn_t *tsdn, unsigned ind)
arena->nactive = 0;
arena->ndirty = 0;
for (i = 0; i < sizeof(arena->runs_avail) / sizeof(arena_run_heap_t);
i++)
arena_run_heap_new(&arena->runs_avail[i]);
qr_new(&arena->runs_dirty, rd_link);
qr_new(&arena->chunks_cache, cc_link);
@@ -3426,8 +3422,11 @@ arena_new(tsdn_t *tsdn, unsigned ind)
WITNESS_RANK_ARENA_HUGE))
return (NULL);
extent_tree_szad_new(&arena->chunks_szad_cached);
extent_tree_szad_new(&arena->chunks_szad_retained);
for (i = 0; i < NPSIZES; i++) {
extent_heap_new(&arena->chunks_cached[i]);
extent_heap_new(&arena->chunks_retained[i]);
}
if (malloc_mutex_init(&arena->chunks_mtx, "arena_chunks",
WITNESS_RANK_ARENA_CHUNKS))
return (NULL);
@@ -3450,6 +3449,9 @@ arena_new(tsdn_t *tsdn, unsigned ind)
memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
}
for (i = 0; i < NPSIZES; i++)
arena_run_heap_new(&arena->runs_avail[i]);
return (arena);
}


@@ -5,7 +5,7 @@
/* Data. */
static malloc_mutex_t base_mtx;
static extent_tree_t base_avail_szad;
static extent_heap_t base_avail[NSIZES];
static extent_t *base_extents;
static size_t base_allocated;
static size_t base_resident;
@@ -79,9 +79,9 @@ void *
base_alloc(tsdn_t *tsdn, size_t size)
{
void *ret;
size_t csize, usize;
size_t csize;
szind_t i;
extent_t *extent;
extent_t key;
/*
* Round size up to nearest multiple of the cacheline size, so that
@@ -89,14 +89,16 @@ base_alloc(tsdn_t *tsdn, size_t size)
*/
csize = CACHELINE_CEILING(size);
usize = s2u(csize);
extent_init(&key, NULL, NULL, usize, false, false, false, false);
extent = NULL;
malloc_mutex_lock(tsdn, &base_mtx);
extent = extent_tree_szad_nsearch(&base_avail_szad, &key);
for (i = size2index(csize); i < NSIZES; i++) {
extent = extent_heap_remove_first(&base_avail[i]);
if (extent != NULL) {
/* Use existing space. */
extent_tree_szad_remove(&base_avail_szad, extent);
} else {
break;
}
}
if (extent == NULL) {
/* Try to allocate more space. */
extent = base_chunk_alloc(tsdn, csize);
}
@@ -107,9 +109,16 @@ base_alloc(tsdn_t *tsdn, size_t size)
ret = extent_addr_get(extent);
if (extent_size_get(extent) > csize) {
szind_t index_floor;
extent_addr_set(extent, (void *)((uintptr_t)ret + csize));
extent_size_set(extent, extent_size_get(extent) - csize);
extent_tree_szad_insert(&base_avail_szad, extent);
/*
* Compute the index for the largest size class that does not
* exceed extent's size.
*/
index_floor = size2index(extent_size_get(extent) + 1) - 1;
extent_heap_insert(&base_avail[index_floor], extent);
} else
base_extent_dalloc(tsdn, extent);
if (config_stats) {
@@ -143,10 +152,12 @@ base_stats_get(tsdn_t *tsdn, size_t *allocated, size_t *resident,
bool
base_boot(void)
{
szind_t i;
if (malloc_mutex_init(&base_mtx, "base", WITNESS_RANK_BASE))
return (true);
extent_tree_szad_new(&base_avail_szad);
for (i = 0; i < NSIZES; i++)
extent_heap_new(&base_avail[i]);
base_extents = NULL;
return (false);
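base_alloc's lookup changes from one nsearch on a size/address tree to a scan over per-class heaps: start at size2index(csize), take the first non-empty heap, and if the chosen extent is larger than needed, push the tail back under the largest class that does not exceed the remainder, which is what size2index(size + 1) - 1 computes. A self-contained approximation of that flow, with invented demo_* names and plain lists standing in for the heaps:

#include <stdio.h>
#include <stddef.h>

#define DEMO_NSIZES 4
/* Pretend size classes: 4 KiB, 8 KiB, 16 KiB, 32 KiB. */
static const size_t demo_class[DEMO_NSIZES] = {4096, 8192, 16384, 32768};

typedef struct demo_blk_s demo_blk_t;
struct demo_blk_s {
	size_t size;
	demo_blk_t *next;
};

static demo_blk_t *demo_avail[DEMO_NSIZES];

/* Smallest class index whose size is >= size, like size2index(). */
static int
demo_ind_ceil(size_t size)
{
	int i;

	for (i = 0; i < DEMO_NSIZES; i++) {
		if (demo_class[i] >= size)
			return (i);
	}
	return (DEMO_NSIZES);
}

/* Largest class not exceeding size: the diff's size2index(size + 1) - 1. */
static int
demo_ind_floor(size_t size)
{
	return (demo_ind_ceil(size + 1) - 1);
}

static demo_blk_t *
demo_base_alloc(size_t csize)
{
	demo_blk_t *blk = NULL;
	int i;

	for (i = demo_ind_ceil(csize); i < DEMO_NSIZES; i++) {
		if (demo_avail[i] != NULL) {
			/* Use existing space. */
			blk = demo_avail[i];
			demo_avail[i] = blk->next;
			break;
		}
	}
	if (blk == NULL)
		return (NULL); /* The real code maps a new chunk here. */
	if (blk->size > csize) {
		/* Split, and recycle the tail under its floor class. */
		demo_blk_t *tail = (demo_blk_t *)((char *)blk + csize);
		int fi;

		tail->size = blk->size - csize;
		fi = demo_ind_floor(tail->size);
		tail->next = demo_avail[fi];
		demo_avail[fi] = tail;
		blk->size = csize;
	}
	return (blk);
}

int
main(void)
{
	static size_t pool[32768 / sizeof(size_t)]; /* size_t-aligned pool */
	demo_blk_t *all = (demo_blk_t *)pool, *b;

	all->size = sizeof(pool);
	all->next = NULL;
	demo_avail[demo_ind_floor(all->size)] = all;
	b = demo_base_alloc(8192);
	printf("got %zu bytes; 24576-byte tail recycled under class 16384\n",
	    b != NULL ? b->size : (size_t)0);
	return (0);
}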


@@ -50,11 +50,27 @@ const chunk_hooks_t chunk_hooks_default = {
*/
static void chunk_record(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, extent_tree_t *chunks_szad, bool cache,
chunk_hooks_t *chunk_hooks, extent_heap_t extent_heaps[NPSIZES], bool cache,
void *chunk, size_t size, bool zeroed, bool committed);
/******************************************************************************/
static void
extent_heaps_insert(extent_heap_t extent_heaps[NPSIZES], extent_t *extent)
{
size_t psz = extent_size_quantize_floor(extent_size_get(extent));
pszind_t pind = psz2ind(psz);
extent_heap_insert(&extent_heaps[pind], extent);
}
static void
extent_heaps_remove(extent_heap_t extent_heaps[NPSIZES], extent_t *extent)
{
size_t psz = extent_size_quantize_floor(extent_size_get(extent));
pszind_t pind = psz2ind(psz);
extent_heap_remove(&extent_heaps[pind], extent);
}
static chunk_hooks_t
chunk_hooks_get_locked(arena_t *arena)
{
@@ -245,14 +261,21 @@ chunk_reregister(tsdn_t *tsdn, const extent_t *extent)
* fits.
*/
static extent_t *
chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad, size_t size)
chunk_first_best_fit(arena_t *arena, extent_heap_t extent_heaps[NPSIZES],
size_t size)
{
extent_t key;
pszind_t pind, i;
assert(size == CHUNK_CEILING(size));
extent_init(&key, arena, NULL, size, false, false, false, false);
return (extent_tree_szad_nsearch(chunks_szad, &key));
pind = psz2ind(extent_size_quantize_ceil(size));
for (i = pind; i < NPSIZES; i++) {
extent_t *extent = extent_heap_first(&extent_heaps[i]);
if (extent != NULL)
return (extent);
}
return (NULL);
}
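chunk_first_best_fit no longer needs a tree successor search: it computes the ceiling class for the request via extent_size_quantize_ceil/psz2ind and walks upward to the first non-empty heap, whose first element is the lowest-addressed extent of that class (the heaps are address-ordered). A toy model of the scan, with invented demo_* names and a single slot per class:

#include <stdio.h>
#include <stddef.h>

#define DEMO_NPSIZES 4

typedef struct demo_ext_s demo_ext_t;
struct demo_ext_s {
	void *addr;
	size_t size;
};

/* One cached extent per class is enough for the demo. */
static demo_ext_t *demo_heaps[DEMO_NPSIZES];

/*
 * First fit over quantized classes: start at the ceiling class for the
 * request and return the first extent found at or above it.
 */
static demo_ext_t *
demo_first_best_fit(int pind)
{
	int i;

	for (i = pind; i < DEMO_NPSIZES; i++) {
		if (demo_heaps[i] != NULL)
			return (demo_heaps[i]);
	}
	return (NULL);
}

int
main(void)
{
	demo_ext_t big = {(void *)0x4000, 16384};
	demo_ext_t *e;

	demo_heaps[3] = &big; /* only a large extent is cached */
	e = demo_first_best_fit(1); /* request whose ceiling class is 1 */
	printf("fit: %zu bytes\n", e != NULL ? e->size : (size_t)0);
	return (0);
}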
static void
@@ -272,8 +295,8 @@ chunk_leak(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, bool cache,
static void *
chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
extent_tree_t *chunks_szad, bool cache, void *new_addr, size_t size,
size_t alignment, bool *zero, bool *commit, bool dalloc_extent)
extent_heap_t extent_heaps[NPSIZES], bool cache, void *new_addr,
size_t size, size_t alignment, bool *zero, bool *commit, bool dalloc_extent)
{
void *ret;
extent_t *extent;
@@ -309,7 +332,7 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
} else
extent = NULL;
} else
extent = chunk_first_best_fit(arena, chunks_szad, alloc_size);
extent = chunk_first_best_fit(arena, extent_heaps, alloc_size);
if (extent == NULL || (new_addr != NULL && extent_size_get(extent) <
size)) {
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
@@ -334,9 +357,9 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
return (NULL);
}
/* Remove extent from the tree. */
/* Remove extent from the heap. */
chunk_deregister(tsdn, extent);
extent_tree_szad_remove(chunks_szad, extent);
extent_heaps_remove(extent_heaps, extent);
arena_chunk_cache_maybe_remove(arena, extent, cache);
if (leadsize != 0) {
/* Insert the leading space as a smaller chunk. */
@@ -346,7 +369,7 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
extent_addr_get(extent), extent_size_get(extent));
arena_extent_dalloc(tsdn, arena, extent);
} else {
extent_tree_szad_insert(chunks_szad, extent);
extent_heaps_insert(extent_heaps, extent);
arena_chunk_cache_maybe_insert(arena, extent, cache);
}
extent = NULL;
@@ -358,7 +381,7 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
if (dalloc_extent && extent != NULL)
arena_extent_dalloc(tsdn, arena, extent);
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
chunk_record(tsdn, arena, chunk_hooks, chunks_szad,
chunk_record(tsdn, arena, chunk_hooks, extent_heaps,
cache, ret, size + trailsize, zeroed, committed);
return (NULL);
}
@@ -368,7 +391,7 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
if (extent == NULL) {
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
chunk_record(tsdn, arena, chunk_hooks,
chunks_szad, cache, ret, size + trailsize,
extent_heaps, cache, ret, size + trailsize,
zeroed, committed);
return (NULL);
}
@@ -380,14 +403,14 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
extent_addr_get(extent), extent_size_get(extent));
arena_extent_dalloc(tsdn, arena, extent);
} else {
extent_tree_szad_insert(chunks_szad, extent);
extent_heaps_insert(extent_heaps, extent);
arena_chunk_cache_maybe_insert(arena, extent, cache);
}
extent = NULL;
}
if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) {
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
chunk_record(tsdn, arena, chunk_hooks, chunks_szad, cache, ret,
chunk_record(tsdn, arena, chunk_hooks, extent_heaps, cache, ret,
size, zeroed, committed);
return (NULL);
}
@@ -480,9 +503,8 @@ chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
assert((alignment & chunksize_mask) == 0);
commit = true;
ret = chunk_recycle(tsdn, arena, chunk_hooks,
&arena->chunks_szad_cached, true, new_addr, size, alignment, zero,
&commit, dalloc_extent);
ret = chunk_recycle(tsdn, arena, chunk_hooks, arena->chunks_cached,
true, new_addr, size, alignment, zero, &commit, dalloc_extent);
if (ret == NULL)
return (NULL);
assert(commit);
@@ -532,9 +554,8 @@ chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
assert(alignment != 0);
assert((alignment & chunksize_mask) == 0);
ret = chunk_recycle(tsdn, arena, chunk_hooks,
&arena->chunks_szad_retained, false, new_addr, size, alignment,
zero, commit, true);
ret = chunk_recycle(tsdn, arena, chunk_hooks, arena->chunks_retained,
false, new_addr, size, alignment, zero, commit, true);
if (config_stats && ret != NULL)
arena->stats.retained -= size;
@@ -583,7 +604,7 @@ chunk_can_coalesce(const extent_t *a, const extent_t *b)
static void
chunk_try_coalesce(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
extent_t *a, extent_t *b, extent_tree_t *chunks_szad, bool cache)
extent_t *a, extent_t *b, extent_heap_t extent_heaps[NPSIZES], bool cache)
{
rtree_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b;
@@ -613,8 +634,8 @@ chunk_try_coalesce(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
} else
b_elm_b = b_elm_a;
extent_tree_szad_remove(chunks_szad, a);
extent_tree_szad_remove(chunks_szad, b);
extent_heaps_remove(extent_heaps, a);
extent_heaps_remove(extent_heaps, b);
arena_chunk_cache_maybe_remove(extent_arena_get(a), a, cache);
arena_chunk_cache_maybe_remove(extent_arena_get(b), b, cache);
@@ -622,7 +643,7 @@ chunk_try_coalesce(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
extent_size_set(a, extent_size_get(a) + extent_size_get(b));
extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b));
extent_tree_szad_insert(chunks_szad, a);
extent_heaps_insert(extent_heaps, a);
extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a);
extent_rtree_release(tsdn, a_elm_a, b_elm_b);
@@ -634,7 +655,7 @@ chunk_try_coalesce(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
static void
chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
extent_tree_t *chunks_szad, bool cache, void *chunk, size_t size,
extent_heap_t extent_heaps[NPSIZES], bool cache, void *chunk, size_t size,
bool zeroed, bool committed)
{
extent_t *extent, *prev, *next;
@@ -657,7 +678,7 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
chunk_leak(tsdn, arena, chunk_hooks, cache, chunk, size);
goto label_return;
}
extent_tree_szad_insert(chunks_szad, extent);
extent_heaps_insert(extent_heaps, extent);
arena_chunk_cache_maybe_insert(arena, extent, cache);
/* Try to coalesce forward. */
@@ -665,7 +686,7 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
CHUNK_CEILING((uintptr_t)extent_past_get(extent)), false);
if (next != NULL) {
chunk_try_coalesce(tsdn, arena, chunk_hooks, extent, next,
chunks_szad, cache);
extent_heaps, cache);
}
/* Try to coalesce backward. */
@@ -673,7 +694,7 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
(uintptr_t)extent_addr_get(extent) - chunksize, false);
if (prev != NULL) {
chunk_try_coalesce(tsdn, arena, chunk_hooks, prev, extent,
chunks_szad, cache);
extent_heaps, cache);
}
label_return:
@@ -690,7 +711,7 @@ chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
assert(size != 0);
assert((size & chunksize_mask) == 0);
chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szad_cached, true,
chunk_record(tsdn, arena, chunk_hooks, arena->chunks_cached, true,
chunk, size, false, committed);
arena_maybe_purge(tsdn, arena);
}
@@ -726,8 +747,8 @@ chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
}
zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size,
arena->ind);
chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szad_retained,
false, chunk, size, zeroed, committed);
chunk_record(tsdn, arena, chunk_hooks, arena->chunks_retained, false,
chunk, size, zeroed, committed);
if (config_stats)
arena->stats.retained += size;
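Coalescing keeps its structure, but a merge now spans heaps: remove both neighbors from their (possibly different) size-class heaps, sum the sizes, and reinsert the union under its new class. A toy remove-merge-reinsert with invented demo_* names, lists in place of heaps, and a fake 4 KiB class spacing:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define DEMO_NPSIZES 8

typedef struct demo_ext_s demo_ext_t;
struct demo_ext_s {
	uintptr_t addr;
	size_t size;
	demo_ext_t *next;
};

static demo_ext_t *demo_heaps[DEMO_NPSIZES];

static size_t
demo_psz2ind(size_t size)
{
	return (size / 4096 - 1); /* pretend classes are 4 KiB apart */
}

static void
demo_heaps_insert(demo_ext_t *e)
{
	size_t i = demo_psz2ind(e->size);

	e->next = demo_heaps[i];
	demo_heaps[i] = e;
}

static void
demo_heaps_remove(demo_ext_t *e)
{
	demo_ext_t **p = &demo_heaps[demo_psz2ind(e->size)];

	while (*p != e) /* assumes e is present, as in the demo */
		p = &(*p)->next;
	*p = e->next;
}

/* Merge b into a when they abut; the union usually changes class. */
static void
demo_try_coalesce(demo_ext_t *a, demo_ext_t *b)
{
	if (a->addr + a->size != b->addr)
		return; /* not contiguous */
	demo_heaps_remove(a);
	demo_heaps_remove(b);
	a->size += b->size;
	demo_heaps_insert(a);
}

int
main(void)
{
	demo_ext_t a = {0x10000, 4096, NULL}, b = {0x11000, 4096, NULL};

	demo_heaps_insert(&a);
	demo_heaps_insert(&b);
	demo_try_coalesce(&a, &b);
	printf("merged: %zu bytes in class %zu\n", a.size,
	    demo_psz2ind(a.size));
	return (0);
}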


@@ -3,39 +3,86 @@
/******************************************************************************/
JEMALLOC_INLINE_C size_t
extent_quantize(size_t size)
#ifdef JEMALLOC_JET
#undef extent_size_quantize_floor
#define extent_size_quantize_floor JEMALLOC_N(n_extent_size_quantize_floor)
#endif
size_t
extent_size_quantize_floor(size_t size)
{
size_t ret;
pszind_t pind;
assert(size > 0);
assert(size <= HUGE_MAXCLASS);
assert((size & PAGE_MASK) == 0);
assert(size != 0);
assert(size == PAGE_CEILING(size));
pind = psz2ind(size - large_pad + 1);
if (pind == 0) {
/*
* Round down to the nearest chunk size that can actually be requested
* during normal huge allocation.
* Avoid underflow. This short-circuit would also do the right
* thing for all sizes in the range for which there are
* PAGE-spaced size classes, but it's simplest to just handle
* the one case that would cause erroneous results.
*/
return (index2size(size2index(size + 1) - 1));
return (size);
}
ret = pind2sz(pind - 1) + large_pad;
assert(ret <= size);
return (ret);
}
#ifdef JEMALLOC_JET
#undef extent_size_quantize_floor
#define extent_size_quantize_floor JEMALLOC_N(extent_size_quantize_floor)
extent_size_quantize_t *extent_size_quantize_floor =
JEMALLOC_N(n_extent_size_quantize_floor);
#endif
#ifdef JEMALLOC_JET
#undef extent_size_quantize_ceil
#define extent_size_quantize_ceil JEMALLOC_N(n_extent_size_quantize_ceil)
#endif
size_t
extent_size_quantize_ceil(size_t size)
{
size_t ret;
assert(size > 0);
assert(size <= HUGE_MAXCLASS);
assert((size & PAGE_MASK) == 0);
ret = extent_size_quantize_floor(size);
if (ret < size) {
/*
* Skip a quantization that may have an adequately large extent,
* because under-sized extents may be mixed in. This only
* happens when an unusual size is requested, i.e. for aligned
* allocation, and is just one of several places where linear
* search would potentially find sufficiently aligned available
* memory somewhere lower.
*/
ret = pind2sz(psz2ind(ret - large_pad + 1)) + large_pad;
}
return (ret);
}
#ifdef JEMALLOC_JET
#undef extent_size_quantize_ceil
#define extent_size_quantize_ceil JEMALLOC_N(extent_size_quantize_ceil)
extent_size_quantize_t *extent_size_quantize_ceil =
JEMALLOC_N(n_extent_size_quantize_ceil);
#endif
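To make the floor/ceil pair concrete: with page size classes spaced like jemalloc's (four per doubling: 4K, 8K, 12K, 16K, 20K, 24K, 28K, 32K, 40K, ...), a 36 KiB extent floors to 32 KiB and ceils to 40 KiB; large_pad is ignored here. A tiny table-driven model with invented demo_* names:

#include <stdio.h>
#include <stddef.h>

/* A slice of jemalloc-shaped page size classes, in KiB for readability. */
static const size_t demo_classes[] =
    {4, 8, 12, 16, 20, 24, 28, 32, 40, 48, 56, 64};
#define DEMO_NCLASSES (sizeof(demo_classes) / sizeof(demo_classes[0]))

static size_t
demo_quantize_floor(size_t kb)
{
	size_t i, best = demo_classes[0];

	for (i = 0; i < DEMO_NCLASSES && demo_classes[i] <= kb; i++)
		best = demo_classes[i];
	return (best);
}

static size_t
demo_quantize_ceil(size_t kb)
{
	size_t i;

	for (i = 0; i < DEMO_NCLASSES; i++) {
		if (demo_classes[i] >= kb)
			return (demo_classes[i]);
	}
	return (0); /* out of the demo's range */
}

int
main(void)
{
	/* 36 KiB is not itself a class: it floors to 32 and ceils to 40. */
	printf("floor(36K)=%zuK ceil(36K)=%zuK\n", demo_quantize_floor(36),
	    demo_quantize_ceil(36));
	return (0);
}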
JEMALLOC_INLINE_C int
extent_szad_comp(const extent_t *a, const extent_t *b)
extent_ad_comp(const extent_t *a, const extent_t *b)
{
int ret;
size_t a_qsize = extent_quantize(extent_size_get(a));
size_t b_qsize = extent_quantize(extent_size_get(b));
/*
* Compare based on quantized size rather than size, in order to sort
* equally useful extents only by address.
*/
ret = (a_qsize > b_qsize) - (a_qsize < b_qsize);
if (ret == 0) {
uintptr_t a_addr = (uintptr_t)extent_addr_get(a);
uintptr_t b_addr = (uintptr_t)extent_addr_get(b);
ret = (a_addr > b_addr) - (a_addr < b_addr);
return ((a_addr > b_addr) - (a_addr < b_addr));
}
return (ret);
}
/* Generate red-black tree functions. */
rb_gen(, extent_tree_szad_, extent_tree_t, extent_t, szad_link,
extent_szad_comp)
/* Generate pairing heap functions. */
ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_ad_comp)
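The comparator drops the quantized-size key entirely: within one heap every extent shares a size class, so address order alone suffices, and the (a > b) - (a < b) idiom gives a branch-free -1/0/1 without the sign bugs that truncating a raw subtraction can introduce. For example:

#include <assert.h>
#include <stdint.h>

/* Three-way compare: evaluates to exactly -1, 0, or 1. */
static int
demo_ad_comp(uintptr_t a, uintptr_t b)
{
	return ((a > b) - (a < b));
}

int
main(void)
{
	assert(demo_ad_comp(0x1000, 0x2000) == -1);
	assert(demo_ad_comp(0x2000, 0x2000) == 0);
	/* (int)(a - b) here is typically -2, i.e. the wrong sign. */
	assert(demo_ad_comp(UINTPTR_MAX, 0x1) == 1);
	return (0);
}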

test/unit/extent_quantize.c (new file, 155 lines)

@@ -0,0 +1,155 @@
#include "test/jemalloc_test.h"
TEST_BEGIN(test_small_extent_size)
{
unsigned nbins, i;
size_t sz, extent_size;
size_t mib[4];
size_t miblen = sizeof(mib) / sizeof(size_t);
/*
* Iterate over all small size classes, get their extent sizes, and
* verify that the quantized size is the same as the extent size.
*/
sz = sizeof(unsigned);
assert_d_eq(mallctl("arenas.nbins", &nbins, &sz, NULL, 0), 0,
"Unexpected mallctl failure");
assert_d_eq(mallctlnametomib("arenas.bin.0.run_size", mib, &miblen), 0,
"Unexpected mallctlnametomib failure");
for (i = 0; i < nbins; i++) {
mib[2] = i;
sz = sizeof(size_t);
assert_d_eq(mallctlbymib(mib, miblen, &extent_size, &sz, NULL,
0), 0, "Unexpected mallctlbymib failure");
assert_zu_eq(extent_size,
extent_size_quantize_floor(extent_size),
"Small extent quantization should be a no-op "
"(extent_size=%zu)", extent_size);
assert_zu_eq(extent_size,
extent_size_quantize_ceil(extent_size),
"Small extent quantization should be a no-op "
"(extent_size=%zu)", extent_size);
}
}
TEST_END
TEST_BEGIN(test_large_extent_size)
{
bool cache_oblivious;
unsigned nlruns, i;
size_t sz, extent_size_prev, ceil_prev;
size_t mib[4];
size_t miblen = sizeof(mib) / sizeof(size_t);
/*
* Iterate over all large size classes, get their extent sizes, and
* verify that the quantized size is the same as the extent size.
*/
sz = sizeof(bool);
assert_d_eq(mallctl("config.cache_oblivious", &cache_oblivious, &sz,
NULL, 0), 0, "Unexpected mallctl failure");
sz = sizeof(unsigned);
assert_d_eq(mallctl("arenas.nlruns", &nlruns, &sz, NULL, 0), 0,
"Unexpected mallctl failure");
assert_d_eq(mallctlnametomib("arenas.lrun.0.size", mib, &miblen), 0,
"Unexpected mallctlnametomib failure");
for (i = 0; i < nlruns; i++) {
size_t lextent_size, extent_size, floor, ceil;
mib[2] = i;
sz = sizeof(size_t);
assert_d_eq(mallctlbymib(mib, miblen, &lextent_size, &sz, NULL,
0), 0, "Unexpected mallctlbymib failure");
extent_size = cache_oblivious ? lextent_size + PAGE :
lextent_size;
floor = extent_size_quantize_floor(extent_size);
ceil = extent_size_quantize_ceil(extent_size);
assert_zu_eq(extent_size, floor,
"Large run quantization should be a no-op for precise "
"size (lextent_size=%zu, extent_size=%zu)", lextent_size,
extent_size);
assert_zu_eq(extent_size, ceil,
"Large run quantization should be a no-op for precise "
"size (lextent_size=%zu, extent_size=%zu)", lextent_size,
extent_size);
if (i > 0) {
assert_zu_eq(extent_size_prev,
extent_size_quantize_floor(extent_size - PAGE),
"Floor should be a precise size");
if (extent_size_prev < ceil_prev) {
assert_zu_eq(ceil_prev, extent_size,
"Ceiling should be a precise size "
"(extent_size_prev=%zu, ceil_prev=%zu, "
"extent_size=%zu)", extent_size_prev,
ceil_prev, extent_size);
}
}
extent_size_prev = floor;
ceil_prev = extent_size_quantize_ceil(extent_size + PAGE);
}
}
TEST_END
TEST_BEGIN(test_monotonic)
{
unsigned nbins, nlruns, i;
size_t sz, floor_prev, ceil_prev;
/*
* Iterate over all extent sizes and verify that
* extent_size_quantize_{floor,ceil}() are monotonic.
*/
sz = sizeof(unsigned);
assert_d_eq(mallctl("arenas.nbins", &nbins, &sz, NULL, 0), 0,
"Unexpected mallctl failure");
sz = sizeof(unsigned);
assert_d_eq(mallctl("arenas.nlruns", &nlruns, &sz, NULL, 0), 0,
"Unexpected mallctl failure");
floor_prev = 0;
ceil_prev = 0;
for (i = 1; i <= large_maxclass >> LG_PAGE; i++) {
size_t extent_size, floor, ceil;
extent_size = i << LG_PAGE;
floor = extent_size_quantize_floor(extent_size);
ceil = extent_size_quantize_ceil(extent_size);
assert_zu_le(floor, extent_size,
"Floor should be <= (floor=%zu, extent_size=%zu, ceil=%zu)",
floor, extent_size, ceil);
assert_zu_ge(ceil, extent_size,
"Ceiling should be >= (floor=%zu, extent_size=%zu, "
"ceil=%zu)", floor, extent_size, ceil);
assert_zu_le(floor_prev, floor, "Floor should be monotonic "
"(floor_prev=%zu, floor=%zu, extent_size=%zu, ceil=%zu)",
floor_prev, floor, extent_size, ceil);
assert_zu_le(ceil_prev, ceil, "Ceiling should be monotonic "
"(floor=%zu, extent_size=%zu, ceil_prev=%zu, ceil=%zu)",
floor, extent_size, ceil_prev, ceil);
floor_prev = floor;
ceil_prev = ceil;
}
}
TEST_END
int
main(void)
{
return (test(
test_small_extent_size,
test_large_extent_size,
test_monotonic));
}