Move page quantization to sz module.

David T. Goldblatt, 2019-09-20 23:54:57 -07:00 (committed by David Goldblatt)
parent 63d1b7a7a7
commit 820f070c6b
4 changed files with 64 additions and 66 deletions

include/jemalloc/internal/sz.h

@@ -315,4 +315,7 @@ sz_sa2u(size_t size, size_t alignment) {
     return usize;
 }
 
+size_t sz_psz_quantize_floor(size_t size);
+size_t sz_psz_quantize_ceil(size_t size);
+
 #endif /* JEMALLOC_INTERNAL_SIZE_H */

src/extent.c

@@ -249,59 +249,6 @@ extent_hooks_assure_initialized(arena_t *arena,
     }
 }
 
-#ifndef JEMALLOC_JET
-static
-#endif
-size_t
-extent_size_quantize_floor(size_t size) {
-    size_t ret;
-    pszind_t pind;
-
-    assert(size > 0);
-    assert((size & PAGE_MASK) == 0);
-
-    pind = sz_psz2ind(size - sz_large_pad + 1);
-    if (pind == 0) {
-        /*
-         * Avoid underflow.  This short-circuit would also do the right
-         * thing for all sizes in the range for which there are
-         * PAGE-spaced size classes, but it's simplest to just handle
-         * the one case that would cause erroneous results.
-         */
-        return size;
-    }
-    ret = sz_pind2sz(pind - 1) + sz_large_pad;
-    assert(ret <= size);
-    return ret;
-}
-
-#ifndef JEMALLOC_JET
-static
-#endif
-size_t
-extent_size_quantize_ceil(size_t size) {
-    size_t ret;
-
-    assert(size > 0);
-    assert(size - sz_large_pad <= SC_LARGE_MAXCLASS);
-    assert((size & PAGE_MASK) == 0);
-
-    ret = extent_size_quantize_floor(size);
-    if (ret < size) {
-        /*
-         * Skip a quantization that may have an adequately large extent,
-         * because under-sized extents may be mixed in.  This only
-         * happens when an unusual size is requested, i.e. for aligned
-         * allocation, and is just one of several places where linear
-         * search would potentially find sufficiently aligned available
-         * memory somewhere lower.
-         */
-        ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) +
-            sz_large_pad;
-    }
-    return ret;
-}
-
 /* Generate pairing heap functions. */
 ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp)
@@ -342,7 +289,7 @@ extents_insert_locked(tsdn_t *tsdn, eset_t *eset, extent_t *extent) {
     assert(extent_state_get(extent) == eset->state);
 
     size_t size = extent_size_get(extent);
-    size_t psz = extent_size_quantize_floor(size);
+    size_t psz = sz_psz_quantize_floor(size);
     pszind_t pind = sz_psz2ind(psz);
     if (extent_heap_empty(&eset->heaps[pind])) {
         bitmap_unset(eset->bitmap, &eset_bitmap_info,
@@ -373,7 +320,7 @@ extents_remove_locked(tsdn_t *tsdn, eset_t *eset, extent_t *extent) {
     assert(extent_state_get(extent) == eset->state);
 
     size_t size = extent_size_get(extent);
-    size_t psz = extent_size_quantize_floor(size);
+    size_t psz = sz_psz_quantize_floor(size);
     pszind_t pind = sz_psz2ind(psz);
 
     extent_heap_remove(&eset->heaps[pind], extent);
@@ -405,8 +352,8 @@ extents_remove_locked(tsdn_t *tsdn, eset_t *eset, extent_t *extent) {
 static extent_t *
 extents_fit_alignment(eset_t *eset, size_t min_size, size_t max_size,
     size_t alignment) {
-    pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(min_size));
-    pszind_t pind_max = sz_psz2ind(extent_size_quantize_ceil(max_size));
+    pszind_t pind = sz_psz2ind(sz_psz_quantize_ceil(min_size));
+    pszind_t pind_max = sz_psz2ind(sz_psz_quantize_ceil(max_size));
 
     for (pszind_t i = (pszind_t)bitmap_ffu(eset->bitmap,
         &eset_bitmap_info, (size_t)pind); i < pind_max; i =
@@ -444,7 +391,7 @@ extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, eset_t *eset,
     size_t size) {
     extent_t *ret = NULL;
 
-    pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
+    pszind_t pind = sz_psz2ind(sz_psz_quantize_ceil(size));
     if (!maps_coalesce && !opt_retain) {
         /*
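
The renamed call sites preserve the existing division of labor: extents_insert_locked and extents_remove_locked bin an extent under its floor-quantized size, while the lookup paths (extents_fit_alignment, extents_first_fit_locked) start scanning from the ceil-quantized size, so a first-fit search never begins in a bin whose class is too small for the request. Below is a minimal standalone sketch of that invariant, using a made-up PAGE-spaced class table in place of jemalloc's generated psz classes; the toy_* names are illustrative, not jemalloc APIs.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define TOY_PAGE ((size_t)4096)

/*
 * Hypothetical page-multiple class table; jemalloc's real psz classes
 * are generated from sc_data at boot.
 */
static const size_t toy_classes[] = {
    1 * TOY_PAGE, 2 * TOY_PAGE, 3 * TOY_PAGE,
    4 * TOY_PAGE, 6 * TOY_PAGE, 8 * TOY_PAGE,
};
#define TOY_NCLASSES (sizeof(toy_classes) / sizeof(toy_classes[0]))

/* Index of the smallest class >= size (analog of sz_psz2ind). */
static size_t
toy_psz2ind(size_t size) {
    for (size_t i = 0; i < TOY_NCLASSES; i++) {
        if (toy_classes[i] >= size) {
            return i;
        }
    }
    return TOY_NCLASSES;
}

/* Largest class <= size (analog of sz_psz_quantize_floor). */
static size_t
toy_quantize_floor(size_t size) {
    size_t ind = toy_psz2ind(size + 1); /* Smallest class > size. */
    return ind == 0 ? size : toy_classes[ind - 1];
}

/* Smallest class >= size (analog of sz_psz_quantize_ceil). */
static size_t
toy_quantize_ceil(size_t size) {
    return toy_classes[toy_psz2ind(size)];
}

int
main(void) {
    /* A 5-page extent is binned under the 4-page class on insert... */
    size_t extent_size = 5 * TOY_PAGE;
    size_t insert_bin = toy_psz2ind(toy_quantize_floor(extent_size));
    /*
     * ...so a 5-page request must begin its scan at the 6-page bin;
     * starting at the 4-page bin could return an under-sized extent.
     */
    size_t search_bin = toy_psz2ind(toy_quantize_ceil(extent_size));
    assert(insert_bin < search_bin);
    printf("insert bin: %zu, first-fit search starts at bin: %zu\n",
        insert_bin, search_bin);
    return 0;
}

Under this model a 5-page extent lives in the 4-page bin, but a 5-page request starts its bitmap scan at the 6-page bin and walks upward, rather than risk an under-sized hit.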

src/sz.c

@@ -4,6 +4,54 @@
 JEMALLOC_ALIGNED(CACHELINE)
 size_t sz_pind2sz_tab[SC_NPSIZES+1];
 
+size_t
+sz_psz_quantize_floor(size_t size) {
+    size_t ret;
+    pszind_t pind;
+
+    assert(size > 0);
+    assert((size & PAGE_MASK) == 0);
+
+    pind = sz_psz2ind(size - sz_large_pad + 1);
+    if (pind == 0) {
+        /*
+         * Avoid underflow.  This short-circuit would also do the right
+         * thing for all sizes in the range for which there are
+         * PAGE-spaced size classes, but it's simplest to just handle
+         * the one case that would cause erroneous results.
+         */
+        return size;
+    }
+    ret = sz_pind2sz(pind - 1) + sz_large_pad;
+    assert(ret <= size);
+    return ret;
+}
+
+size_t
+sz_psz_quantize_ceil(size_t size) {
+    size_t ret;
+
+    assert(size > 0);
+    assert(size - sz_large_pad <= SC_LARGE_MAXCLASS);
+    assert((size & PAGE_MASK) == 0);
+
+    ret = sz_psz_quantize_floor(size);
+    if (ret < size) {
+        /*
+         * Skip a quantization that may have an adequately large extent,
+         * because under-sized extents may be mixed in.  This only
+         * happens when an unusual size is requested, i.e. for aligned
+         * allocation, and is just one of several places where linear
+         * search would potentially find sufficiently aligned available
+         * memory somewhere lower.
+         */
+        ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) +
+            sz_large_pad;
+    }
+    return ret;
+}
+
 static void
 sz_boot_pind2sz_tab(const sc_data_t *sc_data) {
     int pind = 0;
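
A note on the structure of the moved code: sz_psz_quantize_ceil does not search independently; it floors first and, only when flooring lost precision, re-indexes one class upward from ret + 1. The sketch below replays that derivation over an illustrative class table and checks the bracketing and idempotence properties the test changes below rely on. The table and toy_* names are assumptions for illustration, not jemalloc's, and sz_large_pad is ignored for simplicity.

#include <assert.h>
#include <stddef.h>

#define TOY_PAGE ((size_t)4096)

/* Illustrative class table; jemalloc builds its real one in sz_boot. */
static const size_t toy_classes[] = {
    1 * TOY_PAGE, 2 * TOY_PAGE, 4 * TOY_PAGE, 8 * TOY_PAGE,
};
#define TOY_NCLASSES (sizeof(toy_classes) / sizeof(toy_classes[0]))

/* Smallest index with class size >= size (analog of sz_psz2ind). */
static size_t
toy_psz2ind(size_t size) {
    size_t i = 0;
    while (i < TOY_NCLASSES && toy_classes[i] < size) {
        i++;
    }
    return i;
}

static size_t
toy_quantize_floor(size_t size) {
    /* Index just past size, minus one: the largest class <= size. */
    size_t ind = toy_psz2ind(size + 1);
    return ind == 0 ? size : toy_classes[ind - 1];
}

static size_t
toy_quantize_ceil(size_t size) {
    size_t ret = toy_quantize_floor(size);
    if (ret < size) {
        /*
         * Flooring lost precision, so bump up one class by
         * re-indexing from ret + 1, mirroring sz_psz_quantize_ceil.
         */
        ret = toy_classes[toy_psz2ind(ret + 1)];
    }
    return ret;
}

int
main(void) {
    for (size_t s = TOY_PAGE; s <= 8 * TOY_PAGE; s += TOY_PAGE) {
        size_t f = toy_quantize_floor(s);
        size_t c = toy_quantize_ceil(s);
        assert(f <= s && s <= c);           /* Bracketing. */
        assert(toy_quantize_floor(f) == f); /* Floor is a fixed point. */
        assert(toy_quantize_ceil(c) == c);  /* Ceil is a fixed point. */
    }
    return 0;
}

For example, 3 pages floors to the 2-page class, so the bump re-indexes from 2 pages + 1 and lands on the 4-page class, which is exactly the smallest class >= 3 pages.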

test/unit/extent_quantize.c

@@ -23,11 +23,11 @@ TEST_BEGIN(test_small_extent_size) {
         assert_d_eq(mallctlbymib(mib, miblen, (void *)&extent_size, &sz,
             NULL, 0), 0, "Unexpected mallctlbymib failure");
         assert_zu_eq(extent_size,
-            extent_size_quantize_floor(extent_size),
+            sz_psz_quantize_floor(extent_size),
             "Small extent quantization should be a no-op "
             "(extent_size=%zu)", extent_size);
         assert_zu_eq(extent_size,
-            extent_size_quantize_ceil(extent_size),
+            sz_psz_quantize_ceil(extent_size),
             "Small extent quantization should be a no-op "
             "(extent_size=%zu)", extent_size);
     }
@@ -65,8 +65,8 @@ TEST_BEGIN(test_large_extent_size) {
             &sz, NULL, 0), 0, "Unexpected mallctlbymib failure");
         extent_size = cache_oblivious ? lextent_size + PAGE :
             lextent_size;
-        floor = extent_size_quantize_floor(extent_size);
-        ceil = extent_size_quantize_ceil(extent_size);
+        floor = sz_psz_quantize_floor(extent_size);
+        ceil = sz_psz_quantize_ceil(extent_size);
 
         assert_zu_eq(extent_size, floor,
             "Extent quantization should be a no-op for precise size "
@@ -79,7 +79,7 @@ TEST_BEGIN(test_large_extent_size) {
         if (i > 0) {
             assert_zu_eq(extent_size_prev,
-                extent_size_quantize_floor(extent_size - PAGE),
+                sz_psz_quantize_floor(extent_size - PAGE),
                 "Floor should be a precise size");
             if (extent_size_prev < ceil_prev) {
                 assert_zu_eq(ceil_prev, extent_size,
@@ -91,7 +91,7 @@ TEST_BEGIN(test_large_extent_size) {
         }
         if (i + 1 < nlextents) {
             extent_size_prev = floor;
-            ceil_prev = extent_size_quantize_ceil(extent_size +
+            ceil_prev = sz_psz_quantize_ceil(extent_size +
                 PAGE);
         }
     }
@@ -109,8 +109,8 @@ TEST_BEGIN(test_monotonic) {
         size_t extent_size, floor, ceil;
 
         extent_size = i << LG_PAGE;
-        floor = extent_size_quantize_floor(extent_size);
-        ceil = extent_size_quantize_ceil(extent_size);
+        floor = sz_psz_quantize_floor(extent_size);
+        ceil = sz_psz_quantize_ceil(extent_size);
 
         assert_zu_le(floor, extent_size,
             "Floor should be <= (floor=%zu, extent_size=%zu, ceil=%zu)",