Extent -> Eset: Move extent fit functions.

David T. Goldblatt 2019-09-21 09:36:22 -07:00 committed by David Goldblatt
parent 1210af9a4e
commit 77bbb35a92
3 changed files with 119 additions and 120 deletions

include/jemalloc/internal/eset.h

@@ -72,5 +72,11 @@ size_t eset_nbytes_get(eset_t *eset, pszind_t ind);
void eset_insert_locked(tsdn_t *tsdn, eset_t *eset, extent_t *extent);
void eset_remove_locked(tsdn_t *tsdn, eset_t *eset, extent_t *extent);
/*
* Select an extent from this eset of the given size and alignment. Returns
* null if no such item could be found.
*/
extent_t *eset_fit_locked(tsdn_t *tsdn, eset_t *eset, size_t esize,
size_t alignment);
#endif /* JEMALLOC_INTERNAL_ESET_H */
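For illustration, a minimal sketch of the intended calling pattern for the new declaration, assuming the caller manages eset->mtx itself (eset_fit_locked only asserts ownership); the remove step and NULL check here are hypothetical, showing the detach a caller must do before releasing the mutex so no other thread can claim the same extent:

    /* Hypothetical caller: find and detach a suitably aligned extent. */
    malloc_mutex_lock(tsdn, &eset->mtx);
    extent_t *extent = eset_fit_locked(tsdn, eset, esize, alignment);
    if (extent != NULL) {
        /* Detach while still holding the mutex. */
        eset_remove_locked(tsdn, eset, extent);
    }
    malloc_mutex_unlock(tsdn, &eset->mtx);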

src/eset.c

@@ -2,6 +2,8 @@
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/eset.h"
/* For opt_retain */
#include "jemalloc/internal/extent_mmap.h"
const bitmap_info_t eset_bitmap_info =
BITMAP_INFO_INITIALIZER(SC_NPSIZES+1);
@@ -121,3 +123,113 @@ eset_remove_locked(tsdn_t *tsdn, eset_t *eset, extent_t *extent) {
atomic_store_zu(&eset->npages,
cur_extents_npages - (size >> LG_PAGE), ATOMIC_RELAXED);
}
/*
* Find an extent with size [min_size, max_size) to satisfy the alignment
* requirement. For each size, try only the first extent in the heap.
*/
static extent_t *
eset_fit_alignment(eset_t *eset, size_t min_size, size_t max_size,
size_t alignment) {
pszind_t pind = sz_psz2ind(sz_psz_quantize_ceil(min_size));
pszind_t pind_max = sz_psz2ind(sz_psz_quantize_ceil(max_size));
for (pszind_t i = (pszind_t)bitmap_ffu(eset->bitmap,
&eset_bitmap_info, (size_t)pind); i < pind_max; i =
(pszind_t)bitmap_ffu(eset->bitmap, &eset_bitmap_info,
(size_t)i+1)) {
assert(i < SC_NPSIZES);
assert(!extent_heap_empty(&eset->heaps[i]));
extent_t *extent = extent_heap_first(&eset->heaps[i]);
uintptr_t base = (uintptr_t)extent_base_get(extent);
size_t candidate_size = extent_size_get(extent);
assert(candidate_size >= min_size);
uintptr_t next_align = ALIGNMENT_CEILING((uintptr_t)base,
PAGE_CEILING(alignment));
if (base > next_align || base + candidate_size <= next_align) {
/* Overflow or not crossing the next alignment. */
continue;
}
size_t leadsize = next_align - base;
if (candidate_size - leadsize >= min_size) {
return extent;
}
}
return NULL;
}
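/*
 * Worked example of the arithmetic above (values invented for
 * illustration): an extent with base = 0x5000 and candidate_size =
 * 0x4000, checked against alignment = 0x4000:
 *
 *   next_align = ALIGNMENT_CEILING(0x5000, 0x4000) = 0x8000
 *   leadsize   = 0x8000 - 0x5000 = 0x3000
 *
 * base + candidate_size = 0x9000 > next_align, so the extent crosses
 * the aligned address, and 0x4000 - 0x3000 = 0x1000 bytes remain
 * there; the extent is returned only if min_size <= 0x1000.
 */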
/*
* Do first-fit extent selection, i.e. select the oldest/lowest extent that is
* large enough.
*/
static extent_t *
eset_first_fit_locked(tsdn_t *tsdn, eset_t *eset, size_t size) {
extent_t *ret = NULL;
pszind_t pind = sz_psz2ind(sz_psz_quantize_ceil(size));
if (!maps_coalesce && !opt_retain) {
/*
* No split / merge allowed (Windows w/o retain). Try exact fit
* only.
*/
return extent_heap_empty(&eset->heaps[pind]) ? NULL :
extent_heap_first(&eset->heaps[pind]);
}
for (pszind_t i = (pszind_t)bitmap_ffu(eset->bitmap,
&eset_bitmap_info, (size_t)pind);
i < SC_NPSIZES + 1;
i = (pszind_t)bitmap_ffu(eset->bitmap, &eset_bitmap_info,
(size_t)i+1)) {
assert(!extent_heap_empty(&eset->heaps[i]));
extent_t *extent = extent_heap_first(&eset->heaps[i]);
assert(extent_size_get(extent) >= size);
/*
* In order to reduce fragmentation, avoid reusing and splitting
* large extents for much smaller sizes.
*
* Only do this check for dirty extents (delay_coalesce).
*/
if (eset->delay_coalesce &&
(sz_pind2sz(i) >> opt_lg_extent_max_active_fit) > size) {
break;
}
if (ret == NULL || extent_snad_comp(extent, ret) < 0) {
ret = extent;
}
if (i == SC_NPSIZES) {
break;
}
assert(i < SC_NPSIZES);
}
return ret;
}
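/*
 * Numeric sketch of the delay_coalesce guard above, assuming the
 * default opt_lg_extent_max_active_fit of 6: for a request of size =
 * 8KiB, a 256KiB size class gives 256KiB >> 6 = 4KiB <= 8KiB, so that
 * heap may be reused, while a 1MiB class gives 1MiB >> 6 = 16KiB >
 * 8KiB, so the scan breaks rather than split so large an extent
 * (classes are visited in increasing order, so later ones fail too).
 */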
extent_t *
eset_fit_locked(tsdn_t *tsdn, eset_t *eset, size_t esize, size_t alignment) {
malloc_mutex_assert_owner(tsdn, &eset->mtx);
size_t max_size = esize + PAGE_CEILING(alignment) - PAGE;
/* Beware size_t wrap-around. */
if (max_size < esize) {
return NULL;
}
extent_t *extent = eset_first_fit_locked(tsdn, eset, max_size);
if (alignment > PAGE && extent == NULL) {
/*
* max_size guarantees the alignment requirement but is rather
* pessimistic. Next we try to satisfy the aligned allocation
* with sizes in [esize, max_size).
*/
extent = eset_fit_alignment(eset, esize, max_size, alignment);
}
return extent;
}
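The max_size bound deserves a short justification for why the plain first-fit pass already guarantees an aligned fit; a sketch, writing A for PAGE_CEILING(alignment) and assuming page-aligned extent bases:

    /*
     * leadsize = next_align - base is a multiple of PAGE in [0, A - PAGE].
     * Any extent found by first fit has candidate_size >= max_size, so
     *
     *   candidate_size - leadsize >= (esize + A - PAGE) - (A - PAGE) = esize
     *
     * i.e. an aligned esize-byte region always fits inside it.
     */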

src/extent.c

@@ -251,124 +251,6 @@ extent_hooks_assure_initialized(arena_t *arena,
ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp)
/*
* Find an extent with size [min_size, max_size) to satisfy the alignment
* requirement. For each size, try only the first extent in the heap.
*/
static extent_t *
extents_fit_alignment(eset_t *eset, size_t min_size, size_t max_size,
size_t alignment) {
pszind_t pind = sz_psz2ind(sz_psz_quantize_ceil(min_size));
pszind_t pind_max = sz_psz2ind(sz_psz_quantize_ceil(max_size));
for (pszind_t i = (pszind_t)bitmap_ffu(eset->bitmap,
&eset_bitmap_info, (size_t)pind); i < pind_max; i =
(pszind_t)bitmap_ffu(eset->bitmap, &eset_bitmap_info,
(size_t)i+1)) {
assert(i < SC_NPSIZES);
assert(!extent_heap_empty(&eset->heaps[i]));
extent_t *extent = extent_heap_first(&eset->heaps[i]);
uintptr_t base = (uintptr_t)extent_base_get(extent);
size_t candidate_size = extent_size_get(extent);
assert(candidate_size >= min_size);
uintptr_t next_align = ALIGNMENT_CEILING((uintptr_t)base,
PAGE_CEILING(alignment));
if (base > next_align || base + candidate_size <= next_align) {
/* Overflow or not crossing the next alignment. */
continue;
}
size_t leadsize = next_align - base;
if (candidate_size - leadsize >= min_size) {
return extent;
}
}
return NULL;
}
/*
* Do first-fit extent selection, i.e. select the oldest/lowest extent that is
* large enough.
*/
static extent_t *
extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, eset_t *eset,
size_t size) {
extent_t *ret = NULL;
pszind_t pind = sz_psz2ind(sz_psz_quantize_ceil(size));
if (!maps_coalesce && !opt_retain) {
/*
* No split / merge allowed (Windows w/o retain). Try exact fit
* only.
*/
return extent_heap_empty(&eset->heaps[pind]) ? NULL :
extent_heap_first(&eset->heaps[pind]);
}
for (pszind_t i = (pszind_t)bitmap_ffu(eset->bitmap,
&eset_bitmap_info, (size_t)pind);
i < SC_NPSIZES + 1;
i = (pszind_t)bitmap_ffu(eset->bitmap, &eset_bitmap_info,
(size_t)i+1)) {
assert(!extent_heap_empty(&eset->heaps[i]));
extent_t *extent = extent_heap_first(&eset->heaps[i]);
assert(extent_size_get(extent) >= size);
/*
* In order to reduce fragmentation, avoid reusing and splitting
* large extents for much smaller sizes.
*
* Only do this check for dirty extents (delay_coalesce).
*/
if (eset->delay_coalesce &&
(sz_pind2sz(i) >> opt_lg_extent_max_active_fit) > size) {
break;
}
if (ret == NULL || extent_snad_comp(extent, ret) < 0) {
ret = extent;
}
if (i == SC_NPSIZES) {
break;
}
assert(i < SC_NPSIZES);
}
return ret;
}
/*
* Do first-fit extent selection, where the selection policy is based
* on eset->delay_coalesce.
*/
static extent_t *
extents_fit_locked(tsdn_t *tsdn, arena_t *arena, eset_t *eset,
size_t esize, size_t alignment) {
malloc_mutex_assert_owner(tsdn, &eset->mtx);
size_t max_size = esize + PAGE_CEILING(alignment) - PAGE;
/* Beware size_t wrap-around. */
if (max_size < esize) {
return NULL;
}
extent_t *extent =
extents_first_fit_locked(tsdn, arena, eset, max_size);
if (alignment > PAGE && extent == NULL) {
/*
* max_size guarantees the alignment requirement but is rather
* pessimistic. Next we try to satisfy the aligned allocation
* with sizes in [esize, max_size).
*/
extent = extents_fit_alignment(eset, esize, max_size,
alignment);
}
return extent;
}
static bool
extent_try_delayed_coalesce(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, eset_t *eset,
@@ -790,8 +672,7 @@ extent_recycle_extract(tsdn_t *tsdn, arena_t *arena,
extent_unlock(tsdn, unlock_extent);
}
} else {
- extent = extents_fit_locked(tsdn, arena, eset, esize,
- alignment);
+ extent = eset_fit_locked(tsdn, eset, esize, alignment);
}
if (extent == NULL) {
malloc_mutex_unlock(tsdn, &eset->mtx);