#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/eset.h"
const bitmap_info_t eset_bitmap_info =
    BITMAP_INFO_INITIALIZER(SC_NPSIZES+1);

void
eset_init(eset_t *eset, extent_state_t state) {
	for (unsigned i = 0; i < SC_NPSIZES + 1; i++) {
		edata_heap_new(&eset->heaps[i]);
	}
	bitmap_init(eset->bitmap, &eset_bitmap_info, true);
	edata_list_inactive_init(&eset->lru);
	atomic_store_zu(&eset->npages, 0, ATOMIC_RELAXED);
	eset->state = state;
}
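
/*
 * Illustrative call sequence (a sketch, not taken from jemalloc itself; the
 * state value, sizes, and lg_max_fit choice are arbitrary, and every call
 * assumes the caller provides external synchronization):
 *
 *	eset_t dirty;
 *	eset_init(&dirty, extent_state_dirty);
 *	eset_insert(&dirty, edata);	/* edata: an extent to retain. */
 *	edata_t *e = eset_fit(&dirty, 4 * PAGE, PAGE, false, SC_PTR_BITS);
 *	if (e != NULL) {
 *		eset_remove(&dirty, e);
 *	}
 */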

size_t
eset_npages_get(eset_t *eset) {
	return atomic_load_zu(&eset->npages, ATOMIC_RELAXED);
}

size_t
eset_nextents_get(eset_t *eset, pszind_t pind) {
	return atomic_load_zu(&eset->nextents[pind], ATOMIC_RELAXED);
}

size_t
eset_nbytes_get(eset_t *eset, pszind_t pind) {
	return atomic_load_zu(&eset->nbytes[pind], ATOMIC_RELAXED);
}
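
/*
 * Note on the relaxed atomics below: the nextents/nbytes counters are only
 * written from eset_insert()/eset_remove(), which rely on external
 * synchronization, so a plain load/store pair is enough here; concurrent
 * readers of the getters above may observe slightly stale values.
 */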
static void
eset_stats_add(eset_t *eset, pszind_t pind, size_t sz) {
	size_t cur = atomic_load_zu(&eset->nextents[pind], ATOMIC_RELAXED);
	atomic_store_zu(&eset->nextents[pind], cur + 1, ATOMIC_RELAXED);
	cur = atomic_load_zu(&eset->nbytes[pind], ATOMIC_RELAXED);
	atomic_store_zu(&eset->nbytes[pind], cur + sz, ATOMIC_RELAXED);
}

static void
eset_stats_sub(eset_t *eset, pszind_t pind, size_t sz) {
	size_t cur = atomic_load_zu(&eset->nextents[pind], ATOMIC_RELAXED);
	atomic_store_zu(&eset->nextents[pind], cur - 1, ATOMIC_RELAXED);
	cur = atomic_load_zu(&eset->nbytes[pind], ATOMIC_RELAXED);
	atomic_store_zu(&eset->nbytes[pind], cur - sz, ATOMIC_RELAXED);
}

void
eset_insert(eset_t *eset, edata_t *edata) {
	assert(edata_state_get(edata) == eset->state);

	size_t size = edata_size_get(edata);
	size_t psz = sz_psz_quantize_floor(size);
	pszind_t pind = sz_psz2ind(psz);
	if (edata_heap_empty(&eset->heaps[pind])) {
		bitmap_unset(eset->bitmap, &eset_bitmap_info,
		    (size_t)pind);
	}
	edata_heap_insert(&eset->heaps[pind], edata);

	if (config_stats) {
		eset_stats_add(eset, pind, size);
	}

	edata_list_inactive_append(&eset->lru, edata);
	size_t npages = size >> LG_PAGE;
	/*
	 * All modifications to npages are externally synchronized (this class
	 * is not thread-safe in general; see eset_remove), so we don't need
	 * an atomic fetch-add; we can get by with a relaxed load followed by
	 * a store.
	 */
	size_t cur_eset_npages =
	    atomic_load_zu(&eset->npages, ATOMIC_RELAXED);
	atomic_store_zu(&eset->npages, cur_eset_npages + npages,
	    ATOMIC_RELAXED);
}

void
eset_remove(eset_t *eset, edata_t *edata) {
	assert(edata_state_get(edata) == eset->state);

	size_t size = edata_size_get(edata);
	size_t psz = sz_psz_quantize_floor(size);
	pszind_t pind = sz_psz2ind(psz);
	edata_heap_remove(&eset->heaps[pind], edata);

	if (config_stats) {
		eset_stats_sub(eset, pind, size);
	}

	if (edata_heap_empty(&eset->heaps[pind])) {
		bitmap_set(eset->bitmap, &eset_bitmap_info,
		    (size_t)pind);
	}
	edata_list_inactive_remove(&eset->lru, edata);
	size_t npages = size >> LG_PAGE;
	/*
	 * As in eset_insert, this class is not thread-safe in general; we
	 * rely on external synchronization for all mutating operations, so
	 * updating eset->npages doesn't need atomic read-modify-write
	 * operations either.
	 */
	size_t cur_extents_npages =
	    atomic_load_zu(&eset->npages, ATOMIC_RELAXED);
	assert(cur_extents_npages >= npages);
	atomic_store_zu(&eset->npages,
	    cur_extents_npages - npages, ATOMIC_RELAXED);
}

/*
 * Find an extent with size [min_size, max_size) to satisfy the alignment
 * requirement. For each size, try only the first extent in the heap.
 */
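/*
 * Worked example (illustrative numbers only): with base = 0x5000,
 * candidate_size = 0x6000 and alignment = 0x4000, next_align below is
 * 0x8000, which lies inside [base, base + candidate_size); leadsize is
 * 0x3000, so this candidate is returned whenever min_size <= 0x3000.
 */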
static edata_t *
eset_fit_alignment(eset_t *eset, size_t min_size, size_t max_size,
    size_t alignment) {
	pszind_t pind = sz_psz2ind(sz_psz_quantize_ceil(min_size));
	pszind_t pind_max = sz_psz2ind(sz_psz_quantize_ceil(max_size));

	for (pszind_t i = (pszind_t)bitmap_ffu(eset->bitmap,
	    &eset_bitmap_info, (size_t)pind); i < pind_max; i =
	    (pszind_t)bitmap_ffu(eset->bitmap, &eset_bitmap_info,
	    (size_t)i+1)) {
		assert(i < SC_NPSIZES);
		assert(!edata_heap_empty(&eset->heaps[i]));
		edata_t *edata = edata_heap_first(&eset->heaps[i]);
		uintptr_t base = (uintptr_t)edata_base_get(edata);
		size_t candidate_size = edata_size_get(edata);
		assert(candidate_size >= min_size);

		uintptr_t next_align = ALIGNMENT_CEILING((uintptr_t)base,
		    PAGE_CEILING(alignment));
		if (base > next_align || base + candidate_size <= next_align) {
			/* Overflow or not crossing the next alignment. */
			continue;
		}

		size_t leadsize = next_align - base;
		if (candidate_size - leadsize >= min_size) {
			return edata;
		}
	}

	return NULL;
}

/*
 * Do first-fit extent selection, i.e. select the oldest/lowest extent that is
 * large enough.
 *
 * lg_max_fit is the (log of the) maximum ratio between the requested size and
 * the returned size that we'll allow. This can reduce fragmentation by
 * avoiding reusing and splitting large extents for smaller sizes. In practice,
 * it's set to opt_lg_extent_max_active_fit for the dirty eset and SC_PTR_BITS
 * for others.
 */
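/*
 * For example (illustrative values): a request of size = 32 KiB with
 * lg_max_fit = 2 scans size classes up to 128 KiB; the first class for
 * which (sz_pind2sz(i) >> 2) > 32 KiB (e.g. a 160 KiB class) stops the
 * scan, so the request is never served by splitting an extent more than
 * roughly four times its size.
 */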
static edata_t *
eset_first_fit(eset_t *eset, size_t size, bool exact_only,
    unsigned lg_max_fit) {
	edata_t *ret = NULL;

	pszind_t pind = sz_psz2ind(sz_psz_quantize_ceil(size));

	if (exact_only) {
		return edata_heap_empty(&eset->heaps[pind]) ? NULL :
		    edata_heap_first(&eset->heaps[pind]);
	}
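
	/*
	 * Scan the nonempty size classes at or above pind; among each heap's
	 * first extent, keep whichever compares lowest by serial number and
	 * address (edata_snad_comp), i.e. the oldest/lowest acceptable fit.
	 */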
	for (pszind_t i = (pszind_t)bitmap_ffu(eset->bitmap,
	    &eset_bitmap_info, (size_t)pind);
	    i < SC_NPSIZES + 1;
	    i = (pszind_t)bitmap_ffu(eset->bitmap, &eset_bitmap_info,
	    (size_t)i+1)) {
		assert(!edata_heap_empty(&eset->heaps[i]));
		edata_t *edata = edata_heap_first(&eset->heaps[i]);
		assert(edata_size_get(edata) >= size);
		if (lg_max_fit == SC_PTR_BITS) {
			/*
			 * We'll shift by this below, and shifting out all the
			 * bits is undefined. Decreasing is safe, since the
			 * page size is larger than 1 byte.
			 */
			lg_max_fit = SC_PTR_BITS - 1;
		}
		if ((sz_pind2sz(i) >> lg_max_fit) > size) {
			break;
		}
		if (ret == NULL || edata_snad_comp(edata, ret) < 0) {
			ret = edata;
		}
		if (i == SC_NPSIZES) {
			break;
		}
		assert(i < SC_NPSIZES);
	}

	return ret;
}

edata_t *
eset_fit(eset_t *eset, size_t esize, size_t alignment, bool exact_only,
    unsigned lg_max_fit) {
	size_t max_size = esize + PAGE_CEILING(alignment) - PAGE;
	/* Beware size_t wrap-around. */
	if (max_size < esize) {
		return NULL;
	}
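
	/*
	 * Why max_size suffices (illustrative numbers): with esize = 2 pages
	 * and alignment = 4 pages, max_size is 2 + 4 - 1 = 5 pages, and any
	 * 5-page extent contains a 4-page-aligned, 2-page subrange no matter
	 * where it starts.
	 */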

	edata_t *edata = eset_first_fit(eset, max_size, exact_only, lg_max_fit);

	if (alignment > PAGE && edata == NULL) {
		/*
		 * max_size guarantees the alignment requirement but is rather
		 * pessimistic. Next we try to satisfy the aligned allocation
		 * with sizes in [esize, max_size).
		 */
		edata = eset_fit_alignment(eset, esize, max_size, alignment);
	}

	return edata;
}