#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/eset.h"
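
/*
 * One bit per page-size-class heap.  eset_insert_locked() clears a heap's bit
 * when the heap gains its first extent, and eset_remove_locked() sets it again
 * once the heap is drained, so a clear bit marks a nonempty heap.
 */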
const bitmap_info_t eset_bitmap_info =
    BITMAP_INFO_INITIALIZER(SC_NPSIZES+1);
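
/*
 * Initialize an empty eset: set up the mutex, the per-size-class heaps and
 * bitmap, the LRU list, and the page counter.  Returns true on failure (mutex
 * initialization), false on success.
 */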
bool
eset_init(tsdn_t *tsdn, eset_t *eset, extent_state_t state,
    bool delay_coalesce) {
	if (malloc_mutex_init(&eset->mtx, "extents", WITNESS_RANK_EXTENTS,
	    malloc_mutex_rank_exclusive)) {
		return true;
	}
	for (unsigned i = 0; i < SC_NPSIZES + 1; i++) {
		extent_heap_new(&eset->heaps[i]);
	}
	bitmap_init(eset->bitmap, &eset_bitmap_info, true);
	extent_list_init(&eset->lru);
	atomic_store_zu(&eset->npages, 0, ATOMIC_RELAXED);
	eset->state = state;
	eset->delay_coalesce = delay_coalesce;
	return false;
}
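
/*
 * Simple accessors.  The npages/nextents/nbytes counters are read with
 * relaxed atomic loads and are not protected by eset->mtx here.
 */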
extent_state_t
eset_state_get(const eset_t *eset) {
	return eset->state;
}

size_t
eset_npages_get(eset_t *eset) {
	return atomic_load_zu(&eset->npages, ATOMIC_RELAXED);
}

size_t
eset_nextents_get(eset_t *eset, pszind_t pind) {
	return atomic_load_zu(&eset->nextents[pind], ATOMIC_RELAXED);
}

size_t
eset_nbytes_get(eset_t *eset, pszind_t pind) {
	return atomic_load_zu(&eset->nbytes[pind], ATOMIC_RELAXED);
}
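
/*
 * Per-size-class extent and byte counters, maintained only when config_stats.
 * Callers hold eset->mtx (both call sites assert ownership), so a relaxed
 * load followed by a store suffices; no atomic read-modify-write is needed.
 */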
static void
eset_stats_add(eset_t *eset, pszind_t pind, size_t sz) {
	size_t cur = atomic_load_zu(&eset->nextents[pind], ATOMIC_RELAXED);
	atomic_store_zu(&eset->nextents[pind], cur + 1, ATOMIC_RELAXED);
	cur = atomic_load_zu(&eset->nbytes[pind], ATOMIC_RELAXED);
	atomic_store_zu(&eset->nbytes[pind], cur + sz, ATOMIC_RELAXED);
}

static void
eset_stats_sub(eset_t *eset, pszind_t pind, size_t sz) {
	size_t cur = atomic_load_zu(&eset->nextents[pind], ATOMIC_RELAXED);
	atomic_store_zu(&eset->nextents[pind], cur - 1, ATOMIC_RELAXED);
	cur = atomic_load_zu(&eset->nbytes[pind], ATOMIC_RELAXED);
	atomic_store_zu(&eset->nbytes[pind], cur - sz, ATOMIC_RELAXED);
}
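
/*
 * Insert an extent whose state already matches eset->state: push it onto the
 * heap for its size class (clearing that heap's bit in eset_bitmap_info if
 * the heap was empty), append it to the LRU list, and account for it in the
 * stats and page counter.  Must be called with eset->mtx held.
 */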
void
eset_insert_locked(tsdn_t *tsdn, eset_t *eset, extent_t *extent) {
	malloc_mutex_assert_owner(tsdn, &eset->mtx);
	assert(extent_state_get(extent) == eset->state);

	size_t size = extent_size_get(extent);
	size_t psz = sz_psz_quantize_floor(size);
	pszind_t pind = sz_psz2ind(psz);
	if (extent_heap_empty(&eset->heaps[pind])) {
		bitmap_unset(eset->bitmap, &eset_bitmap_info,
		    (size_t)pind);
	}
	extent_heap_insert(&eset->heaps[pind], extent);

	if (config_stats) {
		eset_stats_add(eset, pind, size);
	}

	extent_list_append(&eset->lru, extent);
	size_t npages = size >> LG_PAGE;
	/*
	 * All modifications to npages hold the mutex (as asserted above), so
	 * we don't need an atomic fetch-add; we can get by with a load
	 * followed by a store.
	 */
	size_t cur_eset_npages =
	    atomic_load_zu(&eset->npages, ATOMIC_RELAXED);
	atomic_store_zu(&eset->npages, cur_eset_npages + npages,
	    ATOMIC_RELAXED);
}
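
/*
 * Remove a previously inserted extent: take it off its size-class heap
 * (setting the heap's bit in eset_bitmap_info if the heap is now empty),
 * unlink it from the LRU list, and subtract it from the stats and page
 * counter.  Must be called with eset->mtx held.
 */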
void
eset_remove_locked(tsdn_t *tsdn, eset_t *eset, extent_t *extent) {
	malloc_mutex_assert_owner(tsdn, &eset->mtx);
	assert(extent_state_get(extent) == eset->state);

	size_t size = extent_size_get(extent);
	size_t psz = sz_psz_quantize_floor(size);
	pszind_t pind = sz_psz2ind(psz);
	extent_heap_remove(&eset->heaps[pind], extent);

	if (config_stats) {
		eset_stats_sub(eset, pind, size);
	}

	if (extent_heap_empty(&eset->heaps[pind])) {
		bitmap_set(eset->bitmap, &eset_bitmap_info,
		    (size_t)pind);
	}
	extent_list_remove(&eset->lru, extent);
	size_t npages = size >> LG_PAGE;
	/*
	 * As in eset_insert_locked, we hold eset->mtx and so don't need
	 * atomic operations for updating eset->npages.
	 */
	size_t cur_extents_npages =
	    atomic_load_zu(&eset->npages, ATOMIC_RELAXED);
	assert(cur_extents_npages >= npages);
	atomic_store_zu(&eset->npages,
	    cur_extents_npages - (size >> LG_PAGE), ATOMIC_RELAXED);
}