From 1210af9a4e26994c6f340085554f3519994ae682 Mon Sep 17 00:00:00 2001 From: "David T. Goldblatt" Date: Fri, 20 Sep 2019 23:51:13 -0700 Subject: [PATCH] Extent -> Eset: Move insertion and removal. --- include/jemalloc/internal/eset.h | 3 ++ src/eset.c | 78 ++++++++++++++++++++++++++++ src/extent.c | 87 ++------------------------------ 3 files changed, 85 insertions(+), 83 deletions(-) diff --git a/include/jemalloc/internal/eset.h b/include/jemalloc/internal/eset.h index 1e055397..400316ed 100644 --- a/include/jemalloc/internal/eset.h +++ b/include/jemalloc/internal/eset.h @@ -70,4 +70,7 @@ size_t eset_nextents_get(eset_t *eset, pszind_t ind); /* Get the sum total bytes of the extents in the given page size index. */ size_t eset_nbytes_get(eset_t *eset, pszind_t ind); +void eset_insert_locked(tsdn_t *tsdn, eset_t *eset, extent_t *extent); +void eset_remove_locked(tsdn_t *tsdn, eset_t *eset, extent_t *extent); + #endif /* JEMALLOC_INTERNAL_ESET_H */ diff --git a/src/eset.c b/src/eset.c index d9457ee9..21dcccad 100644 --- a/src/eset.c +++ b/src/eset.c @@ -43,3 +43,81 @@ size_t eset_nbytes_get(eset_t *eset, pszind_t pind) { return atomic_load_zu(&eset->nbytes[pind], ATOMIC_RELAXED); } + +static void +eset_stats_add(eset_t *eset, pszind_t pind, size_t sz) { + size_t cur = atomic_load_zu(&eset->nextents[pind], ATOMIC_RELAXED); + atomic_store_zu(&eset->nextents[pind], cur + 1, ATOMIC_RELAXED); + cur = atomic_load_zu(&eset->nbytes[pind], ATOMIC_RELAXED); + atomic_store_zu(&eset->nbytes[pind], cur + sz, ATOMIC_RELAXED); +} + +static void +eset_stats_sub(eset_t *eset, pszind_t pind, size_t sz) { + size_t cur = atomic_load_zu(&eset->nextents[pind], ATOMIC_RELAXED); + atomic_store_zu(&eset->nextents[pind], cur - 1, ATOMIC_RELAXED); + cur = atomic_load_zu(&eset->nbytes[pind], ATOMIC_RELAXED); + atomic_store_zu(&eset->nbytes[pind], cur - sz, ATOMIC_RELAXED); +} + +void +eset_insert_locked(tsdn_t *tsdn, eset_t *eset, extent_t *extent) { + malloc_mutex_assert_owner(tsdn, &eset->mtx); + assert(extent_state_get(extent) == eset->state); + + size_t size = extent_size_get(extent); + size_t psz = sz_psz_quantize_floor(size); + pszind_t pind = sz_psz2ind(psz); + if (extent_heap_empty(&eset->heaps[pind])) { + bitmap_unset(eset->bitmap, &eset_bitmap_info, + (size_t)pind); + } + extent_heap_insert(&eset->heaps[pind], extent); + + if (config_stats) { + eset_stats_add(eset, pind, size); + } + + extent_list_append(&eset->lru, extent); + size_t npages = size >> LG_PAGE; + /* + * All modifications to npages hold the mutex (as asserted above), so we + * don't need an atomic fetch-add; we can get by with a load followed by + * a store. + */ + size_t cur_eset_npages = + atomic_load_zu(&eset->npages, ATOMIC_RELAXED); + atomic_store_zu(&eset->npages, cur_eset_npages + npages, + ATOMIC_RELAXED); +} + +void +eset_remove_locked(tsdn_t *tsdn, eset_t *eset, extent_t *extent) { + malloc_mutex_assert_owner(tsdn, &eset->mtx); + assert(extent_state_get(extent) == eset->state); + + size_t size = extent_size_get(extent); + size_t psz = sz_psz_quantize_floor(size); + pszind_t pind = sz_psz2ind(psz); + extent_heap_remove(&eset->heaps[pind], extent); + + if (config_stats) { + eset_stats_sub(eset, pind, size); + } + + if (extent_heap_empty(&eset->heaps[pind])) { + bitmap_set(eset->bitmap, &eset_bitmap_info, + (size_t)pind); + } + extent_list_remove(&eset->lru, extent); + size_t npages = size >> LG_PAGE; + /* + * As in eset_insert_locked, we hold eset->mtx and so don't need atomic + * operations for updating eset->npages. 
+ */ + size_t cur_extents_npages = + atomic_load_zu(&eset->npages, ATOMIC_RELAXED); + assert(cur_extents_npages >= npages); + atomic_store_zu(&eset->npages, + cur_extents_npages - (size >> LG_PAGE), ATOMIC_RELAXED); +} diff --git a/src/extent.c b/src/extent.c index 81ce308a..069899c2 100644 --- a/src/extent.c +++ b/src/extent.c @@ -249,87 +249,8 @@ extent_hooks_assure_initialized(arena_t *arena, } } -/* Generate pairing heap functions. */ ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp) -static void -extents_stats_add(eset_t *eset, pszind_t pind, size_t sz) { - size_t cur = atomic_load_zu(&eset->nextents[pind], ATOMIC_RELAXED); - atomic_store_zu(&eset->nextents[pind], cur + 1, ATOMIC_RELAXED); - cur = atomic_load_zu(&eset->nbytes[pind], ATOMIC_RELAXED); - atomic_store_zu(&eset->nbytes[pind], cur + sz, ATOMIC_RELAXED); -} - -static void -extents_stats_sub(eset_t *eset, pszind_t pind, size_t sz) { - size_t cur = atomic_load_zu(&eset->nextents[pind], ATOMIC_RELAXED); - atomic_store_zu(&eset->nextents[pind], cur - 1, ATOMIC_RELAXED); - cur = atomic_load_zu(&eset->nbytes[pind], ATOMIC_RELAXED); - atomic_store_zu(&eset->nbytes[pind], cur - sz, ATOMIC_RELAXED); -} - -static void -extents_insert_locked(tsdn_t *tsdn, eset_t *eset, extent_t *extent) { - malloc_mutex_assert_owner(tsdn, &eset->mtx); - assert(extent_state_get(extent) == eset->state); - - size_t size = extent_size_get(extent); - size_t psz = sz_psz_quantize_floor(size); - pszind_t pind = sz_psz2ind(psz); - if (extent_heap_empty(&eset->heaps[pind])) { - bitmap_unset(eset->bitmap, &eset_bitmap_info, - (size_t)pind); - } - extent_heap_insert(&eset->heaps[pind], extent); - - if (config_stats) { - extents_stats_add(eset, pind, size); - } - - extent_list_append(&eset->lru, extent); - size_t npages = size >> LG_PAGE; - /* - * All modifications to npages hold the mutex (as asserted above), so we - * don't need an atomic fetch-add; we can get by with a load followed by - * a store. - */ - size_t cur_eset_npages = - atomic_load_zu(&eset->npages, ATOMIC_RELAXED); - atomic_store_zu(&eset->npages, cur_eset_npages + npages, - ATOMIC_RELAXED); -} - -static void -extents_remove_locked(tsdn_t *tsdn, eset_t *eset, extent_t *extent) { - malloc_mutex_assert_owner(tsdn, &eset->mtx); - assert(extent_state_get(extent) == eset->state); - - size_t size = extent_size_get(extent); - size_t psz = sz_psz_quantize_floor(size); - pszind_t pind = sz_psz2ind(psz); - extent_heap_remove(&eset->heaps[pind], extent); - - if (config_stats) { - extents_stats_sub(eset, pind, size); - } - - if (extent_heap_empty(&eset->heaps[pind])) { - bitmap_set(eset->bitmap, &eset_bitmap_info, - (size_t)pind); - } - extent_list_remove(&eset->lru, extent); - size_t npages = size >> LG_PAGE; - /* - * As in extents_insert_locked, we hold eset->mtx and so don't need - * atomic operations for updating eset->npages. - */ - size_t cur_extents_npages = - atomic_load_zu(&eset->npages, ATOMIC_RELAXED); - assert(cur_extents_npages >= npages); - atomic_store_zu(&eset->npages, - cur_extents_npages - (size >> LG_PAGE), ATOMIC_RELAXED); -} - /* * Find an extent with size [min_size, max_size) to satisfy the alignment * requirement. For each size, try only the first extent in the heap. 
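
The insertion and removal paths moved into eset.c above update eset->npages with a relaxed load followed by a relaxed store rather than an atomic fetch-add. That is only safe because every writer holds eset->mtx (asserted at the top of both functions); readers such as the stats getters earlier in eset.c still load the counters with relaxed atomics and no lock, which is why the fields stay atomic at all. Below is a minimal standalone C11 sketch of that pattern; it is not jemalloc code: counter_t, counter_add_locked and counter_read are made-up names, and a pthread mutex stands in for jemalloc's malloc_mutex.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/*
 * Standalone sketch (not jemalloc code) of the npages update pattern:
 * every writer holds the mutex, so a relaxed load followed by a relaxed
 * store is equivalent to a fetch-add; readers may load without the lock.
 */
typedef struct {
	pthread_mutex_t mtx;
	atomic_size_t npages;	/* hypothetical stand-in for eset->npages */
} counter_t;

/* Caller must hold c->mtx; mutual exclusion makes load + store safe. */
static void
counter_add_locked(counter_t *c, size_t n) {
	size_t cur = atomic_load_explicit(&c->npages, memory_order_relaxed);
	atomic_store_explicit(&c->npages, cur + n, memory_order_relaxed);
}

/* Lock-free, approximate read, analogous to the stats getters. */
static size_t
counter_read(counter_t *c) {
	return atomic_load_explicit(&c->npages, memory_order_relaxed);
}

int
main(void) {
	counter_t c = {PTHREAD_MUTEX_INITIALIZER, 0};

	pthread_mutex_lock(&c.mtx);
	counter_add_locked(&c, 4);
	pthread_mutex_unlock(&c.mtx);

	printf("npages = %zu\n", counter_read(&c));
	return 0;
}

An atomic fetch-add would also be correct here; the load/store form simply avoids a read-modify-write on a counter whose writers are already serialized, which is what the comment in eset_insert_locked points out.
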
@@ -461,7 +382,7 @@ extent_try_delayed_coalesce(tsdn_t *tsdn, arena_t *arena, if (!coalesced) { return true; } - extents_insert_locked(tsdn, eset, extent); + eset_insert_locked(tsdn, eset, extent); return false; } @@ -521,7 +442,7 @@ extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent = NULL; goto label_return; } - extents_remove_locked(tsdn, eset, extent); + eset_remove_locked(tsdn, eset, extent); if (!eset->delay_coalesce) { break; } @@ -607,7 +528,7 @@ extent_deactivate_locked(tsdn_t *tsdn, arena_t *arena, eset_t *eset, assert(extent_state_get(extent) == extent_state_active); extent_state_set(extent, eset_state_get(eset)); - extents_insert_locked(tsdn, eset, extent); + eset_insert_locked(tsdn, eset, extent); } static void @@ -624,7 +545,7 @@ extent_activate_locked(tsdn_t *tsdn, arena_t *arena, eset_t *eset, assert(extent_arena_ind_get(extent) == arena_ind_get(arena)); assert(extent_state_get(extent) == eset_state_get(eset)); - extents_remove_locked(tsdn, eset, extent); + eset_remove_locked(tsdn, eset, extent); extent_state_set(extent, extent_state_active); }
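
The remaining hunks only retarget the call sites in extent.c from the old static helpers to the new eset_insert_locked/eset_remove_locked. The structure those functions maintain is one pairing heap of extents per page-size index plus a bitmap that lets searches skip empty size classes: the patch clears a bit in eset->bitmap when a heap gains its first extent and sets it again when the heap drains. The following standalone sketch shows the same bookkeeping in simplified form; it is not jemalloc code (no pairing heaps, and it uses a plain uint64_t mask with the opposite convention, a set bit meaning "nonempty"), and names such as class_insert and class_first_fit are invented for illustration.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define NCLASSES 64

/*
 * Standalone sketch (not jemalloc code): one extent count per size-class
 * index plus a mask of nonempty classes.  Here a set bit means "nonempty",
 * the opposite of the eset bitmap convention used in the patch.
 */
static uint64_t nonempty_mask;		/* bit i set => class i has extents */
static size_t nextents[NCLASSES];	/* per-class extent counts */

static void
class_insert(size_t i) {
	assert(i < NCLASSES);
	if (nextents[i]++ == 0) {
		nonempty_mask |= UINT64_C(1) << i;
	}
}

static void
class_remove(size_t i) {
	assert(i < NCLASSES && nextents[i] > 0);
	if (--nextents[i] == 0) {
		nonempty_mask &= ~(UINT64_C(1) << i);
	}
}

/* First nonempty class with index >= min, or NCLASSES if there is none. */
static size_t
class_first_fit(size_t min) {
	assert(min < NCLASSES);
	uint64_t masked = nonempty_mask & ~((UINT64_C(1) << min) - 1);
	if (masked == 0) {
		return NCLASSES;
	}
	return (size_t)__builtin_ctzll(masked);	/* GCC/Clang builtin */
}

int
main(void) {
	class_insert(5);
	class_insert(9);
	printf("first fit >= 3: %zu\n", class_first_fit(3));	/* 5 */
	class_remove(5);
	printf("first fit >= 3: %zu\n", class_first_fit(3));	/* 9 */
	return 0;
}

class_first_fit loosely corresponds to the searches that walk eset->heaps (such as the alignment-fit helper whose comment appears as trailing context above): by consulting the nonempty bookkeeping first, they only ever touch heaps that actually contain extents.
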