diff --git a/include/jemalloc/internal/eset.h b/include/jemalloc/internal/eset.h index 1c18f4ee..55db75e1 100644 --- a/include/jemalloc/internal/eset.h +++ b/include/jemalloc/internal/eset.h @@ -6,6 +6,9 @@ #include "jemalloc/internal/extent.h" #include "jemalloc/internal/mutex.h" +/* This is a transitional declaration, while we move extent.c into eset.c. */ +extern const bitmap_info_t eset_bitmap_info; + /* * An eset ("extent set") is a quantized collection of extents, with built-in * LRU queue. @@ -57,4 +60,7 @@ struct eset_s { bool delay_coalesce; }; +bool eset_init(tsdn_t *tsdn, eset_t *eset, extent_state_t state, + bool delay_coalesce); + #endif /* JEMALLOC_INTERNAL_ESET_H */ diff --git a/include/jemalloc/internal/extent_externs.h b/include/jemalloc/internal/extent_externs.h index 45271d7c..7a223840 100644 --- a/include/jemalloc/internal/extent_externs.h +++ b/include/jemalloc/internal/extent_externs.h @@ -27,8 +27,6 @@ size_t extent_size_quantize_ceil(size_t size); ph_proto(, extent_avail_, extent_tree_t, extent_t) ph_proto(, extent_heap_, extent_heap_t, extent_t) -bool extents_init(tsdn_t *tsdn, eset_t *eset, extent_state_t state, - bool delay_coalesce); extent_state_t extents_state_get(const eset_t *eset); size_t extents_npages_get(eset_t *eset); /* Get the number of extents in the given page size index. */ diff --git a/src/arena.c b/src/arena.c index 5380deed..1d269dc7 100644 --- a/src/arena.c +++ b/src/arena.c @@ -2022,16 +2022,14 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { * are likely to be reused soon after deallocation, and the cost of * merging/splitting extents is non-trivial. */ - if (extents_init(tsdn, &arena->extents_dirty, extent_state_dirty, - true)) { + if (eset_init(tsdn, &arena->extents_dirty, extent_state_dirty, true)) { goto label_error; } /* * Coalesce muzzy extents immediately, because operations on them are in * the critical path much less often than for dirty extents. 
*/ - if (extents_init(tsdn, &arena->extents_muzzy, extent_state_muzzy, - false)) { + if (eset_init(tsdn, &arena->extents_muzzy, extent_state_muzzy, false)) { goto label_error; } /* @@ -2040,7 +2038,7 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { * coalescing), but also because operations on retained extents are not * in the critical path. */ - if (extents_init(tsdn, &arena->extents_retained, extent_state_retained, + if (eset_init(tsdn, &arena->extents_retained, extent_state_retained, false)) { goto label_error; } diff --git a/src/eset.c b/src/eset.c index 3b8d1cbc..09148d0c 100644 --- a/src/eset.c +++ b/src/eset.c @@ -2,3 +2,24 @@ #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/eset.h" + +const bitmap_info_t eset_bitmap_info = + BITMAP_INFO_INITIALIZER(SC_NPSIZES+1); + +bool +eset_init(tsdn_t *tsdn, eset_t *eset, extent_state_t state, + bool delay_coalesce) { + if (malloc_mutex_init(&eset->mtx, "extents", WITNESS_RANK_EXTENTS, + malloc_mutex_rank_exclusive)) { + return true; + } + for (unsigned i = 0; i < SC_NPSIZES + 1; i++) { + extent_heap_new(&eset->heaps[i]); + } + bitmap_init(eset->bitmap, &eset_bitmap_info, true); + extent_list_init(&eset->lru); + atomic_store_zu(&eset->npages, 0, ATOMIC_RELAXED); + eset->state = state; + eset->delay_coalesce = delay_coalesce; + return false; +} diff --git a/src/extent.c b/src/extent.c index d5350142..51a145df 100644 --- a/src/extent.c +++ b/src/extent.c @@ -19,9 +19,6 @@ mutex_pool_t extent_mutex_pool; size_t opt_lg_extent_max_active_fit = LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT; -static const bitmap_info_t eset_bitmap_info = - BITMAP_INFO_INITIALIZER(SC_NPSIZES+1); - static void *extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit, unsigned arena_ind); @@ -308,24 +305,6 @@ extent_size_quantize_ceil(size_t size) { /* Generate pairing heap functions. 
*/ ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp) -bool -extents_init(tsdn_t *tsdn, eset_t *eset, extent_state_t state, - bool delay_coalesce) { - if (malloc_mutex_init(&eset->mtx, "extents", WITNESS_RANK_EXTENTS, - malloc_mutex_rank_exclusive)) { - return true; - } - for (unsigned i = 0; i < SC_NPSIZES + 1; i++) { - extent_heap_new(&eset->heaps[i]); - } - bitmap_init(eset->bitmap, &eset_bitmap_info, true); - extent_list_init(&eset->lru); - atomic_store_zu(&eset->npages, 0, ATOMIC_RELAXED); - eset->state = state; - eset->delay_coalesce = delay_coalesce; - return false; -} - extent_state_t extents_state_get(const eset_t *eset) { return eset->state;