Extents -> Eset: rename/move extents_init.

This commit is contained in:
David T. Goldblatt 2019-09-20 20:37:15 -07:00 committed by David Goldblatt
parent e6180fe1b4
commit b416b96a39
5 changed files with 30 additions and 28 deletions

View File

@ -6,6 +6,9 @@
#include "jemalloc/internal/extent.h" #include "jemalloc/internal/extent.h"
#include "jemalloc/internal/mutex.h" #include "jemalloc/internal/mutex.h"
/* This is a transitional declaration, while we move extent.c into eset.c. */
extern const bitmap_info_t eset_bitmap_info;
/* /*
* An eset ("extent set") is a quantized collection of extents, with built-in * An eset ("extent set") is a quantized collection of extents, with built-in
* LRU queue. * LRU queue.
@ -57,4 +60,7 @@ struct eset_s {
bool delay_coalesce; bool delay_coalesce;
}; };
bool eset_init(tsdn_t *tsdn, eset_t *eset, extent_state_t state,
bool delay_coalesce);
#endif /* JEMALLOC_INTERNAL_ESET_H */ #endif /* JEMALLOC_INTERNAL_ESET_H */

View File

@ -27,8 +27,6 @@ size_t extent_size_quantize_ceil(size_t size);
ph_proto(, extent_avail_, extent_tree_t, extent_t) ph_proto(, extent_avail_, extent_tree_t, extent_t)
ph_proto(, extent_heap_, extent_heap_t, extent_t) ph_proto(, extent_heap_, extent_heap_t, extent_t)
bool extents_init(tsdn_t *tsdn, eset_t *eset, extent_state_t state,
bool delay_coalesce);
extent_state_t extents_state_get(const eset_t *eset); extent_state_t extents_state_get(const eset_t *eset);
size_t extents_npages_get(eset_t *eset); size_t extents_npages_get(eset_t *eset);
/* Get the number of extents in the given page size index. */ /* Get the number of extents in the given page size index. */

View File

@ -2022,16 +2022,14 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
* are likely to be reused soon after deallocation, and the cost of * are likely to be reused soon after deallocation, and the cost of
* merging/splitting extents is non-trivial. * merging/splitting extents is non-trivial.
*/ */
if (extents_init(tsdn, &arena->extents_dirty, extent_state_dirty, if (eset_init(tsdn, &arena->extents_dirty, extent_state_dirty, true)) {
true)) {
goto label_error; goto label_error;
} }
/* /*
* Coalesce muzzy extents immediately, because operations on them are in * Coalesce muzzy extents immediately, because operations on them are in
* the critical path much less often than for dirty extents. * the critical path much less often than for dirty extents.
*/ */
if (extents_init(tsdn, &arena->extents_muzzy, extent_state_muzzy, if (eset_init(tsdn, &arena->extents_muzzy, extent_state_muzzy, false)) {
false)) {
goto label_error; goto label_error;
} }
/* /*
@ -2040,7 +2038,7 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
* coalescing), but also because operations on retained extents are not * coalescing), but also because operations on retained extents are not
* in the critical path. * in the critical path.
*/ */
if (extents_init(tsdn, &arena->extents_retained, extent_state_retained, if (eset_init(tsdn, &arena->extents_retained, extent_state_retained,
false)) { false)) {
goto label_error; goto label_error;
} }

View File

@ -2,3 +2,24 @@
#include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/eset.h" #include "jemalloc/internal/eset.h"
const bitmap_info_t eset_bitmap_info =
BITMAP_INFO_INITIALIZER(SC_NPSIZES+1);
bool
eset_init(tsdn_t *tsdn, eset_t *eset, extent_state_t state,
bool delay_coalesce) {
if (malloc_mutex_init(&eset->mtx, "extents", WITNESS_RANK_EXTENTS,
malloc_mutex_rank_exclusive)) {
return true;
}
for (unsigned i = 0; i < SC_NPSIZES + 1; i++) {
extent_heap_new(&eset->heaps[i]);
}
bitmap_init(eset->bitmap, &eset_bitmap_info, true);
extent_list_init(&eset->lru);
atomic_store_zu(&eset->npages, 0, ATOMIC_RELAXED);
eset->state = state;
eset->delay_coalesce = delay_coalesce;
return false;
}

View File

@ -19,9 +19,6 @@ mutex_pool_t extent_mutex_pool;
size_t opt_lg_extent_max_active_fit = LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT; size_t opt_lg_extent_max_active_fit = LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT;
static const bitmap_info_t eset_bitmap_info =
BITMAP_INFO_INITIALIZER(SC_NPSIZES+1);
static void *extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, static void *extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr,
size_t size, size_t alignment, bool *zero, bool *commit, size_t size, size_t alignment, bool *zero, bool *commit,
unsigned arena_ind); unsigned arena_ind);
@ -308,24 +305,6 @@ extent_size_quantize_ceil(size_t size) {
/* Generate pairing heap functions. */ /* Generate pairing heap functions. */
ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp) ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp)
bool
extents_init(tsdn_t *tsdn, eset_t *eset, extent_state_t state,
bool delay_coalesce) {
if (malloc_mutex_init(&eset->mtx, "extents", WITNESS_RANK_EXTENTS,
malloc_mutex_rank_exclusive)) {
return true;
}
for (unsigned i = 0; i < SC_NPSIZES + 1; i++) {
extent_heap_new(&eset->heaps[i]);
}
bitmap_init(eset->bitmap, &eset_bitmap_info, true);
extent_list_init(&eset->lru);
atomic_store_zu(&eset->npages, 0, ATOMIC_RELAXED);
eset->state = state;
eset->delay_coalesce = delay_coalesce;
return false;
}
extent_state_t extent_state_t
extents_state_get(const eset_t *eset) { extents_state_get(const eset_t *eset) {
return eset->state; return eset->state;