Rename ecache_grow -> geom_grow.
We're about to start using it outside of the ecaches, in the HPA central allocator.
This commit is contained in:
parent b399463fba
commit 131b1b5338
Makefile.in
@@ -117,6 +117,7 @@ C_SRCS := $(srcroot)src/jemalloc.c \
 	$(srcroot)src/extent.c \
 	$(srcroot)src/extent_dss.c \
 	$(srcroot)src/extent_mmap.c \
+	$(srcroot)src/geom_grow.c \
 	$(srcroot)src/hook.c \
 	$(srcroot)src/inspect.c \
 	$(srcroot)src/large.c \
include/jemalloc/internal/ecache.h
@@ -19,26 +19,6 @@ struct ecache_s {
 	bool delay_coalesce;
 };
 
-typedef struct ecache_grow_s ecache_grow_t;
-struct ecache_grow_s {
-	/*
-	 * Next extent size class in a growing series to use when satisfying a
-	 * request via the extent hooks (only if opt_retain).  This limits the
-	 * number of disjoint virtual memory ranges so that extent merging can
-	 * be effective even if multiple arenas' extent allocation requests are
-	 * highly interleaved.
-	 *
-	 * retain_grow_limit is the max allowed size ind to expand (unless the
-	 * required size is greater).  Default is no limit, and controlled
-	 * through mallctl only.
-	 *
-	 * Synchronization: extent_grow_mtx
-	 */
-	pszind_t next;
-	pszind_t limit;
-	malloc_mutex_t mtx;
-};
-
 static inline size_t
 ecache_npages_get(ecache_t *ecache) {
 	return eset_npages_get(&ecache->eset);
@@ -65,9 +45,4 @@ void ecache_prefork(tsdn_t *tsdn, ecache_t *ecache);
 void ecache_postfork_parent(tsdn_t *tsdn, ecache_t *ecache);
 void ecache_postfork_child(tsdn_t *tsdn, ecache_t *ecache);
 
-bool ecache_grow_init(tsdn_t *tsdn, ecache_grow_t *ecache_grow);
-void ecache_grow_prefork(tsdn_t *tsdn, ecache_grow_t *ecache_grow);
-void ecache_grow_postfork_parent(tsdn_t *tsdn, ecache_grow_t *ecache_grow);
-void ecache_grow_postfork_child(tsdn_t *tsdn, ecache_grow_t *ecache_grow);
-
 #endif /* JEMALLOC_INTERNAL_ECACHE_H */
29 include/jemalloc/internal/geom_grow.h Normal file
@@ -0,0 +1,29 @@
+#ifndef JEMALLOC_INTERNAL_ECACHE_GROW_H
+#define JEMALLOC_INTERNAL_ECACHE_GROW_H
+
+typedef struct geom_grow_s geom_grow_t;
+struct geom_grow_s {
+	/*
+	 * Next extent size class in a growing series to use when satisfying a
+	 * request via the extent hooks (only if opt_retain).  This limits the
+	 * number of disjoint virtual memory ranges so that extent merging can
+	 * be effective even if multiple arenas' extent allocation requests are
+	 * highly interleaved.
+	 *
+	 * retain_grow_limit is the max allowed size ind to expand (unless the
+	 * required size is greater).  Default is no limit, and controlled
+	 * through mallctl only.
+	 *
+	 * Synchronization: mtx
+	 */
+	pszind_t next;
+	pszind_t limit;
+	malloc_mutex_t mtx;
+};
+
+bool geom_grow_init(tsdn_t *tsdn, geom_grow_t *geom_grow);
+void geom_grow_prefork(tsdn_t *tsdn, geom_grow_t *geom_grow);
+void geom_grow_postfork_parent(tsdn_t *tsdn, geom_grow_t *geom_grow);
+void geom_grow_postfork_child(tsdn_t *tsdn, geom_grow_t *geom_grow);
+
+#endif /* JEMALLOC_INTERNAL_ECACHE_GROW_H */
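Editor's note: the struct comment above describes a geometric growth series over extent size classes. A toy standalone sketch of that policy, for readers new to this code (illustrative names and plain byte sizes; jemalloc actually tracks pszind_t size-class indices and guards the state with mtx):

```c
#include <stddef.h>

/* Toy model of geom_grow_t; not jemalloc's API. */
typedef struct {
	size_t next;  /* next mapping size to request from the system */
	size_t limit; /* cap on the growth series */
} toy_geom_grow_t;

/*
 * Return the first size in the doubling series that satisfies min_size,
 * advancing the series past it; 0 if min_size exceeds the cap (the real
 * code fails the allocation instead).
 */
static size_t
toy_grow_size(toy_geom_grow_t *g, size_t min_size) {
	size_t sz = g->next;
	while (sz < min_size) {
		if (sz >= g->limit) {
			return 0;
		}
		sz *= 2; /* geometric growth: each step doubles */
	}
	g->next = (sz * 2 <= g->limit) ? sz * 2 : g->limit;
	return sz;
}
```

Because each successive mapping in the series is larger than the last, the number of disjoint virtual memory ranges grows only logarithmically with the total retained size, which is what keeps extent merging effective per the comment.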
include/jemalloc/internal/pac.h
@@ -1,8 +1,10 @@
 #ifndef JEMALLOC_INTERNAL_PAC_H
 #define JEMALLOC_INTERNAL_PAC_H
 
+#include "jemalloc/internal/geom_grow.h"
 #include "jemalloc/internal/pai.h"
 
+
 /*
  * Page allocator classic; an implementation of the PAI interface that:
  * - Can be used for arenas with custom extent hooks.
@@ -93,7 +95,7 @@ struct pac_s {
 	edata_cache_t *edata_cache;
 
 	/* The grow info for the retained ecache. */
-	ecache_grow_t ecache_grow;
+	geom_grow_t geom_grow;
 
 	/*
 	 * Decay-based purging state, responsible for scheduling extent state
26 src/ecache.c
@@ -29,29 +29,3 @@ void
 ecache_postfork_child(tsdn_t *tsdn, ecache_t *ecache) {
 	malloc_mutex_postfork_child(tsdn, &ecache->mtx);
 }
-
-bool
-ecache_grow_init(tsdn_t *tsdn, ecache_grow_t *ecache_grow) {
-	ecache_grow->next = sz_psz2ind(HUGEPAGE);
-	ecache_grow->limit = sz_psz2ind(SC_LARGE_MAXCLASS);
-	if (malloc_mutex_init(&ecache_grow->mtx, "extent_grow",
-	    WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) {
-		return true;
-	}
-	return false;
-}
-
-void
-ecache_grow_prefork(tsdn_t *tsdn, ecache_grow_t *ecache_grow) {
-	malloc_mutex_prefork(tsdn, &ecache_grow->mtx);
-}
-
-void
-ecache_grow_postfork_parent(tsdn_t *tsdn, ecache_grow_t *ecache_grow) {
-	malloc_mutex_postfork_parent(tsdn, &ecache_grow->mtx);
-}
-
-void
-ecache_grow_postfork_child(tsdn_t *tsdn, ecache_grow_t *ecache_grow) {
-	malloc_mutex_postfork_child(tsdn, &ecache_grow->mtx);
-}
26 src/extent.c
@@ -614,7 +614,7 @@ extent_recycle(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
 static edata_t *
 extent_grow_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
     size_t size, size_t alignment, bool zero, bool *commit) {
-	malloc_mutex_assert_owner(tsdn, &pac->ecache_grow.mtx);
+	malloc_mutex_assert_owner(tsdn, &pac->geom_grow.mtx);
 
 	size_t alloc_size_min = size + PAGE_CEILING(alignment) - PAGE;
 	/* Beware size_t wrap-around. */
@@ -626,15 +626,15 @@ extent_grow_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
 	 * satisfy this request.
 	 */
 	pszind_t egn_skip = 0;
-	size_t alloc_size = sz_pind2sz(pac->ecache_grow.next + egn_skip);
+	size_t alloc_size = sz_pind2sz(pac->geom_grow.next + egn_skip);
 	while (alloc_size < alloc_size_min) {
 		egn_skip++;
-		if (pac->ecache_grow.next + egn_skip >=
+		if (pac->geom_grow.next + egn_skip >=
 		    sz_psz2ind(SC_LARGE_MAXCLASS)) {
 			/* Outside legal range. */
 			goto label_err;
 		}
-		alloc_size = sz_pind2sz(pac->ecache_grow.next + egn_skip);
+		alloc_size = sz_pind2sz(pac->geom_grow.next + egn_skip);
 	}
 
 	edata_t *edata = edata_cache_get(tsdn, pac->edata_cache);
@@ -727,13 +727,13 @@ extent_grow_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
 	 * Increment extent_grow_next if doing so wouldn't exceed the allowed
 	 * range.
 	 */
-	if (pac->ecache_grow.next + egn_skip + 1 <= pac->ecache_grow.limit) {
-		pac->ecache_grow.next += egn_skip + 1;
+	if (pac->geom_grow.next + egn_skip + 1 <= pac->geom_grow.limit) {
+		pac->geom_grow.next += egn_skip + 1;
 	} else {
-		pac->ecache_grow.next = pac->ecache_grow.limit;
+		pac->geom_grow.next = pac->geom_grow.limit;
 	}
 	/* All opportunities for failure are past. */
-	malloc_mutex_unlock(tsdn, &pac->ecache_grow.mtx);
+	malloc_mutex_unlock(tsdn, &pac->geom_grow.mtx);
 
 	if (config_prof) {
 		/* Adjust gdump stats now that extent is final size. */
@@ -747,7 +747,7 @@ extent_grow_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
 
 	return edata;
 label_err:
-	malloc_mutex_unlock(tsdn, &pac->ecache_grow.mtx);
+	malloc_mutex_unlock(tsdn, &pac->geom_grow.mtx);
 	return NULL;
 }
 
@@ -757,13 +757,13 @@ extent_alloc_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
 	assert(size != 0);
 	assert(alignment != 0);
 
-	malloc_mutex_lock(tsdn, &pac->ecache_grow.mtx);
+	malloc_mutex_lock(tsdn, &pac->geom_grow.mtx);
 
 	edata_t *edata = extent_recycle(tsdn, pac, ehooks,
 	    &pac->ecache_retained, new_addr, size, alignment, zero,
 	    commit, /* growing_retained */ true);
 	if (edata != NULL) {
-		malloc_mutex_unlock(tsdn, &pac->ecache_grow.mtx);
+		malloc_mutex_unlock(tsdn, &pac->geom_grow.mtx);
 		if (config_prof) {
 			extent_gdump_add(tsdn, edata);
 		}
@@ -772,9 +772,9 @@ extent_alloc_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
 		    alignment, zero, commit);
 		/* extent_grow_retained() always releases extent_grow_mtx. */
 	} else {
-		malloc_mutex_unlock(tsdn, &pac->ecache_grow.mtx);
+		malloc_mutex_unlock(tsdn, &pac->geom_grow.mtx);
 	}
-	malloc_mutex_assert_not_owner(tsdn, &pac->ecache_grow.mtx);
+	malloc_mutex_assert_not_owner(tsdn, &pac->geom_grow.mtx);
 
 	return edata;
 }
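Editor's note: a pattern worth noticing in these hunks is the lock handoff: extent_alloc_retained() acquires geom_grow.mtx, but on a cache miss extent_grow_retained() inherits the lock and releases it on every path (success and label_err alike), which is why the assert_not_owner at the end holds unconditionally. A self-contained pthreads sketch of that contract (illustrative names, not jemalloc's implementation):

```c
#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t grow_mtx = PTHREAD_MUTEX_INITIALIZER;

/* Called with grow_mtx held; releases it on every path. */
static void *
grow_retained_sketch(size_t size) {
	(void)size;
	void *result = NULL; /* placeholder: grow a new mapping here */
	pthread_mutex_unlock(&grow_mtx);
	return result;
}

static void *
alloc_retained_sketch(size_t size) {
	pthread_mutex_lock(&grow_mtx);
	void *recycled = NULL; /* placeholder: try the retained cache */
	if (recycled != NULL) {
		pthread_mutex_unlock(&grow_mtx);
		return recycled;
	}
	/* On a miss, the callee inherits and releases the lock. */
	return grow_retained_sketch(size);
}
```

Handing the lock to the callee keeps the series state (next plus egn_skip) consistent across the whole grow operation without a second acquisition.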
29 src/geom_grow.c Normal file
@@ -0,0 +1,29 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+bool
+geom_grow_init(tsdn_t *tsdn, geom_grow_t *geom_grow) {
+	geom_grow->next = sz_psz2ind(HUGEPAGE);
+	geom_grow->limit = sz_psz2ind(SC_LARGE_MAXCLASS);
+	if (malloc_mutex_init(&geom_grow->mtx, "extent_grow",
+	    WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) {
+		return true;
+	}
+	return false;
+}
+
+void
+geom_grow_prefork(tsdn_t *tsdn, geom_grow_t *geom_grow) {
+	malloc_mutex_prefork(tsdn, &geom_grow->mtx);
+}
+
+void
+geom_grow_postfork_parent(tsdn_t *tsdn, geom_grow_t *geom_grow) {
+	malloc_mutex_postfork_parent(tsdn, &geom_grow->mtx);
+}
+
+void
+geom_grow_postfork_child(tsdn_t *tsdn, geom_grow_t *geom_grow) {
+	malloc_mutex_postfork_child(tsdn, &geom_grow->mtx);
+}
@@ -16,7 +16,7 @@ pa_shard_prefork0(tsdn_t *tsdn, pa_shard_t *shard) {
 
 void
 pa_shard_prefork2(tsdn_t *tsdn, pa_shard_t *shard) {
-	ecache_grow_prefork(tsdn, &shard->pac.ecache_grow);
+	geom_grow_prefork(tsdn, &shard->pac.geom_grow);
 }
 
 void
@@ -37,7 +37,7 @@ pa_shard_postfork_parent(tsdn_t *tsdn, pa_shard_t *shard) {
 	ecache_postfork_parent(tsdn, &shard->pac.ecache_dirty);
 	ecache_postfork_parent(tsdn, &shard->pac.ecache_muzzy);
 	ecache_postfork_parent(tsdn, &shard->pac.ecache_retained);
-	ecache_grow_postfork_parent(tsdn, &shard->pac.ecache_grow);
+	geom_grow_postfork_parent(tsdn, &shard->pac.geom_grow);
 	malloc_mutex_postfork_parent(tsdn, &shard->pac.decay_dirty.mtx);
 	malloc_mutex_postfork_parent(tsdn, &shard->pac.decay_muzzy.mtx);
 }
@@ -48,7 +48,7 @@ pa_shard_postfork_child(tsdn_t *tsdn, pa_shard_t *shard) {
 	ecache_postfork_child(tsdn, &shard->pac.ecache_dirty);
 	ecache_postfork_child(tsdn, &shard->pac.ecache_muzzy);
 	ecache_postfork_child(tsdn, &shard->pac.ecache_retained);
-	ecache_grow_postfork_child(tsdn, &shard->pac.ecache_grow);
+	geom_grow_postfork_child(tsdn, &shard->pac.geom_grow);
 	malloc_mutex_postfork_child(tsdn, &shard->pac.decay_dirty.mtx);
 	malloc_mutex_postfork_child(tsdn, &shard->pac.decay_muzzy.mtx);
 }
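Editor's note: the geom_grow_{prefork,postfork_parent,postfork_child} plumbing follows jemalloc's standard fork protocol: every mutex is acquired before fork() so the child cannot inherit a lock held by a thread that no longer exists in it, then the parent releases it and the child restores a usable copy. A minimal standalone analogue using pthread_atfork (simplified: the child just unlocks, whereas jemalloc's postfork_child can re-initialize mutex state):

```c
#include <pthread.h>

static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;

static void prefork(void)         { pthread_mutex_lock(&mtx); }
static void postfork_parent(void) { pthread_mutex_unlock(&mtx); }
static void postfork_child(void)  { pthread_mutex_unlock(&mtx); }

/* Register once, e.g. from a library constructor. */
static void
install_fork_handlers(void) {
	pthread_atfork(prefork, postfork_parent, postfork_child);
}
```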
10 src/pac.c
@@ -68,7 +68,7 @@ pac_init(tsdn_t *tsdn, pac_t *pac, base_t *base, emap_t *emap,
 	    ind, /* delay_coalesce */ false)) {
 		return true;
 	}
-	if (ecache_grow_init(tsdn, &pac->ecache_grow)) {
+	if (geom_grow_init(tsdn, &pac->geom_grow)) {
 		return true;
 	}
 	if (decay_init(&pac->decay_dirty, cur_time, dirty_decay_ms)) {
@@ -203,14 +203,14 @@ pac_retain_grow_limit_get_set(tsdn_t *tsdn, pac_t *pac, size_t *old_limit,
 		}
 	}
 
-	malloc_mutex_lock(tsdn, &pac->ecache_grow.mtx);
+	malloc_mutex_lock(tsdn, &pac->geom_grow.mtx);
 	if (old_limit != NULL) {
-		*old_limit = sz_pind2sz(pac->ecache_grow.limit);
+		*old_limit = sz_pind2sz(pac->geom_grow.limit);
 	}
 	if (new_limit != NULL) {
-		pac->ecache_grow.limit = new_ind;
+		pac->geom_grow.limit = new_ind;
 	}
-	malloc_mutex_unlock(tsdn, &pac->ecache_grow.mtx);
+	malloc_mutex_unlock(tsdn, &pac->geom_grow.mtx);
 
 	return false;
 }
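Editor's note: pac_retain_grow_limit_get_set() is the machinery behind the "controlled through mallctl only" remark in the struct comment; the relevant knob is the arena.<i>.retain_grow_limit mallctl. A hedged usage sketch (assuming a standard jemalloc build where mallctl is exported under that unprefixed name):

```c
#include <jemalloc/jemalloc.h>
#include <stdio.h>

/* Read arena 0's retain_grow_limit, then cap it at 64 MiB. */
int
cap_grow_limit(void) {
	size_t old_limit, new_limit = (size_t)64 << 20;
	size_t old_len = sizeof(old_limit);
	if (mallctl("arena.0.retain_grow_limit", &old_limit, &old_len,
	    &new_limit, sizeof(new_limit)) != 0) {
		return 1;
	}
	printf("old retain_grow_limit: %zu\n", old_limit);
	return 0;
}
```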
test/unit/retained.c
@@ -142,7 +142,7 @@ TEST_BEGIN(test_retained) {
 	size_t usable = 0;
 	size_t fragmented = 0;
 	for (pszind_t pind = sz_psz2ind(HUGEPAGE); pind <
-	    arena->pa_shard.pac.ecache_grow.next; pind++) {
+	    arena->pa_shard.pac.geom_grow.next; pind++) {
 		size_t psz = sz_pind2sz(pind);
 		size_t psz_fragmented = psz % esz;
 		size_t psz_usable = psz - psz_fragmented;