Introduce hpdata_t.
Using an edata_t both for hugepages and the allocations within those hugepages was convenient at first, but has outlived its usefulness. Representing hugepages explicitly, with their own data structure, will make future development easier.
committed by David Goldblatt
parent 4a15008cfb
commit ca30b5db2b
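The diff below manipulates hpdata_t only through its accessors, so for orientation here is a rough sketch of what the new struct must carry, inferred from those call sites (the field names are illustrative guesses; the authoritative definition is in include/jemalloc/internal/hpdata.h):

/* Illustrative sketch only -- inferred from the accessors used below. */
typedef struct hpdata_s hpdata_t;
struct hpdata_s {
	void		*h_address;	/* Base address of the hugepage. */
	uint64_t	h_age;		/* Operation-count stamp for the age heap. */
	bool		h_huge;		/* Has the range been hugified? */
	uint32_t	h_nfree;	/* Free pages remaining. */
	uint32_t	h_longest_free_range;
	union {
		phn(hpdata_t)	ph_link;	/* psset age-heap linkage. */
		ql_elm(hpdata_t) ql_link;	/* unused_slabs list linkage. */
	};
	/* One bit per page within the hugepage; set bits are active. */
	fb_group_t	active_pages[FB_NGROUPS(HUGEPAGE_PAGES)];
};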
@@ -4,4 +4,3 @@
 ph_gen(, edata_avail_, edata_avail_t, edata_t, ph_link,
     edata_esnead_comp)
 ph_gen(, edata_heap_, edata_heap_t, edata_t, ph_link, edata_snad_comp)
-ph_gen(, edata_age_heap_, edata_age_heap_t, edata_t, ph_link, edata_age_comp)
src/hpa.c | 178
@@ -33,22 +33,22 @@ hpa_supported() {
 	 * We fundamentally rely on a address-space-hungry growth strategy for
 	 * hugepages.
 	 */
-	if (LG_SIZEOF_PTR == 2) {
+	if (LG_SIZEOF_PTR != 3) {
 		return false;
 	}
 	/*
-	 * We use the edata bitmap; it needs to have at least as many bits as a
-	 * hugepage has pages.
+	 * If we couldn't detect the value of HUGEPAGE, HUGEPAGE_PAGES becomes
+	 * this sentinel value -- see the comment in pages.h.
 	 */
-	if (HUGEPAGE / PAGE > BITMAP_GROUPS_MAX * sizeof(bitmap_t) * 8) {
+	if (HUGEPAGE_PAGES == 1) {
 		return false;
 	}
 	return true;
 }
 
 bool
-hpa_shard_init(hpa_shard_t *shard, emap_t *emap, edata_cache_t *edata_cache,
-    unsigned ind, size_t alloc_max) {
+hpa_shard_init(hpa_shard_t *shard, emap_t *emap, base_t *base,
+    edata_cache_t *edata_cache, unsigned ind, size_t alloc_max) {
 	/* malloc_conf processing should have filtered out these cases. */
 	assert(hpa_supported());
 	bool err;
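The new HUGEPAGE_PAGES == 1 check leans on a sentinel arrangement in pages.h, per the comment above. A plausible reading (the macro below is an assumption for illustration; pages.h is authoritative):

/*
 * Assumed sketch: if configure could not detect the huge page size,
 * HUGEPAGE falls back to the regular page size, the ratio collapses to 1,
 * and hpa_supported() bails out.
 */
#define HUGEPAGE_PAGES (HUGEPAGE / PAGE)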
@@ -64,11 +64,14 @@ hpa_shard_init(hpa_shard_t *shard, emap_t *emap, edata_cache_t *edata_cache,
 	}
 
 	assert(edata_cache != NULL);
+	shard->base = base;
 	edata_cache_small_init(&shard->ecs, edata_cache);
 	psset_init(&shard->psset);
 	shard->alloc_max = alloc_max;
-	edata_list_inactive_init(&shard->unused_slabs);
+	hpdata_list_init(&shard->unused_slabs);
+	shard->age_counter = 0;
 	shard->eden = NULL;
+	shard->eden_len = 0;
 	shard->ind = ind;
 	shard->emap = emap;
 
@@ -104,22 +107,27 @@ hpa_shard_stats_merge(tsdn_t *tsdn, hpa_shard_t *shard,
 	malloc_mutex_unlock(tsdn, &shard->mtx);
 }
 
+static hpdata_t *
+hpa_alloc_ps(tsdn_t *tsdn, hpa_shard_t *shard) {
+	return (hpdata_t *)base_alloc(tsdn, shard->base, sizeof(hpdata_t),
+	    CACHELINE);
+}
+
 static bool
-hpa_should_hugify(hpa_shard_t *shard, edata_t *ps) {
+hpa_should_hugify(hpa_shard_t *shard, hpdata_t *ps) {
 	/*
 	 * For now, just use a static check; hugify a page if it's <= 5%
 	 * inactive.  Eventually, this should be a malloc conf option.
 	 */
-	return !edata_hugeified_get(ps)
-	    && edata_nfree_get(ps) < (HUGEPAGE / PAGE) * 5 / 100;
+	return !hpdata_huge_get(ps)
+	    && hpdata_nfree_get(ps) < (HUGEPAGE / PAGE) * 5 / 100;
 }
 
 /* Returns true on error. */
 static void
-hpa_hugify(edata_t *ps) {
-	assert(edata_size_get(ps) == HUGEPAGE);
-	assert(edata_hugeified_get(ps));
-	bool err = pages_huge(edata_base_get(ps), HUGEPAGE);
+hpa_hugify(hpdata_t *ps) {
+	assert(hpdata_huge_get(ps));
+	bool err = pages_huge(hpdata_addr_get(ps), HUGEPAGE);
 	/*
 	 * Eat the error; even if the hugeification failed, it's still safe to
 	 * pretend it didn't (and would require extraordinary measures to
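To make the "<= 5% inactive" threshold concrete: with a 4 KiB page and a 2 MiB hugepage (hypothetical values standing in for PAGE and HUGEPAGE), a pageslab qualifies for hugification once fewer than 25 of its 512 pages are free:

#include <assert.h>
#include <stddef.h>

int main(void) {
	size_t page = 4096, hugepage = 2 * 1024 * 1024;	/* assumed sizes */
	size_t hugepage_pages = hugepage / page;	/* 512 */
	size_t threshold = hugepage_pages * 5 / 100;	/* 25 */
	assert(hugepage_pages == 512 && threshold == 25);
	/* hpa_should_hugify: not already huge, and nfree < threshold. */
	return 0;
}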
@@ -129,30 +137,36 @@ hpa_hugify(edata_t *ps) {
 }
 
 static void
-hpa_dehugify(edata_t *ps) {
+hpa_dehugify(hpdata_t *ps) {
 	/* Purge, then dehugify while unbacked. */
-	pages_purge_forced(edata_addr_get(ps), HUGEPAGE);
-	pages_nohuge(edata_addr_get(ps), HUGEPAGE);
-	edata_hugeified_set(ps, false);
+	pages_purge_forced(hpdata_addr_get(ps), HUGEPAGE);
+	pages_nohuge(hpdata_addr_get(ps), HUGEPAGE);
+	hpdata_huge_set(ps, false);
 }
 
-static edata_t *
+static hpdata_t *
 hpa_grow(tsdn_t *tsdn, hpa_shard_t *shard) {
 	malloc_mutex_assert_owner(tsdn, &shard->grow_mtx);
-	edata_t *ps = NULL;
+	hpdata_t *ps = NULL;
 
 	/* Is there address space waiting for reuse? */
 	malloc_mutex_assert_owner(tsdn, &shard->grow_mtx);
-	ps = edata_list_inactive_first(&shard->unused_slabs);
+	ps = hpdata_list_first(&shard->unused_slabs);
 	if (ps != NULL) {
-		edata_list_inactive_remove(&shard->unused_slabs, ps);
+		hpdata_list_remove(&shard->unused_slabs, ps);
+		hpdata_age_set(ps, shard->age_counter++);
 		return ps;
 	}
 
 	/* Is eden a perfect fit? */
-	if (shard->eden != NULL && edata_size_get(shard->eden) == HUGEPAGE) {
-		ps = shard->eden;
+	if (shard->eden != NULL && shard->eden_len == HUGEPAGE) {
+		ps = hpa_alloc_ps(tsdn, shard);
+		if (ps == NULL) {
+			return NULL;
+		}
+		hpdata_init(ps, shard->eden, shard->age_counter++);
 		shard->eden = NULL;
+		shard->eden_len = 0;
 		return ps;
 	}
 
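The "purge, then dehugify while unbacked" ordering in hpa_dehugify corresponds, on Linux, to roughly the following madvise sequence (a sketch; jemalloc's pages_purge_forced and pages_nohuge wrap the platform-specific details):

#include <sys/mman.h>

/*
 * Sketch (Linux-specific): drop the backing memory first, then ask the
 * kernel not to reassemble the range into a huge page when next touched.
 */
static void
dehugify_sketch(void *addr, size_t hugepage_size) {
	madvise(addr, hugepage_size, MADV_DONTNEED);	/* ~pages_purge_forced */
	madvise(addr, hugepage_size, MADV_NOHUGEPAGE);	/* ~pages_nohuge */
}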
@@ -173,78 +187,32 @@ hpa_grow(tsdn_t *tsdn, hpa_shard_t *shard) {
 		if (new_eden == NULL) {
 			return NULL;
 		}
-		malloc_mutex_lock(tsdn, &shard->mtx);
-		/* Allocate ps edata, bailing if we fail. */
-		ps = edata_cache_small_get(tsdn, &shard->ecs);
+		ps = hpa_alloc_ps(tsdn, shard);
 		if (ps == NULL) {
-			malloc_mutex_unlock(tsdn, &shard->mtx);
 			pages_unmap(new_eden, HPA_EDEN_SIZE);
 			return NULL;
 		}
-		/* Allocate eden edata, bailing if we fail. */
-		shard->eden = edata_cache_small_get(tsdn, &shard->ecs);
-		if (shard->eden == NULL) {
-			edata_cache_small_put(tsdn, &shard->ecs, ps);
-			malloc_mutex_unlock(tsdn, &shard->mtx);
-			pages_unmap(new_eden, HPA_EDEN_SIZE);
-			return NULL;
-		}
-		/* Success. */
-		malloc_mutex_unlock(tsdn, &shard->mtx);
-
-		/*
-		 * Note that the values here don't really make sense (e.g. eden
-		 * is actually zeroed).  But we don't use the slab metadata in
-		 * determining subsequent allocation metadata (e.g. zero
-		 * tracking should be done at the per-page level, not at the
-		 * level of the hugepage).  It's just a convenient data
-		 * structure that contains much of the helpers we need (defined
-		 * lists, a bitmap, an address field, etc.).  Eventually, we'll
-		 * have a "real" representation of a hugepage that's unconnected
-		 * to the edata_ts it will serve allocations into.
-		 */
-		edata_init(shard->eden, shard->ind, new_eden, HPA_EDEN_SIZE,
-		    /* slab */ false, SC_NSIZES, /* sn */ 0, extent_state_dirty,
-		    /* zeroed */ false, /* comitted */ true, EXTENT_PAI_HPA,
-		    /* is_head */ true);
-		edata_hugeified_set(shard->eden, false);
+		shard->eden = new_eden;
+		shard->eden_len = HPA_EDEN_SIZE;
 	} else {
 		/* Eden is already nonempty; only need an edata for ps. */
-		malloc_mutex_lock(tsdn, &shard->mtx);
-		ps = edata_cache_small_get(tsdn, &shard->ecs);
-		malloc_mutex_unlock(tsdn, &shard->mtx);
+		ps = hpa_alloc_ps(tsdn, shard);
 		if (ps == NULL) {
 			return NULL;
 		}
 	}
-	/*
-	 * We should have dropped mtx since we're not touching ecs any more, but
-	 * we should continue to hold the grow mutex, since we're about to touch
-	 * eden.
-	 */
-	malloc_mutex_assert_not_owner(tsdn, &shard->mtx);
-	malloc_mutex_assert_owner(tsdn, &shard->grow_mtx);
 
 	assert(ps != NULL);
 	assert(shard->eden != NULL);
-	assert(edata_size_get(shard->eden) > HUGEPAGE);
-	assert(edata_size_get(shard->eden) % HUGEPAGE == 0);
-	assert(edata_addr_get(shard->eden)
-	    == HUGEPAGE_ADDR2BASE(edata_addr_get(shard->eden)));
-	malloc_mutex_lock(tsdn, &shard->mtx);
-	ps = edata_cache_small_get(tsdn, &shard->ecs);
-	malloc_mutex_unlock(tsdn, &shard->mtx);
-	if (ps == NULL) {
-		return NULL;
-	}
-	edata_init(ps, edata_arena_ind_get(shard->eden),
-	    edata_addr_get(shard->eden), HUGEPAGE, /* slab */ false,
-	    /* szind */ SC_NSIZES, /* sn */ 0, extent_state_dirty,
-	    /* zeroed */ false, /* comitted */ true, EXTENT_PAI_HPA,
-	    /* is_head */ true);
-	edata_hugeified_set(ps, false);
-	edata_addr_set(shard->eden, edata_past_get(ps));
-	edata_size_set(shard->eden,
-	    edata_size_get(shard->eden) - HUGEPAGE);
+	assert(shard->eden_len > HUGEPAGE);
+	assert(shard->eden_len % HUGEPAGE == 0);
+	assert(HUGEPAGE_ADDR2BASE(shard->eden) == shard->eden);
+
+	hpdata_init(ps, shard->eden, shard->age_counter++);
+
+	char *eden_char = (char *)shard->eden;
+	eden_char += HUGEPAGE;
+	shard->eden = (void *)eden_char;
+	shard->eden_len -= HUGEPAGE;
 
 	return ps;
 }
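The new eden logic is a bump allocator over hugepage-sized chunks: map a large HPA_EDEN_SIZE region once, then peel one aligned hugepage off the front per grow. A standalone sketch of the carve step at the end of hpa_grow (hypothetical names, no locking):

#include <assert.h>
#include <stddef.h>

static char *eden;	/* stand-in for shard->eden */
static size_t eden_len;	/* stand-in for shard->eden_len */

/* Carve one hugepage-sized, hugepage-aligned chunk off the front of eden. */
static void *
eden_carve(size_t hugepage_size) {
	assert(eden != NULL && eden_len >= hugepage_size);
	assert(eden_len % hugepage_size == 0);
	void *ps_addr = eden;
	eden += hugepage_size;
	eden_len -= hugepage_size;
	return ps_addr;
}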
@@ -255,7 +223,7 @@ hpa_grow(tsdn_t *tsdn, hpa_shard_t *shard) {
  * their address space in a list outside the psset.
  */
 static void
-hpa_handle_ps_eviction(tsdn_t *tsdn, hpa_shard_t *shard, edata_t *ps) {
+hpa_handle_ps_eviction(tsdn_t *tsdn, hpa_shard_t *shard, hpdata_t *ps) {
 	/*
 	 * We do relatively expensive system calls.  The ps was evicted, so no
 	 * one should touch it while we're also touching it.
@@ -263,9 +231,6 @@ hpa_handle_ps_eviction(tsdn_t *tsdn, hpa_shard_t *shard, edata_t *ps) {
 	malloc_mutex_assert_not_owner(tsdn, &shard->mtx);
 	malloc_mutex_assert_not_owner(tsdn, &shard->grow_mtx);
 
-	assert(edata_size_get(ps) == HUGEPAGE);
-	assert(HUGEPAGE_ADDR2BASE(edata_addr_get(ps)) == edata_addr_get(ps));
-
 	/*
 	 * We do this unconditionally, even for pages which were not originally
 	 * hugeified; it has the same effect.
@@ -273,7 +238,7 @@ hpa_handle_ps_eviction(tsdn_t *tsdn, hpa_shard_t *shard, edata_t *ps) {
 	hpa_dehugify(ps);
 
 	malloc_mutex_lock(tsdn, &shard->grow_mtx);
-	edata_list_inactive_prepend(&shard->unused_slabs, ps);
+	hpdata_list_prepend(&shard->unused_slabs, ps);
 	malloc_mutex_unlock(tsdn, &shard->grow_mtx);
 }
 
@@ -307,7 +272,7 @@ hpa_try_alloc_no_grow(tsdn_t *tsdn, hpa_shard_t *shard, size_t size, bool *oom)
 	err = emap_register_boundary(tsdn, shard->emap, edata,
 	    SC_NSIZES, /* slab */ false);
 	if (err) {
-		edata_t *ps = psset_dalloc(&shard->psset, edata);
+		hpdata_t *ps = psset_dalloc(&shard->psset, edata);
 		/*
 		 * The pageslab was nonempty before we started; it
 		 * should still be nonempty now, and so shouldn't get
@@ -320,7 +285,7 @@ hpa_try_alloc_no_grow(tsdn_t *tsdn, hpa_shard_t *shard, size_t size, bool *oom)
 		return NULL;
 	}
 
-	edata_t *ps = edata_ps_get(edata);
+	hpdata_t *ps = edata_ps_get(edata);
 	assert(ps != NULL);
 	bool hugify = hpa_should_hugify(shard, ps);
 	if (hugify) {
@@ -378,16 +343,11 @@ hpa_alloc_psset(tsdn_t *tsdn, hpa_shard_t *shard, size_t size) {
 	 * deallocations (and allocations of smaller sizes) may still succeed
 	 * while we're doing this potentially expensive system call.
 	 */
-	edata_t *grow_edata = hpa_grow(tsdn, shard);
-	if (grow_edata == NULL) {
+	hpdata_t *grow_ps = hpa_grow(tsdn, shard);
+	if (grow_ps == NULL) {
 		malloc_mutex_unlock(tsdn, &shard->grow_mtx);
 		return NULL;
 	}
-	assert(edata_arena_ind_get(grow_edata) == shard->ind);
-
-	edata_slab_set(grow_edata, true);
-	fb_group_t *fb = edata_slab_data_get(grow_edata)->bitmap;
-	fb_init(fb, HUGEPAGE / PAGE);
 
 	/* We got the new edata; allocate from it. */
 	malloc_mutex_lock(tsdn, &shard->mtx);
@@ -395,18 +355,19 @@
 	if (edata == NULL) {
 		malloc_mutex_unlock(tsdn, &shard->mtx);
 		malloc_mutex_unlock(tsdn, &shard->grow_mtx);
+		hpa_handle_ps_eviction(tsdn, shard, grow_ps);
 		return NULL;
 	}
-	psset_alloc_new(&shard->psset, grow_edata, edata, size);
+	psset_alloc_new(&shard->psset, grow_ps, edata, size);
 	err = emap_register_boundary(tsdn, shard->emap, edata,
 	    SC_NSIZES, /* slab */ false);
 	if (err) {
-		edata_t *ps = psset_dalloc(&shard->psset, edata);
+		hpdata_t *ps = psset_dalloc(&shard->psset, edata);
 		/*
 		 * The pageslab was empty except for the new allocation; it
 		 * should get evicted.
 		 */
-		assert(ps == grow_edata);
+		assert(ps == grow_ps);
 		edata_cache_small_put(tsdn, &shard->ecs, edata);
 		/*
 		 * Technically the same as fallthrough at the time of this
@@ -496,7 +457,7 @@ hpa_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata) {
 	assert(edata_committed_get(edata));
 	assert(edata_base_get(edata) != NULL);
 
-	edata_t *ps = edata_ps_get(edata);
+	hpdata_t *ps = edata_ps_get(edata);
 	/* Currently, all edatas come from pageslabs. */
 	assert(ps != NULL);
 	emap_deregister_boundary(tsdn, shard->emap, edata);
@@ -506,7 +467,7 @@ hpa_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata) {
 	 * Page slabs can move between pssets (and have their hugeified status
 	 * change) in racy ways.
 	 */
-	edata_t *evicted_ps = psset_dalloc(&shard->psset, edata);
+	hpdata_t *evicted_ps = psset_dalloc(&shard->psset, edata);
 	/*
 	 * If a pageslab became empty because of the dalloc, it better have been
 	 * the one we expected.
@@ -562,11 +523,10 @@ hpa_shard_destroy(tsdn_t *tsdn, hpa_shard_t *shard) {
 		hpa_assert_empty(tsdn, shard, &shard->psset);
 		malloc_mutex_unlock(tsdn, &shard->mtx);
 	}
-	edata_t *ps;
-	while ((ps = edata_list_inactive_first(&shard->unused_slabs)) != NULL) {
-		assert(edata_size_get(ps) == HUGEPAGE);
-		edata_list_inactive_remove(&shard->unused_slabs, ps);
-		pages_unmap(edata_base_get(ps), HUGEPAGE);
+	hpdata_t *ps;
+	while ((ps = hpdata_list_first(&shard->unused_slabs)) != NULL) {
+		hpdata_list_remove(&shard->unused_slabs, ps);
+		pages_unmap(hpdata_addr_get(ps), HUGEPAGE);
 	}
 }
 
src/hpdata.c | 18 (new file)
@@ -0,0 +1,18 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/hpdata.h"
+
+static int
+hpdata_age_comp(const hpdata_t *a, const hpdata_t *b) {
+	uint64_t a_age = hpdata_age_get(a);
+	uint64_t b_age = hpdata_age_get(b);
+	/*
+	 * hpdata ages are operation counts in the psset; no two should be the
+	 * same.
+	 */
+	assert(a_age != b_age);
+	return (a_age > b_age) - (a_age < b_age);
+}
+
+ph_gen(, hpdata_age_heap_, hpdata_age_heap_t, hpdata_t, ph_link, hpdata_age_comp)
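The comparator uses the standard overflow-safe three-way idiom: (a > b) - (a < b) yields -1, 0, or 1, where returning a - b would wrap for 64-bit ages. A quick standalone check:

#include <assert.h>
#include <stdint.h>

static int
cmp_u64(uint64_t a, uint64_t b) {
	return (a > b) - (a < b);
}

int main(void) {
	assert(cmp_u64(1, 2) == -1);
	assert(cmp_u64(2, 1) == 1);
	/* Naive subtraction misorders this pair once truncated to int. */
	assert(cmp_u64(0, UINT64_MAX) == -1);
	return 0;
}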
src/pa.c | 4
@@ -51,8 +51,8 @@ pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, emap_t *emap, base_t *base,
 bool
 pa_shard_enable_hpa(pa_shard_t *shard, size_t alloc_max, size_t sec_nshards,
     size_t sec_alloc_max, size_t sec_bytes_max) {
-	if (hpa_shard_init(&shard->hpa_shard, shard->emap, &shard->edata_cache,
-	    shard->ind, alloc_max)) {
+	if (hpa_shard_init(&shard->hpa_shard, shard->emap, shard->base,
+	    &shard->edata_cache, shard->ind, alloc_max)) {
 		return true;
 	}
 	if (sec_init(&shard->hpa_sec, &shard->hpa_shard.pai, sec_nshards,
src/psset.c | 134
@@ -11,11 +11,10 @@ static const bitmap_info_t psset_bitmap_info =
 void
 psset_init(psset_t *psset) {
 	for (unsigned i = 0; i < PSSET_NPSIZES; i++) {
-		edata_age_heap_new(&psset->pageslabs[i]);
+		hpdata_age_heap_new(&psset->pageslabs[i]);
 	}
 	bitmap_init(psset->bitmap, &psset_bitmap_info, /* fill */ true);
 	memset(&psset->stats, 0, sizeof(psset->stats));
-	psset->age_counter = 0;
 }
 
 static void
@@ -49,18 +48,17 @@ psset_stats_accum(psset_stats_t *dst, psset_stats_t *src) {
  * ensure we don't miss any heap modification operations.
  */
 JEMALLOC_ALWAYS_INLINE void
-psset_bin_stats_insert_remove(psset_bin_stats_t *binstats, edata_t *ps,
+psset_bin_stats_insert_remove(psset_bin_stats_t *binstats, hpdata_t *ps,
     bool insert) {
-	size_t *npageslabs_dst = edata_hugeified_get(ps)
+	size_t *npageslabs_dst = hpdata_huge_get(ps)
 	    ? &binstats->npageslabs_huge : &binstats->npageslabs_nonhuge;
-	size_t *nactive_dst = edata_hugeified_get(ps)
+	size_t *nactive_dst = hpdata_huge_get(ps)
 	    ? &binstats->nactive_huge : &binstats->nactive_nonhuge;
-	size_t *ninactive_dst = edata_hugeified_get(ps)
+	size_t *ninactive_dst = hpdata_huge_get(ps)
 	    ? &binstats->ninactive_huge : &binstats->ninactive_nonhuge;
 
-	size_t npages = edata_size_get(ps) >> LG_PAGE;
-	size_t ninactive = edata_nfree_get(ps);
-	size_t nactive = npages - ninactive;
+	size_t ninactive = hpdata_nfree_get(ps);
+	size_t nactive = HUGEPAGE_PAGES - ninactive;
 
 	size_t mul = insert ? (size_t)1 : (size_t)-1;
 	*npageslabs_dst += mul * 1;
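The mul line above folds insert and remove into one body: size_t arithmetic is modular, so adding (size_t)-1 * n is exactly subtracting n. A standalone check:

#include <assert.h>
#include <stddef.h>

int main(void) {
	size_t nactive = 10;
	size_t mul = (size_t)-1;	/* the "remove" direction */
	nactive += mul * 3;		/* wraps: same as nactive -= 3 */
	assert(nactive == 7);
	return 0;
}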
@@ -69,12 +67,12 @@ psset_bin_stats_insert_remove(psset_bin_stats_t *binstats, edata_t *ps,
 }
 
 static void
-psset_bin_stats_insert(psset_bin_stats_t *binstats, edata_t *ps) {
+psset_bin_stats_insert(psset_bin_stats_t *binstats, hpdata_t *ps) {
 	psset_bin_stats_insert_remove(binstats, ps, /* insert */ true);
 }
 
 static void
-psset_bin_stats_remove(psset_bin_stats_t *binstats, edata_t *ps) {
+psset_bin_stats_remove(psset_bin_stats_t *binstats, hpdata_t *ps) {
 	psset_bin_stats_insert_remove(binstats, ps, /* insert */ false);
 }
 
@@ -96,27 +94,27 @@ psset_bin_stats_deactivate(psset_bin_stats_t *binstats, bool huge, size_t num) {
 }
 
 static void
-psset_edata_heap_remove(psset_t *psset, pszind_t pind, edata_t *ps) {
-	edata_age_heap_remove(&psset->pageslabs[pind], ps);
+psset_hpdata_heap_remove(psset_t *psset, pszind_t pind, hpdata_t *ps) {
+	hpdata_age_heap_remove(&psset->pageslabs[pind], ps);
 	psset_bin_stats_remove(&psset->stats.nonfull_slabs[pind], ps);
 }
 
 static void
-psset_edata_heap_insert(psset_t *psset, pszind_t pind, edata_t *ps) {
-	edata_age_heap_insert(&psset->pageslabs[pind], ps);
+psset_hpdata_heap_insert(psset_t *psset, pszind_t pind, hpdata_t *ps) {
+	hpdata_age_heap_insert(&psset->pageslabs[pind], ps);
 	psset_bin_stats_insert(&psset->stats.nonfull_slabs[pind], ps);
 }
 
 JEMALLOC_ALWAYS_INLINE void
-psset_assert_ps_consistent(edata_t *ps) {
-	assert(fb_urange_longest(edata_slab_data_get(ps)->bitmap,
-	    edata_size_get(ps) >> LG_PAGE) == edata_longest_free_range_get(ps));
+psset_assert_ps_consistent(hpdata_t *ps) {
+	assert(fb_urange_longest(ps->active_pages, HUGEPAGE_PAGES)
+	    == hpdata_longest_free_range_get(ps));
 }
 
 void
-psset_insert(psset_t *psset, edata_t *ps) {
+psset_insert(psset_t *psset, hpdata_t *ps) {
 	psset_assert_ps_consistent(ps);
-	size_t longest_free_range = edata_longest_free_range_get(ps);
+	size_t longest_free_range = hpdata_longest_free_range_get(ps);
 
 	if (longest_free_range == 0) {
 		/*
@@ -131,16 +129,16 @@ psset_insert(psset_t *psset, edata_t *ps) {
 	    longest_free_range << LG_PAGE));
 
 	assert(pind < PSSET_NPSIZES);
-	if (edata_age_heap_empty(&psset->pageslabs[pind])) {
+	if (hpdata_age_heap_empty(&psset->pageslabs[pind])) {
 		bitmap_unset(psset->bitmap, &psset_bitmap_info, (size_t)pind);
 	}
-	psset_edata_heap_insert(psset, pind, ps);
+	psset_hpdata_heap_insert(psset, pind, ps);
 }
 
 void
-psset_remove(psset_t *psset, edata_t *ps) {
+psset_remove(psset_t *psset, hpdata_t *ps) {
 	psset_assert_ps_consistent(ps);
-	size_t longest_free_range = edata_longest_free_range_get(ps);
+	size_t longest_free_range = hpdata_longest_free_range_get(ps);
 
 	if (longest_free_range == 0) {
 		psset_bin_stats_remove(&psset->stats.full_slabs, ps);
@@ -150,18 +148,18 @@ psset_remove(psset_t *psset, edata_t *ps) {
 	pszind_t pind = sz_psz2ind(sz_psz_quantize_floor(
 	    longest_free_range << LG_PAGE));
 	assert(pind < PSSET_NPSIZES);
-	psset_edata_heap_remove(psset, pind, ps);
-	if (edata_age_heap_empty(&psset->pageslabs[pind])) {
+	psset_hpdata_heap_remove(psset, pind, ps);
+	if (hpdata_age_heap_empty(&psset->pageslabs[pind])) {
 		bitmap_set(psset->bitmap, &psset_bitmap_info, (size_t)pind);
 	}
 }
 
 void
-psset_hugify(psset_t *psset, edata_t *ps) {
-	assert(!edata_hugeified_get(ps));
+psset_hugify(psset_t *psset, hpdata_t *ps) {
+	assert(!hpdata_huge_get(ps));
 	psset_assert_ps_consistent(ps);
 
-	size_t longest_free_range = edata_longest_free_range_get(ps);
+	size_t longest_free_range = hpdata_longest_free_range_get(ps);
 	psset_bin_stats_t *bin_stats;
 	if (longest_free_range == 0) {
 		bin_stats = &psset->stats.full_slabs;
@@ -172,7 +170,7 @@ psset_hugify(psset_t *psset, edata_t *ps) {
 		bin_stats = &psset->stats.nonfull_slabs[pind];
 	}
 	psset_bin_stats_remove(bin_stats, ps);
-	edata_hugeified_set(ps, true);
+	hpdata_huge_set(ps, true);
 	psset_bin_stats_insert(bin_stats, ps);
 }
 
@@ -180,7 +178,7 @@ psset_hugify(psset_t *psset, edata_t *ps) {
  * Similar to PAC's extent_recycle_extract.  Out of all the pageslabs in the
  * set, picks one that can satisfy the allocation and remove it from the set.
  */
-static edata_t *
+static hpdata_t *
 psset_recycle_extract(psset_t *psset, size_t size) {
 	pszind_t min_pind = sz_psz2ind(sz_psz_quantize_ceil(size));
 	pszind_t pind = (pszind_t)bitmap_ffu(psset->bitmap, &psset_bitmap_info,
@@ -188,13 +186,13 @@ psset_recycle_extract(psset_t *psset, size_t size) {
 	if (pind == PSSET_NPSIZES) {
 		return NULL;
 	}
-	edata_t *ps = edata_age_heap_first(&psset->pageslabs[pind]);
+	hpdata_t *ps = hpdata_age_heap_first(&psset->pageslabs[pind]);
 	if (ps == NULL) {
 		return NULL;
 	}
 
-	psset_edata_heap_remove(psset, pind, ps);
-	if (edata_age_heap_empty(&psset->pageslabs[pind])) {
+	psset_hpdata_heap_remove(psset, pind, ps);
+	if (hpdata_age_heap_empty(&psset->pageslabs[pind])) {
 		bitmap_set(psset->bitmap, &psset_bitmap_info, pind);
 	}
 
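psset_recycle_extract is a first-fit-by-size-class search: the psset keeps one bit per page-size class, cleared while that class's heap is nonempty, so bitmap_ffu (find first unset) lands on the smallest class that can satisfy the request. A simplified sketch of the idea (hypothetical helper; jemalloc's real bitmap is multi-level):

#include <stddef.h>
#include <stdint.h>

/* One word of size classes; a clear bit means "heap nonempty". */
static uint64_t class_empty_bits = ~0ULL;	/* everything empty initially */

/* Return the first nonempty class >= min, or 64 if there is none. */
static size_t
first_fit_class(size_t min) {
	for (size_t i = min; i < 64; i++) {
		if ((class_empty_bits & (1ULL << i)) == 0) {
			return i;
		}
	}
	return 64;
}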
@@ -207,7 +205,7 @@ psset_recycle_extract(psset_t *psset, size_t size) {
  * edata with a range in the pageslab, and puts ps back in the set.
  */
 static void
-psset_ps_alloc_insert(psset_t *psset, edata_t *ps, edata_t *r_edata,
+psset_ps_alloc_insert(psset_t *psset, hpdata_t *ps, edata_t *r_edata,
     size_t size) {
 	size_t start = 0;
 	/*
@@ -217,15 +215,14 @@ psset_ps_alloc_insert(psset_t *psset, edata_t *ps, edata_t *r_edata,
 	size_t begin = 0;
 	size_t len = 0;
 
-	fb_group_t *ps_fb = edata_slab_data_get(ps)->bitmap;
+	fb_group_t *ps_fb = ps->active_pages;
 
 	size_t npages = size >> LG_PAGE;
-	size_t ps_npages = edata_size_get(ps) >> LG_PAGE;
 
 	size_t largest_unchosen_range = 0;
 	while (true) {
-		bool found = fb_urange_iter(ps_fb, ps_npages, start, &begin,
-		    &len);
+		bool found = fb_urange_iter(ps_fb, HUGEPAGE_PAGES, start,
+		    &begin, &len);
 		/*
 		 * A precondition to this function is that ps must be able to
 		 * serve the allocation.
@@ -245,14 +242,14 @@ psset_ps_alloc_insert(psset_t *psset, edata_t *ps, edata_t *r_edata,
 		}
 		start = begin + len;
 	}
-	uintptr_t addr = (uintptr_t)edata_base_get(ps) + begin * PAGE;
+	uintptr_t addr = (uintptr_t)hpdata_addr_get(ps) + begin * PAGE;
 	edata_init(r_edata, edata_arena_ind_get(r_edata), (void *)addr, size,
 	    /* slab */ false, SC_NSIZES, /* sn */ 0, extent_state_active,
 	    /* zeroed */ false, /* committed */ true, EXTENT_PAI_HPA,
 	    EXTENT_NOT_HEAD);
 	edata_ps_set(r_edata, ps);
-	fb_set_range(ps_fb, ps_npages, begin, npages);
-	edata_nfree_set(ps, (uint32_t)(edata_nfree_get(ps) - npages));
+	fb_set_range(ps_fb, HUGEPAGE_PAGES, begin, npages);
+	hpdata_nfree_set(ps, (uint32_t)(hpdata_nfree_get(ps) - npages));
 	/* The pageslab isn't in a bin, so no bin stats need to change. */
 
 	/*
@@ -267,8 +264,8 @@ psset_ps_alloc_insert(psset_t *psset, edata_t *ps, edata_t *r_edata,
 	 * this check in the case where we're allocating from some smaller run.
 	 */
 	start = begin + npages;
-	while (start < ps_npages) {
-		bool found = fb_urange_iter(ps_fb, ps_npages, start, &begin,
+	while (start < HUGEPAGE_PAGES) {
+		bool found = fb_urange_iter(ps_fb, HUGEPAGE_PAGES, start, &begin,
 		    &len);
 		if (!found) {
 			break;
@@ -278,7 +275,7 @@ psset_ps_alloc_insert(psset_t *psset, edata_t *ps, edata_t *r_edata,
 		}
 		start = begin + len;
 	}
-	edata_longest_free_range_set(ps, (uint32_t)largest_unchosen_range);
+	hpdata_longest_free_range_set(ps, (uint32_t)largest_unchosen_range);
 	if (largest_unchosen_range == 0) {
 		psset_bin_stats_insert(&psset->stats.full_slabs, ps);
 	} else {
@@ -288,7 +285,7 @@ psset_ps_alloc_insert(psset_t *psset, edata_t *ps, edata_t *r_edata,
 
 bool
 psset_alloc_reuse(psset_t *psset, edata_t *r_edata, size_t size) {
-	edata_t *ps = psset_recycle_extract(psset, size);
+	hpdata_t *ps = psset_recycle_extract(psset, size);
 	if (ps == NULL) {
 		return true;
 	}
@@ -297,48 +294,43 @@
 }
 
 void
-psset_alloc_new(psset_t *psset, edata_t *ps, edata_t *r_edata, size_t size) {
-	fb_group_t *ps_fb = edata_slab_data_get(ps)->bitmap;
-	size_t ps_npages = edata_size_get(ps) >> LG_PAGE;
-	assert(fb_empty(ps_fb, ps_npages));
-	assert(ps_npages >= (size >> LG_PAGE));
-	edata_nfree_set(ps, (uint32_t)ps_npages);
-	edata_age_set(ps, psset->age_counter);
-	psset->age_counter++;
+psset_alloc_new(psset_t *psset, hpdata_t *ps, edata_t *r_edata, size_t size) {
+	fb_group_t *ps_fb = ps->active_pages;
+	assert(fb_empty(ps_fb, HUGEPAGE_PAGES));
+	assert(hpdata_nfree_get(ps) == HUGEPAGE_PAGES);
 	psset_ps_alloc_insert(psset, ps, r_edata, size);
 }
 
-edata_t *
+hpdata_t *
 psset_dalloc(psset_t *psset, edata_t *edata) {
 	assert(edata_pai_get(edata) == EXTENT_PAI_HPA);
 	assert(edata_ps_get(edata) != NULL);
-	edata_t *ps = edata_ps_get(edata);
+	hpdata_t *ps = edata_ps_get(edata);
 
-	fb_group_t *ps_fb = edata_slab_data_get(ps)->bitmap;
-	size_t ps_old_longest_free_range = edata_longest_free_range_get(ps);
+	fb_group_t *ps_fb = ps->active_pages;
+	size_t ps_old_longest_free_range = hpdata_longest_free_range_get(ps);
 	pszind_t old_pind = SC_NPSIZES;
 	if (ps_old_longest_free_range != 0) {
 		old_pind = sz_psz2ind(sz_psz_quantize_floor(
 		    ps_old_longest_free_range << LG_PAGE));
 	}
 
-	size_t ps_npages = edata_size_get(ps) >> LG_PAGE;
 	size_t begin =
-	    ((uintptr_t)edata_base_get(edata) - (uintptr_t)edata_base_get(ps))
+	    ((uintptr_t)edata_base_get(edata) - (uintptr_t)hpdata_addr_get(ps))
 	    >> LG_PAGE;
 	size_t len = edata_size_get(edata) >> LG_PAGE;
-	fb_unset_range(ps_fb, ps_npages, begin, len);
+	fb_unset_range(ps_fb, HUGEPAGE_PAGES, begin, len);
 
 	/* The pageslab is still in the bin; adjust its stats first. */
 	psset_bin_stats_t *bin_stats = (ps_old_longest_free_range == 0
 	    ? &psset->stats.full_slabs : &psset->stats.nonfull_slabs[old_pind]);
-	psset_bin_stats_deactivate(bin_stats, edata_hugeified_get(ps), len);
+	psset_bin_stats_deactivate(bin_stats, hpdata_huge_get(ps), len);
 
-	edata_nfree_set(ps, (uint32_t)(edata_nfree_get(ps) + len));
+	hpdata_nfree_set(ps, (uint32_t)(hpdata_nfree_get(ps) + len));
 
 	/* We might have just created a new, larger range. */
-	size_t new_begin = (size_t)(fb_fls(ps_fb, ps_npages, begin) + 1);
-	size_t new_end = fb_ffs(ps_fb, ps_npages, begin + len - 1);
+	size_t new_begin = (size_t)(fb_fls(ps_fb, HUGEPAGE_PAGES, begin) + 1);
+	size_t new_end = fb_ffs(ps_fb, HUGEPAGE_PAGES, begin + len - 1);
 	size_t new_range_len = new_end - new_begin;
 	/*
 	 * If the new free range is no longer than the previous longest one,
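The new_begin/new_end computation finds the free run that now contains the freed pages by scanning outward from them; a worked example, assuming fb_fls scans downward for the nearest set (active) bit and fb_ffs scans upward (which matches how the result is used):

/*
 * 16-page slab, 1 = active, 0 = free, after fb_unset_range(begin=4, len=2):
 *
 *   index: 0 1 2 3 4 5 6 7 8 ...
 *   bits : 1 1 0 0 0 0 0 1 1 ...
 *
 *   fb_fls(fb, n, 4) == 1  ->  new_begin = 1 + 1 = 2
 *   fb_ffs(fb, n, 5) == 7  ->  new_end   = 7
 *   new_range_len = 7 - 2 = 5	(pages 2..6 are free)
 */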
@@ -352,7 +344,7 @@ psset_dalloc(psset_t *psset, edata_t *edata) {
 	 * Otherwise, it might need to get evicted from the set, or change its
 	 * bin.
 	 */
-	edata_longest_free_range_set(ps, (uint32_t)new_range_len);
+	hpdata_longest_free_range_set(ps, (uint32_t)new_range_len);
 	/*
 	 * If it was previously non-full, then it's in some (possibly now
 	 * incorrect) bin already; remove it.
@@ -366,8 +358,8 @@ psset_dalloc(psset_t *psset, edata_t *edata) {
 	 * and the issue becomes moot).
 	 */
 	if (ps_old_longest_free_range > 0) {
-		psset_edata_heap_remove(psset, old_pind, ps);
-		if (edata_age_heap_empty(&psset->pageslabs[old_pind])) {
+		psset_hpdata_heap_remove(psset, old_pind, ps);
+		if (hpdata_age_heap_empty(&psset->pageslabs[old_pind])) {
 			bitmap_set(psset->bitmap, &psset_bitmap_info,
 			    (size_t)old_pind);
 		}
@@ -379,16 +371,16 @@ psset_dalloc(psset_t *psset, edata_t *edata) {
 		psset_bin_stats_remove(&psset->stats.full_slabs, ps);
 	}
 	/* If the pageslab is empty, it gets evicted from the set. */
-	if (new_range_len == ps_npages) {
+	if (new_range_len == HUGEPAGE_PAGES) {
 		return ps;
 	}
 	/* Otherwise, it gets reinserted. */
 	pszind_t new_pind = sz_psz2ind(sz_psz_quantize_floor(
 	    new_range_len << LG_PAGE));
-	if (edata_age_heap_empty(&psset->pageslabs[new_pind])) {
+	if (hpdata_age_heap_empty(&psset->pageslabs[new_pind])) {
 		bitmap_unset(psset->bitmap, &psset_bitmap_info,
 		    (size_t)new_pind);
 	}
-	psset_edata_heap_insert(psset, new_pind, ps);
+	psset_hpdata_heap_insert(psset, new_pind, ps);
 	return NULL;
 }