Use the edata_cache_small_t in the HPA.

David Goldblatt 2020-10-30 14:43:43 -07:00 committed by David Goldblatt
parent 03a6047111
commit 589638182a
5 changed files with 36 additions and 13 deletions
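For readers unfamiliar with the type, here is a rough sketch of the edata_cache_small_t interface this commit relies on, inferred only from the call sites in the hunks below; the real declarations live in jemalloc's edata_cache headers and may differ. Only the fallback member is directly evidenced (by the assert in the hpa_central_alloc_grow hunk); the rest of the layout, and the return types other than edata_cache_small_get's, are assumptions.

/*
 * Sketch only -- inferred from call sites in this diff, not copied from
 * jemalloc's headers.  A small cache of edata_t metadata sitting in front
 * of a shared edata_cache_t; in src/hpa.c below, every access to it is
 * made while holding the owning shard's mutex.
 */
typedef struct edata_cache_small_s edata_cache_small_t;
struct edata_cache_small_s {
	edata_cache_t *fallback;	/* Shared cache we refill from and flush to. */
	/* ... a short list of cached edata_t and a count, details omitted ... */
};

void edata_cache_small_init(edata_cache_small_t *ecs, edata_cache_t *fallback);
edata_t *edata_cache_small_get(tsdn_t *tsdn, edata_cache_small_t *ecs);
void edata_cache_small_put(tsdn_t *tsdn, edata_cache_small_t *ecs, edata_t *edata);
/* Flush cached edatas back to the fallback and stop caching new ones. */
void edata_cache_small_disable(tsdn_t *tsdn, edata_cache_small_t *ecs);

The presumable payoff is that a shard (or hpa_central) can recycle edata_t metadata locally instead of taking a trip to the shared edata_cache for every pageslab operation.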

View File

@@ -24,6 +24,9 @@ struct hpa_s {
/*
* This edata cache is the global one that we use for new allocations in
* growing; practically, it comes from a0.
*
* We don't use an edata_cache_small in front of this, since we expect a
* small finite number of allocations from it.
*/
edata_cache_t *edata_cache;
geom_grow_t geom_grow;
@@ -50,7 +53,7 @@
* from a pageslab. The pageslab itself comes from the centralized
* allocator, and so will use its edata_cache.
*/
edata_cache_t *edata_cache;
edata_cache_small_t ecs;
hpa_t *hpa;
psset_t psset;
@@ -86,6 +89,12 @@ bool hpa_init(hpa_t *hpa, base_t *base, emap_t *emap,
bool hpa_shard_init(hpa_shard_t *shard, hpa_t *hpa,
edata_cache_t *edata_cache, unsigned ind, size_t ps_goal,
size_t ps_alloc_max, size_t small_max, size_t large_min);
/*
* Notify the shard that we won't use it for allocations much longer. Due to
* the possibility of races, we don't actually prevent allocations; just flush
* and disable the embedded edata_cache_small.
*/
void hpa_shard_disable(tsdn_t *tsdn, hpa_shard_t *shard);
void hpa_shard_destroy(tsdn_t *tsdn, hpa_shard_t *shard);
/*
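The comment on hpa_shard_disable above pins down its semantics: allocations aren't actually prevented, the embedded small cache is just flushed and disabled. As a purely illustrative sketch of what that flush-and-disable might look like inside edata_cache_small_disable (the real implementation is not part of this diff; the ecs_pop_cached helper and the disabled field are hypothetical names):

/* Hypothetical sketch, not jemalloc's code. */
void
edata_cache_small_disable(tsdn_t *tsdn, edata_cache_small_t *ecs) {
	/* Hand every cached edata back to the shared fallback cache. */
	edata_t *edata;
	while ((edata = ecs_pop_cached(ecs)) != NULL) {	/* hypothetical helper */
		edata_cache_put(tsdn, ecs->fallback, edata);
	}
	/* From here on, gets and puts pass straight through to the fallback. */
	ecs->disabled = true;	/* assumed field */
}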

View File

@@ -9,7 +9,7 @@ struct hpa_central_s {
/* The emap we use for metadata operations. */
emap_t *emap;
edata_cache_t *edata_cache;
edata_cache_small_t ecs;
eset_t eset;
size_t sn_next;

View File

@@ -64,7 +64,8 @@ hpa_shard_init(hpa_shard_t *shard, hpa_t *hpa, edata_cache_t *edata_cache,
return true;
}
shard->edata_cache = edata_cache;
assert(edata_cache != NULL);
edata_cache_small_init(&shard->ecs, edata_cache);
shard->hpa = hpa;
psset_init(&shard->psset);
shard->ps_goal = ps_goal;
@@ -201,13 +202,14 @@ hpa_alloc_psset(tsdn_t *tsdn, hpa_shard_t *shard, size_t size) {
assert(size <= shard->ps_alloc_max);
bool err;
edata_t *edata = edata_cache_get(tsdn, shard->edata_cache);
malloc_mutex_lock(tsdn, &shard->mtx);
edata_t *edata = edata_cache_small_get(tsdn, &shard->ecs);
if (edata == NULL) {
malloc_mutex_unlock(tsdn, &shard->mtx);
return NULL;
}
edata_arena_ind_set(edata, shard->ind);
malloc_mutex_lock(tsdn, &shard->mtx);
err = psset_alloc_reuse(&shard->psset, edata, size);
malloc_mutex_unlock(tsdn, &shard->mtx);
if (!err) {
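Because the rendered hunk above interleaves the old and new lines, the new head of hpa_alloc_psset is easier to follow stitched together. Reading the hunk as old-then-new, the added path condenses to the following (no new behavior, just the hunk's new lines in order):

bool err;
malloc_mutex_lock(tsdn, &shard->mtx);
edata_t *edata = edata_cache_small_get(tsdn, &shard->ecs);
if (edata == NULL) {
	malloc_mutex_unlock(tsdn, &shard->mtx);
	return NULL;
}
edata_arena_ind_set(edata, shard->ind);
err = psset_alloc_reuse(&shard->psset, edata, size);
malloc_mutex_unlock(tsdn, &shard->mtx);

The notable change is that the metadata now comes from the shard-local small cache, and that get happens under shard->mtx rather than before the mutex is taken.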
@@ -229,7 +231,11 @@ hpa_alloc_psset(tsdn_t *tsdn, hpa_shard_t *shard, size_t size) {
shard->ps_goal);
if (grow_edata == NULL) {
malloc_mutex_unlock(tsdn, &shard->grow_mtx);
edata_cache_put(tsdn, shard->edata_cache, edata);
malloc_mutex_lock(tsdn, &shard->mtx);
edata_cache_small_put(tsdn, &shard->ecs, edata);
malloc_mutex_unlock(tsdn, &shard->mtx);
return NULL;
}
edata_arena_ind_set(grow_edata, shard->ind);
@@ -351,9 +357,9 @@ hpa_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata) {
malloc_mutex_lock(tsdn, &shard->mtx);
edata_t *evicted_ps = psset_dalloc(&shard->psset, edata);
edata_cache_small_put(tsdn, &shard->ecs, edata);
malloc_mutex_unlock(tsdn, &shard->mtx);
edata_cache_put(tsdn, shard->edata_cache, edata);
if (evicted_ps != NULL) {
/*
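The deallocation side mirrors this. Reading the hunk above as old-then-new, the new sequence returns the edata to the shard-local small cache while shard->mtx is still held, instead of handing it to the global edata_cache after the unlock as before:

malloc_mutex_lock(tsdn, &shard->mtx);
edata_t *evicted_ps = psset_dalloc(&shard->psset, edata);
edata_cache_small_put(tsdn, &shard->ecs, edata);
malloc_mutex_unlock(tsdn, &shard->mtx);
/* evicted_ps, if any, is handled after the lock is dropped, as before. */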
@@ -387,6 +393,13 @@ hpa_shard_assert_stats_empty(psset_bin_stats_t *bin_stats) {
assert(bin_stats->ninactive == 0);
}
void
hpa_shard_disable(tsdn_t *tsdn, hpa_shard_t *shard) {
malloc_mutex_lock(tsdn, &shard->mtx);
edata_cache_small_disable(tsdn, &shard->ecs);
malloc_mutex_unlock(tsdn, &shard->mtx);
}
void
hpa_shard_destroy(tsdn_t *tsdn, hpa_shard_t *shard) {
/*

View File

@@ -7,7 +7,7 @@ void
hpa_central_init(hpa_central_t *central, edata_cache_t *edata_cache,
emap_t *emap) {
central->emap = emap;
central->edata_cache = edata_cache;
edata_cache_small_init(&central->ecs, edata_cache);
eset_init(&central->eset, extent_state_dirty);
central->sn_next = 0;
}
@@ -19,7 +19,7 @@ hpa_central_init(hpa_central_t *central, edata_cache_t *edata_cache,
static edata_t *
hpa_central_split(tsdn_t *tsdn, hpa_central_t *central, edata_t *edata,
size_t size) {
edata_t *trail = edata_cache_get(tsdn, central->edata_cache);
edata_t *trail = edata_cache_small_get(tsdn, &central->ecs);
if (trail == NULL) {
return NULL;
}
@@ -34,7 +34,7 @@ hpa_central_split(tsdn_t *tsdn, hpa_central_t *central, edata_t *edata,
bool err = emap_split_prepare(tsdn, central->emap, &prepare, edata,
size, trail, cursize - size);
if (err) {
edata_cache_put(tsdn, central->edata_cache, trail);
edata_cache_small_put(tsdn, &central->ecs, trail);
return NULL;
}
emap_lock_edata2(tsdn, central->emap, edata, trail);
@@ -102,7 +102,7 @@ hpa_central_alloc_grow(tsdn_t *tsdn, hpa_central_t *central,
assert(edata_base_get(edata) == edata_addr_get(edata));
assert(edata_size_get(edata) >= size);
assert(edata_arena_ind_get(edata)
== base_ind_get(central->edata_cache->base));
== base_ind_get(central->ecs.fallback->base));
assert(edata_is_head_get(edata));
assert(edata_state_get(edata) == extent_state_active);
assert(edata_pai_get(edata) == EXTENT_PAI_HPA);
@@ -173,7 +173,7 @@ hpa_central_dalloc_merge(tsdn_t *tsdn, hpa_central_t *central, edata_t *a,
edata_size_set(a, edata_size_get(a) + edata_size_get(b));
emap_merge_commit(tsdn, central->emap, &prepare, a, b);
emap_unlock_edata2(tsdn, central->emap, a, b);
edata_cache_put(tsdn, central->edata_cache, b);
edata_cache_small_put(tsdn, &central->ecs, b);
}
void
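One detail from the hpa_central_alloc_grow hunk above: with the raw edata_cache pointer gone from hpa_central_s, the owning base is now reached through the small cache's fallback pointer. Assuming edata_cache_small_init simply records its second argument as fallback, the following sanity check (a sketch, not code from the diff) would hold right after hpa_central_init:

assert(central->ecs.fallback == edata_cache);
assert(base_ind_get(central->ecs.fallback->base)
    == base_ind_get(edata_cache->base));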

View File

@@ -76,6 +76,7 @@ pa_shard_disable_hpa(tsdn_t *tsdn, pa_shard_t *shard) {
atomic_store_b(&shard->use_hpa, false, ATOMIC_RELAXED);
if (shard->ever_used_hpa) {
sec_disable(tsdn, &shard->hpa_sec);
hpa_shard_disable(tsdn, &shard->hpa_shard);
}
}
@@ -89,10 +90,10 @@ pa_shard_reset(tsdn_t *tsdn, pa_shard_t *shard) {
void
pa_shard_destroy(tsdn_t *tsdn, pa_shard_t *shard) {
sec_flush(tsdn, &shard->hpa_sec);
pac_destroy(tsdn, &shard->pac);
if (shard->ever_used_hpa) {
sec_flush(tsdn, &shard->hpa_sec);
hpa_shard_disable(tsdn, &shard->hpa_shard);
}
}