Extent: Stop passing an edata_cache everywhere.

We already pass the pa_shard_t around everywhere; we can just use that.
Author:  David Goldblatt, 2020-03-15 09:53:09 -07:00 (committed by David Goldblatt)
Parent:  a4759a1911
Commit:  93b99dd140
3 changed files with 54 additions and 63 deletions
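The change is mechanical: every extent helper that used to take an edata_cache_t * alongside a pa_shard_t * now reaches the cache through the shard, as &shard->edata_cache. A minimal compilable sketch of the same refactor, using hypothetical stand-in types (cache_t, shard_t) rather than jemalloc's real ones:

#include <stdio.h>

/* Hypothetical stand-ins for jemalloc's pa_shard_t and edata_cache_t. */
typedef struct cache_s { int nallocs; } cache_t;
typedef struct shard_s { cache_t edata_cache; } shard_t;

/* Before: the cache rides along as its own parameter, even though every
 * caller already holds the shard that owns it. */
static void
extent_op_before(shard_t *shard, cache_t *cache) {
	(void)shard;
	cache->nallocs++;
}

/* After: take only the shard and derive the cache from it, the way the
 * commit rewrites calls into edata_cache_get(tsdn, &shard->edata_cache). */
static void
extent_op_after(shard_t *shard) {
	shard->edata_cache.nallocs++;
}

int
main(void) {
	shard_t shard = {{0}};
	extent_op_before(&shard, &shard.edata_cache);
	extent_op_after(&shard);
	printf("nallocs: %d\n", shard.edata_cache.nallocs); /* prints 2 */
	return 0;
}

Beyond shortening the signatures, dropping the redundant parameter removes the possibility of a caller passing a cache that does not belong to the shard.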

View File

@@ -48,10 +48,10 @@ bool extent_purge_lazy_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
 bool extent_purge_forced_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
     size_t offset, size_t length);
 edata_t *extent_split_wrapper(tsdn_t *tsdn, pa_shard_t *shard,
-    edata_cache_t *edata_cache, ehooks_t *ehooks, edata_t *edata, size_t size_a,
-    szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b);
+    ehooks_t *ehooks, edata_t *edata, size_t size_a, szind_t szind_a,
+    bool slab_a, size_t size_b, szind_t szind_b, bool slab_b);
 bool extent_merge_wrapper(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
-    edata_cache_t *edata_cache, edata_t *a, edata_t *b);
+    edata_t *a, edata_t *b);

 bool extent_boot(void);

View File

@@ -20,11 +20,11 @@ static bool extent_purge_lazy_impl(tsdn_t *tsdn, ehooks_t *ehooks,
 static bool extent_purge_forced_impl(tsdn_t *tsdn, ehooks_t *ehooks,
     edata_t *edata, size_t offset, size_t length, bool growing_retained);
 static edata_t *extent_split_impl(tsdn_t *tsdn, pa_shard_t *shard,
-    edata_cache_t *edata_cache, ehooks_t *ehooks, edata_t *edata, size_t size_a,
-    szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
+    ehooks_t *ehooks, edata_t *edata, size_t size_a, szind_t szind_a,
+    bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
     bool growing_retained);
 static bool extent_merge_impl(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
-    edata_cache_t *edata_cache, edata_t *a, edata_t *b, bool growing_retained);
+    edata_t *a, edata_t *b, bool growing_retained);

 /* Used exclusively for gdump triggering. */
 static atomic_zu_t curpages;
@@ -42,8 +42,8 @@ static edata_t *extent_recycle(tsdn_t *tsdn, pa_shard_t *shard,
     size_t alignment, bool slab, szind_t szind, bool zero, bool *commit,
     bool growing_retained);
 static edata_t *extent_try_coalesce(tsdn_t *tsdn, pa_shard_t *shard,
-    edata_cache_t *edata_cache, ehooks_t *ehooks, ecache_t *ecache,
-    edata_t *edata, bool *coalesced, bool growing_retained);
+    ehooks_t *ehooks, ecache_t *ecache, edata_t *edata, bool *coalesced,
+    bool growing_retained);
 static void extent_record(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
     ecache_t *ecache, edata_t *edata, bool growing_retained);
 static edata_t *extent_alloc_retained(tsdn_t *tsdn, pa_shard_t *shard,
@@ -54,11 +54,10 @@ static edata_t *extent_alloc_retained(tsdn_t *tsdn, pa_shard_t *shard,

 static bool
 extent_try_delayed_coalesce(tsdn_t *tsdn, pa_shard_t *shard,
-    edata_cache_t *edata_cache, ehooks_t *ehooks, ecache_t *ecache,
-    edata_t *edata) {
+    ehooks_t *ehooks, ecache_t *ecache, edata_t *edata) {
 	edata_state_set(edata, extent_state_active);

 	bool coalesced;
-	edata = extent_try_coalesce(tsdn, shard, edata_cache, ehooks, ecache,
+	edata = extent_try_coalesce(tsdn, shard, ehooks, ecache,
 	    edata, &coalesced, false);
 	edata_state_set(edata, ecache->state);
@@ -157,8 +156,8 @@ ecache_evict(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 			break;
 		}
 		/* Try to coalesce. */
-		if (extent_try_delayed_coalesce(tsdn, shard,
-		    &shard->edata_cache, ehooks, ecache, edata)) {
+		if (extent_try_delayed_coalesce(tsdn, shard, ehooks, ecache,
+		    edata)) {
 			break;
 		}
 		/*
@@ -482,9 +481,9 @@ extent_split_interior(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 	/* Split the lead. */
 	if (leadsize != 0) {
 		*lead = *edata;
-		*edata = extent_split_impl(tsdn, shard, &shard->edata_cache,
-		    ehooks, *lead, leadsize, SC_NSIZES, false, size + trailsize,
-		    szind, slab, growing_retained);
+		*edata = extent_split_impl(tsdn, shard, ehooks, *lead, leadsize,
+		    SC_NSIZES, false, size + trailsize, szind, slab,
+		    growing_retained);
 		if (*edata == NULL) {
 			*to_leak = *lead;
 			*lead = NULL;
@@ -494,9 +493,8 @@ extent_split_interior(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,

 	/* Split the trail. */
 	if (trailsize != 0) {
-		*trail = extent_split_impl(tsdn, shard, &shard->edata_cache,
-		    ehooks, *edata, size, szind, slab, trailsize, SC_NSIZES,
-		    false, growing_retained);
+		*trail = extent_split_impl(tsdn, shard, ehooks, *edata, size,
+		    szind, slab, trailsize, SC_NSIZES, false, growing_retained);
 		if (*trail == NULL) {
 			*to_leak = *edata;
 			*to_salvage = *lead;
@@ -862,15 +860,15 @@ extent_can_coalesce(ecache_t *ecache, const edata_t *inner,
 }

 static bool
-extent_coalesce(tsdn_t *tsdn, pa_shard_t *shard, edata_cache_t *edata_cache,
-    ehooks_t *ehooks, ecache_t *ecache, edata_t *inner, edata_t *outer,
-    bool forward, bool growing_retained) {
+extent_coalesce(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
+    ecache_t *ecache, edata_t *inner, edata_t *outer, bool forward,
+    bool growing_retained) {
 	assert(extent_can_coalesce(ecache, inner, outer));

 	extent_activate_locked(tsdn, ecache, outer);

 	malloc_mutex_unlock(tsdn, &ecache->mtx);
-	bool err = extent_merge_impl(tsdn, shard, ehooks, edata_cache,
+	bool err = extent_merge_impl(tsdn, shard, ehooks,
 	    forward ? inner : outer, forward ? outer : inner, growing_retained);
 	malloc_mutex_lock(tsdn, &ecache->mtx);
@@ -882,9 +880,8 @@ extent_coalesce(tsdn_t *tsdn, pa_shard_t *shard, edata_cache_t *edata_cache,
 }

 static edata_t *
-extent_try_coalesce_impl(tsdn_t *tsdn, pa_shard_t *shard,
-    edata_cache_t *edata_cache, ehooks_t *ehooks, ecache_t *ecache,
-    edata_t *edata, bool *coalesced, bool growing_retained,
+extent_try_coalesce_impl(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
+    ecache_t *ecache, edata_t *edata, bool *coalesced, bool growing_retained,
     bool inactive_only) {
 	/*
 	 * We avoid checking / locking inactive neighbors for large size
@@ -914,7 +911,7 @@ extent_try_coalesce_impl(tsdn_t *tsdn, pa_shard_t *shard,
 		emap_unlock_edata(tsdn, shard->emap, next);

 		if (can_coalesce && !extent_coalesce(tsdn, shard,
-		    edata_cache, ehooks, ecache, edata, next, true,
+		    ehooks, ecache, edata, next, true,
 		    growing_retained)) {
 			if (ecache->delay_coalesce) {
 				/* Do minimal coalescing. */
@@ -934,7 +931,7 @@ extent_try_coalesce_impl(tsdn_t *tsdn, pa_shard_t *shard,
 		emap_unlock_edata(tsdn, shard->emap, prev);

 		if (can_coalesce && !extent_coalesce(tsdn, shard,
-		    edata_cache, ehooks, ecache, edata, prev, false,
+		    ehooks, ecache, edata, prev, false,
 		    growing_retained)) {
 			edata = prev;
 			if (ecache->delay_coalesce) {
@@ -954,19 +951,17 @@ extent_try_coalesce_impl(tsdn_t *tsdn, pa_shard_t *shard,
 }

 static edata_t *
-extent_try_coalesce(tsdn_t *tsdn, pa_shard_t *shard, edata_cache_t *edata_cache,
-    ehooks_t *ehooks, ecache_t *ecache, edata_t *edata, bool *coalesced,
-    bool growing_retained) {
-	return extent_try_coalesce_impl(tsdn, shard, edata_cache, ehooks,
-	    ecache, edata, coalesced, growing_retained, false);
+extent_try_coalesce(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
+    ecache_t *ecache, edata_t *edata, bool *coalesced, bool growing_retained) {
+	return extent_try_coalesce_impl(tsdn, shard, ehooks, ecache, edata,
+	    coalesced, growing_retained, false);
 }

 static edata_t *
-extent_try_coalesce_large(tsdn_t *tsdn, pa_shard_t *shard,
-    edata_cache_t *edata_cache, ehooks_t *ehooks, ecache_t *ecache,
-    edata_t *edata, bool *coalesced, bool growing_retained) {
-	return extent_try_coalesce_impl(tsdn, shard, edata_cache, ehooks,
-	    ecache, edata, coalesced, growing_retained, true);
+extent_try_coalesce_large(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
+    ecache_t *ecache, edata_t *edata, bool *coalesced, bool growing_retained) {
+	return extent_try_coalesce_impl(tsdn, shard, ehooks, ecache, edata,
+	    coalesced, growing_retained, true);
 }

 /* Purge a single extent to retained / unmapped directly. */
@@ -1014,17 +1009,16 @@ extent_record(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 	emap_assert_mapped(tsdn, shard->emap, edata);

 	if (!ecache->delay_coalesce) {
-		edata = extent_try_coalesce(tsdn, shard, &shard->edata_cache,
-		    ehooks, ecache, edata, NULL, growing_retained);
+		edata = extent_try_coalesce(tsdn, shard, ehooks, ecache, edata,
+		    NULL, growing_retained);
 	} else if (edata_size_get(edata) >= SC_LARGE_MINCLASS) {
 		assert(ecache == &shard->ecache_dirty);
 		/* Always coalesce large extents eagerly. */
 		bool coalesced;
 		do {
 			assert(edata_state_get(edata) == extent_state_active);
-			edata = extent_try_coalesce_large(tsdn, shard,
-			    &shard->edata_cache, ehooks, ecache, edata,
-			    &coalesced, growing_retained);
+			edata = extent_try_coalesce_large(tsdn, shard, ehooks,
+			    ecache, edata, &coalesced, growing_retained);
 		} while (coalesced);
 		if (edata_size_get(edata) >= oversize_threshold &&
 		    pa_shard_may_force_decay(shard)) {
@@ -1213,10 +1207,9 @@ extent_purge_forced_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
  * and returns the trail (except in case of error).
  */
 static edata_t *
-extent_split_impl(tsdn_t *tsdn, pa_shard_t *shard, edata_cache_t *edata_cache,
-    ehooks_t *ehooks, edata_t *edata, size_t size_a, szind_t szind_a,
-    bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
-    bool growing_retained) {
+extent_split_impl(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
+    edata_t *edata, size_t size_a, szind_t szind_a, bool slab_a, size_t size_b,
+    szind_t szind_b, bool slab_b, bool growing_retained) {
 	assert(edata_size_get(edata) == size_a + size_b);
 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
 	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
@@ -1225,7 +1218,7 @@ extent_split_impl(tsdn_t *tsdn, pa_shard_t *shard, edata_cache_t *edata_cache,
 		return NULL;
 	}

-	edata_t *trail = edata_cache_get(tsdn, edata_cache);
+	edata_t *trail = edata_cache_get(tsdn, &shard->edata_cache);
 	if (trail == NULL) {
 		goto label_error_a;
 	}
@@ -1262,22 +1255,22 @@ extent_split_impl(tsdn_t *tsdn, pa_shard_t *shard, edata_cache_t *edata_cache,
 label_error_c:
 	emap_unlock_edata2(tsdn, shard->emap, edata, trail);
 label_error_b:
-	edata_cache_put(tsdn, edata_cache, trail);
+	edata_cache_put(tsdn, &shard->edata_cache, trail);
 label_error_a:
 	return NULL;
 }

 edata_t *
 extent_split_wrapper(tsdn_t *tsdn, pa_shard_t *shard,
-    edata_cache_t *edata_cache, ehooks_t *ehooks, edata_t *edata, size_t size_a,
-    szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b) {
-	return extent_split_impl(tsdn, shard, edata_cache, ehooks, edata,
-	    size_a, szind_a, slab_a, size_b, szind_b, slab_b, false);
+    ehooks_t *ehooks, edata_t *edata, size_t size_a, szind_t szind_a,
+    bool slab_a, size_t size_b, szind_t szind_b, bool slab_b) {
+	return extent_split_impl(tsdn, shard, ehooks, edata, size_a, szind_a,
+	    slab_a, size_b, szind_b, slab_b, false);
 }

 static bool
-extent_merge_impl(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
-    edata_cache_t *edata_cache, edata_t *a, edata_t *b, bool growing_retained) {
+extent_merge_impl(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks, edata_t *a,
+    edata_t *b, bool growing_retained) {
 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
 	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
 	assert(edata_base_get(a) < edata_base_get(b));
@@ -1312,15 +1305,15 @@ extent_merge_impl(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 	emap_merge_commit(tsdn, shard->emap, &prepare, a, b);
 	emap_unlock_edata2(tsdn, shard->emap, a, b);

-	edata_cache_put(tsdn, edata_cache, b);
+	edata_cache_put(tsdn, &shard->edata_cache, b);

 	return false;
 }

 bool
 extent_merge_wrapper(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
-    edata_cache_t *edata_cache, edata_t *a, edata_t *b) {
-	return extent_merge_impl(tsdn, shard, ehooks, edata_cache, a, b, false);
+    edata_t *a, edata_t *b) {
+	return extent_merge_impl(tsdn, shard, ehooks, a, b, false);
 }

 bool

View File

@@ -175,8 +175,7 @@ pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
 	if (trail == NULL) {
 		return true;
 	}
-	if (extent_merge_wrapper(tsdn, shard, ehooks, &shard->edata_cache,
-	    edata, trail)) {
+	if (extent_merge_wrapper(tsdn, shard, ehooks, edata, trail)) {
 		extent_dalloc_wrapper(tsdn, shard, ehooks, trail);
 		return true;
 	}
@@ -205,9 +204,8 @@ pa_shrink(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
 		return true;
 	}

-	edata_t *trail = extent_split_wrapper(tsdn, shard, &shard->edata_cache,
-	    ehooks, edata, new_size, szind, slab, shrink_amount, SC_NSIZES,
-	    false);
+	edata_t *trail = extent_split_wrapper(tsdn, shard, ehooks, edata,
+	    new_size, szind, slab, shrink_amount, SC_NSIZES, false);
 	if (trail == NULL) {
 		return true;
 	}