Add PAC: Page allocator classic.

For now, this is just a stub containing the ecaches, with no surrounding code changed. Eventually, all of the core allocator bits will be moved into it over the subsequent stack of commits.
Authored by David Goldblatt on 2020-05-29 16:57:31 -07:00; committed by David Goldblatt
parent 1b5f632e0f
commit 777b0ba965
9 changed files with 106 additions and 84 deletions

View File

@@ -7,8 +7,17 @@
#include "jemalloc/internal/edata_cache.h"
#include "jemalloc/internal/emap.h"
#include "jemalloc/internal/lockedint.h"
#include "jemalloc/internal/pac.h"
#include "jemalloc/internal/pai.h"
/*
* The page allocator; responsible for acquiring pages of memory for
* allocations. It picks the implementation of the page allocator interface
* (i.e. a pai_t) to handle a given page-level allocation request. For now, the
* only such implementation is the PAC code ("page allocator classic"), but
* others will be coming soon.
*/
enum pa_decay_purge_setting_e {
PA_DECAY_PURGE_ALWAYS,
PA_DECAY_PURGE_NEVER,
@@ -16,11 +25,6 @@ enum pa_decay_purge_setting_e {
};
typedef enum pa_decay_purge_setting_e pa_decay_purge_setting_t;
/*
* The page allocator; responsible for acquiring pages of memory for
* allocations.
*/
typedef struct pa_shard_decay_stats_s pa_shard_decay_stats_t;
struct pa_shard_decay_stats_s {
/* Total number of purge sweeps. */
@@ -117,16 +121,7 @@ struct pa_shard_s {
* this is the *only* pai, but we'll soon grow another.
*/
pai_t ecache_pai;
/*
* Collections of extents that were previously allocated. These are
* used when allocating extents, in an attempt to re-use address space.
*
* Synchronization: internal.
*/
ecache_t ecache_dirty;
ecache_t ecache_muzzy;
ecache_t ecache_retained;
pac_t pac;
/* The source of edata_t objects. */
edata_cache_t edata_cache;
@@ -167,7 +162,7 @@ pa_shard_muzzy_decay_ms_get(pa_shard_t *shard) {
static inline bool
pa_shard_dont_decay_muzzy(pa_shard_t *shard) {
return ecache_npages_get(&shard->ecache_muzzy) == 0 &&
return ecache_npages_get(&shard->pac.ecache_muzzy) == 0 &&
pa_shard_muzzy_decay_ms_get(shard) <= 0;
}

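The comment added to pa.h above says that the page allocator picks an implementation of the page allocator interface (a pai_t) for each page-level request, and pa_shard_s now embeds both that pai_t (ecache_pai) and the new pac_t. As a rough sketch of what that shape amounts to (not the actual jemalloc definitions: the real pai_t operates on edata_t objects and takes a tsdn_t, and every toy_* name below is made up for illustration), the interface is a struct of function pointers that each backend embeds and fills in:

/* Illustrative sketch only; toy_* names are hypothetical, not jemalloc API. */
#include <stdbool.h>
#include <stddef.h>

typedef struct toy_pai_s toy_pai_t;
struct toy_pai_s {
	/* Each page-allocator backend supplies its own implementations. */
	void *(*alloc)(toy_pai_t *self, size_t size, size_t alignment, bool zero);
	void (*dalloc)(toy_pai_t *self, void *ptr);
};

/*
 * The "classic" backend embeds the interface object, so callers only ever
 * need to hold a toy_pai_t pointer.
 */
typedef struct toy_pac_s {
	toy_pai_t pai;
	/* ... the dirty/muzzy/retained caches would live here ... */
} toy_pac_t;

/*
 * The shard-level code forwards a request to whichever backend it picked;
 * today that is always the PAC, but this call site would not change when
 * another backend appears.
 */
static inline void *
toy_pa_alloc(toy_pai_t *backend, size_t size, size_t alignment, bool zero) {
	return backend->alloc(backend, size, alignment, zero);
}

With that indirection in place, growing a second page allocator later, as the new comment promises, only means adding another struct that embeds and populates a pai_t.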
View File

@@ -0,0 +1,25 @@
#ifndef JEMALLOC_INTERNAL_PAC_H
#define JEMALLOC_INTERNAL_PAC_H
/*
* Page allocator classic; an implementation of the PAI interface that:
* - Can be used for arenas with custom extent hooks.
* - Can always satisfy any allocation request (including highly fragmented
* ones).
* - Can use efficient OS-level zeroing primitives for demand-filled pages.
*/
typedef struct pac_s pac_t;
struct pac_s {
/*
* Collections of extents that were previously allocated. These are
* used when allocating extents, in an attempt to re-use address space.
*
* Synchronization: internal.
*/
ecache_t ecache_dirty;
ecache_t ecache_muzzy;
ecache_t ecache_retained;
};
#endif /* JEMALLOC_INTERNAL_PAC_H */
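The three ecaches that pac_t now owns hold previously allocated extents in progressively colder states, and the pa.c hunk further down (ecache_pai_alloc) reuses them in exactly that order: dirty first, then muzzy, then growing out of retained address space. The following is a minimal sketch of that fallback chain; the toy_* helpers merely stand in for ecache_alloc()/ecache_alloc_grow() and are declared but not defined here, so none of this is the jemalloc API:

/* Hedged sketch of the reuse order; toy_* names are stand-ins. */
#include <stdbool.h>
#include <stddef.h>

typedef struct toy_ecache_s toy_ecache_t;	/* opaque in this sketch */

typedef struct toy_pac_s {
	toy_ecache_t *ecache_dirty;
	toy_ecache_t *ecache_muzzy;
	toy_ecache_t *ecache_retained;
} toy_pac_t;

void *toy_ecache_alloc(toy_ecache_t *ecache, size_t size, size_t alignment,
    bool zero);
void *toy_ecache_alloc_grow(toy_ecache_t *ecache, size_t size, size_t alignment,
    bool zero);

void *
toy_pac_alloc(toy_pac_t *pac, size_t size, size_t alignment, bool zero) {
	/* Dirty extents are the cheapest to reuse. */
	void *ret = toy_ecache_alloc(pac->ecache_dirty, size, alignment, zero);
	if (ret == NULL) {
		/*
		 * Then muzzy; the real code skips this step when muzzy decay
		 * is disabled (pa_shard_may_have_muzzy()).
		 */
		ret = toy_ecache_alloc(pac->ecache_muzzy, size, alignment, zero);
	}
	if (ret == NULL) {
		/* Last resort: grow out of retained address space. */
		ret = toy_ecache_alloc_grow(pac->ecache_retained, size,
		    alignment, zero);
	}
	return ret;
}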

View File

@@ -454,16 +454,16 @@ bool
arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
ssize_t decay_ms) {
return arena_decay_ms_set(tsdn, arena, &arena->pa_shard.decay_dirty,
&arena->pa_shard.stats->decay_dirty, &arena->pa_shard.ecache_dirty,
decay_ms);
&arena->pa_shard.stats->decay_dirty,
&arena->pa_shard.pac.ecache_dirty, decay_ms);
}
bool
arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
ssize_t decay_ms) {
return arena_decay_ms_set(tsdn, arena, &arena->pa_shard.decay_muzzy,
&arena->pa_shard.stats->decay_muzzy, &arena->pa_shard.ecache_muzzy,
decay_ms);
&arena->pa_shard.stats->decay_muzzy,
&arena->pa_shard.pac.ecache_muzzy, decay_ms);
}
static bool
@@ -521,8 +521,8 @@ static bool
arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
bool all) {
return arena_decay_impl(tsdn, arena, &arena->pa_shard.decay_dirty,
&arena->pa_shard.stats->decay_dirty, &arena->pa_shard.ecache_dirty,
is_background_thread, all);
&arena->pa_shard.stats->decay_dirty,
&arena->pa_shard.pac.ecache_dirty, is_background_thread, all);
}
static bool
@@ -532,8 +532,8 @@ arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
return false;
}
return arena_decay_impl(tsdn, arena, &arena->pa_shard.decay_muzzy,
&arena->pa_shard.stats->decay_muzzy, &arena->pa_shard.ecache_muzzy,
is_background_thread, all);
&arena->pa_shard.stats->decay_muzzy,
&arena->pa_shard.pac.ecache_muzzy, is_background_thread, all);
}
void

View File

@@ -201,12 +201,12 @@ static uint64_t
arena_decay_compute_purge_interval(tsdn_t *tsdn, arena_t *arena) {
uint64_t i1, i2;
i1 = arena_decay_compute_purge_interval_impl(tsdn,
&arena->pa_shard.decay_dirty, &arena->pa_shard.ecache_dirty);
&arena->pa_shard.decay_dirty, &arena->pa_shard.pac.ecache_dirty);
if (i1 == BACKGROUND_THREAD_MIN_INTERVAL_NS) {
return i1;
}
i2 = arena_decay_compute_purge_interval_impl(tsdn,
&arena->pa_shard.decay_muzzy, &arena->pa_shard.ecache_muzzy);
&arena->pa_shard.decay_muzzy, &arena->pa_shard.pac.ecache_muzzy);
return i1 < i2 ? i1 : i2;
}
@@ -716,8 +716,8 @@ background_thread_interval_check(tsdn_t *tsdn, arena_t *arena, decay_t *decay,
if (info->npages_to_purge_new > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
should_signal = true;
} else if (unlikely(background_thread_indefinite_sleep(info)) &&
(ecache_npages_get(&arena->pa_shard.ecache_dirty) > 0 ||
ecache_npages_get(&arena->pa_shard.ecache_muzzy) > 0 ||
(ecache_npages_get(&arena->pa_shard.pac.ecache_dirty) > 0 ||
ecache_npages_get(&arena->pa_shard.pac.ecache_muzzy) > 0 ||
info->npages_to_purge_new > 0)) {
should_signal = true;
} else {

View File

@@ -3127,9 +3127,9 @@ stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib,
}
MUTEX_PROF_RESET(arena->large_mtx);
MUTEX_PROF_RESET(arena->pa_shard.edata_cache.mtx);
MUTEX_PROF_RESET(arena->pa_shard.ecache_dirty.mtx);
MUTEX_PROF_RESET(arena->pa_shard.ecache_muzzy.mtx);
MUTEX_PROF_RESET(arena->pa_shard.ecache_retained.mtx);
MUTEX_PROF_RESET(arena->pa_shard.pac.ecache_dirty.mtx);
MUTEX_PROF_RESET(arena->pa_shard.pac.ecache_muzzy.mtx);
MUTEX_PROF_RESET(arena->pa_shard.pac.ecache_retained.mtx);
MUTEX_PROF_RESET(arena->pa_shard.decay_dirty.mtx);
MUTEX_PROF_RESET(arena->pa_shard.decay_muzzy.mtx);
MUTEX_PROF_RESET(arena->tcache_ql_mtx);

View File

@@ -647,7 +647,7 @@ extent_grow_retained(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
goto label_err;
}
edata_init(edata, ecache_ind_get(&shard->ecache_retained), ptr,
edata_init(edata, ecache_ind_get(&shard->pac.ecache_retained), ptr,
alloc_size, false, SC_NSIZES, pa_shard_extent_sn_next(shard),
extent_state_active, zeroed, committed, /* ranged */ false,
EXTENT_IS_HEAD);
@@ -673,11 +673,11 @@ extent_grow_retained(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
if (result == extent_split_interior_ok) {
if (lead != NULL) {
extent_record(tsdn, shard, ehooks,
&shard->ecache_retained, lead, true);
&shard->pac.ecache_retained, lead, true);
}
if (trail != NULL) {
extent_record(tsdn, shard, ehooks,
&shard->ecache_retained, trail, true);
&shard->pac.ecache_retained, trail, true);
}
} else {
/*
@@ -690,12 +690,12 @@ extent_grow_retained(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
extent_gdump_add(tsdn, to_salvage);
}
extent_record(tsdn, shard, ehooks,
&shard->ecache_retained, to_salvage, true);
&shard->pac.ecache_retained, to_salvage, true);
}
if (to_leak != NULL) {
extent_deregister_no_gdump_sub(tsdn, shard, to_leak);
extents_abandon_vm(tsdn, shard, ehooks,
&shard->ecache_retained, to_leak, true);
&shard->pac.ecache_retained, to_leak, true);
}
goto label_err;
}
@@ -704,7 +704,7 @@ extent_grow_retained(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
if (extent_commit_impl(tsdn, ehooks, edata, 0,
edata_size_get(edata), true)) {
extent_record(tsdn, shard, ehooks,
&shard->ecache_retained, edata, true);
&shard->pac.ecache_retained, edata, true);
goto label_err;
}
/* A successful commit should return zeroed memory. */
@@ -756,8 +756,8 @@ extent_alloc_retained(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
malloc_mutex_lock(tsdn, &shard->ecache_grow.mtx);
edata_t *edata = extent_recycle(tsdn, shard, ehooks,
&shard->ecache_retained, new_addr, size, alignment, zero, commit,
/* growing_retained */ true);
&shard->pac.ecache_retained, new_addr, size, alignment, zero,
commit, /* growing_retained */ true);
if (edata != NULL) {
malloc_mutex_unlock(tsdn, &shard->ecache_grow.mtx);
if (config_prof) {
@@ -792,7 +792,7 @@ extent_alloc_wrapper(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
edata_cache_put(tsdn, &shard->edata_cache, edata);
return NULL;
}
edata_init(edata, ecache_ind_get(&shard->ecache_dirty), addr,
edata_init(edata, ecache_ind_get(&shard->pac.ecache_dirty), addr,
size, /* slab */ false, SC_NSIZES, pa_shard_extent_sn_next(shard),
extent_state_active, zero, *commit, /* ranged */ false,
EXTENT_NOT_HEAD);
@@ -972,7 +972,7 @@ extent_record(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
edata = extent_try_coalesce(tsdn, shard, ehooks, ecache, edata,
NULL, growing_retained);
} else if (edata_size_get(edata) >= SC_LARGE_MINCLASS) {
assert(ecache == &shard->ecache_dirty);
assert(ecache == &shard->pac.ecache_dirty);
/* Always coalesce large extents eagerly. */
bool coalesced;
do {
@@ -1072,7 +1072,7 @@ extent_dalloc_wrapper(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
extent_gdump_sub(tsdn, edata);
}
extent_record(tsdn, shard, ehooks, &shard->ecache_retained, edata,
extent_record(tsdn, shard, ehooks, &shard->pac.ecache_retained, edata,
false);
}

View File

@@ -32,7 +32,7 @@ pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, emap_t *emap, base_t *base,
* are likely to be reused soon after deallocation, and the cost of
* merging/splitting extents is non-trivial.
*/
if (ecache_init(tsdn, &shard->ecache_dirty, extent_state_dirty, ind,
if (ecache_init(tsdn, &shard->pac.ecache_dirty, extent_state_dirty, ind,
/* delay_coalesce */ true)) {
return true;
}
@@ -40,7 +40,7 @@ pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, emap_t *emap, base_t *base,
* Coalesce muzzy extents immediately, because operations on them are in
* the critical path much less often than for dirty extents.
*/
if (ecache_init(tsdn, &shard->ecache_muzzy, extent_state_muzzy, ind,
if (ecache_init(tsdn, &shard->pac.ecache_muzzy, extent_state_muzzy, ind,
/* delay_coalesce */ false)) {
return true;
}
@@ -50,7 +50,7 @@ pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, emap_t *emap, base_t *base,
* coalescing), but also because operations on retained extents are not
* in the critical path.
*/
if (ecache_init(tsdn, &shard->ecache_retained, extent_state_retained,
if (ecache_init(tsdn, &shard->pac.ecache_retained, extent_state_retained,
ind, /* delay_coalesce */ false)) {
return true;
}
@@ -94,8 +94,8 @@ pa_shard_reset(pa_shard_t *shard) {
void
pa_shard_destroy_retained(tsdn_t *tsdn, pa_shard_t *shard) {
assert(ecache_npages_get(&shard->ecache_dirty) == 0);
assert(ecache_npages_get(&shard->ecache_muzzy) == 0);
assert(ecache_npages_get(&shard->pac.ecache_dirty) == 0);
assert(ecache_npages_get(&shard->pac.ecache_muzzy) == 0);
/*
* Iterate over the retained extents and destroy them. This gives the
* extent allocator underlying the extent hooks an opportunity to unmap
@@ -108,7 +108,7 @@ pa_shard_destroy_retained(tsdn_t *tsdn, pa_shard_t *shard) {
ehooks_t *ehooks = pa_shard_ehooks_get(shard);
edata_t *edata;
while ((edata = ecache_evict(tsdn, shard, ehooks,
&shard->ecache_retained, 0)) != NULL) {
&shard->pac.ecache_retained, 0)) != NULL) {
extent_destroy_wrapper(tsdn, shard, ehooks, edata);
}
}
@@ -131,15 +131,15 @@ ecache_pai_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment,
ehooks_t *ehooks = pa_shard_ehooks_get(shard);
edata_t *edata = ecache_alloc(tsdn, shard, ehooks,
&shard->ecache_dirty, NULL, size, alignment, zero);
&shard->pac.ecache_dirty, NULL, size, alignment, zero);
if (edata == NULL && pa_shard_may_have_muzzy(shard)) {
edata = ecache_alloc(tsdn, shard, ehooks, &shard->ecache_muzzy,
NULL, size, alignment, zero);
edata = ecache_alloc(tsdn, shard, ehooks,
&shard->pac.ecache_muzzy, NULL, size, alignment, zero);
}
if (edata == NULL) {
edata = ecache_alloc_grow(tsdn, shard, ehooks,
&shard->ecache_retained, NULL, size, alignment, zero);
&shard->pac.ecache_retained, NULL, size, alignment, zero);
if (config_stats && edata != NULL) {
atomic_fetch_add_zu(&shard->stats->pa_mapped, size,
ATOMIC_RELAXED);
@@ -184,16 +184,17 @@ ecache_pai_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
if (ehooks_merge_will_fail(ehooks)) {
return true;
}
edata_t *trail = ecache_alloc(tsdn, shard, ehooks, &shard->ecache_dirty,
trail_begin, expand_amount, PAGE, zero);
edata_t *trail = ecache_alloc(tsdn, shard, ehooks,
&shard->pac.ecache_dirty, trail_begin, expand_amount, PAGE, zero);
if (trail == NULL) {
trail = ecache_alloc(tsdn, shard, ehooks, &shard->ecache_muzzy,
trail_begin, expand_amount, PAGE, zero);
trail = ecache_alloc(tsdn, shard, ehooks,
&shard->pac.ecache_muzzy, trail_begin, expand_amount, PAGE,
zero);
}
if (trail == NULL) {
trail = ecache_alloc_grow(tsdn, shard, ehooks,
&shard->ecache_retained, trail_begin, expand_amount, PAGE,
zero);
&shard->pac.ecache_retained, trail_begin, expand_amount,
PAGE, zero);
mapped_add = expand_amount;
}
if (trail == NULL) {
@@ -250,7 +251,7 @@ ecache_pai_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
if (trail == NULL) {
return true;
}
ecache_dalloc(tsdn, shard, ehooks, &shard->ecache_dirty, trail);
ecache_dalloc(tsdn, shard, ehooks, &shard->pac.ecache_dirty, trail);
return false;
}
@@ -281,7 +282,7 @@ ecache_pai_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata) {
pa_shard_t *shard =
(pa_shard_t *)((uintptr_t)self - offsetof(pa_shard_t, ecache_pai));
ehooks_t *ehooks = pa_shard_ehooks_get(shard);
ecache_dalloc(tsdn, shard, ehooks, &shard->ecache_dirty, edata);
ecache_dalloc(tsdn, shard, ehooks, &shard->pac.ecache_dirty, edata);
}
void
@@ -353,7 +354,7 @@ pa_decay_stashed(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay,
edata, /* offset */ 0, size);
if (!err) {
ecache_dalloc(tsdn, shard, ehooks,
&shard->ecache_muzzy, edata);
&shard->pac.ecache_muzzy, edata);
break;
}
}

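One detail worth calling out in the pa.c hunks above: the ecache_pai_* callbacks receive only the embedded pai_t *self and recover their enclosing pa_shard_t with the offsetof() arithmetic visible in ecache_pai_dalloc(). The standalone program below demonstrates the same container-recovery trick with made-up widget/engine types; it is purely illustrative and none of it is jemalloc code:

/*
 * Self-contained demo of recovering an enclosing struct from a pointer to an
 * embedded member, as the ecache_pai_* callbacks do; names are invented.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct engine_s {
	int id;
} engine_t;

typedef struct widget_s {
	int payload;
	engine_t engine;	/* embedded interface-like object */
} widget_t;

/* Given a pointer to the embedded member, step back to the container. */
static widget_t *
widget_from_engine(engine_t *self) {
	return (widget_t *)((uintptr_t)self - offsetof(widget_t, engine));
}

int
main(void) {
	widget_t w = {.payload = 42, .engine = {.id = 7}};
	widget_t *recovered = widget_from_engine(&w.engine);
	printf("payload = %d\n", recovered->payload);	/* prints 42 */
	return 0;
}

The trick is only sound because the pai_t really is embedded inside pa_shard_t; handing one of these callbacks a free-standing pai_t would make the subtraction point into unrelated memory.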
View File

@@ -21,9 +21,9 @@ pa_shard_prefork2(tsdn_t *tsdn, pa_shard_t *shard) {
void
pa_shard_prefork3(tsdn_t *tsdn, pa_shard_t *shard) {
ecache_prefork(tsdn, &shard->ecache_dirty);
ecache_prefork(tsdn, &shard->ecache_muzzy);
ecache_prefork(tsdn, &shard->ecache_retained);
ecache_prefork(tsdn, &shard->pac.ecache_dirty);
ecache_prefork(tsdn, &shard->pac.ecache_muzzy);
ecache_prefork(tsdn, &shard->pac.ecache_retained);
}
@@ -35,9 +35,9 @@ pa_shard_prefork4(tsdn_t *tsdn, pa_shard_t *shard) {
void
pa_shard_postfork_parent(tsdn_t *tsdn, pa_shard_t *shard) {
edata_cache_postfork_parent(tsdn, &shard->edata_cache);
ecache_postfork_parent(tsdn, &shard->ecache_dirty);
ecache_postfork_parent(tsdn, &shard->ecache_muzzy);
ecache_postfork_parent(tsdn, &shard->ecache_retained);
ecache_postfork_parent(tsdn, &shard->pac.ecache_dirty);
ecache_postfork_parent(tsdn, &shard->pac.ecache_muzzy);
ecache_postfork_parent(tsdn, &shard->pac.ecache_retained);
ecache_grow_postfork_parent(tsdn, &shard->ecache_grow);
malloc_mutex_postfork_parent(tsdn, &shard->decay_dirty.mtx);
malloc_mutex_postfork_parent(tsdn, &shard->decay_muzzy.mtx);
@@ -46,9 +46,9 @@ pa_shard_postfork_parent(tsdn_t *tsdn, pa_shard_t *shard) {
void
pa_shard_postfork_child(tsdn_t *tsdn, pa_shard_t *shard) {
edata_cache_postfork_child(tsdn, &shard->edata_cache);
ecache_postfork_child(tsdn, &shard->ecache_dirty);
ecache_postfork_child(tsdn, &shard->ecache_muzzy);
ecache_postfork_child(tsdn, &shard->ecache_retained);
ecache_postfork_child(tsdn, &shard->pac.ecache_dirty);
ecache_postfork_child(tsdn, &shard->pac.ecache_muzzy);
ecache_postfork_child(tsdn, &shard->pac.ecache_retained);
ecache_grow_postfork_child(tsdn, &shard->ecache_grow);
malloc_mutex_postfork_child(tsdn, &shard->decay_dirty.mtx);
malloc_mutex_postfork_child(tsdn, &shard->decay_muzzy.mtx);
@@ -58,8 +58,8 @@ void
pa_shard_basic_stats_merge(pa_shard_t *shard, size_t *nactive, size_t *ndirty,
size_t *nmuzzy) {
*nactive += atomic_load_zu(&shard->nactive, ATOMIC_RELAXED);
*ndirty += ecache_npages_get(&shard->ecache_dirty);
*nmuzzy += ecache_npages_get(&shard->ecache_muzzy);
*ndirty += ecache_npages_get(&shard->pac.ecache_dirty);
*nmuzzy += ecache_npages_get(&shard->pac.ecache_muzzy);
}
void
@@ -69,13 +69,13 @@ pa_shard_stats_merge(tsdn_t *tsdn, pa_shard_t *shard,
cassert(config_stats);
shard_stats_out->retained +=
ecache_npages_get(&shard->ecache_retained) << LG_PAGE;
ecache_npages_get(&shard->pac.ecache_retained) << LG_PAGE;
shard_stats_out->edata_avail += atomic_load_zu(
&shard->edata_cache.count, ATOMIC_RELAXED);
size_t resident_pgs = 0;
resident_pgs += atomic_load_zu(&shard->nactive, ATOMIC_RELAXED);
resident_pgs += ecache_npages_get(&shard->ecache_dirty);
resident_pgs += ecache_npages_get(&shard->pac.ecache_dirty);
*resident += (resident_pgs << LG_PAGE);
/* Dirty decay stats */
@@ -112,12 +112,13 @@ pa_shard_stats_merge(tsdn_t *tsdn, pa_shard_t *shard,
for (pszind_t i = 0; i < SC_NPSIZES; i++) {
size_t dirty, muzzy, retained, dirty_bytes, muzzy_bytes,
retained_bytes;
dirty = ecache_nextents_get(&shard->ecache_dirty, i);
muzzy = ecache_nextents_get(&shard->ecache_muzzy, i);
retained = ecache_nextents_get(&shard->ecache_retained, i);
dirty_bytes = ecache_nbytes_get(&shard->ecache_dirty, i);
muzzy_bytes = ecache_nbytes_get(&shard->ecache_muzzy, i);
retained_bytes = ecache_nbytes_get(&shard->ecache_retained, i);
dirty = ecache_nextents_get(&shard->pac.ecache_dirty, i);
muzzy = ecache_nextents_get(&shard->pac.ecache_muzzy, i);
retained = ecache_nextents_get(&shard->pac.ecache_retained, i);
dirty_bytes = ecache_nbytes_get(&shard->pac.ecache_dirty, i);
muzzy_bytes = ecache_nbytes_get(&shard->pac.ecache_muzzy, i);
retained_bytes = ecache_nbytes_get(&shard->pac.ecache_retained,
i);
extent_stats_out[i].ndirty = dirty;
extent_stats_out[i].nmuzzy = muzzy;
@@ -142,11 +143,11 @@ pa_shard_mtx_stats_read(tsdn_t *tsdn, pa_shard_t *shard,
pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,
&shard->edata_cache.mtx, arena_prof_mutex_extent_avail);
pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,
&shard->ecache_dirty.mtx, arena_prof_mutex_extents_dirty);
&shard->pac.ecache_dirty.mtx, arena_prof_mutex_extents_dirty);
pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,
&shard->ecache_muzzy.mtx, arena_prof_mutex_extents_muzzy);
&shard->pac.ecache_muzzy.mtx, arena_prof_mutex_extents_muzzy);
pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,
&shard->ecache_retained.mtx, arena_prof_mutex_extents_retained);
&shard->pac.ecache_retained.mtx, arena_prof_mutex_extents_retained);
pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,
&shard->decay_dirty.mtx, arena_prof_mutex_decay_dirty);
pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,

View File

@@ -90,7 +90,7 @@ do_alloc_free_purge(void *arg) {
pa_decay_all(TSDN_NULL, &test_data->shard,
&test_data->shard.decay_dirty,
&test_data->stats.decay_dirty,
&test_data->shard.ecache_dirty, true);
&test_data->shard.pac.ecache_dirty, true);
malloc_mutex_unlock(TSDN_NULL,
&test_data->shard.decay_dirty.mtx);
}