edata_cache: Allow unbounded fast caching.

The edata_cache_small had a fill/flush heuristic.  In retrospect, this was a
premature optimization; more testing indicates that an unbounded cache is
effectively fine here, and moreover we spend a nontrivial amount of time doing
unnecessary filling/flushing.
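
To make the change concrete before the diffs: the old put path kept a count and
flushed the whole cache back to the fallback whenever it crossed a fixed bound,
while the new put is a bare LIFO prepend. A minimal sketch of the two policies
(hypothetical cache_t/node_t stand-ins, not the real jemalloc types; the actual
change is in src/edata_cache.c below):

    #include <stddef.h>

    /* Illustrative stand-ins for edata_t / edata_cache_fast_t. */
    typedef struct node_s node_t;
    struct node_s {
            node_t *next;
    };
    typedef struct {
            node_t *head;
            size_t count;
    } cache_t;

    #define CACHE_MAX 16    /* played the role of EDATA_CACHE_SMALL_MAX. */

    static void
    flush_all_to_fallback(cache_t *c) {
            /* The real version hands each entry back under the fallback mutex. */
            c->head = NULL;
            c->count = 0;
    }

    /* Before: every put maintained a count and flushed past the bound. */
    static void
    cache_put_bounded(cache_t *c, node_t *n) {
            n->next = c->head;
            c->head = n;    /* LIFO prepend, for cache locality. */
            if (++c->count > CACHE_MAX) {
                    flush_all_to_fallback(c);
            }
    }

    /* After: put is unconditional; no count, no flush heuristic.  The list
     * is flushed only when the cache is disabled. */
    static void
    cache_put_unbounded(cache_t *c, node_t *n) {
            n->next = c->head;
            c->head = n;
    }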

As the HPA takes on a larger and larger fraction of all allocations, any
theoretical differences in allocation patterns should shrink.  The HPA is more
efficient with its metadata in general, so it still comes out ahead on metadata
usage anyway.
Author: David Goldblatt, 2021-07-23 15:29:43 -07:00 (committed by David Goldblatt)
Parent: d93eef2f40
Commit: 92a1e38f52
5 changed files with 99 additions and 151 deletions

include/jemalloc/internal/edata_cache.h

@@ -3,15 +3,8 @@
 #include "jemalloc/internal/base.h"
 
-/*
- * Public for tests.  When we go to the fallback when the small cache is empty,
- * we grab up to 8 items (grabbing less only if the fallback is exhausted).
- * When we exceed 16, we flush.  This caps the maximum memory lost per cache to
- * 16 * sizeof(edata_t), a max of 2k on architectures where the edata_t is 128
- * bytes.
- */
-#define EDATA_CACHE_SMALL_MAX 16
-#define EDATA_CACHE_SMALL_FILL 8
+/* For tests only. */
+#define EDATA_CACHE_FAST_FILL 4
 
 /*
  * A cache of edata_t structures allocated via base_alloc_edata (as opposed to
@@ -40,18 +33,17 @@ void edata_cache_postfork_child(tsdn_t *tsdn, edata_cache_t *edata_cache);
  * synchronization and avoids first-fit strategies.
  */
-typedef struct edata_cache_small_s edata_cache_small_t;
-struct edata_cache_small_s {
+typedef struct edata_cache_fast_s edata_cache_fast_t;
+struct edata_cache_fast_s {
         edata_list_inactive_t list;
-        size_t count;
         edata_cache_t *fallback;
         bool disabled;
 };
 
-void edata_cache_small_init(edata_cache_small_t *ecs, edata_cache_t *fallback);
-edata_t *edata_cache_small_get(tsdn_t *tsdn, edata_cache_small_t *ecs);
-void edata_cache_small_put(tsdn_t *tsdn, edata_cache_small_t *ecs,
+void edata_cache_fast_init(edata_cache_fast_t *ecs, edata_cache_t *fallback);
+edata_t *edata_cache_fast_get(tsdn_t *tsdn, edata_cache_fast_t *ecs);
+void edata_cache_fast_put(tsdn_t *tsdn, edata_cache_fast_t *ecs,
     edata_t *edata);
-void edata_cache_small_disable(tsdn_t *tsdn, edata_cache_small_t *ecs);
+void edata_cache_fast_disable(tsdn_t *tsdn, edata_cache_fast_t *ecs);
 
 #endif /* JEMALLOC_INTERNAL_EDATA_CACHE_H */
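
The surviving interface is small: init wires a fast cache to its fallback,
get/put are the hot paths, and disable flushes and forwards. A usage sketch
against the declarations above (the shard_* wrappers are hypothetical; the real
consumer is the HPA shard in src/hpa.c below, and the caller supplies its own
synchronization):

    edata_cache_t fallback;         /* Shared, mutex-protected slow path. */
    edata_cache_fast_t ecf;         /* Unsynchronized per-consumer front end. */

    void
    shard_setup(void) {
            /* Assumes fallback was already set up via edata_cache_init. */
            edata_cache_fast_init(&ecf, &fallback);
    }

    edata_t *
    shard_edata_alloc(tsdn_t *tsdn) {
            /* Pops the local LIFO; refills up to EDATA_CACHE_FAST_FILL from
             * the fallback (or base-allocates) only when the list is empty. */
            return edata_cache_fast_get(tsdn, &ecf);
    }

    void
    shard_edata_dalloc(tsdn_t *tsdn, edata_t *edata) {
            /* Always cached locally now; there is no flush threshold. */
            edata_cache_fast_put(tsdn, &ecf, edata);
    }

    void
    shard_teardown(tsdn_t *tsdn) {
            /* Flushes the list to the fallback; later get/put forward. */
            edata_cache_fast_disable(tsdn, &ecf);
    }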

include/jemalloc/internal/hpa.h

@@ -102,7 +102,7 @@ struct hpa_shard_s {
          * from a pageslab.  The pageslab itself comes from the centralized
          * allocator, and so will use its edata_cache.
          */
-        edata_cache_small_t ecs;
+        edata_cache_fast_t ecf;
 
         psset_t psset;

src/edata_cache.c

@@ -56,39 +56,34 @@ edata_cache_postfork_child(tsdn_t *tsdn, edata_cache_t *edata_cache) {
 }
 
 void
-edata_cache_small_init(edata_cache_small_t *ecs, edata_cache_t *fallback) {
+edata_cache_fast_init(edata_cache_fast_t *ecs, edata_cache_t *fallback) {
         edata_list_inactive_init(&ecs->list);
-        ecs->count = 0;
         ecs->fallback = fallback;
         ecs->disabled = false;
 }
 
 static void
-edata_cache_small_try_fill_from_fallback(tsdn_t *tsdn,
-    edata_cache_small_t *ecs) {
-        assert(ecs->count == 0);
+edata_cache_fast_try_fill_from_fallback(tsdn_t *tsdn,
+    edata_cache_fast_t *ecs) {
         edata_t *edata;
         malloc_mutex_lock(tsdn, &ecs->fallback->mtx);
-        while (ecs->count < EDATA_CACHE_SMALL_FILL) {
-                edata = edata_avail_first(&ecs->fallback->avail);
+        for (int i = 0; i < EDATA_CACHE_FAST_FILL; i++) {
+                edata = edata_avail_remove_first(&ecs->fallback->avail);
                 if (edata == NULL) {
                         break;
                 }
-                edata_avail_remove(&ecs->fallback->avail, edata);
                 edata_list_inactive_append(&ecs->list, edata);
-                ecs->count++;
                 atomic_load_sub_store_zu(&ecs->fallback->count, 1);
         }
         malloc_mutex_unlock(tsdn, &ecs->fallback->mtx);
 }
 
 edata_t *
-edata_cache_small_get(tsdn_t *tsdn, edata_cache_small_t *ecs) {
+edata_cache_fast_get(tsdn_t *tsdn, edata_cache_fast_t *ecs) {
         witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
             WITNESS_RANK_EDATA_CACHE, 0);
 
         if (ecs->disabled) {
-                assert(ecs->count == 0);
+                assert(edata_list_inactive_first(&ecs->list) == NULL);
                 return edata_cache_get(tsdn, ecs->fallback);
         }
@@ -96,15 +91,13 @@ edata_cache_small_get(tsdn_t *tsdn, edata_cache_small_t *ecs) {
         edata_t *edata = edata_list_inactive_first(&ecs->list);
         if (edata != NULL) {
                 edata_list_inactive_remove(&ecs->list, edata);
-                ecs->count--;
                 return edata;
         }
         /* Slow path; requires synchronization. */
-        edata_cache_small_try_fill_from_fallback(tsdn, ecs);
+        edata_cache_fast_try_fill_from_fallback(tsdn, ecs);
         edata = edata_list_inactive_first(&ecs->list);
         if (edata != NULL) {
                 edata_list_inactive_remove(&ecs->list, edata);
-                ecs->count--;
         } else {
                 /*
                  * Slowest path (fallback was also empty); allocate something
@@ -116,7 +109,7 @@ edata_cache_small_get(tsdn_t *tsdn, edata_cache_small_t *ecs) {
 }
 
 static void
-edata_cache_small_flush_all(tsdn_t *tsdn, edata_cache_small_t *ecs) {
+edata_cache_fast_flush_all(tsdn_t *tsdn, edata_cache_fast_t *ecs) {
         /*
          * You could imagine smarter cache management policies (like
          * only flushing down to some threshold in anticipation of
@@ -132,19 +125,16 @@ edata_cache_small_flush_all(tsdn_t *tsdn, edata_cache_small_t *ecs) {
                 edata_avail_insert(&ecs->fallback->avail, edata);
                 nflushed++;
         }
-        atomic_load_add_store_zu(&ecs->fallback->count, ecs->count);
+        atomic_load_add_store_zu(&ecs->fallback->count, nflushed);
         malloc_mutex_unlock(tsdn, &ecs->fallback->mtx);
-        assert(nflushed == ecs->count);
-        ecs->count = 0;
 }
 
 void
-edata_cache_small_put(tsdn_t *tsdn, edata_cache_small_t *ecs, edata_t *edata) {
+edata_cache_fast_put(tsdn_t *tsdn, edata_cache_fast_t *ecs, edata_t *edata) {
         witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
             WITNESS_RANK_EDATA_CACHE, 0);
 
         if (ecs->disabled) {
-                assert(ecs->count == 0);
+                assert(edata_list_inactive_first(&ecs->list) == NULL);
                 edata_cache_put(tsdn, ecs->fallback, edata);
                 return;
@@ -155,15 +145,10 @@ edata_cache_small_put(tsdn_t *tsdn, edata_cache_small_t *ecs, edata_t *edata) {
          * cache locality.
          */
         edata_list_inactive_prepend(&ecs->list, edata);
-        ecs->count++;
-        if (ecs->count > EDATA_CACHE_SMALL_MAX) {
-                assert(ecs->count == EDATA_CACHE_SMALL_MAX + 1);
-                edata_cache_small_flush_all(tsdn, ecs);
-        }
 }
 
 void
-edata_cache_small_disable(tsdn_t *tsdn, edata_cache_small_t *ecs) {
-        edata_cache_small_flush_all(tsdn, ecs);
+edata_cache_fast_disable(tsdn_t *tsdn, edata_cache_fast_t *ecs) {
+        edata_cache_fast_flush_all(tsdn, ecs);
         ecs->disabled = true;
 }

src/hpa.c

@@ -187,7 +187,7 @@ hpa_shard_init(hpa_shard_t *shard, hpa_central_t *central, emap_t *emap,
         assert(edata_cache != NULL);
         shard->central = central;
         shard->base = base;
-        edata_cache_small_init(&shard->ecs, edata_cache);
+        edata_cache_fast_init(&shard->ecf, edata_cache);
         psset_init(&shard->psset);
         shard->age_counter = 0;
         shard->ind = ind;
@@ -537,7 +537,7 @@ static edata_t *
 hpa_try_alloc_one_no_grow(tsdn_t *tsdn, hpa_shard_t *shard, size_t size,
     bool *oom) {
         bool err;
-        edata_t *edata = edata_cache_small_get(tsdn, &shard->ecs);
+        edata_t *edata = edata_cache_fast_get(tsdn, &shard->ecf);
         if (edata == NULL) {
                 *oom = true;
                 return NULL;
@@ -545,7 +545,7 @@ hpa_try_alloc_one_no_grow(tsdn_t *tsdn, hpa_shard_t *shard, size_t size,
 
         hpdata_t *ps = psset_pick_alloc(&shard->psset, size);
         if (ps == NULL) {
-                edata_cache_small_put(tsdn, &shard->ecs, edata);
+                edata_cache_fast_put(tsdn, &shard->ecf, edata);
                 return NULL;
         }
@@ -592,7 +592,7 @@ hpa_try_alloc_one_no_grow(tsdn_t *tsdn, hpa_shard_t *shard, size_t size,
                  * tweaked the stats, but our tweaks weren't really accurate).
                  */
                 psset_update_end(&shard->psset, ps);
-                edata_cache_small_put(tsdn, &shard->ecs, edata);
+                edata_cache_fast_put(tsdn, &shard->ecf, edata);
                 *oom = true;
                 return NULL;
         }
@@ -805,7 +805,7 @@ hpa_dalloc_locked(tsdn_t *tsdn, hpa_shard_t *shard, edata_t *edata) {
         assert(ps != NULL);
         void *unreserve_addr = edata_addr_get(edata);
         size_t unreserve_size = edata_size_get(edata);
-        edata_cache_small_put(tsdn, &shard->ecs, edata);
+        edata_cache_fast_put(tsdn, &shard->ecf, edata);
 
         psset_update_begin(&shard->psset, ps);
         hpdata_unreserve(ps, unreserve_addr, unreserve_size);
@@ -844,7 +844,7 @@ hpa_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata) {
 
 void
 hpa_shard_disable(tsdn_t *tsdn, hpa_shard_t *shard) {
         malloc_mutex_lock(tsdn, &shard->mtx);
-        edata_cache_small_disable(tsdn, &shard->ecs);
+        edata_cache_fast_disable(tsdn, &shard->ecf);
         malloc_mutex_unlock(tsdn, &shard->mtx);
 }
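
Note that edata_cache_fast_t has no internal lock; in the HPA, the get/put
sites above execute with shard->mtx held, and hpa_shard_disable takes it
explicitly (init runs before the shard is shared). A hypothetical wrapper, not
part of this commit, makes that discipline explicit using jemalloc's existing
malloc_mutex_assert_owner:

    /* Hypothetical helper: documents that shard->ecf is only touched with
     * shard->mtx held, since the fast cache itself is unsynchronized and
     * only the fallback edata_cache has its own mutex. */
    static edata_t *
    hpa_shard_edata_get_locked(tsdn_t *tsdn, hpa_shard_t *shard) {
            malloc_mutex_assert_owner(tsdn, &shard->mtx);
            return edata_cache_fast_get(tsdn, &shard->ecf);
    }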

test/unit/edata_cache.c

@@ -47,38 +47,48 @@ TEST_BEGIN(test_edata_cache) {
 }
 TEST_END
 
-TEST_BEGIN(test_edata_cache_small_simple) {
+static size_t
+ecf_count(edata_cache_fast_t *ecf) {
+        size_t count = 0;
+        edata_t *cur;
+        ql_foreach(cur, &ecf->list.head, ql_link_inactive) {
+                count++;
+        }
+        return count;
+}
+
+TEST_BEGIN(test_edata_cache_fast_simple) {
         edata_cache_t ec;
-        edata_cache_small_t ecs;
+        edata_cache_fast_t ecf;
 
         test_edata_cache_init(&ec);
-        edata_cache_small_init(&ecs, &ec);
+        edata_cache_fast_init(&ecf, &ec);
 
-        edata_t *ed1 = edata_cache_small_get(TSDN_NULL, &ecs);
+        edata_t *ed1 = edata_cache_fast_get(TSDN_NULL, &ecf);
         expect_ptr_not_null(ed1, "");
-        expect_zu_eq(ecs.count, 0, "");
+        expect_zu_eq(ecf_count(&ecf), 0, "");
         expect_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
 
-        edata_t *ed2 = edata_cache_small_get(TSDN_NULL, &ecs);
+        edata_t *ed2 = edata_cache_fast_get(TSDN_NULL, &ecf);
         expect_ptr_not_null(ed2, "");
-        expect_zu_eq(ecs.count, 0, "");
+        expect_zu_eq(ecf_count(&ecf), 0, "");
         expect_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
 
-        edata_cache_small_put(TSDN_NULL, &ecs, ed1);
-        expect_zu_eq(ecs.count, 1, "");
+        edata_cache_fast_put(TSDN_NULL, &ecf, ed1);
+        expect_zu_eq(ecf_count(&ecf), 1, "");
         expect_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
 
-        edata_cache_small_put(TSDN_NULL, &ecs, ed2);
-        expect_zu_eq(ecs.count, 2, "");
+        edata_cache_fast_put(TSDN_NULL, &ecf, ed2);
+        expect_zu_eq(ecf_count(&ecf), 2, "");
         expect_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
 
         /* LIFO ordering. */
-        expect_ptr_eq(ed2, edata_cache_small_get(TSDN_NULL, &ecs), "");
-        expect_zu_eq(ecs.count, 1, "");
+        expect_ptr_eq(ed2, edata_cache_fast_get(TSDN_NULL, &ecf), "");
+        expect_zu_eq(ecf_count(&ecf), 1, "");
         expect_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
 
-        expect_ptr_eq(ed1, edata_cache_small_get(TSDN_NULL, &ecs), "");
-        expect_zu_eq(ecs.count, 0, "");
+        expect_ptr_eq(ed1, edata_cache_fast_get(TSDN_NULL, &ecf), "");
+        expect_zu_eq(ecf_count(&ecf), 0, "");
         expect_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
 
         test_edata_cache_destroy(&ec);
@@ -87,41 +97,41 @@ TEST_END
 
 TEST_BEGIN(test_edata_cache_fill) {
         edata_cache_t ec;
-        edata_cache_small_t ecs;
+        edata_cache_fast_t ecf;
 
         test_edata_cache_init(&ec);
-        edata_cache_small_init(&ecs, &ec);
+        edata_cache_fast_init(&ecf, &ec);
 
-        edata_t *allocs[EDATA_CACHE_SMALL_FILL * 2];
+        edata_t *allocs[EDATA_CACHE_FAST_FILL * 2];
 
         /*
          * If the fallback cache can't satisfy the request, we shouldn't do
         * extra allocations until compelled to.  Put half the fill goal in the
         * fallback.
         */
-        for (int i = 0; i < EDATA_CACHE_SMALL_FILL / 2; i++) {
+        for (int i = 0; i < EDATA_CACHE_FAST_FILL / 2; i++) {
                 allocs[i] = edata_cache_get(TSDN_NULL, &ec);
         }
-        for (int i = 0; i < EDATA_CACHE_SMALL_FILL / 2; i++) {
+        for (int i = 0; i < EDATA_CACHE_FAST_FILL / 2; i++) {
                 edata_cache_put(TSDN_NULL, &ec, allocs[i]);
         }
-        expect_zu_eq(EDATA_CACHE_SMALL_FILL / 2,
+        expect_zu_eq(EDATA_CACHE_FAST_FILL / 2,
             atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");
 
-        allocs[0] = edata_cache_small_get(TSDN_NULL, &ecs);
-        expect_zu_eq(EDATA_CACHE_SMALL_FILL / 2 - 1, ecs.count,
+        allocs[0] = edata_cache_fast_get(TSDN_NULL, &ecf);
+        expect_zu_eq(EDATA_CACHE_FAST_FILL / 2 - 1, ecf_count(&ecf),
             "Should have grabbed all edatas available but no more.");
 
-        for (int i = 1; i < EDATA_CACHE_SMALL_FILL / 2; i++) {
-                allocs[i] = edata_cache_small_get(TSDN_NULL, &ecs);
+        for (int i = 1; i < EDATA_CACHE_FAST_FILL / 2; i++) {
+                allocs[i] = edata_cache_fast_get(TSDN_NULL, &ecf);
                 expect_ptr_not_null(allocs[i], "");
         }
-        expect_zu_eq(0, ecs.count, "");
+        expect_zu_eq(0, ecf_count(&ecf), "");
 
         /* When forced, we should alloc from the base. */
-        edata_t *edata = edata_cache_small_get(TSDN_NULL, &ecs);
+        edata_t *edata = edata_cache_fast_get(TSDN_NULL, &ecf);
         expect_ptr_not_null(edata, "");
-        expect_zu_eq(0, ecs.count, "Allocated more than necessary");
+        expect_zu_eq(0, ecf_count(&ecf), "Allocated more than necessary");
         expect_zu_eq(0, atomic_load_zu(&ec.count, ATOMIC_RELAXED),
             "Allocated more than necessary");
@@ -129,116 +139,78 @@ TEST_BEGIN(test_edata_cache_fill) {
         * We should correctly fill in the common case where the fallback isn't
         * exhausted, too.
         */
-        for (int i = 0; i < EDATA_CACHE_SMALL_FILL * 2; i++) {
+        for (int i = 0; i < EDATA_CACHE_FAST_FILL * 2; i++) {
                 allocs[i] = edata_cache_get(TSDN_NULL, &ec);
                 expect_ptr_not_null(allocs[i], "");
         }
-        for (int i = 0; i < EDATA_CACHE_SMALL_FILL * 2; i++) {
+        for (int i = 0; i < EDATA_CACHE_FAST_FILL * 2; i++) {
                 edata_cache_put(TSDN_NULL, &ec, allocs[i]);
         }
 
-        allocs[0] = edata_cache_small_get(TSDN_NULL, &ecs);
-        expect_zu_eq(EDATA_CACHE_SMALL_FILL - 1, ecs.count, "");
-        expect_zu_eq(EDATA_CACHE_SMALL_FILL,
+        allocs[0] = edata_cache_fast_get(TSDN_NULL, &ecf);
+        expect_zu_eq(EDATA_CACHE_FAST_FILL - 1, ecf_count(&ecf), "");
+        expect_zu_eq(EDATA_CACHE_FAST_FILL,
             atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");
-        for (int i = 1; i < EDATA_CACHE_SMALL_FILL; i++) {
-                expect_zu_eq(EDATA_CACHE_SMALL_FILL - i, ecs.count, "");
-                expect_zu_eq(EDATA_CACHE_SMALL_FILL,
+        for (int i = 1; i < EDATA_CACHE_FAST_FILL; i++) {
+                expect_zu_eq(EDATA_CACHE_FAST_FILL - i, ecf_count(&ecf), "");
+                expect_zu_eq(EDATA_CACHE_FAST_FILL,
                     atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");
-                allocs[i] = edata_cache_small_get(TSDN_NULL, &ecs);
+                allocs[i] = edata_cache_fast_get(TSDN_NULL, &ecf);
                 expect_ptr_not_null(allocs[i], "");
         }
-        expect_zu_eq(0, ecs.count, "");
-        expect_zu_eq(EDATA_CACHE_SMALL_FILL,
+        expect_zu_eq(0, ecf_count(&ecf), "");
+        expect_zu_eq(EDATA_CACHE_FAST_FILL,
             atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");
 
-        allocs[0] = edata_cache_small_get(TSDN_NULL, &ecs);
-        expect_zu_eq(EDATA_CACHE_SMALL_FILL - 1, ecs.count, "");
+        allocs[0] = edata_cache_fast_get(TSDN_NULL, &ecf);
+        expect_zu_eq(EDATA_CACHE_FAST_FILL - 1, ecf_count(&ecf), "");
         expect_zu_eq(0, atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");
-        for (int i = 1; i < EDATA_CACHE_SMALL_FILL; i++) {
-                expect_zu_eq(EDATA_CACHE_SMALL_FILL - i, ecs.count, "");
+        for (int i = 1; i < EDATA_CACHE_FAST_FILL; i++) {
+                expect_zu_eq(EDATA_CACHE_FAST_FILL - i, ecf_count(&ecf), "");
                 expect_zu_eq(0, atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");
-                allocs[i] = edata_cache_small_get(TSDN_NULL, &ecs);
+                allocs[i] = edata_cache_fast_get(TSDN_NULL, &ecf);
                 expect_ptr_not_null(allocs[i], "");
         }
-        expect_zu_eq(0, ecs.count, "");
+        expect_zu_eq(0, ecf_count(&ecf), "");
         expect_zu_eq(0, atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");
 
         test_edata_cache_destroy(&ec);
 }
 TEST_END
 
-TEST_BEGIN(test_edata_cache_flush) {
-        edata_cache_t ec;
-        edata_cache_small_t ecs;
-
-        test_edata_cache_init(&ec);
-        edata_cache_small_init(&ecs, &ec);
-
-        edata_t *allocs[2 * EDATA_CACHE_SMALL_MAX + 2];
-        for (int i = 0; i < 2 * EDATA_CACHE_SMALL_MAX + 2; i++) {
-                allocs[i] = edata_cache_get(TSDN_NULL, &ec);
-                expect_ptr_not_null(allocs[i], "");
-        }
-        for (int i = 0; i < EDATA_CACHE_SMALL_MAX; i++) {
-                edata_cache_small_put(TSDN_NULL, &ecs, allocs[i]);
-                expect_zu_eq(i + 1, ecs.count, "");
-                expect_zu_eq(0, atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");
-        }
-        edata_cache_small_put(TSDN_NULL, &ecs, allocs[EDATA_CACHE_SMALL_MAX]);
-        expect_zu_eq(0, ecs.count, "");
-        expect_zu_eq(EDATA_CACHE_SMALL_MAX + 1,
-            atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");
-        for (int i = EDATA_CACHE_SMALL_MAX + 1;
-            i < 2 * EDATA_CACHE_SMALL_MAX + 1; i++) {
-                edata_cache_small_put(TSDN_NULL, &ecs, allocs[i]);
-                expect_zu_eq(i - EDATA_CACHE_SMALL_MAX, ecs.count, "");
-                expect_zu_eq(EDATA_CACHE_SMALL_MAX + 1,
-                    atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");
-        }
-        edata_cache_small_put(TSDN_NULL, &ecs, allocs[2 * EDATA_CACHE_SMALL_MAX + 1]);
-        expect_zu_eq(0, ecs.count, "");
-        expect_zu_eq(2 * EDATA_CACHE_SMALL_MAX + 2,
-            atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");
-
-        test_edata_cache_destroy(&ec);
-}
-TEST_END
-
 TEST_BEGIN(test_edata_cache_disable) {
         edata_cache_t ec;
-        edata_cache_small_t ecs;
+        edata_cache_fast_t ecf;
 
         test_edata_cache_init(&ec);
-        edata_cache_small_init(&ecs, &ec);
+        edata_cache_fast_init(&ecf, &ec);
 
-        for (int i = 0; i < EDATA_CACHE_SMALL_FILL; i++) {
+        for (int i = 0; i < EDATA_CACHE_FAST_FILL; i++) {
                 edata_t *edata = edata_cache_get(TSDN_NULL, &ec);
                 expect_ptr_not_null(edata, "");
-                edata_cache_small_put(TSDN_NULL, &ecs, edata);
+                edata_cache_fast_put(TSDN_NULL, &ecf, edata);
         }
 
-        expect_zu_eq(EDATA_CACHE_SMALL_FILL, ecs.count, "");
+        expect_zu_eq(EDATA_CACHE_FAST_FILL, ecf_count(&ecf), "");
         expect_zu_eq(0, atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");
 
-        edata_cache_small_disable(TSDN_NULL, &ecs);
+        edata_cache_fast_disable(TSDN_NULL, &ecf);
 
-        expect_zu_eq(0, ecs.count, "");
-        expect_zu_eq(EDATA_CACHE_SMALL_FILL,
+        expect_zu_eq(0, ecf_count(&ecf), "");
+        expect_zu_eq(EDATA_CACHE_FAST_FILL,
             atomic_load_zu(&ec.count, ATOMIC_RELAXED), "Disabling should flush");
 
-        edata_t *edata = edata_cache_small_get(TSDN_NULL, &ecs);
-        expect_zu_eq(0, ecs.count, "");
-        expect_zu_eq(EDATA_CACHE_SMALL_FILL - 1,
+        edata_t *edata = edata_cache_fast_get(TSDN_NULL, &ecf);
+        expect_zu_eq(0, ecf_count(&ecf), "");
+        expect_zu_eq(EDATA_CACHE_FAST_FILL - 1,
             atomic_load_zu(&ec.count, ATOMIC_RELAXED),
-            "Disabled ecs should forward on get");
+            "Disabled ecf should forward on get");
 
-        edata_cache_small_put(TSDN_NULL, &ecs, edata);
-        expect_zu_eq(0, ecs.count, "");
-        expect_zu_eq(EDATA_CACHE_SMALL_FILL,
+        edata_cache_fast_put(TSDN_NULL, &ecf, edata);
+        expect_zu_eq(0, ecf_count(&ecf), "");
+        expect_zu_eq(EDATA_CACHE_FAST_FILL,
             atomic_load_zu(&ec.count, ATOMIC_RELAXED),
-            "Disabled ecs should forward on put");
+            "Disabled ecf should forward on put");
 
         test_edata_cache_destroy(&ec);
 }
@@ -248,8 +220,7 @@ int
 main(void) {
         return test(
             test_edata_cache,
-            test_edata_cache_small_simple,
+            test_edata_cache_fast_simple,
             test_edata_cache_fill,
-            test_edata_cache_flush,
             test_edata_cache_disable);
 }