#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

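/*
 * A simple mutex-protected cache of unused edata_t structures, backed by the
 * base allocator.  "avail" holds the cached entries and "count" tracks how
 * many of them are currently cached.
 */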
bool
edata_cache_init(edata_cache_t *edata_cache, base_t *base) {
	edata_avail_new(&edata_cache->avail);
	/*
	 * This is not strictly necessary, since the edata_cache_t is only
	 * created inside an arena, which is zeroed on creation. But this is
	 * handy as a safety measure.
	 */
	atomic_store_zu(&edata_cache->count, 0, ATOMIC_RELAXED);
	if (malloc_mutex_init(&edata_cache->mtx, "edata_cache",
	    WITNESS_RANK_EDATA_CACHE, malloc_mutex_rank_exclusive)) {
		return true;
	}
	edata_cache->base = base;
	return false;
}

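/*
 * Pops a cached edata_t if one is available; otherwise allocates a fresh one
 * from the base allocator.  "count" is only decremented when an entry actually
 * comes out of the cache.
 */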
edata_t *
edata_cache_get(tsdn_t *tsdn, edata_cache_t *edata_cache) {
	malloc_mutex_lock(tsdn, &edata_cache->mtx);
	edata_t *edata = edata_avail_first(&edata_cache->avail);
	if (edata == NULL) {
		malloc_mutex_unlock(tsdn, &edata_cache->mtx);
		return base_alloc_edata(tsdn, edata_cache->base);
	}
	edata_avail_remove(&edata_cache->avail, edata);
	atomic_load_sub_store_zu(&edata_cache->count, 1);
	malloc_mutex_unlock(tsdn, &edata_cache->mtx);
	return edata;
}

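/* Returns an edata_t to the cache and bumps the cached-entry count. */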
void
edata_cache_put(tsdn_t *tsdn, edata_cache_t *edata_cache, edata_t *edata) {
	malloc_mutex_lock(tsdn, &edata_cache->mtx);
	edata_avail_insert(&edata_cache->avail, edata);
	atomic_load_add_store_zu(&edata_cache->count, 1);
	malloc_mutex_unlock(tsdn, &edata_cache->mtx);
}

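/* Fork-safety hooks; they simply forward to the cache mutex. */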
void
edata_cache_prefork(tsdn_t *tsdn, edata_cache_t *edata_cache) {
	malloc_mutex_prefork(tsdn, &edata_cache->mtx);
}

void
edata_cache_postfork_parent(tsdn_t *tsdn, edata_cache_t *edata_cache) {
	malloc_mutex_postfork_parent(tsdn, &edata_cache->mtx);
}

void
edata_cache_postfork_child(tsdn_t *tsdn, edata_cache_t *edata_cache) {
	malloc_mutex_postfork_child(tsdn, &edata_cache->mtx);
}

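/*
 * The "fast" cache is an unsynchronized front-end: gets and puts operate on a
 * local list, and the locked fallback edata_cache_t is only touched when the
 * list needs refilling or flushing.
 *
 * Illustrative usage sketch ("some_edata_cache" and "tsdn" are placeholders,
 * not names defined in this file):
 *
 *	edata_cache_fast_t ecs;
 *	edata_cache_fast_init(&ecs, &some_edata_cache);
 *	edata_t *e = edata_cache_fast_get(tsdn, &ecs);
 *	...
 *	edata_cache_fast_put(tsdn, &ecs, e);
 */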
void
edata_cache_fast_init(edata_cache_fast_t *ecs, edata_cache_t *fallback) {
	edata_list_inactive_init(&ecs->list);
	ecs->fallback = fallback;
	ecs->disabled = false;
}

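/*
 * Moves up to EDATA_CACHE_FAST_FILL entries from the fallback cache onto the
 * local list, all under the fallback's mutex.
 */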
static void
edata_cache_fast_try_fill_from_fallback(tsdn_t *tsdn,
    edata_cache_fast_t *ecs) {
	edata_t *edata;
	malloc_mutex_lock(tsdn, &ecs->fallback->mtx);
	for (int i = 0; i < EDATA_CACHE_FAST_FILL; i++) {
		edata = edata_avail_remove_first(&ecs->fallback->avail);
		if (edata == NULL) {
			break;
		}
		edata_list_inactive_append(&ecs->list, edata);
		atomic_load_sub_store_zu(&ecs->fallback->count, 1);
	}
	malloc_mutex_unlock(tsdn, &ecs->fallback->mtx);
}

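/*
 * Fast path: pop from the local list with no locking.  Slow path: refill from
 * the fallback cache.  Slowest path: allocate a new edata_t from the base
 * allocator.
 */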
edata_t *
edata_cache_fast_get(tsdn_t *tsdn, edata_cache_fast_t *ecs) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_EDATA_CACHE, 0);

	if (ecs->disabled) {
		assert(edata_list_inactive_first(&ecs->list) == NULL);
		return edata_cache_get(tsdn, ecs->fallback);
	}

	edata_t *edata = edata_list_inactive_first(&ecs->list);
	if (edata != NULL) {
		edata_list_inactive_remove(&ecs->list, edata);
		return edata;
	}
	/* Slow path; requires synchronization. */
	edata_cache_fast_try_fill_from_fallback(tsdn, ecs);
	edata = edata_list_inactive_first(&ecs->list);
	if (edata != NULL) {
		edata_list_inactive_remove(&ecs->list, edata);
	} else {
		/*
		 * Slowest path (fallback was also empty); allocate something
		 * new.
		 */
		edata = base_alloc_edata(tsdn, ecs->fallback->base);
	}
	return edata;
}

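/* Moves every locally cached edata_t back into the fallback cache. */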
static void
edata_cache_fast_flush_all(tsdn_t *tsdn, edata_cache_fast_t *ecs) {
	/*
	 * You could imagine smarter cache management policies (like
	 * only flushing down to some threshold in anticipation of
	 * future get requests). But just flushing everything provides
	 * a good opportunity to defrag too, and lets us share code between the
	 * flush and disable pathways.
	 */
	edata_t *edata;
	size_t nflushed = 0;
	malloc_mutex_lock(tsdn, &ecs->fallback->mtx);
	while ((edata = edata_list_inactive_first(&ecs->list)) != NULL) {
		edata_list_inactive_remove(&ecs->list, edata);
		edata_avail_insert(&ecs->fallback->avail, edata);
		nflushed++;
	}
	atomic_load_add_store_zu(&ecs->fallback->count, nflushed);
	malloc_mutex_unlock(tsdn, &ecs->fallback->mtx);
}

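/*
 * Returns an edata_t to the local list, or directly to the fallback cache if
 * the fast cache has been disabled.
 */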
void
edata_cache_fast_put(tsdn_t *tsdn, edata_cache_fast_t *ecs, edata_t *edata) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_EDATA_CACHE, 0);

	if (ecs->disabled) {
		assert(edata_list_inactive_first(&ecs->list) == NULL);
		edata_cache_put(tsdn, ecs->fallback, edata);
		return;
	}

	/*
	 * Prepend rather than append, to do LIFO ordering in the hopes of some
	 * cache locality.
	 */
	edata_list_inactive_prepend(&ecs->list, edata);
}

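/*
 * Flushes everything to the fallback cache and routes all future gets and
 * puts straight to it.
 */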
void
edata_cache_fast_disable(tsdn_t *tsdn, edata_cache_fast_t *ecs) {
	edata_cache_fast_flush_all(tsdn, ecs);
	ecs->disabled = true;
}