Implement opt.cache_oblivious.

Keep config.cache_oblivious for now to remain backward-compatible.
Qi Wang authored and committed 2021-02-09 22:24:35 -08:00
parent 8c5e5f50a2
commit a11be50332
12 changed files with 53 additions and 27 deletions
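Since the knob becomes a runtime option rather than a build-time #define, it can now be chosen at process startup. A minimal sketch of opting out of cache-oblivious padding, assuming a build that includes this change and a non-prefixed jemalloc (with a prefixed build the symbol is je_malloc_conf; the conf key is assumed to match the opt name, per jemalloc convention):

#include <stdlib.h>

/* Parsed during malloc_init, so it must be set before the first
 * allocation; equivalently: MALLOC_CONF=cache_oblivious:false. */
const char *malloc_conf = "cache_oblivious:false";

int main(void) {
	void *p = malloc(1 << 20);	/* large size class, now without the extra page of padding */
	free(p);
	return 0;
}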

src/ctl.c

@@ -90,6 +90,7 @@ CTL_PROTO(config_utrace)
 CTL_PROTO(config_xmalloc)
 CTL_PROTO(opt_abort)
 CTL_PROTO(opt_abort_conf)
+CTL_PROTO(opt_cache_oblivious)
 CTL_PROTO(opt_trust_madvise)
 CTL_PROTO(opt_confirm_conf)
 CTL_PROTO(opt_hpa)
@@ -395,6 +396,7 @@ static const ctl_named_node_t config_node[] = {
 static const ctl_named_node_t opt_node[] = {
 	{NAME("abort"), CTL(opt_abort)},
 	{NAME("abort_conf"), CTL(opt_abort_conf)},
+	{NAME("cache_oblivious"), CTL(opt_cache_oblivious)},
 	{NAME("trust_madvise"), CTL(opt_trust_madvise)},
 	{NAME("confirm_conf"), CTL(opt_confirm_conf)},
 	{NAME("hpa"), CTL(opt_hpa)},
@@ -2095,6 +2097,7 @@ CTL_RO_CONFIG_GEN(config_xmalloc, bool)
 CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
 CTL_RO_NL_GEN(opt_abort_conf, opt_abort_conf, bool)
+CTL_RO_NL_GEN(opt_cache_oblivious, opt_cache_oblivious, bool)
 CTL_RO_NL_GEN(opt_trust_madvise, opt_trust_madvise, bool)
 CTL_RO_NL_GEN(opt_confirm_conf, opt_confirm_conf, bool)
 CTL_RO_NL_GEN(opt_hpa, opt_hpa, bool)
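With the three pieces above (prototype, name-table entry, and read-only generator), the value becomes visible through the standard mallctl interface; a minimal read-side sketch:

#include <stdbool.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void) {
	bool co;
	size_t sz = sizeof(co);
	/* opt.* entries are read-only, so newp/newlen are NULL/0. */
	if (mallctl("opt.cache_oblivious", &co, &sz, NULL, 0) == 0) {
		printf("opt.cache_oblivious: %s\n", co ? "true" : "false");
	}
	return 0;
}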

src/jemalloc.c

@@ -102,6 +102,14 @@ bool opt_trust_madvise =
 #endif
     ;
+bool opt_cache_oblivious =
+#ifdef JEMALLOC_CACHE_OBLIVIOUS
+    true
+#else
+    false
+#endif
+    ;
+
 zero_realloc_action_t opt_zero_realloc_action =
     zero_realloc_action_strict;
@@ -1697,7 +1705,7 @@ malloc_init_hard_a0_locked() {
 		prof_boot0();
 	}
 	malloc_conf_init(&sc_data, bin_shard_sizes);
-	sz_boot(&sc_data);
+	sz_boot(&sc_data, opt_cache_oblivious);
 	bin_info_boot(&sc_data, bin_shard_sizes);
 	if (opt_stats_print) {
@@ -2790,12 +2798,7 @@ isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) {
 		 * usize can be trusted to determine szind and slab.
 		 */
 		alloc_ctx.szind = sz_size2index(usize);
-		if (config_cache_oblivious) {
-			alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS);
-		} else {
-			/* Non page aligned must be slab allocated. */
-			alloc_ctx.slab = true;
-		}
+		alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS);
 	} else if (opt_prof) {
 		emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global,
 		    ptr, &alloc_ctx);
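The deleted branch existed only because config_cache_oblivious was a compile-time constant; at runtime the unified line is valid either way, since indices below SC_NBINS are exactly the small, slab-backed size classes. A toy restatement of that invariant (SC_NBINS_DEMO is a hypothetical stand-in for jemalloc's SC_NBINS):

#include <stdbool.h>
#include <stddef.h>

#define SC_NBINS_DEMO 36	/* hypothetical value, for illustration only */

/* Small size classes live on slabs; large ones do not. This holds
 * whether or not cache-oblivious padding is enabled. */
static bool szind_is_slab(size_t szind) {
	return szind < SC_NBINS_DEMO;
}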

src/large.c

@@ -95,7 +95,8 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, edata_t *edata, size_t usize,
 	}
 	if (zero) {
-		if (config_cache_oblivious) {
+		if (opt_cache_oblivious) {
+			assert(sz_large_pad == PAGE);
 			/*
 			 * Zero the trailing bytes of the original allocation's
 			 * last page, since they are in an indeterminate state.
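The new assert pins down the only padding size the zeroing below has to handle. A standalone sketch of that zeroing, with PAGE_DEMO and page_base() as illustrative stand-ins for jemalloc's PAGE and PAGE_ADDR2BASE, and old_usize for the caller's previous usable size:

#include <stdint.h>
#include <string.h>

#define PAGE_DEMO ((size_t)4096)	/* stand-in for jemalloc's PAGE */

static void *page_base(uintptr_t a) {	/* stand-in for PAGE_ADDR2BASE */
	return (void *)(a & ~(PAGE_DEMO - 1));
}

/* Zero from the end of the old usable region up to the next page
 * boundary: with a PAGE of cache-oblivious padding, those trailing
 * bytes of the old last page are in an indeterminate state. */
static void zero_trailing(void *ptr, size_t old_usize) {
	uintptr_t zbase = (uintptr_t)ptr + old_usize;
	uintptr_t zpast = (uintptr_t)page_base(zbase + PAGE_DEMO);
	memset((void *)zbase, 0, zpast - zbase);
}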

src/stats.c

@@ -1458,6 +1458,7 @@ stats_general_print(emitter_t *emitter) {
 	OPT_WRITE_BOOL("abort")
 	OPT_WRITE_BOOL("abort_conf")
+	OPT_WRITE_BOOL("cache_oblivious")
 	OPT_WRITE_BOOL("confirm_conf")
 	OPT_WRITE_BOOL("retain")
 	OPT_WRITE_CHAR_P("dss")

src/sz.c

@@ -1,9 +1,10 @@
 #include "jemalloc/internal/jemalloc_preamble.h"
 #include "jemalloc/internal/jemalloc_internal_includes.h"
 #include "jemalloc/internal/sz.h"
 
 JEMALLOC_ALIGNED(CACHELINE)
 size_t sz_pind2sz_tab[SC_NPSIZES+1];
+size_t sz_large_pad;
 
 size_t
 sz_psz_quantize_floor(size_t size) {
@@ -105,7 +106,8 @@ sz_boot_size2index_tab(const sc_data_t *sc_data) {
 }
 
 void
-sz_boot(const sc_data_t *sc_data) {
+sz_boot(const sc_data_t *sc_data, bool cache_oblivious) {
+	sz_large_pad = cache_oblivious ? PAGE : 0;
 	sz_boot_pind2sz_tab(sc_data);
 	sz_boot_index2size_tab(sc_data);
 	sz_boot_size2index_tab(sc_data);
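sz_large_pad is what the option actually controls: when it is PAGE, every large allocation carries one extra page, which lets the arena start the usable region at a random cache-line offset inside the first page so that equally sized large allocations spread across cache sets. A toy sketch of the offset choice, with rand() standing in for jemalloc's internal PRNG and PAGE_DEMO/CACHELINE_DEMO for the real constants:

#include <stdint.h>
#include <stdlib.h>

#define PAGE_DEMO ((size_t)4096)	/* stand-ins, for illustration only */
#define CACHELINE_DEMO ((size_t)64)

/* One page of padding allows PAGE/CACHELINE = 64 distinct
 * cache-line-aligned starting offsets for the returned pointer. */
static void *apply_random_offset(void *base) {
	size_t off = ((size_t)rand() % (PAGE_DEMO / CACHELINE_DEMO)) * CACHELINE_DEMO;
	return (void *)((uintptr_t)base + off);
}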