Fix shadowed variable usage.

Verified with EXTRA_CFLAGS=-Wshadow.
Qi Wang 2021-12-22 17:24:58 -08:00 committed by Qi Wang
parent bd70d8fc0f
commit d038160f3b
26 changed files with 119 additions and 120 deletions
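For reference, this is the class of warning the cleanup targets. Below is a minimal, hypothetical illustration (not code from jemalloc) of a shadowed local that -Wshadow reports; the flag is assumed to be passed through the build as a make variable, e.g. make EXTRA_CFLAGS=-Wshadow. The commit itself silences such warnings by renaming or removing the redundant inner declaration, not by changing behavior (e.g. zero becomes nstime_zero, prof_active becomes prof_active_state).

/*
 * shadow_demo.c -- hypothetical example. Compiling with
 * `cc -Wshadow -c shadow_demo.c` reports that the inner `tmp`
 * shadows the outer local of the same name.
 */
static int
shadow_demo(int x) {
	int tmp = x + 1;
	if (tmp > 0) {
		int tmp = x - 1;	/* shadows the outer `tmp` */
		return tmp;
	}
	return tmp;
}

int
main(void) {
	return shadow_demo(2);
}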

View File

@@ -20,7 +20,6 @@ percpu_arena_update(tsd_t *tsd, unsigned cpu) {
 	tcache_t *tcache = tcache_get(tsd);
 	if (tcache != NULL) {
 		tcache_slow_t *tcache_slow = tsd_tcache_slowp_get(tsd);
-		tcache_t *tcache = tsd_tcachep_get(tsd);
 		tcache_arena_reassociate(tsd_tsdn(tsd), tcache_slow,
 		    tcache, newarena);
 	}

View File

@@ -18,7 +18,7 @@ typedef struct {
 #endif
 } nstime_t;
-static const nstime_t zero = NSTIME_ZERO_INITIALIZER;
+static const nstime_t nstime_zero = NSTIME_ZERO_INITIALIZER;
 void nstime_init(nstime_t *time, uint64_t ns);
 void nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec);
@@ -60,12 +60,12 @@ extern const char *prof_time_res_mode_names[];
 JEMALLOC_ALWAYS_INLINE void
 nstime_init_zero(nstime_t *time) {
-	nstime_copy(time, &zero);
+	nstime_copy(time, &nstime_zero);
 }
 JEMALLOC_ALWAYS_INLINE bool
 nstime_equals_zero(nstime_t *time) {
-	int diff = nstime_compare(time, &zero);
+	int diff = nstime_compare(time, &nstime_zero);
 	assert(diff >= 0);
 	return diff == 0;
 }

View File

@@ -32,7 +32,7 @@ extern bool opt_prof_sys_thread_name;
 extern bool opt_prof_stats;
 /* Accessed via prof_active_[gs]et{_unlocked,}(). */
-extern bool prof_active;
+extern bool prof_active_state;
 /* Accessed via prof_gdump_[gs]et{_unlocked,}(). */
 extern bool prof_gdump_val;

View File

@@ -12,7 +12,7 @@ prof_active_assert() {
	 * If opt_prof is off, then prof_active must always be off, regardless
	 * of whether prof_active_mtx is in effect or not.
	 */
-	assert(opt_prof || !prof_active);
+	assert(opt_prof || !prof_active_state);
 }
 JEMALLOC_ALWAYS_INLINE bool
@@ -24,7 +24,7 @@ prof_active_get_unlocked(void) {
	 * prof_active in the fast path, so there are no guarantees regarding
	 * how long it will take for all threads to notice state changes.
	 */
-	return prof_active;
+	return prof_active_state;
 }
 JEMALLOC_ALWAYS_INLINE bool

View File

@@ -7,9 +7,9 @@ bin_info_t bin_infos[SC_NBINS];
 static void
 bin_infos_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
-    bin_info_t bin_infos[SC_NBINS]) {
+    bin_info_t infos[SC_NBINS]) {
 	for (unsigned i = 0; i < SC_NBINS; i++) {
-		bin_info_t *bin_info = &bin_infos[i];
+		bin_info_t *bin_info = &infos[i];
 		sc_t *sc = &sc_data->sc[i];
 		bin_info->reg_size = ((size_t)1U << sc->lg_base)
 		    + ((size_t)sc->ndelta << sc->lg_delta);

View File

@@ -356,14 +356,14 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh) {
 }
 bool
-ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
+ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *ckh_hash,
     ckh_keycomp_t *keycomp) {
 	bool ret;
 	size_t mincells, usize;
 	unsigned lg_mincells;
 	assert(minitems > 0);
-	assert(hash != NULL);
+	assert(ckh_hash != NULL);
 	assert(keycomp != NULL);
 #ifdef CKH_COUNT
@@ -392,7 +392,7 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
 	}
 	ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
 	ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
-	ckh->hash = hash;
+	ckh->hash = ckh_hash;
 	ckh->keycomp = keycomp;
 	usize = sz_sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);

View File

@@ -3622,9 +3622,9 @@ stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib,
			MUTEX_PROF_RESET(arena->tcache_ql_mtx);
			MUTEX_PROF_RESET(arena->base->mtx);
-			for (szind_t i = 0; i < SC_NBINS; i++) {
-				for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
-					bin_t *bin = arena_get_bin(arena, i, j);
+			for (szind_t j = 0; j < SC_NBINS; j++) {
+				for (unsigned k = 0; k < bin_infos[j].n_shards; k++) {
+					bin_t *bin = arena_get_bin(arena, j, k);
					MUTEX_PROF_RESET(bin->lock);
				}
			}

View File

@@ -748,9 +748,8 @@ extent_grow_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
		extent_gdump_add(tsdn, edata);
	}
	if (zero && !edata_zeroed_get(edata)) {
-		void *addr = edata_base_get(edata);
-		size_t size = edata_size_get(edata);
-		ehooks_zero(tsdn, ehooks, addr, size);
+		ehooks_zero(tsdn, ehooks, edata_base_get(edata),
+		    edata_size_get(edata));
	}
	return edata;
label_err:

View File

@@ -1212,12 +1212,12 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
			CONF_HANDLE_BOOL(opt_abort_conf, "abort_conf")
			CONF_HANDLE_BOOL(opt_trust_madvise, "trust_madvise")
			if (strncmp("metadata_thp", k, klen) == 0) {
-				int i;
+				int m;
				bool match = false;
-				for (i = 0; i < metadata_thp_mode_limit; i++) {
-					if (strncmp(metadata_thp_mode_names[i],
+				for (m = 0; m < metadata_thp_mode_limit; m++) {
+					if (strncmp(metadata_thp_mode_names[m],
					    v, vlen) == 0) {
-						opt_metadata_thp = i;
+						opt_metadata_thp = m;
						match = true;
						break;
					}
@@ -1230,18 +1230,18 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
			}
			CONF_HANDLE_BOOL(opt_retain, "retain")
			if (strncmp("dss", k, klen) == 0) {
-				int i;
+				int m;
				bool match = false;
-				for (i = 0; i < dss_prec_limit; i++) {
-					if (strncmp(dss_prec_names[i], v, vlen)
+				for (m = 0; m < dss_prec_limit; m++) {
+					if (strncmp(dss_prec_names[m], v, vlen)
					    == 0) {
-						if (extent_dss_prec_set(i)) {
+						if (extent_dss_prec_set(m)) {
							CONF_ERROR(
							    "Error setting dss",
							    k, klen, v, vlen);
						} else {
							opt_dss =
-							    dss_prec_names[i];
+							    dss_prec_names[m];
							match = true;
							break;
						}
@@ -1428,16 +1428,16 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
			if (strncmp("percpu_arena", k, klen) == 0) {
				bool match = false;
-				for (int i = percpu_arena_mode_names_base; i <
-				    percpu_arena_mode_names_limit; i++) {
-					if (strncmp(percpu_arena_mode_names[i],
+				for (int m = percpu_arena_mode_names_base; m <
+				    percpu_arena_mode_names_limit; m++) {
+					if (strncmp(percpu_arena_mode_names[m],
					    v, vlen) == 0) {
						if (!have_percpu_arena) {
							CONF_ERROR(
							    "No getcpu support",
							    k, klen, v, vlen);
						}
-						opt_percpu_arena = i;
+						opt_percpu_arena = m;
						match = true;
						break;
					}
@@ -1622,15 +1622,15 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
			}
			if (CONF_MATCH("thp")) {
				bool match = false;
-				for (int i = 0; i < thp_mode_names_limit; i++) {
-					if (strncmp(thp_mode_names[i],v, vlen)
+				for (int m = 0; m < thp_mode_names_limit; m++) {
+					if (strncmp(thp_mode_names[m],v, vlen)
					    == 0) {
						if (!have_madvise_huge && !have_memcntl) {
							CONF_ERROR(
							    "No THP support",
							    k, klen, v, vlen);
						}
-						opt_thp = i;
+						opt_thp = m;
						match = true;
						break;
					}

View File

@@ -31,8 +31,9 @@ pa_central_init(pa_central_t *central, base_t *base, bool hpa,
 bool
 pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, pa_central_t *central,
     emap_t *emap, base_t *base, unsigned ind, pa_shard_stats_t *stats,
-    malloc_mutex_t *stats_mtx, nstime_t *cur_time, size_t oversize_threshold,
-    ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms) {
+    malloc_mutex_t *stats_mtx, nstime_t *cur_time,
+    size_t pac_oversize_threshold, ssize_t dirty_decay_ms,
+    ssize_t muzzy_decay_ms) {
	/* This will change eventually, but for now it should hold. */
	assert(base_ind_get(base) == ind);
	if (edata_cache_init(&shard->edata_cache, base)) {
@@ -40,7 +41,7 @@ pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, pa_central_t *central,
	}
	if (pac_init(tsdn, &shard->pac, base, emap, &shard->edata_cache,
-	    cur_time, oversize_threshold, dirty_decay_ms, muzzy_decay_ms,
+	    cur_time, pac_oversize_threshold, dirty_decay_ms, muzzy_decay_ms,
	    &stats->pac_stats, stats_mtx)) {
		return true;
	}

View File

@@ -36,9 +36,9 @@ pac_decay_data_get(pac_t *pac, extent_state_t state,
 bool
 pac_init(tsdn_t *tsdn, pac_t *pac, base_t *base, emap_t *emap,
-    edata_cache_t *edata_cache, nstime_t *cur_time, size_t oversize_threshold,
-    ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms, pac_stats_t *pac_stats,
-    malloc_mutex_t *stats_mtx) {
+    edata_cache_t *edata_cache, nstime_t *cur_time,
+    size_t pac_oversize_threshold, ssize_t dirty_decay_ms,
+    ssize_t muzzy_decay_ms, pac_stats_t *pac_stats, malloc_mutex_t *stats_mtx) {
	unsigned ind = base_ind_get(base);
	/*
	 * Delay coalescing for dirty extents despite the disruptive effect on
@@ -73,7 +73,7 @@ pac_init(tsdn_t *tsdn, pac_t *pac, base_t *base, emap_t *emap,
	    WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) {
		return true;
	}
-	atomic_store_zu(&pac->oversize_threshold, oversize_threshold,
+	atomic_store_zu(&pac->oversize_threshold, pac_oversize_threshold,
	    ATOMIC_RELAXED);
	if (decay_init(&pac->decay_dirty, cur_time, dirty_decay_ms)) {
		return true;

View File

@@ -43,7 +43,7 @@ static counter_accum_t prof_idump_accumulated;
  * Initialized as opt_prof_active, and accessed via
  * prof_active_[gs]et{_unlocked,}().
  */
-bool prof_active;
+bool prof_active_state;
 static malloc_mutex_t prof_active_mtx;
 /*
@@ -416,7 +416,7 @@ prof_active_get(tsdn_t *tsdn) {
	prof_active_assert();
	malloc_mutex_lock(tsdn, &prof_active_mtx);
-	prof_active_current = prof_active;
+	prof_active_current = prof_active_state;
	malloc_mutex_unlock(tsdn, &prof_active_mtx);
	return prof_active_current;
 }
@@ -427,8 +427,8 @@ prof_active_set(tsdn_t *tsdn, bool active) {
	prof_active_assert();
	malloc_mutex_lock(tsdn, &prof_active_mtx);
-	prof_active_old = prof_active;
-	prof_active = active;
+	prof_active_old = prof_active_state;
+	prof_active_state = active;
	malloc_mutex_unlock(tsdn, &prof_active_mtx);
	prof_active_assert();
	return prof_active_old;
@@ -629,7 +629,7 @@ prof_boot2(tsd_t *tsd, base_t *base) {
	if (opt_prof) {
		lg_prof_sample = opt_lg_prof_sample;
		prof_unbias_map_init();
-		prof_active = opt_prof_active;
+		prof_active_state = opt_prof_active;
		prof_gdump_val = opt_prof_gdump;
		prof_thread_active_init = opt_prof_thread_active_init;

View File

@@ -397,7 +397,7 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt) {
 /* Used in unit tests. */
 static prof_tdata_t *
-prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
+prof_tdata_count_iter(prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata,
     void *arg) {
	size_t *tdata_count = (size_t *)arg;
@@ -895,7 +895,7 @@ struct prof_tdata_merge_iter_arg_s {
 };
 static prof_tdata_t *
-prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
+prof_tdata_merge_iter(prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata,
     void *opaque) {
	prof_tdata_merge_iter_arg_t *arg =
	    (prof_tdata_merge_iter_arg_t *)opaque;
@@ -939,7 +939,7 @@ prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
 }
 static prof_tdata_t *
-prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
+prof_tdata_dump_iter(prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata,
     void *opaque) {
	if (!tdata->dumping) {
		return NULL;
@@ -1278,7 +1278,7 @@ prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata) {
 }
 static prof_tdata_t *
-prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
+prof_tdata_reset_iter(prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata,
     void *arg) {
	tsdn_t *tsdn = (tsdn_t *)arg;

View File

@@ -561,18 +561,18 @@ prof_dump_filename(tsd_t *tsd, char *filename, char v, uint64_t vseq) {
	cassert(config_prof);
	assert(tsd_reentrancy_level_get(tsd) == 0);
-	const char *prof_prefix = prof_prefix_get(tsd_tsdn(tsd));
+	const char *prefix = prof_prefix_get(tsd_tsdn(tsd));
	if (vseq != VSEQ_INVALID) {
		/* "<prefix>.<pid>.<seq>.v<vseq>.heap" */
		malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
-		    "%s.%d.%"FMTu64".%c%"FMTu64".heap", prof_prefix,
-		    prof_getpid(), prof_dump_seq, v, vseq);
+		    "%s.%d.%"FMTu64".%c%"FMTu64".heap", prefix, prof_getpid(),
+		    prof_dump_seq, v, vseq);
	} else {
		/* "<prefix>.<pid>.<seq>.<v>.heap" */
		malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
-		    "%s.%d.%"FMTu64".%c.heap", prof_prefix,
-		    prof_getpid(), prof_dump_seq, v);
+		    "%s.%d.%"FMTu64".%c.heap", prefix, prof_getpid(),
+		    prof_dump_seq, v);
	}
	prof_dump_seq++;
 }

View File

@@ -1606,15 +1606,15 @@ stats_general_print(emitter_t *emitter) {
		    "Maximum thread-cached size class", emitter_type_size, &sv);
	}
-	unsigned nbins;
-	CTL_GET("arenas.nbins", &nbins, unsigned);
+	unsigned arenas_nbins;
+	CTL_GET("arenas.nbins", &arenas_nbins, unsigned);
	emitter_kv(emitter, "nbins", "Number of bin size classes",
-	    emitter_type_unsigned, &nbins);
+	    emitter_type_unsigned, &arenas_nbins);
-	unsigned nhbins;
-	CTL_GET("arenas.nhbins", &nhbins, unsigned);
+	unsigned arenas_nhbins;
+	CTL_GET("arenas.nhbins", &arenas_nhbins, unsigned);
	emitter_kv(emitter, "nhbins", "Number of thread-cache bin size classes",
-	    emitter_type_unsigned, &nhbins);
+	    emitter_type_unsigned, &arenas_nhbins);
	/*
	 * We do enough mallctls in a loop that we actually want to omit them
@@ -1624,7 +1624,7 @@ stats_general_print(emitter_t *emitter) {
	emitter_json_array_kv_begin(emitter, "bin");
	size_t arenas_bin_mib[CTL_MAX_DEPTH];
	CTL_LEAF_PREPARE(arenas_bin_mib, 0, "arenas.bin");
-	for (unsigned i = 0; i < nbins; i++) {
+	for (unsigned i = 0; i < arenas_nbins; i++) {
		arenas_bin_mib[2] = i;
		emitter_json_object_begin(emitter);

View File

@@ -45,9 +45,9 @@ do_allocs(size_t sz, size_t cnt, bool do_frees) {
 int
 main(void) {
-	size_t lg_prof_sample = 19;
-	int err = mallctl("prof.reset", NULL, NULL, (void *)&lg_prof_sample,
-	    sizeof(lg_prof_sample));
+	size_t lg_prof_sample_local = 19;
+	int err = mallctl("prof.reset", NULL, NULL,
+	    (void *)&lg_prof_sample_local, sizeof(lg_prof_sample_local));
	assert(err == 0);
	prof_backtrace_hook_set(mock_backtrace);

View File

@@ -87,8 +87,8 @@ test_fail(const char *format, ...) {
 }
 static const char *
-test_status_string(test_status_t test_status) {
-	switch (test_status) {
+test_status_string(test_status_t current_status) {
+	switch (current_status) {
	case test_status_pass: return "pass";
	case test_status_skip: return "skip";
	case test_status_fail: return "fail";

View File

@@ -258,12 +258,12 @@ TEST_BEGIN(test_arena_destroy_hooks_default) {
	/* Try arena.create with custom hooks. */
	size_t sz = sizeof(extent_hooks_t *);
-	extent_hooks_t *default_hooks;
-	expect_d_eq(mallctl("arena.0.extent_hooks", (void *)&default_hooks,
+	extent_hooks_t *a0_default_hooks;
+	expect_d_eq(mallctl("arena.0.extent_hooks", (void *)&a0_default_hooks,
	    &sz, NULL, 0), 0, "Unexpected mallctlnametomib() failure");
	/* Default impl; but wrapped as "customized". */
-	extent_hooks_t new_hooks = *default_hooks;
+	extent_hooks_t new_hooks = *a0_default_hooks;
	extent_hooks_t *hook = &new_hooks;
	sz = sizeof(unsigned);
	expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz,

View File

@@ -45,7 +45,7 @@
		 */ \
		atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \
		success = false; \
-		for (int i = 0; i < 10 && !success; i++) { \
+		for (int retry = 0; retry < 10 && !success; retry++) { \
			expected = val2; \
			success = atomic_compare_exchange_weak_##ta(&atom, \
			    &expected, val3, ATOMIC_RELAXED, ATOMIC_RELAXED); \

View File

@@ -1,7 +1,7 @@
 #include "test/jemalloc_test.h"
 #define BATCH_MAX ((1U << 16) + 1024)
-static void *ptrs[BATCH_MAX];
+static void *global_ptrs[BATCH_MAX];
 #define PAGE_ALIGNED(ptr) (((uintptr_t)ptr & PAGE_MASK) == 0)
@@ -122,13 +122,14 @@ test_wrapper(size_t size, size_t alignment, bool zero, unsigned arena_flag) {
			}
			size_t batch = base + (size_t)j;
			assert(batch < BATCH_MAX);
-			size_t filled = batch_alloc_wrapper(ptrs, batch, size,
-			    flags);
+			size_t filled = batch_alloc_wrapper(global_ptrs, batch,
+			    size, flags);
			assert_zu_eq(filled, batch, "");
-			verify_batch_basic(tsd, ptrs, batch, usize, zero);
-			verify_batch_locality(tsd, ptrs, batch, usize, arena,
-			    nregs);
-			release_batch(ptrs, batch, usize);
+			verify_batch_basic(tsd, global_ptrs, batch, usize,
+			    zero);
+			verify_batch_locality(tsd, global_ptrs, batch, usize,
+			    arena, nregs);
+			release_batch(global_ptrs, batch, usize);
		}
	}
@@ -163,16 +164,16 @@ TEST_BEGIN(test_batch_alloc_large) {
	size_t size = SC_LARGE_MINCLASS;
	for (size_t batch = 0; batch < 4; ++batch) {
		assert(batch < BATCH_MAX);
-		size_t filled = batch_alloc(ptrs, batch, size, 0);
+		size_t filled = batch_alloc(global_ptrs, batch, size, 0);
		assert_zu_eq(filled, batch, "");
-		release_batch(ptrs, batch, size);
+		release_batch(global_ptrs, batch, size);
	}
	size = tcache_maxclass + 1;
	for (size_t batch = 0; batch < 4; ++batch) {
		assert(batch < BATCH_MAX);
-		size_t filled = batch_alloc(ptrs, batch, size, 0);
+		size_t filled = batch_alloc(global_ptrs, batch, size, 0);
		assert_zu_eq(filled, batch, "");
-		release_batch(ptrs, batch, size);
+		release_batch(global_ptrs, batch, size);
	}
 }
 TEST_END

View File

@@ -69,10 +69,10 @@ test_data_t *init_test_data(ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms) {
	    &hpa_hooks_default);
	assert_false(err, "");
-	const size_t oversize_threshold = 8 * 1024 * 1024;
+	const size_t pa_oversize_threshold = 8 * 1024 * 1024;
	err = pa_shard_init(TSDN_NULL, &test_data->shard, &test_data->central,
	    &test_data->emap, test_data->base, /* ind */ 1, &test_data->stats,
-	    &test_data->stats_mtx, &time, oversize_threshold, dirty_decay_ms,
+	    &test_data->stats_mtx, &time, pa_oversize_threshold, dirty_decay_ms,
	    muzzy_decay_ms);
	assert_false(err, "");

View File

@@ -26,14 +26,14 @@ TEST_BEGIN(test_idump) {
	bool active;
	void *p;
-	const char *prefix = TEST_PREFIX;
+	const char *test_prefix = TEST_PREFIX;
	test_skip_if(!config_prof);
	active = true;
-	expect_d_eq(mallctl("prof.prefix", NULL, NULL, (void *)&prefix,
-	    sizeof(prefix)), 0,
+	expect_d_eq(mallctl("prof.prefix", NULL, NULL, (void *)&test_prefix,
+	    sizeof(test_prefix)), 0,
	    "Unexpected mallctl failure while overwriting dump prefix");
	expect_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,

View File

@@ -15,7 +15,7 @@ confirm_prof_setup() {
	    "opt_prof_recent_alloc_max not set correctly");
	/* Dynamics */
-	assert_true(prof_active, "prof_active not on");
+	assert_true(prof_active_state, "prof_active not on");
	assert_zd_eq(prof_recent_alloc_max_ctl_read(), OPT_ALLOC_MAX,
	    "prof_recent_alloc_max not set correctly");
 }

View File

@@ -21,26 +21,25 @@ set_prof_active(bool active) {
 static size_t
 get_lg_prof_sample(void) {
-	size_t lg_prof_sample;
+	size_t ret;
	size_t sz = sizeof(size_t);
-	expect_d_eq(mallctl("prof.lg_sample", (void *)&lg_prof_sample, &sz,
-	    NULL, 0), 0,
+	expect_d_eq(mallctl("prof.lg_sample", (void *)&ret, &sz, NULL, 0), 0,
	    "Unexpected mallctl failure while reading profiling sample rate");
-	return lg_prof_sample;
+	return ret;
 }
 static void
-do_prof_reset(size_t lg_prof_sample) {
+do_prof_reset(size_t lg_prof_sample_input) {
	expect_d_eq(mallctl("prof.reset", NULL, NULL,
-	    (void *)&lg_prof_sample, sizeof(size_t)), 0,
+	    (void *)&lg_prof_sample_input, sizeof(size_t)), 0,
	    "Unexpected mallctl failure while resetting profile data");
-	expect_zu_eq(lg_prof_sample, get_lg_prof_sample(),
+	expect_zu_eq(lg_prof_sample_input, get_lg_prof_sample(),
	    "Expected profile sample rate change");
 }
 TEST_BEGIN(test_prof_reset_basic) {
-	size_t lg_prof_sample_orig, lg_prof_sample, lg_prof_sample_next;
+	size_t lg_prof_sample_orig, lg_prof_sample_cur, lg_prof_sample_next;
	size_t sz;
	unsigned i;
@@ -52,8 +51,8 @@ TEST_BEGIN(test_prof_reset_basic) {
	    "Unexpected mallctl failure while reading profiling sample rate");
	expect_zu_eq(lg_prof_sample_orig, 0,
	    "Unexpected profiling sample rate");
-	lg_prof_sample = get_lg_prof_sample();
-	expect_zu_eq(lg_prof_sample_orig, lg_prof_sample,
+	lg_prof_sample_cur = get_lg_prof_sample();
+	expect_zu_eq(lg_prof_sample_orig, lg_prof_sample_cur,
	    "Unexpected disagreement between \"opt.lg_prof_sample\" and "
	    "\"prof.lg_sample\"");
@@ -61,8 +60,8 @@ TEST_BEGIN(test_prof_reset_basic) {
	for (i = 0; i < 2; i++) {
		expect_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0,
		    "Unexpected mallctl failure while resetting profile data");
-		lg_prof_sample = get_lg_prof_sample();
-		expect_zu_eq(lg_prof_sample_orig, lg_prof_sample,
+		lg_prof_sample_cur = get_lg_prof_sample();
+		expect_zu_eq(lg_prof_sample_orig, lg_prof_sample_cur,
		    "Unexpected profile sample rate change");
	}
@@ -70,15 +69,15 @@ TEST_BEGIN(test_prof_reset_basic) {
	lg_prof_sample_next = 1;
	for (i = 0; i < 2; i++) {
		do_prof_reset(lg_prof_sample_next);
-		lg_prof_sample = get_lg_prof_sample();
-		expect_zu_eq(lg_prof_sample, lg_prof_sample_next,
+		lg_prof_sample_cur = get_lg_prof_sample();
+		expect_zu_eq(lg_prof_sample_cur, lg_prof_sample_next,
		    "Expected profile sample rate change");
		lg_prof_sample_next = lg_prof_sample_orig;
	}
	/* Make sure the test code restored prof.lg_sample. */
-	lg_prof_sample = get_lg_prof_sample();
-	expect_zu_eq(lg_prof_sample_orig, lg_prof_sample,
+	lg_prof_sample_cur = get_lg_prof_sample();
+	expect_zu_eq(lg_prof_sample_orig, lg_prof_sample_cur,
	    "Unexpected disagreement between \"opt.lg_prof_sample\" and "
	    "\"prof.lg_sample\"");
 }

View File

@@ -964,7 +964,7 @@ do_update_search_test(int nnodes, int ntrees, int nremovals,
			tree_insert(&tree, &nodes[j]);
		}
	}
-	for (int i = 0; i < nupdates; i++) {
+	for (int j = 0; j < nupdates; j++) {
		uint32_t ind = gen_rand32_range(sfmt, nnodes);
		nodes[ind].specialness = 1 - nodes[ind].specialness;
		tree_update_summaries(&tree, &nodes[ind]);

View File

@@ -13,43 +13,43 @@ static atomic_u_t nfinished;
 static unsigned
 do_arena_create(extent_hooks_t *h) {
-	unsigned arena_ind;
-	size_t sz = sizeof(unsigned);
-	expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz,
+	unsigned new_arena_ind;
+	size_t ind_sz = sizeof(unsigned);
+	expect_d_eq(mallctl("arenas.create", (void *)&new_arena_ind, &ind_sz,
	    (void *)(h != NULL ? &h : NULL), (h != NULL ? sizeof(h) : 0)), 0,
	    "Unexpected mallctl() failure");
-	return arena_ind;
+	return new_arena_ind;
 }
 static void
-do_arena_destroy(unsigned arena_ind) {
+do_arena_destroy(unsigned ind) {
	size_t mib[3];
	size_t miblen;
	miblen = sizeof(mib)/sizeof(size_t);
	expect_d_eq(mallctlnametomib("arena.0.destroy", mib, &miblen), 0,
	    "Unexpected mallctlnametomib() failure");
-	mib[1] = (size_t)arena_ind;
+	mib[1] = (size_t)ind;
	expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
	    "Unexpected mallctlbymib() failure");
 }
 static void
 do_refresh(void) {
-	uint64_t epoch = 1;
-	expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
-	    sizeof(epoch)), 0, "Unexpected mallctl() failure");
+	uint64_t refresh_epoch = 1;
+	expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&refresh_epoch,
+	    sizeof(refresh_epoch)), 0, "Unexpected mallctl() failure");
 }
 static size_t
-do_get_size_impl(const char *cmd, unsigned arena_ind) {
+do_get_size_impl(const char *cmd, unsigned ind) {
	size_t mib[4];
	size_t miblen = sizeof(mib) / sizeof(size_t);
	size_t z = sizeof(size_t);
	expect_d_eq(mallctlnametomib(cmd, mib, &miblen),
	    0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
-	mib[2] = arena_ind;
+	mib[2] = ind;
	size_t size;
	expect_d_eq(mallctlbymib(mib, miblen, (void *)&size, &z, NULL, 0),
	    0, "Unexpected mallctlbymib([\"%s\"], ...) failure", cmd);
@@ -58,13 +58,13 @@ do_get_size_impl(const char *cmd, unsigned arena_ind) {
 }
 static size_t
-do_get_active(unsigned arena_ind) {
-	return do_get_size_impl("stats.arenas.0.pactive", arena_ind) * PAGE;
+do_get_active(unsigned ind) {
+	return do_get_size_impl("stats.arenas.0.pactive", ind) * PAGE;
 }
 static size_t
-do_get_mapped(unsigned arena_ind) {
-	return do_get_size_impl("stats.arenas.0.mapped", arena_ind);
+do_get_mapped(unsigned ind) {
+	return do_get_size_impl("stats.arenas.0.mapped", ind);
 }
 static void *