Fix shadowed variable usage.
Verified with EXTRA_CFLAGS=-Wshadow.
parent bd70d8fc0f
commit d038160f3b
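
(Illustration only, not part of this change: a minimal sketch of the kind of declaration -Wshadow reports, compiled standalone with something like "cc -c -Wshadow shadow_demo.c"; the file and function names are hypothetical.)

	/* shadow_demo.c: the inner 'total' shadows the outer one, which -Wshadow warns about. */
	int
	shadow_demo(int n) {
		int total = 0;
		for (int i = 0; i < n; i++) {
			int total = i;	/* warning: declaration shadows a previous local */
			(void)total;
		}
		return total;
	}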
@@ -20,7 +20,6 @@ percpu_arena_update(tsd_t *tsd, unsigned cpu) {
 		tcache_t *tcache = tcache_get(tsd);
 		if (tcache != NULL) {
 			tcache_slow_t *tcache_slow = tsd_tcache_slowp_get(tsd);
-			tcache_t *tcache = tsd_tcachep_get(tsd);
 			tcache_arena_reassociate(tsd_tsdn(tsd), tcache_slow,
 			    tcache, newarena);
 		}
@@ -18,7 +18,7 @@ typedef struct {
 #endif
 } nstime_t;
 
-static const nstime_t zero = NSTIME_ZERO_INITIALIZER;
+static const nstime_t nstime_zero = NSTIME_ZERO_INITIALIZER;
 
 void nstime_init(nstime_t *time, uint64_t ns);
 void nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec);
@@ -60,12 +60,12 @@ extern const char *prof_time_res_mode_names[];
 
 JEMALLOC_ALWAYS_INLINE void
 nstime_init_zero(nstime_t *time) {
-	nstime_copy(time, &zero);
+	nstime_copy(time, &nstime_zero);
 }
 
 JEMALLOC_ALWAYS_INLINE bool
 nstime_equals_zero(nstime_t *time) {
-	int diff = nstime_compare(time, &zero);
+	int diff = nstime_compare(time, &nstime_zero);
 	assert(diff >= 0);
 	return diff == 0;
 }
@@ -32,7 +32,7 @@ extern bool opt_prof_sys_thread_name;
 extern bool opt_prof_stats;
 
 /* Accessed via prof_active_[gs]et{_unlocked,}(). */
-extern bool prof_active;
+extern bool prof_active_state;
 
 /* Accessed via prof_gdump_[gs]et{_unlocked,}(). */
 extern bool prof_gdump_val;
@@ -12,7 +12,7 @@ prof_active_assert() {
 	 * If opt_prof is off, then prof_active must always be off, regardless
 	 * of whether prof_active_mtx is in effect or not.
 	 */
-	assert(opt_prof || !prof_active);
+	assert(opt_prof || !prof_active_state);
 }
 
 JEMALLOC_ALWAYS_INLINE bool
@@ -24,7 +24,7 @@ prof_active_get_unlocked(void) {
 	 * prof_active in the fast path, so there are no guarantees regarding
 	 * how long it will take for all threads to notice state changes.
 	 */
-	return prof_active;
+	return prof_active_state;
 }
 
 JEMALLOC_ALWAYS_INLINE bool
@@ -7,9 +7,9 @@ bin_info_t bin_infos[SC_NBINS];
 
 static void
 bin_infos_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
-    bin_info_t bin_infos[SC_NBINS]) {
+    bin_info_t infos[SC_NBINS]) {
 	for (unsigned i = 0; i < SC_NBINS; i++) {
-		bin_info_t *bin_info = &bin_infos[i];
+		bin_info_t *bin_info = &infos[i];
 		sc_t *sc = &sc_data->sc[i];
 		bin_info->reg_size = ((size_t)1U << sc->lg_base)
 		    + ((size_t)sc->ndelta << sc->lg_delta);
@@ -356,14 +356,14 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh) {
 }
 
 bool
-ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
+ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *ckh_hash,
     ckh_keycomp_t *keycomp) {
 	bool ret;
 	size_t mincells, usize;
 	unsigned lg_mincells;
 
 	assert(minitems > 0);
-	assert(hash != NULL);
+	assert(ckh_hash != NULL);
 	assert(keycomp != NULL);
 
 #ifdef CKH_COUNT
@@ -392,7 +392,7 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
 	}
 	ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
 	ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
-	ckh->hash = hash;
+	ckh->hash = ckh_hash;
 	ckh->keycomp = keycomp;
 
 	usize = sz_sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
@@ -3622,9 +3622,9 @@ stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib,
 		MUTEX_PROF_RESET(arena->tcache_ql_mtx);
 		MUTEX_PROF_RESET(arena->base->mtx);
 
-		for (szind_t i = 0; i < SC_NBINS; i++) {
-			for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
-				bin_t *bin = arena_get_bin(arena, i, j);
+		for (szind_t j = 0; j < SC_NBINS; j++) {
+			for (unsigned k = 0; k < bin_infos[j].n_shards; k++) {
+				bin_t *bin = arena_get_bin(arena, j, k);
 				MUTEX_PROF_RESET(bin->lock);
 			}
 		}
@@ -748,9 +748,8 @@ extent_grow_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
 		extent_gdump_add(tsdn, edata);
 	}
 	if (zero && !edata_zeroed_get(edata)) {
-		void *addr = edata_base_get(edata);
-		size_t size = edata_size_get(edata);
-		ehooks_zero(tsdn, ehooks, addr, size);
+		ehooks_zero(tsdn, ehooks, edata_base_get(edata),
+		    edata_size_get(edata));
 	}
 	return edata;
 label_err:
@@ -1212,12 +1212,12 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
 			CONF_HANDLE_BOOL(opt_abort_conf, "abort_conf")
 			CONF_HANDLE_BOOL(opt_trust_madvise, "trust_madvise")
 			if (strncmp("metadata_thp", k, klen) == 0) {
-				int i;
+				int m;
 				bool match = false;
-				for (i = 0; i < metadata_thp_mode_limit; i++) {
-					if (strncmp(metadata_thp_mode_names[i],
+				for (m = 0; m < metadata_thp_mode_limit; m++) {
+					if (strncmp(metadata_thp_mode_names[m],
 					    v, vlen) == 0) {
-						opt_metadata_thp = i;
+						opt_metadata_thp = m;
 						match = true;
 						break;
 					}
@@ -1230,18 +1230,18 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
 			}
 			CONF_HANDLE_BOOL(opt_retain, "retain")
 			if (strncmp("dss", k, klen) == 0) {
-				int i;
+				int m;
 				bool match = false;
-				for (i = 0; i < dss_prec_limit; i++) {
-					if (strncmp(dss_prec_names[i], v, vlen)
+				for (m = 0; m < dss_prec_limit; m++) {
+					if (strncmp(dss_prec_names[m], v, vlen)
 					    == 0) {
-						if (extent_dss_prec_set(i)) {
+						if (extent_dss_prec_set(m)) {
 							CONF_ERROR(
 							    "Error setting dss",
 							    k, klen, v, vlen);
 						} else {
 							opt_dss =
-							    dss_prec_names[i];
+							    dss_prec_names[m];
 							match = true;
 							break;
 						}
@@ -1428,16 +1428,16 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
 
 			if (strncmp("percpu_arena", k, klen) == 0) {
 				bool match = false;
-				for (int i = percpu_arena_mode_names_base; i <
-				    percpu_arena_mode_names_limit; i++) {
-					if (strncmp(percpu_arena_mode_names[i],
+				for (int m = percpu_arena_mode_names_base; m <
+				    percpu_arena_mode_names_limit; m++) {
+					if (strncmp(percpu_arena_mode_names[m],
 					    v, vlen) == 0) {
 						if (!have_percpu_arena) {
 							CONF_ERROR(
 							    "No getcpu support",
 							    k, klen, v, vlen);
 						}
-						opt_percpu_arena = i;
+						opt_percpu_arena = m;
 						match = true;
 						break;
 					}
@@ -1622,15 +1622,15 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
 			}
 			if (CONF_MATCH("thp")) {
 				bool match = false;
-				for (int i = 0; i < thp_mode_names_limit; i++) {
-					if (strncmp(thp_mode_names[i],v, vlen)
+				for (int m = 0; m < thp_mode_names_limit; m++) {
+					if (strncmp(thp_mode_names[m],v, vlen)
 					    == 0) {
 						if (!have_madvise_huge && !have_memcntl) {
 							CONF_ERROR(
 							    "No THP support",
 							    k, klen, v, vlen);
 						}
-						opt_thp = i;
+						opt_thp = m;
 						match = true;
 						break;
 					}

src/pa.c (7 lines changed)
@@ -31,8 +31,9 @@ pa_central_init(pa_central_t *central, base_t *base, bool hpa,
 bool
 pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, pa_central_t *central,
     emap_t *emap, base_t *base, unsigned ind, pa_shard_stats_t *stats,
-    malloc_mutex_t *stats_mtx, nstime_t *cur_time, size_t oversize_threshold,
-    ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms) {
+    malloc_mutex_t *stats_mtx, nstime_t *cur_time,
+    size_t pac_oversize_threshold, ssize_t dirty_decay_ms,
+    ssize_t muzzy_decay_ms) {
 	/* This will change eventually, but for now it should hold. */
 	assert(base_ind_get(base) == ind);
 	if (edata_cache_init(&shard->edata_cache, base)) {
@@ -40,7 +41,7 @@ pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, pa_central_t *central,
 	}
 
 	if (pac_init(tsdn, &shard->pac, base, emap, &shard->edata_cache,
-	    cur_time, oversize_threshold, dirty_decay_ms, muzzy_decay_ms,
+	    cur_time, pac_oversize_threshold, dirty_decay_ms, muzzy_decay_ms,
 	    &stats->pac_stats, stats_mtx)) {
 		return true;
 	}
@@ -36,9 +36,9 @@ pac_decay_data_get(pac_t *pac, extent_state_t state,
 
 bool
 pac_init(tsdn_t *tsdn, pac_t *pac, base_t *base, emap_t *emap,
-    edata_cache_t *edata_cache, nstime_t *cur_time, size_t oversize_threshold,
-    ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms, pac_stats_t *pac_stats,
-    malloc_mutex_t *stats_mtx) {
+    edata_cache_t *edata_cache, nstime_t *cur_time,
+    size_t pac_oversize_threshold, ssize_t dirty_decay_ms,
+    ssize_t muzzy_decay_ms, pac_stats_t *pac_stats, malloc_mutex_t *stats_mtx) {
 	unsigned ind = base_ind_get(base);
 	/*
 	 * Delay coalescing for dirty extents despite the disruptive effect on
@@ -73,7 +73,7 @@ pac_init(tsdn_t *tsdn, pac_t *pac, base_t *base, emap_t *emap,
 	    WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) {
 		return true;
 	}
-	atomic_store_zu(&pac->oversize_threshold, oversize_threshold,
+	atomic_store_zu(&pac->oversize_threshold, pac_oversize_threshold,
 	    ATOMIC_RELAXED);
 	if (decay_init(&pac->decay_dirty, cur_time, dirty_decay_ms)) {
 		return true;

src/prof.c (10 lines changed)
@@ -43,7 +43,7 @@ static counter_accum_t prof_idump_accumulated;
  * Initialized as opt_prof_active, and accessed via
  * prof_active_[gs]et{_unlocked,}().
  */
-bool prof_active;
+bool prof_active_state;
 static malloc_mutex_t prof_active_mtx;
 
 /*
@@ -416,7 +416,7 @@ prof_active_get(tsdn_t *tsdn) {
 
 	prof_active_assert();
 	malloc_mutex_lock(tsdn, &prof_active_mtx);
-	prof_active_current = prof_active;
+	prof_active_current = prof_active_state;
 	malloc_mutex_unlock(tsdn, &prof_active_mtx);
 	return prof_active_current;
 }
@@ -427,8 +427,8 @@ prof_active_set(tsdn_t *tsdn, bool active) {
 
 	prof_active_assert();
 	malloc_mutex_lock(tsdn, &prof_active_mtx);
-	prof_active_old = prof_active;
-	prof_active = active;
+	prof_active_old = prof_active_state;
+	prof_active_state = active;
 	malloc_mutex_unlock(tsdn, &prof_active_mtx);
 	prof_active_assert();
 	return prof_active_old;
@@ -629,7 +629,7 @@ prof_boot2(tsd_t *tsd, base_t *base) {
 	if (opt_prof) {
 		lg_prof_sample = opt_lg_prof_sample;
 		prof_unbias_map_init();
-		prof_active = opt_prof_active;
+		prof_active_state = opt_prof_active;
 		prof_gdump_val = opt_prof_gdump;
 		prof_thread_active_init = opt_prof_thread_active_init;
 
@@ -397,7 +397,7 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt) {
 
 /* Used in unit tests. */
 static prof_tdata_t *
-prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
+prof_tdata_count_iter(prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata,
     void *arg) {
 	size_t *tdata_count = (size_t *)arg;
 
@@ -895,7 +895,7 @@ struct prof_tdata_merge_iter_arg_s {
 };
 
 static prof_tdata_t *
-prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
+prof_tdata_merge_iter(prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata,
     void *opaque) {
 	prof_tdata_merge_iter_arg_t *arg =
 	    (prof_tdata_merge_iter_arg_t *)opaque;
@@ -939,7 +939,7 @@ prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
 }
 
 static prof_tdata_t *
-prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
+prof_tdata_dump_iter(prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata,
     void *opaque) {
 	if (!tdata->dumping) {
 		return NULL;
@@ -1278,7 +1278,7 @@ prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata) {
 }
 
 static prof_tdata_t *
-prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
+prof_tdata_reset_iter(prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata,
     void *arg) {
 	tsdn_t *tsdn = (tsdn_t *)arg;
 
@@ -561,18 +561,18 @@ prof_dump_filename(tsd_t *tsd, char *filename, char v, uint64_t vseq) {
 	cassert(config_prof);
 
 	assert(tsd_reentrancy_level_get(tsd) == 0);
-	const char *prof_prefix = prof_prefix_get(tsd_tsdn(tsd));
+	const char *prefix = prof_prefix_get(tsd_tsdn(tsd));
 
 	if (vseq != VSEQ_INVALID) {
 		/* "<prefix>.<pid>.<seq>.v<vseq>.heap" */
 		malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
-		    "%s.%d.%"FMTu64".%c%"FMTu64".heap", prof_prefix,
-		    prof_getpid(), prof_dump_seq, v, vseq);
+		    "%s.%d.%"FMTu64".%c%"FMTu64".heap", prefix, prof_getpid(),
+		    prof_dump_seq, v, vseq);
 	} else {
 		/* "<prefix>.<pid>.<seq>.<v>.heap" */
 		malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
-		    "%s.%d.%"FMTu64".%c.heap", prof_prefix,
-		    prof_getpid(), prof_dump_seq, v);
+		    "%s.%d.%"FMTu64".%c.heap", prefix, prof_getpid(),
+		    prof_dump_seq, v);
 	}
 	prof_dump_seq++;
 }

src/stats.c (14 lines changed)
@@ -1606,15 +1606,15 @@ stats_general_print(emitter_t *emitter) {
 		    "Maximum thread-cached size class", emitter_type_size, &sv);
 	}
 
-	unsigned nbins;
-	CTL_GET("arenas.nbins", &nbins, unsigned);
+	unsigned arenas_nbins;
+	CTL_GET("arenas.nbins", &arenas_nbins, unsigned);
 	emitter_kv(emitter, "nbins", "Number of bin size classes",
-	    emitter_type_unsigned, &nbins);
+	    emitter_type_unsigned, &arenas_nbins);
 
-	unsigned nhbins;
-	CTL_GET("arenas.nhbins", &nhbins, unsigned);
+	unsigned arenas_nhbins;
+	CTL_GET("arenas.nhbins", &arenas_nhbins, unsigned);
 	emitter_kv(emitter, "nhbins", "Number of thread-cache bin size classes",
-	    emitter_type_unsigned, &nhbins);
+	    emitter_type_unsigned, &arenas_nhbins);
 
 	/*
 	 * We do enough mallctls in a loop that we actually want to omit them
@@ -1624,7 +1624,7 @@ stats_general_print(emitter_t *emitter) {
 	emitter_json_array_kv_begin(emitter, "bin");
 	size_t arenas_bin_mib[CTL_MAX_DEPTH];
 	CTL_LEAF_PREPARE(arenas_bin_mib, 0, "arenas.bin");
-	for (unsigned i = 0; i < nbins; i++) {
+	for (unsigned i = 0; i < arenas_nbins; i++) {
 		arenas_bin_mib[2] = i;
 		emitter_json_object_begin(emitter);
 
@@ -45,9 +45,9 @@ do_allocs(size_t sz, size_t cnt, bool do_frees) {
 
 int
 main(void) {
-	size_t lg_prof_sample = 19;
-	int err = mallctl("prof.reset", NULL, NULL, (void *)&lg_prof_sample,
-	    sizeof(lg_prof_sample));
+	size_t lg_prof_sample_local = 19;
+	int err = mallctl("prof.reset", NULL, NULL,
+	    (void *)&lg_prof_sample_local, sizeof(lg_prof_sample_local));
 	assert(err == 0);
 
 	prof_backtrace_hook_set(mock_backtrace);
@@ -87,8 +87,8 @@ test_fail(const char *format, ...) {
 }
 
 static const char *
-test_status_string(test_status_t test_status) {
-	switch (test_status) {
+test_status_string(test_status_t current_status) {
+	switch (current_status) {
 	case test_status_pass: return "pass";
 	case test_status_skip: return "skip";
 	case test_status_fail: return "fail";
@@ -258,12 +258,12 @@ TEST_BEGIN(test_arena_destroy_hooks_default) {
 
 	/* Try arena.create with custom hooks. */
 	size_t sz = sizeof(extent_hooks_t *);
-	extent_hooks_t *default_hooks;
-	expect_d_eq(mallctl("arena.0.extent_hooks", (void *)&default_hooks,
+	extent_hooks_t *a0_default_hooks;
+	expect_d_eq(mallctl("arena.0.extent_hooks", (void *)&a0_default_hooks,
 	    &sz, NULL, 0), 0, "Unexpected mallctlnametomib() failure");
 
 	/* Default impl; but wrapped as "customized". */
-	extent_hooks_t new_hooks = *default_hooks;
+	extent_hooks_t new_hooks = *a0_default_hooks;
 	extent_hooks_t *hook = &new_hooks;
 	sz = sizeof(unsigned);
 	expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz,
@@ -45,7 +45,7 @@
 	 */ \
 	atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \
 	success = false; \
-	for (int i = 0; i < 10 && !success; i++) { \
+	for (int retry = 0; retry < 10 && !success; retry++) { \
 		expected = val2; \
 		success = atomic_compare_exchange_weak_##ta(&atom, \
 		    &expected, val3, ATOMIC_RELAXED, ATOMIC_RELAXED); \
@@ -1,7 +1,7 @@
 #include "test/jemalloc_test.h"
 
 #define BATCH_MAX ((1U << 16) + 1024)
-static void *ptrs[BATCH_MAX];
+static void *global_ptrs[BATCH_MAX];
 
 #define PAGE_ALIGNED(ptr) (((uintptr_t)ptr & PAGE_MASK) == 0)
 
@@ -122,13 +122,14 @@ test_wrapper(size_t size, size_t alignment, bool zero, unsigned arena_flag) {
 			}
 			size_t batch = base + (size_t)j;
 			assert(batch < BATCH_MAX);
-			size_t filled = batch_alloc_wrapper(ptrs, batch, size,
-			    flags);
+			size_t filled = batch_alloc_wrapper(global_ptrs, batch,
+			    size, flags);
 			assert_zu_eq(filled, batch, "");
-			verify_batch_basic(tsd, ptrs, batch, usize, zero);
-			verify_batch_locality(tsd, ptrs, batch, usize, arena,
-			    nregs);
-			release_batch(ptrs, batch, usize);
+			verify_batch_basic(tsd, global_ptrs, batch, usize,
+			    zero);
+			verify_batch_locality(tsd, global_ptrs, batch, usize,
+			    arena, nregs);
+			release_batch(global_ptrs, batch, usize);
 		}
 	}
 
@@ -163,16 +164,16 @@ TEST_BEGIN(test_batch_alloc_large) {
 	size_t size = SC_LARGE_MINCLASS;
 	for (size_t batch = 0; batch < 4; ++batch) {
 		assert(batch < BATCH_MAX);
-		size_t filled = batch_alloc(ptrs, batch, size, 0);
+		size_t filled = batch_alloc(global_ptrs, batch, size, 0);
 		assert_zu_eq(filled, batch, "");
-		release_batch(ptrs, batch, size);
+		release_batch(global_ptrs, batch, size);
 	}
 	size = tcache_maxclass + 1;
 	for (size_t batch = 0; batch < 4; ++batch) {
 		assert(batch < BATCH_MAX);
-		size_t filled = batch_alloc(ptrs, batch, size, 0);
+		size_t filled = batch_alloc(global_ptrs, batch, size, 0);
 		assert_zu_eq(filled, batch, "");
-		release_batch(ptrs, batch, size);
+		release_batch(global_ptrs, batch, size);
 	}
 }
 TEST_END
@@ -69,10 +69,10 @@ test_data_t *init_test_data(ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms) {
 	    &hpa_hooks_default);
 	assert_false(err, "");
 
-	const size_t oversize_threshold = 8 * 1024 * 1024;
+	const size_t pa_oversize_threshold = 8 * 1024 * 1024;
 	err = pa_shard_init(TSDN_NULL, &test_data->shard, &test_data->central,
 	    &test_data->emap, test_data->base, /* ind */ 1, &test_data->stats,
-	    &test_data->stats_mtx, &time, oversize_threshold, dirty_decay_ms,
+	    &test_data->stats_mtx, &time, pa_oversize_threshold, dirty_decay_ms,
 	    muzzy_decay_ms);
 	assert_false(err, "");
 
@@ -26,14 +26,14 @@ TEST_BEGIN(test_idump) {
 	bool active;
 	void *p;
 
-	const char *prefix = TEST_PREFIX;
+	const char *test_prefix = TEST_PREFIX;
 
 	test_skip_if(!config_prof);
 
 	active = true;
 
-	expect_d_eq(mallctl("prof.prefix", NULL, NULL, (void *)&prefix,
-	    sizeof(prefix)), 0,
+	expect_d_eq(mallctl("prof.prefix", NULL, NULL, (void *)&test_prefix,
+	    sizeof(test_prefix)), 0,
 	    "Unexpected mallctl failure while overwriting dump prefix");
 
 	expect_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
@@ -15,7 +15,7 @@ confirm_prof_setup() {
 	    "opt_prof_recent_alloc_max not set correctly");
 
 	/* Dynamics */
-	assert_true(prof_active, "prof_active not on");
+	assert_true(prof_active_state, "prof_active not on");
 	assert_zd_eq(prof_recent_alloc_max_ctl_read(), OPT_ALLOC_MAX,
 	    "prof_recent_alloc_max not set correctly");
 }
@@ -21,26 +21,25 @@ set_prof_active(bool active) {
 
 static size_t
 get_lg_prof_sample(void) {
-	size_t lg_prof_sample;
+	size_t ret;
 	size_t sz = sizeof(size_t);
 
-	expect_d_eq(mallctl("prof.lg_sample", (void *)&lg_prof_sample, &sz,
-	    NULL, 0), 0,
+	expect_d_eq(mallctl("prof.lg_sample", (void *)&ret, &sz, NULL, 0), 0,
 	    "Unexpected mallctl failure while reading profiling sample rate");
-	return lg_prof_sample;
+	return ret;
 }
 
 static void
-do_prof_reset(size_t lg_prof_sample) {
+do_prof_reset(size_t lg_prof_sample_input) {
 	expect_d_eq(mallctl("prof.reset", NULL, NULL,
-	    (void *)&lg_prof_sample, sizeof(size_t)), 0,
+	    (void *)&lg_prof_sample_input, sizeof(size_t)), 0,
 	    "Unexpected mallctl failure while resetting profile data");
-	expect_zu_eq(lg_prof_sample, get_lg_prof_sample(),
+	expect_zu_eq(lg_prof_sample_input, get_lg_prof_sample(),
 	    "Expected profile sample rate change");
 }
 
 TEST_BEGIN(test_prof_reset_basic) {
-	size_t lg_prof_sample_orig, lg_prof_sample, lg_prof_sample_next;
+	size_t lg_prof_sample_orig, lg_prof_sample_cur, lg_prof_sample_next;
 	size_t sz;
 	unsigned i;
 
@@ -52,8 +51,8 @@ TEST_BEGIN(test_prof_reset_basic) {
 	    "Unexpected mallctl failure while reading profiling sample rate");
 	expect_zu_eq(lg_prof_sample_orig, 0,
 	    "Unexpected profiling sample rate");
-	lg_prof_sample = get_lg_prof_sample();
-	expect_zu_eq(lg_prof_sample_orig, lg_prof_sample,
+	lg_prof_sample_cur = get_lg_prof_sample();
+	expect_zu_eq(lg_prof_sample_orig, lg_prof_sample_cur,
 	    "Unexpected disagreement between \"opt.lg_prof_sample\" and "
 	    "\"prof.lg_sample\"");
 
@@ -61,8 +60,8 @@ TEST_BEGIN(test_prof_reset_basic) {
 	for (i = 0; i < 2; i++) {
 		expect_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0,
 		    "Unexpected mallctl failure while resetting profile data");
-		lg_prof_sample = get_lg_prof_sample();
-		expect_zu_eq(lg_prof_sample_orig, lg_prof_sample,
+		lg_prof_sample_cur = get_lg_prof_sample();
+		expect_zu_eq(lg_prof_sample_orig, lg_prof_sample_cur,
 		    "Unexpected profile sample rate change");
 	}
 
@@ -70,15 +69,15 @@ TEST_BEGIN(test_prof_reset_basic) {
 	lg_prof_sample_next = 1;
 	for (i = 0; i < 2; i++) {
 		do_prof_reset(lg_prof_sample_next);
-		lg_prof_sample = get_lg_prof_sample();
-		expect_zu_eq(lg_prof_sample, lg_prof_sample_next,
+		lg_prof_sample_cur = get_lg_prof_sample();
+		expect_zu_eq(lg_prof_sample_cur, lg_prof_sample_next,
 		    "Expected profile sample rate change");
 		lg_prof_sample_next = lg_prof_sample_orig;
 	}
 
 	/* Make sure the test code restored prof.lg_sample. */
-	lg_prof_sample = get_lg_prof_sample();
-	expect_zu_eq(lg_prof_sample_orig, lg_prof_sample,
+	lg_prof_sample_cur = get_lg_prof_sample();
+	expect_zu_eq(lg_prof_sample_orig, lg_prof_sample_cur,
 	    "Unexpected disagreement between \"opt.lg_prof_sample\" and "
 	    "\"prof.lg_sample\"");
 }
@@ -964,7 +964,7 @@ do_update_search_test(int nnodes, int ntrees, int nremovals,
 			tree_insert(&tree, &nodes[j]);
 		}
 	}
-	for (int i = 0; i < nupdates; i++) {
+	for (int j = 0; j < nupdates; j++) {
 		uint32_t ind = gen_rand32_range(sfmt, nnodes);
 		nodes[ind].specialness = 1 - nodes[ind].specialness;
 		tree_update_summaries(&tree, &nodes[ind]);
@@ -13,43 +13,43 @@ static atomic_u_t nfinished;
 
 static unsigned
 do_arena_create(extent_hooks_t *h) {
-	unsigned arena_ind;
-	size_t sz = sizeof(unsigned);
-	expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz,
+	unsigned new_arena_ind;
+	size_t ind_sz = sizeof(unsigned);
+	expect_d_eq(mallctl("arenas.create", (void *)&new_arena_ind, &ind_sz,
 	    (void *)(h != NULL ? &h : NULL), (h != NULL ? sizeof(h) : 0)), 0,
 	    "Unexpected mallctl() failure");
-	return arena_ind;
+	return new_arena_ind;
 }
 
 static void
-do_arena_destroy(unsigned arena_ind) {
+do_arena_destroy(unsigned ind) {
 	size_t mib[3];
 	size_t miblen;
 
 	miblen = sizeof(mib)/sizeof(size_t);
 	expect_d_eq(mallctlnametomib("arena.0.destroy", mib, &miblen), 0,
 	    "Unexpected mallctlnametomib() failure");
-	mib[1] = (size_t)arena_ind;
+	mib[1] = (size_t)ind;
 	expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
 	    "Unexpected mallctlbymib() failure");
 }
 
 static void
 do_refresh(void) {
-	uint64_t epoch = 1;
-	expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
-	    sizeof(epoch)), 0, "Unexpected mallctl() failure");
+	uint64_t refresh_epoch = 1;
+	expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&refresh_epoch,
+	    sizeof(refresh_epoch)), 0, "Unexpected mallctl() failure");
 }
 
 static size_t
-do_get_size_impl(const char *cmd, unsigned arena_ind) {
+do_get_size_impl(const char *cmd, unsigned ind) {
 	size_t mib[4];
 	size_t miblen = sizeof(mib) / sizeof(size_t);
 	size_t z = sizeof(size_t);
 
 	expect_d_eq(mallctlnametomib(cmd, mib, &miblen),
 	    0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
-	mib[2] = arena_ind;
+	mib[2] = ind;
 	size_t size;
 	expect_d_eq(mallctlbymib(mib, miblen, (void *)&size, &z, NULL, 0),
 	    0, "Unexpected mallctlbymib([\"%s\"], ...) failure", cmd);
@@ -58,13 +58,13 @@ do_get_size_impl(const char *cmd, unsigned arena_ind) {
 }
 
 static size_t
-do_get_active(unsigned arena_ind) {
-	return do_get_size_impl("stats.arenas.0.pactive", arena_ind) * PAGE;
+do_get_active(unsigned ind) {
+	return do_get_size_impl("stats.arenas.0.pactive", ind) * PAGE;
 }
 
 static size_t
-do_get_mapped(unsigned arena_ind) {
-	return do_get_size_impl("stats.arenas.0.mapped", arena_ind);
+do_get_mapped(unsigned ind) {
+	return do_get_size_impl("stats.arenas.0.mapped", ind);
 }
 
 static void *