Fix -Wshadow warnings by renaming locals and parameters that shadowed globals or outer-scope names (e.g. `bin_infos` -> `infos`, `hash` -> `ckh_hash`, `prof_active` -> `prof_active_state`, inner loop index `i` -> `j`/`m`).
Verified by building with EXTRA_CFLAGS=-Wshadow.
This commit is contained in:
@@ -7,9 +7,9 @@ bin_info_t bin_infos[SC_NBINS];
|
||||
|
||||
static void
|
||||
bin_infos_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
|
||||
bin_info_t bin_infos[SC_NBINS]) {
|
||||
bin_info_t infos[SC_NBINS]) {
|
||||
for (unsigned i = 0; i < SC_NBINS; i++) {
|
||||
bin_info_t *bin_info = &bin_infos[i];
|
||||
bin_info_t *bin_info = &infos[i];
|
||||
sc_t *sc = &sc_data->sc[i];
|
||||
bin_info->reg_size = ((size_t)1U << sc->lg_base)
|
||||
+ ((size_t)sc->ndelta << sc->lg_delta);
|
||||
|
@@ -356,14 +356,14 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh) {
|
||||
}
|
||||
|
||||
bool
|
||||
ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
|
||||
ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *ckh_hash,
|
||||
ckh_keycomp_t *keycomp) {
|
||||
bool ret;
|
||||
size_t mincells, usize;
|
||||
unsigned lg_mincells;
|
||||
|
||||
assert(minitems > 0);
|
||||
assert(hash != NULL);
|
||||
assert(ckh_hash != NULL);
|
||||
assert(keycomp != NULL);
|
||||
|
||||
#ifdef CKH_COUNT
|
||||
@@ -392,7 +392,7 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
|
||||
}
|
||||
ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
|
||||
ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
|
||||
ckh->hash = hash;
|
||||
ckh->hash = ckh_hash;
|
||||
ckh->keycomp = keycomp;
|
||||
|
||||
usize = sz_sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
|
||||
|
@@ -3622,9 +3622,9 @@ stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib,
|
||||
MUTEX_PROF_RESET(arena->tcache_ql_mtx);
|
||||
MUTEX_PROF_RESET(arena->base->mtx);
|
||||
|
||||
for (szind_t i = 0; i < SC_NBINS; i++) {
|
||||
for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
|
||||
bin_t *bin = arena_get_bin(arena, i, j);
|
||||
for (szind_t j = 0; j < SC_NBINS; j++) {
|
||||
for (unsigned k = 0; k < bin_infos[j].n_shards; k++) {
|
||||
bin_t *bin = arena_get_bin(arena, j, k);
|
||||
MUTEX_PROF_RESET(bin->lock);
|
||||
}
|
||||
}
|
||||
|
@@ -748,9 +748,8 @@ extent_grow_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
|
||||
extent_gdump_add(tsdn, edata);
|
||||
}
|
||||
if (zero && !edata_zeroed_get(edata)) {
|
||||
void *addr = edata_base_get(edata);
|
||||
size_t size = edata_size_get(edata);
|
||||
ehooks_zero(tsdn, ehooks, addr, size);
|
||||
ehooks_zero(tsdn, ehooks, edata_base_get(edata),
|
||||
edata_size_get(edata));
|
||||
}
|
||||
return edata;
|
||||
label_err:
|
||||
|
@@ -1212,12 +1212,12 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
|
||||
CONF_HANDLE_BOOL(opt_abort_conf, "abort_conf")
|
||||
CONF_HANDLE_BOOL(opt_trust_madvise, "trust_madvise")
|
||||
if (strncmp("metadata_thp", k, klen) == 0) {
|
||||
int i;
|
||||
int m;
|
||||
bool match = false;
|
||||
for (i = 0; i < metadata_thp_mode_limit; i++) {
|
||||
if (strncmp(metadata_thp_mode_names[i],
|
||||
for (m = 0; m < metadata_thp_mode_limit; m++) {
|
||||
if (strncmp(metadata_thp_mode_names[m],
|
||||
v, vlen) == 0) {
|
||||
opt_metadata_thp = i;
|
||||
opt_metadata_thp = m;
|
||||
match = true;
|
||||
break;
|
||||
}
|
||||
@@ -1230,18 +1230,18 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
|
||||
}
|
||||
CONF_HANDLE_BOOL(opt_retain, "retain")
|
||||
if (strncmp("dss", k, klen) == 0) {
|
||||
int i;
|
||||
int m;
|
||||
bool match = false;
|
||||
for (i = 0; i < dss_prec_limit; i++) {
|
||||
if (strncmp(dss_prec_names[i], v, vlen)
|
||||
for (m = 0; m < dss_prec_limit; m++) {
|
||||
if (strncmp(dss_prec_names[m], v, vlen)
|
||||
== 0) {
|
||||
if (extent_dss_prec_set(i)) {
|
||||
if (extent_dss_prec_set(m)) {
|
||||
CONF_ERROR(
|
||||
"Error setting dss",
|
||||
k, klen, v, vlen);
|
||||
} else {
|
||||
opt_dss =
|
||||
dss_prec_names[i];
|
||||
dss_prec_names[m];
|
||||
match = true;
|
||||
break;
|
||||
}
|
||||
@@ -1428,16 +1428,16 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
|
||||
|
||||
if (strncmp("percpu_arena", k, klen) == 0) {
|
||||
bool match = false;
|
||||
for (int i = percpu_arena_mode_names_base; i <
|
||||
percpu_arena_mode_names_limit; i++) {
|
||||
if (strncmp(percpu_arena_mode_names[i],
|
||||
for (int m = percpu_arena_mode_names_base; m <
|
||||
percpu_arena_mode_names_limit; m++) {
|
||||
if (strncmp(percpu_arena_mode_names[m],
|
||||
v, vlen) == 0) {
|
||||
if (!have_percpu_arena) {
|
||||
CONF_ERROR(
|
||||
"No getcpu support",
|
||||
k, klen, v, vlen);
|
||||
}
|
||||
opt_percpu_arena = i;
|
||||
opt_percpu_arena = m;
|
||||
match = true;
|
||||
break;
|
||||
}
|
||||
@@ -1622,15 +1622,15 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
|
||||
}
|
||||
if (CONF_MATCH("thp")) {
|
||||
bool match = false;
|
||||
for (int i = 0; i < thp_mode_names_limit; i++) {
|
||||
if (strncmp(thp_mode_names[i],v, vlen)
|
||||
for (int m = 0; m < thp_mode_names_limit; m++) {
|
||||
if (strncmp(thp_mode_names[m],v, vlen)
|
||||
== 0) {
|
||||
if (!have_madvise_huge && !have_memcntl) {
|
||||
CONF_ERROR(
|
||||
"No THP support",
|
||||
k, klen, v, vlen);
|
||||
}
|
||||
opt_thp = i;
|
||||
opt_thp = m;
|
||||
match = true;
|
||||
break;
|
||||
}
|
||||
|
7
src/pa.c
7
src/pa.c
@@ -31,8 +31,9 @@ pa_central_init(pa_central_t *central, base_t *base, bool hpa,
|
||||
bool
|
||||
pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, pa_central_t *central,
|
||||
emap_t *emap, base_t *base, unsigned ind, pa_shard_stats_t *stats,
|
||||
malloc_mutex_t *stats_mtx, nstime_t *cur_time, size_t oversize_threshold,
|
||||
ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms) {
|
||||
malloc_mutex_t *stats_mtx, nstime_t *cur_time,
|
||||
size_t pac_oversize_threshold, ssize_t dirty_decay_ms,
|
||||
ssize_t muzzy_decay_ms) {
|
||||
/* This will change eventually, but for now it should hold. */
|
||||
assert(base_ind_get(base) == ind);
|
||||
if (edata_cache_init(&shard->edata_cache, base)) {
|
||||
@@ -40,7 +41,7 @@ pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, pa_central_t *central,
|
||||
}
|
||||
|
||||
if (pac_init(tsdn, &shard->pac, base, emap, &shard->edata_cache,
|
||||
cur_time, oversize_threshold, dirty_decay_ms, muzzy_decay_ms,
|
||||
cur_time, pac_oversize_threshold, dirty_decay_ms, muzzy_decay_ms,
|
||||
&stats->pac_stats, stats_mtx)) {
|
||||
return true;
|
||||
}
|
||||
|
@@ -36,9 +36,9 @@ pac_decay_data_get(pac_t *pac, extent_state_t state,
|
||||
|
||||
bool
|
||||
pac_init(tsdn_t *tsdn, pac_t *pac, base_t *base, emap_t *emap,
|
||||
edata_cache_t *edata_cache, nstime_t *cur_time, size_t oversize_threshold,
|
||||
ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms, pac_stats_t *pac_stats,
|
||||
malloc_mutex_t *stats_mtx) {
|
||||
edata_cache_t *edata_cache, nstime_t *cur_time,
|
||||
size_t pac_oversize_threshold, ssize_t dirty_decay_ms,
|
||||
ssize_t muzzy_decay_ms, pac_stats_t *pac_stats, malloc_mutex_t *stats_mtx) {
|
||||
unsigned ind = base_ind_get(base);
|
||||
/*
|
||||
* Delay coalescing for dirty extents despite the disruptive effect on
|
||||
@@ -73,7 +73,7 @@ pac_init(tsdn_t *tsdn, pac_t *pac, base_t *base, emap_t *emap,
|
||||
WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) {
|
||||
return true;
|
||||
}
|
||||
atomic_store_zu(&pac->oversize_threshold, oversize_threshold,
|
||||
atomic_store_zu(&pac->oversize_threshold, pac_oversize_threshold,
|
||||
ATOMIC_RELAXED);
|
||||
if (decay_init(&pac->decay_dirty, cur_time, dirty_decay_ms)) {
|
||||
return true;
|
||||
|
10
src/prof.c
10
src/prof.c
@@ -43,7 +43,7 @@ static counter_accum_t prof_idump_accumulated;
|
||||
* Initialized as opt_prof_active, and accessed via
|
||||
* prof_active_[gs]et{_unlocked,}().
|
||||
*/
|
||||
bool prof_active;
|
||||
bool prof_active_state;
|
||||
static malloc_mutex_t prof_active_mtx;
|
||||
|
||||
/*
|
||||
@@ -416,7 +416,7 @@ prof_active_get(tsdn_t *tsdn) {
|
||||
|
||||
prof_active_assert();
|
||||
malloc_mutex_lock(tsdn, &prof_active_mtx);
|
||||
prof_active_current = prof_active;
|
||||
prof_active_current = prof_active_state;
|
||||
malloc_mutex_unlock(tsdn, &prof_active_mtx);
|
||||
return prof_active_current;
|
||||
}
|
||||
@@ -427,8 +427,8 @@ prof_active_set(tsdn_t *tsdn, bool active) {
|
||||
|
||||
prof_active_assert();
|
||||
malloc_mutex_lock(tsdn, &prof_active_mtx);
|
||||
prof_active_old = prof_active;
|
||||
prof_active = active;
|
||||
prof_active_old = prof_active_state;
|
||||
prof_active_state = active;
|
||||
malloc_mutex_unlock(tsdn, &prof_active_mtx);
|
||||
prof_active_assert();
|
||||
return prof_active_old;
|
||||
@@ -629,7 +629,7 @@ prof_boot2(tsd_t *tsd, base_t *base) {
|
||||
if (opt_prof) {
|
||||
lg_prof_sample = opt_lg_prof_sample;
|
||||
prof_unbias_map_init();
|
||||
prof_active = opt_prof_active;
|
||||
prof_active_state = opt_prof_active;
|
||||
prof_gdump_val = opt_prof_gdump;
|
||||
prof_thread_active_init = opt_prof_thread_active_init;
|
||||
|
||||
|
@@ -397,7 +397,7 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt) {
|
||||
|
||||
/* Used in unit tests. */
|
||||
static prof_tdata_t *
|
||||
prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
|
||||
prof_tdata_count_iter(prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata,
|
||||
void *arg) {
|
||||
size_t *tdata_count = (size_t *)arg;
|
||||
|
||||
@@ -895,7 +895,7 @@ struct prof_tdata_merge_iter_arg_s {
|
||||
};
|
||||
|
||||
static prof_tdata_t *
|
||||
prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
|
||||
prof_tdata_merge_iter(prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata,
|
||||
void *opaque) {
|
||||
prof_tdata_merge_iter_arg_t *arg =
|
||||
(prof_tdata_merge_iter_arg_t *)opaque;
|
||||
@@ -939,7 +939,7 @@ prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
|
||||
}
|
||||
|
||||
static prof_tdata_t *
|
||||
prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
|
||||
prof_tdata_dump_iter(prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata,
|
||||
void *opaque) {
|
||||
if (!tdata->dumping) {
|
||||
return NULL;
|
||||
@@ -1278,7 +1278,7 @@ prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata) {
|
||||
}
|
||||
|
||||
static prof_tdata_t *
|
||||
prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
|
||||
prof_tdata_reset_iter(prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata,
|
||||
void *arg) {
|
||||
tsdn_t *tsdn = (tsdn_t *)arg;
|
||||
|
||||
|
@@ -561,18 +561,18 @@ prof_dump_filename(tsd_t *tsd, char *filename, char v, uint64_t vseq) {
|
||||
cassert(config_prof);
|
||||
|
||||
assert(tsd_reentrancy_level_get(tsd) == 0);
|
||||
const char *prof_prefix = prof_prefix_get(tsd_tsdn(tsd));
|
||||
const char *prefix = prof_prefix_get(tsd_tsdn(tsd));
|
||||
|
||||
if (vseq != VSEQ_INVALID) {
|
||||
/* "<prefix>.<pid>.<seq>.v<vseq>.heap" */
|
||||
malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
|
||||
"%s.%d.%"FMTu64".%c%"FMTu64".heap", prof_prefix,
|
||||
prof_getpid(), prof_dump_seq, v, vseq);
|
||||
"%s.%d.%"FMTu64".%c%"FMTu64".heap", prefix, prof_getpid(),
|
||||
prof_dump_seq, v, vseq);
|
||||
} else {
|
||||
/* "<prefix>.<pid>.<seq>.<v>.heap" */
|
||||
malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
|
||||
"%s.%d.%"FMTu64".%c.heap", prof_prefix,
|
||||
prof_getpid(), prof_dump_seq, v);
|
||||
"%s.%d.%"FMTu64".%c.heap", prefix, prof_getpid(),
|
||||
prof_dump_seq, v);
|
||||
}
|
||||
prof_dump_seq++;
|
||||
}
|
||||
|
14
src/stats.c
14
src/stats.c
@@ -1606,15 +1606,15 @@ stats_general_print(emitter_t *emitter) {
|
||||
"Maximum thread-cached size class", emitter_type_size, &sv);
|
||||
}
|
||||
|
||||
unsigned nbins;
|
||||
CTL_GET("arenas.nbins", &nbins, unsigned);
|
||||
unsigned arenas_nbins;
|
||||
CTL_GET("arenas.nbins", &arenas_nbins, unsigned);
|
||||
emitter_kv(emitter, "nbins", "Number of bin size classes",
|
||||
emitter_type_unsigned, &nbins);
|
||||
emitter_type_unsigned, &arenas_nbins);
|
||||
|
||||
unsigned nhbins;
|
||||
CTL_GET("arenas.nhbins", &nhbins, unsigned);
|
||||
unsigned arenas_nhbins;
|
||||
CTL_GET("arenas.nhbins", &arenas_nhbins, unsigned);
|
||||
emitter_kv(emitter, "nhbins", "Number of thread-cache bin size classes",
|
||||
emitter_type_unsigned, &nhbins);
|
||||
emitter_type_unsigned, &arenas_nhbins);
|
||||
|
||||
/*
|
||||
* We do enough mallctls in a loop that we actually want to omit them
|
||||
@@ -1624,7 +1624,7 @@ stats_general_print(emitter_t *emitter) {
|
||||
emitter_json_array_kv_begin(emitter, "bin");
|
||||
size_t arenas_bin_mib[CTL_MAX_DEPTH];
|
||||
CTL_LEAF_PREPARE(arenas_bin_mib, 0, "arenas.bin");
|
||||
for (unsigned i = 0; i < nbins; i++) {
|
||||
for (unsigned i = 0; i < arenas_nbins; i++) {
|
||||
arenas_bin_mib[2] = i;
|
||||
emitter_json_object_begin(emitter);
|
||||
|
||||
|
Reference in New Issue
Block a user