Silence miscellaneous 64-to-32-bit data loss warnings.
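All of these casts follow one pattern: a 64-bit size_t (or ptrdiff_t) value that is provably small gets assigned to a 32-bit unsigned (or int), and compilers such as MSVC (warning C4267) or clang with -Wshorten-64-to-32 flag the implicit narrowing. A minimal standalone sketch of the pattern and the fix (illustrative, not code from this commit):

    #include <assert.h>
    #include <limits.h>
    #include <stddef.h>

    int
    main(void)
    {
    	size_t wide = 42;	/* 64-bit value known to be small */
    	unsigned narrow;

    	assert(wide <= UINT_MAX);
    	narrow = (unsigned)wide;	/* explicit cast documents intent
    					 * and silences the warning */
    	return (narrow == 42 ? 0 : 1);
    }

In every hunk below the wide value is already bounded; the util.c hunk additionally adds an assertion before its cast.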
src/arena.c (21 changed lines)
@@ -308,7 +308,7 @@ arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
 	assert(run->nfree > 0);
 	assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info));
 
-	regind = bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
+	regind = (unsigned)bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
 	miscelm = arena_run_to_miscelm(run);
 	rpages = arena_miscelm_to_rpages(miscelm);
 	ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset +
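Why this cast is lossless: bitmap_sfu() returns a size_t, but the value is the index of a free region within a run, which jemalloc bounds by RUN_MAXREGS (far below UINT_MAX). A toy stand-in (hypothetical helper, illustrative only):

    #include <assert.h>
    #include <limits.h>
    #include <stddef.h>

    /* Toy version of "set first unset": find, set, and return the index
     * of the first 0 bit.  The real bitmap_sfu() also returns size_t,
     * but the result is a small region index. */
    static size_t
    toy_sfu(unsigned long *bits)
    {
    	size_t i;

    	for (i = 0; i < sizeof(*bits) * CHAR_BIT; i++) {
    		if ((*bits & (1UL << i)) == 0) {
    			*bits |= (1UL << i);
    			return (i);
    		}
    	}
    	return (i);
    }

    int
    main(void)
    {
    	unsigned long bits = 0x7;	/* bits 0..2 already set */
    	unsigned regind;

    	regind = (unsigned)toy_sfu(&bits);	/* index 3; fits easily */
    	assert(regind == 3);
    	return (0);
    }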
@@ -3411,18 +3411,19 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info)
 	 * size).
 	 */
 	try_run_size = PAGE;
-	try_nregs = try_run_size / bin_info->reg_size;
+	try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
 	do {
 		perfect_run_size = try_run_size;
 		perfect_nregs = try_nregs;
 
 		try_run_size += PAGE;
-		try_nregs = try_run_size / bin_info->reg_size;
+		try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
 	} while (perfect_run_size != perfect_nregs * bin_info->reg_size);
 	assert(perfect_nregs <= RUN_MAXREGS);
 
 	actual_run_size = perfect_run_size;
-	actual_nregs = (actual_run_size - pad_size) / bin_info->reg_interval;
+	actual_nregs = (uint32_t)((actual_run_size - pad_size) /
+	    bin_info->reg_interval);
 
 	/*
 	 * Redzones can require enough padding that not even a single region can
@@ -3434,8 +3435,8 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info)
 		assert(config_fill && unlikely(opt_redzone));
 
 		actual_run_size += PAGE;
-		actual_nregs = (actual_run_size - pad_size) /
-		    bin_info->reg_interval;
+		actual_nregs = (uint32_t)((actual_run_size - pad_size) /
+		    bin_info->reg_interval);
 	}
 
 	/*
@@ -3443,8 +3444,8 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info)
 	 */
 	while (actual_run_size > arena_maxrun) {
 		actual_run_size -= PAGE;
-		actual_nregs = (actual_run_size - pad_size) /
-		    bin_info->reg_interval;
+		actual_nregs = (uint32_t)((actual_run_size - pad_size) /
+		    bin_info->reg_interval);
 	}
 	assert(actual_nregs > 0);
 	assert(actual_run_size == s2u(actual_run_size));
@@ -3452,8 +3453,8 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info)
 	/* Copy final settings. */
 	bin_info->run_size = actual_run_size;
 	bin_info->nregs = actual_nregs;
-	bin_info->reg0_offset = actual_run_size - (actual_nregs *
-	    bin_info->reg_interval) - pad_size + bin_info->redzone_size;
+	bin_info->reg0_offset = (uint32_t)(actual_run_size - (actual_nregs *
+	    bin_info->reg_interval) - pad_size + bin_info->redzone_size);
 
 	if (actual_run_size > small_maxrun)
 		small_maxrun = actual_run_size;
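A concrete replay of the sizing loop above may help. This standalone sketch (assuming a 4 KiB page and a 96-byte region size; neither value comes from this diff) shows the loop growing the trial run one page at a time until the run divides evenly into regions, so the region counts stay small enough for uint32_t:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE	((size_t)4096)	/* assumed page size */

    int
    main(void)
    {
    	size_t reg_size = 96;	/* assumed region size */
    	size_t try_run_size, perfect_run_size;
    	uint32_t try_nregs, perfect_nregs;

    	try_run_size = PAGE;
    	try_nregs = (uint32_t)(try_run_size / reg_size);
    	do {
    		perfect_run_size = try_run_size;
    		perfect_nregs = try_nregs;

    		try_run_size += PAGE;
    		try_nregs = (uint32_t)(try_run_size / reg_size);
    	} while (perfect_run_size != perfect_nregs * reg_size);

    	/* 12288 = 96 * 128: three pages with no leftover space. */
    	printf("%zu bytes, %u regions\n", perfect_run_size, perfect_nregs);
    	assert(perfect_run_size == 12288 && perfect_nregs == 128);
    	return (0);
    }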
src/chunk.c

@@ -732,8 +732,8 @@ chunk_boot(void)
 
 	if (have_dss && chunk_dss_boot())
 		return (true);
-	if (rtree_new(&chunks_rtree, (ZU(1) << (LG_SIZEOF_PTR+3)) -
-	    opt_lg_chunk, chunks_rtree_node_alloc, NULL))
+	if (rtree_new(&chunks_rtree, (unsigned)((ZU(1) << (LG_SIZEOF_PTR+3)) -
+	    opt_lg_chunk), chunks_rtree_node_alloc, NULL))
 		return (true);
 
 	return (false);
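The second rtree_new() argument is the number of significant address bits above the chunk offset, so it is tiny. A quick check with assumed defaults (8-byte pointers, 2 MiB chunks; both values are assumptions, not taken from this diff):

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    #define ZU(z)		((size_t)z)
    #define LG_SIZEOF_PTR	3	/* assumed: 8-byte pointers */

    int
    main(void)
    {
    	size_t opt_lg_chunk = 21;	/* assumed: 2 MiB chunks */
    	unsigned rtree_bits;

    	/* ZU(1) << (3 + 3) = 64 address bits; the rtree keys cover the
    	 * bits above the chunk offset, so 64 - 21 = 43 bits.  The value
    	 * is tiny, hence the (unsigned) cast is lossless. */
    	rtree_bits = (unsigned)((ZU(1) << (LG_SIZEOF_PTR+3)) - opt_lg_chunk);
    	assert(rtree_bits == 43);
    	printf("%u\n", rtree_bits);
    	return (0);
    }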
src/ckh.c (12 changed lines)
@@ -99,7 +99,7 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
 	 * Cycle through the cells in the bucket, starting at a random position.
 	 * The randomness avoids worst-case search overhead as buckets fill up.
 	 */
-	offset = prng_lg_range(&ckh->prng_state, LG_CKH_BUCKET_CELLS);
+	offset = (unsigned)prng_lg_range(&ckh->prng_state, LG_CKH_BUCKET_CELLS);
 	for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
 		cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) +
 		    ((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))];
@@ -141,7 +141,8 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
 		 * were an item for which both hashes indicated the same
 		 * bucket.
 		 */
-		i = prng_lg_range(&ckh->prng_state, LG_CKH_BUCKET_CELLS);
+		i = (unsigned)prng_lg_range(&ckh->prng_state,
+		    LG_CKH_BUCKET_CELLS);
 		cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
 		assert(cell->key != NULL);
 
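Both ckh.c hunks above cast the result of prng_lg_range(), which returns a 64-bit quantity strictly below 2^lg_range; with lg_range = LG_CKH_BUCKET_CELLS the value fits in unsigned with room to spare. A toy stand-in (hypothetical name and constants; only the bound matters):

    #include <assert.h>
    #include <stdint.h>

    #define LG_CKH_BUCKET_CELLS	2	/* assumed: 4 cells per bucket */

    /* Toy PRNG step returning the top lg_range bits of a 64-bit LCG
     * state.  The real helper also returns a 64-bit type, which is why
     * the call sites above need an (unsigned) cast. */
    static uint64_t
    toy_prng_lg_range(uint64_t *state, unsigned lg_range)
    {
    	*state = *state * 6364136223846793005ULL + 1442695040888963407ULL;
    	return (*state >> (64 - lg_range));
    }

    int
    main(void)
    {
    	uint64_t state = 42;
    	unsigned offset;

    	offset = (unsigned)toy_prng_lg_range(&state, LG_CKH_BUCKET_CELLS);
    	assert(offset < (1U << LG_CKH_BUCKET_CELLS));	/* always < 4 */
    	return (0);
    }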
@@ -247,8 +248,7 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh)
 {
 	bool ret;
 	ckhc_t *tab, *ttab;
-	size_t lg_curcells;
-	unsigned lg_prevbuckets;
+	unsigned lg_prevbuckets, lg_curcells;
 
 #ifdef CKH_COUNT
 	ckh->ngrows++;
@@ -302,8 +302,8 @@ static void
 ckh_shrink(tsd_t *tsd, ckh_t *ckh)
 {
 	ckhc_t *tab, *ttab;
-	size_t lg_curcells, usize;
-	unsigned lg_prevbuckets;
+	size_t usize;
+	unsigned lg_prevbuckets, lg_curcells;
 
 	/*
 	 * It is possible (though unlikely, given well behaved hashes) that the
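The ckh_grow()/ckh_shrink() declaration changes take the other route: instead of casting at each use, lg_curcells is retyped, since the log2 of a cell count can never exceed the pointer width in bits. A trivial sketch of that bound (illustrative values):

    #include <assert.h>
    #include <stddef.h>

    int
    main(void)
    {
    	unsigned lg_curcells = 12;		/* 2^12 = 4096 cells */
    	size_t ncells = (size_t)1 << lg_curcells;

    	/* Even a 64-bit address space caps lg_curcells at 64, so
    	 * unsigned is plenty wide. */
    	assert(ncells == 4096);
    	return (0);
    }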
src/ctl.c

@@ -1925,7 +1925,7 @@ arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
 }
 
 CTL_RO_NL_GEN(arenas_nlruns, nlclasses, unsigned)
-CTL_RO_NL_GEN(arenas_lrun_i_size, index2size(NBINS+mib[2]), size_t)
+CTL_RO_NL_GEN(arenas_lrun_i_size, index2size(NBINS+(szind_t)mib[2]), size_t)
 static const ctl_named_node_t *
 arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
 {
@@ -1936,7 +1936,8 @@ arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
 }
 
 CTL_RO_NL_GEN(arenas_nhchunks, nhclasses, unsigned)
-CTL_RO_NL_GEN(arenas_hchunk_i_size, index2size(NBINS+nlclasses+mib[2]), size_t)
+CTL_RO_NL_GEN(arenas_hchunk_i_size, index2size(NBINS+nlclasses+(szind_t)mib[2]),
+    size_t)
 static const ctl_named_node_t *
 arenas_hchunk_i_index(const size_t *mib, size_t miblen, size_t i)
 {
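MIB components are size_t because mallctl paths can carry arbitrary indices, while size-class indices use the narrower szind_t; the casts narrow the component at the lookup. A self-contained sketch with a toy table (names, sizes, and the szind_t width here are assumptions, not from this diff):

    #include <assert.h>
    #include <stddef.h>

    typedef unsigned szind_t;	/* assumed width */

    static const size_t lrun_sizes[] = {16384, 20480, 24576, 28672};

    /* Toy lookup with the same shape as index2size(). */
    static size_t
    toy_index2size(szind_t ind)
    {
    	return (lrun_sizes[ind]);
    }

    int
    main(void)
    {
    	size_t mib2 = 1;	/* element of a size_t MIB vector */

    	assert(toy_index2size((szind_t)mib2) == 20480);
    	return (0);
    }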
src/jemalloc.c

@@ -1396,7 +1396,7 @@ malloc_init_hard_finish(void)
 	 * machinery will fail to allocate memory at far lower limits.
 	 */
 	if (narenas_auto > chunksize / sizeof(arena_t *)) {
-		narenas_auto = chunksize / sizeof(arena_t *);
+		narenas_auto = (unsigned)(chunksize / sizeof(arena_t *));
 		malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
 		    narenas_auto);
 	}
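The cast here is safe because it happens only after clamping: chunksize / sizeof(arena_t *) is itself a small constant. A standalone sketch (assuming 2 MiB chunks and 8-byte pointers, with sizeof(void *) standing in for sizeof(arena_t *); all values illustrative):

    #include <assert.h>
    #include <stddef.h>

    int
    main(void)
    {
    	size_t chunksize = (size_t)1 << 21;	/* assumed 2 MiB chunks */
    	unsigned narenas_auto = 1U << 20;	/* pretend it exceeded the cap */

    	/* The cap itself (2^21 / 8 = 2^18) fits in unsigned, so the
    	 * cast after clamping cannot truncate. */
    	if (narenas_auto > chunksize / sizeof(void *))
    		narenas_auto = (unsigned)(chunksize / sizeof(void *));
    	assert(narenas_auto == 1U << 18);
    	return (0);
    }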
src/tcache.c

@@ -461,7 +461,7 @@ tcaches_create(tsd_t *tsd, unsigned *r_ind)
 		elm = tcaches_avail;
 		tcaches_avail = tcaches_avail->next;
 		elm->tcache = tcache;
-		*r_ind = elm - tcaches;
+		*r_ind = (unsigned)(elm - tcaches);
 	} else {
 		elm = &tcaches[tcaches_past];
 		elm->tcache = tcache;
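elm - tcaches is pointer subtraction, which yields ptrdiff_t (64-bit on LP64 targets); the result is an index into the tcaches array, so unsigned holds it. A self-contained sketch with a toy element type:

    #include <assert.h>
    #include <stddef.h>

    typedef struct { void *tcache; } toy_elm_t;

    int
    main(void)
    {
    	toy_elm_t tcaches[8];
    	toy_elm_t *elm = &tcaches[3];
    	unsigned r_ind;

    	/* The difference is a small array index, so the narrowing
    	 * cast merely documents an already-safe conversion. */
    	r_ind = (unsigned)(elm - tcaches);
    	assert(r_ind == 3);
    	return (0);
    }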
src/util.c

@@ -581,7 +581,8 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
 		str[i] = '\0';
 	else
 		str[size - 1] = '\0';
-	ret = i;
+	assert(i < INT_MAX);
+	ret = (int)i;
 
 #undef APPEND_C
 #undef APPEND_S
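This is the one site where the bound is not structurally obvious, so the commit adds an assertion rather than relying on a bare cast: the formatted length is tracked in a size_t while the function must return int. The guarded-narrowing idiom in isolation (hypothetical helper name):

    #include <assert.h>
    #include <limits.h>
    #include <stddef.h>

    /* Assert that a size_t length is representable as int, then narrow. */
    static int
    toy_len_to_int(size_t i)
    {
    	assert(i < INT_MAX);
    	return ((int)i);
    }

    int
    main(void)
    {
    	assert(toy_len_to_int(12) == 12);
    	return (0);
    }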