Silence miscellaneous 64-to-32-bit data loss warnings.
parent 1c42a04cc6, commit 9e1810ca9d
@@ -1053,7 +1053,7 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
 		run = &miscelm->run;
 		run_binind = run->binind;
 		bin = &arena->bins[run_binind];
-		actual_binind = bin - arena->bins;
+		actual_binind = (szind_t)(bin - arena->bins);
 		assert(run_binind == actual_binind);
 		bin_info = &arena_bin_info[actual_binind];
 		rpages = arena_miscelm_to_rpages(miscelm);
@@ -1070,7 +1070,7 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
 JEMALLOC_INLINE szind_t
 arena_bin_index(arena_t *arena, arena_bin_t *bin)
 {
-	szind_t binind = bin - arena->bins;
+	szind_t binind = (szind_t)(bin - arena->bins);
 	assert(binind < NBINS);
 	return (binind);
 }
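Note: the pattern in these two hunks recurs throughout the commit. Subtracting two pointers yields a ptrdiff_t, which is 64 bits on LP64/LLP64 targets, so storing the difference in a narrower index type draws a data-loss warning (e.g. MSVC's C4244/C4267, or Clang's -Wshorten-64-to-32); the explicit cast records that the narrowing is intentional. A minimal standalone sketch, with szind_t assumed to be a 32-bit unsigned type (its real definition is jemalloc-internal):

    #include <assert.h>
    #include <stdint.h>

    typedef uint32_t szind_t; /* assumption: stands in for jemalloc's szind_t */
    typedef struct { int pad; } bin_t;

    szind_t
    bin_index(const bin_t *bins, const bin_t *bin)
    {
    	/* bin - bins has type ptrdiff_t; the cast silences the 64-to-32-bit
    	 * truncation warning without changing the value for valid indices. */
    	szind_t binind = (szind_t)(bin - bins);

    	assert(bin >= bins);
    	return (binind);
    }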
@@ -113,7 +113,7 @@ void bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
 JEMALLOC_INLINE bool
 bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo)
 {
-	unsigned rgoff = binfo->levels[binfo->nlevels].group_offset - 1;
+	size_t rgoff = binfo->levels[binfo->nlevels].group_offset - 1;
 	bitmap_t rg = bitmap[rgoff];
 	/* The bitmap is full iff the root group is 0. */
 	return (rg == 0);
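Note: this hunk takes the opposite tack: instead of casting, the local's type is widened to match what it is assigned from (group_offset appears to be a size_t field), so there is no narrowing to silence at all. A sketch under that assumption:

    #include <stddef.h>

    struct bitmap_level_sketch { size_t group_offset; }; /* assumed layout */

    size_t
    root_group_offset(const struct bitmap_level_sketch *level)
    {
    	/* size_t local: no 64-to-32 narrowing, hence no cast and no warning. */
    	size_t rgoff = level->group_offset - 1;

    	return (rgoff);
    }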
@@ -337,13 +337,18 @@ hash_x64_128(const void *key, const int len, const uint32_t seed,
 JEMALLOC_INLINE void
 hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2])
 {
 
+	assert(len <= INT_MAX); /* Unfortunate implementation limitation. */
+
 #if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN))
-	hash_x64_128(key, len, seed, (uint64_t *)r_hash);
+	hash_x64_128(key, (int)len, seed, (uint64_t *)r_hash);
 #else
+	{
 	uint64_t hashes[2];
-	hash_x86_128(key, len, seed, hashes);
+	hash_x86_128(key, (int)len, seed, hashes);
 	r_hash[0] = (size_t)hashes[0];
 	r_hash[1] = (size_t)hashes[1];
+	}
 #endif
 }
 #endif
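Note: two changes cooperate here. hash() accepts a size_t length while the underlying hash_x86_128()/hash_x64_128() take int, so the new assert records that the (int)len narrowing is safe. The added braces matter for C89: once the assert statement precedes it, the `uint64_t hashes[2];` declaration must open a new block to stay legal, since C89 forbids declarations after statements. A compilable sketch of the same shape, with illustrative names only:

    #include <assert.h>
    #include <limits.h>
    #include <stddef.h>
    #include <stdint.h>

    void
    hash_sketch(size_t len, uint64_t r_hash[2])
    {

    	assert(len <= INT_MAX); /* downstream API takes int, not size_t */

    	{
    		/* New scope so this C89-style declaration can follow the
    		 * assert statement above. */
    		int ilen = (int)len;

    		r_hash[0] = (uint64_t)ilen; /* stand-in for a real hash call */
    		r_hash[1] = 0;
    	}
    }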
@@ -11,12 +11,12 @@
 #define JEMALLOC_VERSION_NREV @jemalloc_version_nrev@
 #define JEMALLOC_VERSION_GID "@jemalloc_version_gid@"
 
-# define MALLOCX_LG_ALIGN(la) (la)
+# define MALLOCX_LG_ALIGN(la) ((int)(la))
 # if LG_SIZEOF_PTR == 2
-# define MALLOCX_ALIGN(a) (ffs(a)-1)
+# define MALLOCX_ALIGN(a) ((int)(ffs(a)-1))
 # else
 # define MALLOCX_ALIGN(a) \
-	((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31)
+	((int)((a < (size_t)INT_MAX) ? ffs((int)a)-1 : ffs((int)(a>>32))+31))
 # endif
 # define MALLOCX_ZERO ((int)0x40)
 /*
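Note: these macros feed the `int flags` parameter of mallocx() and friends, so each must evaluate to int even when handed a size_t alignment; the casts wrap the whole expression so no 64-bit type leaks into the flags computation. A typical call site, unchanged by this commit:

    #include <stddef.h>
    #include <jemalloc/jemalloc.h>

    int
    main(void)
    {
    	/* MALLOCX_ALIGN() now yields an int even for a size_t argument. */
    	void *p = mallocx(100, MALLOCX_ALIGN((size_t)4096) | MALLOCX_ZERO);

    	if (p != NULL)
    		dallocx(p, 0);
    	return (0);
    }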
src/arena.c (21 lines changed)
@@ -308,7 +308,7 @@ arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
 	assert(run->nfree > 0);
 	assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info));
 
-	regind = bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
+	regind = (unsigned)bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
 	miscelm = arena_run_to_miscelm(run);
 	rpages = arena_miscelm_to_rpages(miscelm);
 	ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset +
@@ -3411,18 +3411,19 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info)
 	 * size).
 	 */
 	try_run_size = PAGE;
-	try_nregs = try_run_size / bin_info->reg_size;
+	try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
 	do {
 		perfect_run_size = try_run_size;
 		perfect_nregs = try_nregs;
 
 		try_run_size += PAGE;
-		try_nregs = try_run_size / bin_info->reg_size;
+		try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
 	} while (perfect_run_size != perfect_nregs * bin_info->reg_size);
 	assert(perfect_nregs <= RUN_MAXREGS);
 
 	actual_run_size = perfect_run_size;
-	actual_nregs = (actual_run_size - pad_size) / bin_info->reg_interval;
+	actual_nregs = (uint32_t)((actual_run_size - pad_size) /
+	    bin_info->reg_interval);
 
 	/*
 	 * Redzones can require enough padding that not even a single region can
@@ -3434,8 +3435,8 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info)
 		assert(config_fill && unlikely(opt_redzone));
 
 		actual_run_size += PAGE;
-		actual_nregs = (actual_run_size - pad_size) /
-		    bin_info->reg_interval;
+		actual_nregs = (uint32_t)((actual_run_size - pad_size) /
+		    bin_info->reg_interval);
 	}
 
 	/*
@@ -3443,8 +3444,8 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info)
 	 */
 	while (actual_run_size > arena_maxrun) {
 		actual_run_size -= PAGE;
-		actual_nregs = (actual_run_size - pad_size) /
-		    bin_info->reg_interval;
+		actual_nregs = (uint32_t)((actual_run_size - pad_size) /
+		    bin_info->reg_interval);
 	}
 	assert(actual_nregs > 0);
 	assert(actual_run_size == s2u(actual_run_size));
@@ -3452,8 +3453,8 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info)
 	/* Copy final settings. */
 	bin_info->run_size = actual_run_size;
 	bin_info->nregs = actual_nregs;
-	bin_info->reg0_offset = actual_run_size - (actual_nregs *
-	    bin_info->reg_interval) - pad_size + bin_info->redzone_size;
+	bin_info->reg0_offset = (uint32_t)(actual_run_size - (actual_nregs *
+	    bin_info->reg_interval) - pad_size + bin_info->redzone_size);
 
 	if (actual_run_size > small_maxrun)
 		small_maxrun = actual_run_size;
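Note: throughout bin_info_run_size_calc() the arithmetic is done in size_t, but the destination fields (nregs, reg0_offset, and the try_nregs/actual_nregs locals that mirror them) are 32-bit, presumably uint32_t given these casts. Run sizes are capped by arena_maxrun and region counts by RUN_MAXREGS, so the values provably fit and the casts only acknowledge that. The bound-then-narrow shape in isolation (MAXREGS_SKETCH is an assumed stand-in for RUN_MAXREGS):

    #include <stddef.h>
    #include <stdint.h>

    #define MAXREGS_SKETCH (1U << 11) /* assumption: plays RUN_MAXREGS's role */

    uint32_t
    nregs_for(size_t run_size, size_t reg_size)
    {
    	size_t nregs = run_size / reg_size; /* 64-bit intermediate */

    	if (nregs > MAXREGS_SKETCH)
    		nregs = MAXREGS_SKETCH;
    	/* Safe: nregs <= MAXREGS_SKETCH, far below UINT32_MAX. */
    	return ((uint32_t)nregs);
    }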
@@ -732,8 +732,8 @@ chunk_boot(void)
 
 	if (have_dss && chunk_dss_boot())
 		return (true);
-	if (rtree_new(&chunks_rtree, (ZU(1) << (LG_SIZEOF_PTR+3)) -
-	    opt_lg_chunk, chunks_rtree_node_alloc, NULL))
+	if (rtree_new(&chunks_rtree, (unsigned)((ZU(1) << (LG_SIZEOF_PTR+3)) -
+	    opt_lg_chunk), chunks_rtree_node_alloc, NULL))
 		return (true);
 
 	return (false);
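Note: the same idea at an API boundary: rtree_new()'s bit-count parameter is evidently unsigned, while ZU(1) makes the shift expression size_t, so the whole expression (not a single operand) is parenthesized and cast once. The result is at most the pointer width in bits, so it always fits. A sketch of that one-cast-at-the-boundary style:

    #include <stddef.h>

    unsigned
    addr_bits_above_chunk(unsigned opt_lg_chunk)
    {
    	/* sizeof(void *) * 8 mirrors ZU(1) << (LG_SIZEOF_PTR+3); cast the
    	 * full size_t expression once at the call boundary. */
    	return ((unsigned)(sizeof(void *) * 8 - opt_lg_chunk));
    }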
src/ckh.c (12 lines changed)
@@ -99,7 +99,7 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
 	 * Cycle through the cells in the bucket, starting at a random position.
 	 * The randomness avoids worst-case search overhead as buckets fill up.
 	 */
-	offset = prng_lg_range(&ckh->prng_state, LG_CKH_BUCKET_CELLS);
+	offset = (unsigned)prng_lg_range(&ckh->prng_state, LG_CKH_BUCKET_CELLS);
 	for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
 		cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) +
 		    ((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))];
@@ -141,7 +141,8 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
 		 * were an item for which both hashes indicated the same
 		 * bucket.
 		 */
-		i = prng_lg_range(&ckh->prng_state, LG_CKH_BUCKET_CELLS);
+		i = (unsigned)prng_lg_range(&ckh->prng_state,
+		    LG_CKH_BUCKET_CELLS);
 		cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
 		assert(cell->key != NULL);
 
@@ -247,8 +248,7 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh)
 {
 	bool ret;
 	ckhc_t *tab, *ttab;
-	size_t lg_curcells;
-	unsigned lg_prevbuckets;
+	unsigned lg_prevbuckets, lg_curcells;
 
 #ifdef CKH_COUNT
 	ckh->ngrows++;
@@ -302,8 +302,8 @@ static void
 ckh_shrink(tsd_t *tsd, ckh_t *ckh)
 {
 	ckhc_t *tab, *ttab;
-	size_t lg_curcells, usize;
-	unsigned lg_prevbuckets;
+	size_t usize;
+	unsigned lg_prevbuckets, lg_curcells;
 
 	/*
 	 * It is possible (though unlikely, given well behaved hashes) that the
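Note: in ckh.c the commit mostly fixes declarations rather than call sites: lg_curcells is a log2 of a cell count, so unsigned is ample, and folding it into the existing unsigned declaration removes a size_t local from both ckh_grow() and ckh_shrink(). Where a size_t still arrives (prng_lg_range() seemingly returns one, judging by the casts added above), a single (unsigned) cast at the assignment suffices. Declaring log2 quantities narrow up front avoids casting at every use:

    #include <stddef.h>

    size_t
    cells_for(unsigned lg_curcells)
    {
    	/* The unsigned log2 widens safely into size_t arithmetic. */
    	return ((size_t)1 << lg_curcells);
    }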
@@ -1925,7 +1925,7 @@ arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
 }
 
 CTL_RO_NL_GEN(arenas_nlruns, nlclasses, unsigned)
-CTL_RO_NL_GEN(arenas_lrun_i_size, index2size(NBINS+mib[2]), size_t)
+CTL_RO_NL_GEN(arenas_lrun_i_size, index2size(NBINS+(szind_t)mib[2]), size_t)
 static const ctl_named_node_t *
 arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
 {
@@ -1936,7 +1936,8 @@ arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
 }
 
 CTL_RO_NL_GEN(arenas_nhchunks, nhclasses, unsigned)
-CTL_RO_NL_GEN(arenas_hchunk_i_size, index2size(NBINS+nlclasses+mib[2]), size_t)
+CTL_RO_NL_GEN(arenas_hchunk_i_size, index2size(NBINS+nlclasses+(szind_t)mib[2]),
+    size_t)
 static const ctl_named_node_t *
 arenas_hchunk_i_index(const size_t *mib, size_t miblen, size_t i)
 {
@@ -1396,7 +1396,7 @@ malloc_init_hard_finish(void)
 	 * machinery will fail to allocate memory at far lower limits.
 	 */
 	if (narenas_auto > chunksize / sizeof(arena_t *)) {
-		narenas_auto = chunksize / sizeof(arena_t *);
+		narenas_auto = (unsigned)(chunksize / sizeof(arena_t *));
 		malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
 		    narenas_auto);
 	}
@@ -461,7 +461,7 @@ tcaches_create(tsd_t *tsd, unsigned *r_ind)
 		elm = tcaches_avail;
 		tcaches_avail = tcaches_avail->next;
 		elm->tcache = tcache;
-		*r_ind = elm - tcaches;
+		*r_ind = (unsigned)(elm - tcaches);
 	} else {
 		elm = &tcaches[tcaches_past];
 		elm->tcache = tcache;
@@ -581,7 +581,8 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
 		str[i] = '\0';
 	else
 		str[size - 1] = '\0';
-	ret = i;
+	assert(i < INT_MAX);
+	ret = (int)i;
 
 #undef APPEND_C
 #undef APPEND_S
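Note: malloc_vsnprintf() mirrors vsnprintf() and therefore returns int, while the internal write index i is a size_t; the new assert pins down that the narrowing cannot lose data before the cast performs it. The idiom in isolation:

    #include <assert.h>
    #include <limits.h>
    #include <stddef.h>

    int
    narrow_index(size_t i)
    {
    	/* Guard, then narrow: required when a size_t count exits through
    	 * an int-returning printf-style interface. */
    	assert(i < INT_MAX);
    	return ((int)i);
    }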
@@ -138,22 +138,22 @@ TEST_END
 TEST_BEGIN(test_lg_align_and_zero)
 {
 	void *p, *q;
-	size_t lg_align, sz;
+	unsigned lg_align;
+	size_t sz;
 #define MAX_LG_ALIGN 25
 #define MAX_VALIDATE (ZU(1) << 22)
 
-	lg_align = ZU(0);
+	lg_align = 0;
 	p = mallocx(1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO);
 	assert_ptr_not_null(p, "Unexpected mallocx() error");
 
 	for (lg_align++; lg_align <= MAX_LG_ALIGN; lg_align++) {
 		q = rallocx(p, 1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO);
 		assert_ptr_not_null(q,
-		    "Unexpected rallocx() error for lg_align=%zu", lg_align);
+		    "Unexpected rallocx() error for lg_align=%u", lg_align);
 		assert_ptr_null(
 		    (void *)((uintptr_t)q & ((ZU(1) << lg_align)-1)),
-		    "%p inadequately aligned for lg_align=%zu",
-		    q, lg_align);
+		    "%p inadequately aligned for lg_align=%u", q, lg_align);
 		sz = sallocx(q, 0);
 		if ((sz << 1) <= MAX_VALIDATE) {
 			assert_false(validate_fill(q, 0, 0, sz),
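Note: retyping lg_align from size_t to unsigned forces the matching format-specifier change from %zu to %u in every message that prints it; a mismatched specifier is undefined behavior and draws -Wformat warnings. For example:

    #include <stdio.h>

    int
    main(void)
    {
    	unsigned lg_align = 25;

    	printf("lg_align=%u\n", lg_align); /* %zu would now mismatch */
    	return (0);
    }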
@@ -35,7 +35,7 @@ typedef enum {
 	hash_variant_x64_128
 } hash_variant_t;
 
-static size_t
+static int
 hash_variant_bits(hash_variant_t variant)
 {
 
@@ -63,7 +63,7 @@ hash_variant_string(hash_variant_t variant)
 static void
 hash_variant_verify_key(hash_variant_t variant, uint8_t *key)
 {
-	const size_t hashbytes = hash_variant_bits(variant) / 8;
+	const int hashbytes = hash_variant_bits(variant) / 8;
 	VARIABLE_ARRAY(uint8_t, hashes, hashbytes * 256);
 	VARIABLE_ARRAY(uint8_t, final, hashbytes);
 	unsigned i;
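Note: making hash_variant_bits() return int keeps hashbytes an int, consistent with the `const int len` convention of the hash functions under test, and that int is what the VARIABLE_ARRAY() buffer sizes derive from. A sketch with plain arrays standing in for jemalloc's VARIABLE_ARRAY() shim:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    int
    main(void)
    {
    	const int hashbytes = 128 / 8; /* e.g. the x64_128 variant */
    	uint8_t hashes[16 * 256];
    	uint8_t final[16];

    	memset(hashes, 0, (size_t)hashbytes * 256);
    	memset(final, 0, (size_t)hashbytes);
    	return (0);
    }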