Fix all optimization-inhibiting integer-to-pointer casts

Following from PR #2481, we replace all integer-to-pointer casts [which
hide pointer provenance information (and thus inhibit
optimizations)](https://clang.llvm.org/extra/clang-tidy/checks/performance/no-int-to-ptr.html)
with equivalent operations that preserve this information. I have
enabled the corresponding clang-tidy check in our static analysis CI so
that we do not get bitten by this again in the future.
This commit is contained in:
Kevin Svetlitski 2023-07-24 10:33:36 -07:00 committed by Qi Wang
parent 4827bb17bd
commit 3e82f357bb
27 changed files with 116 additions and 66 deletions

View File

@@ -513,7 +513,7 @@ arena_cache_oblivious_randomize(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
}
uintptr_t random_offset = ((uintptr_t)r) << (LG_PAGE -
lg_range);
edata->e_addr = (void *)((uintptr_t)edata->e_addr +
edata->e_addr = (void *)((byte_t *)edata->e_addr +
random_offset);
assert(ALIGNMENT_ADDR2BASE(edata->e_addr, alignment) ==
edata->e_addr);
@@ -599,7 +599,7 @@ arena_dalloc_bin_locked_finish(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
static inline bin_t *
arena_get_bin(arena_t *arena, szind_t binind, unsigned binshard) {
bin_t *shard0 = (bin_t *)((uintptr_t)arena + arena_bin_offsets[binind]);
bin_t *shard0 = (bin_t *)((byte_t *)arena + arena_bin_offsets[binind]);
return shard0 + binshard;
}

View File

@@ -247,7 +247,7 @@ static inline void **
cache_bin_empty_position_get(cache_bin_t *bin) {
cache_bin_sz_t diff = cache_bin_diff(bin,
(uint16_t)(uintptr_t)bin->stack_head, bin->low_bits_empty);
uintptr_t empty_bits = (uintptr_t)bin->stack_head + diff;
byte_t *empty_bits = (byte_t *)bin->stack_head + diff;
void **ret = (void **)empty_bits;
assert(ret >= bin->stack_head);
@@ -479,7 +479,7 @@ cache_bin_stash(cache_bin_t *bin, void *ptr) {
uint16_t low_bits_head = (uint16_t)(uintptr_t)bin->stack_head;
/* Wraparound handled as well. */
uint16_t diff = cache_bin_diff(bin, bin->low_bits_full, low_bits_head);
*(void **)((uintptr_t)bin->stack_head - diff) = ptr;
*(void **)((byte_t *)bin->stack_head - diff) = ptr;
assert(!cache_bin_full(bin));
bin->low_bits_full += sizeof(void *);

View File

@@ -377,18 +377,18 @@ edata_ps_get(const edata_t *edata) {
static inline void *
edata_before_get(const edata_t *edata) {
return (void *)((uintptr_t)edata_base_get(edata) - PAGE);
return (void *)((byte_t *)edata_base_get(edata) - PAGE);
}
static inline void *
edata_last_get(const edata_t *edata) {
return (void *)((uintptr_t)edata_base_get(edata) +
return (void *)((byte_t *)edata_base_get(edata) +
edata_size_get(edata) - PAGE);
}
static inline void *
edata_past_get(const edata_t *edata) {
return (void *)((uintptr_t)edata_base_get(edata) +
return (void *)((byte_t *)edata_base_get(edata) +
edata_size_get(edata));
}

View File

@@ -105,4 +105,21 @@ isblank(int c) {
# undef small
#endif
/*
* Oftentimes we'd like to perform some kind of arithmetic to obtain
* a pointer from another pointer but with some offset or mask applied.
* Naively you would accomplish this by casting the source pointer to
* `uintptr_t`, performing all of the relevant arithmetic, and then casting
* the result to the desired pointer type. However, this has the unfortunate
* side-effect of concealing pointer provenance, hiding useful information for
* optimization from the compiler (see here for details:
* https://clang.llvm.org/extra/clang-tidy/checks/performance/no-int-to-ptr.html
* )
* Instead what one should do is cast the source pointer to `char *` and perform
* the equivalent arithmetic (since `char` of course represents one byte). But
* because `char *` has the semantic meaning of "string", we define this typedef
* simply to make it clearer where we are performing such pointer arithmetic.
*/
typedef char byte_t;
#endif /* JEMALLOC_INTERNAL_H */

View File

@@ -99,7 +99,8 @@ typedef enum malloc_init_e malloc_init_t;
/* Return the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2BASE(a, alignment) \
((void *)((uintptr_t)(a) & ((~(alignment)) + 1)))
((void *)(((byte_t *)(a)) - (((uintptr_t)(a)) - \
((uintptr_t)(a) & ((~(alignment)) + 1)))))
/* Return the offset between a and the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
@@ -109,6 +110,19 @@ typedef enum malloc_init_e malloc_init_t;
#define ALIGNMENT_CEILING(s, alignment) \
(((s) + (alignment - 1)) & ((~(alignment)) + 1))
/*
* Return the nearest aligned address at or above a.
*
* While at first glance this would appear to be merely a more complicated
* way to perform the same computation as `ALIGNMENT_CEILING`,
* this has the important additional property of not concealing pointer
* provenance from the compiler. See the block-comment on the
* definition of `byte_t` for more details.
*/
#define ALIGNMENT_ADDR2CEILING(a, alignment) \
((void *)(((byte_t *)(a)) + (((((uintptr_t)(a)) + \
(alignment - 1)) & ((~(alignment)) + 1)) - ((uintptr_t)(a)))))
/* Declare a variable-length array. */
#if __STDC_VERSION__ < 199901L || defined(__STDC_NO_VLA__)
# ifdef _MSC_VER

View File

@@ -2,6 +2,7 @@
#define JEMALLOC_INTERNAL_PAGES_EXTERNS_H
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
/* Actual operating system page size, detected during bootstrap, <= PAGE. */
extern size_t os_page;
@@ -14,7 +15,7 @@ extern size_t os_page;
#define PAGE_MASK ((size_t)(PAGE - 1))
/* Return the page base address for the page containing address a. */
#define PAGE_ADDR2BASE(a) \
((void *)((uintptr_t)(a) & ~PAGE_MASK))
ALIGNMENT_ADDR2BASE(a, PAGE)
/* Return the smallest pagesize multiple that is >= s. */
#define PAGE_CEILING(s) \
(((s) + PAGE_MASK) & ~PAGE_MASK)
@@ -41,7 +42,7 @@ extern size_t os_page;
/* Return the huge page base address for the huge page containing address a. */
#define HUGEPAGE_ADDR2BASE(a) \
((void *)((uintptr_t)(a) & ~HUGEPAGE_MASK))
ALIGNMENT_ADDR2BASE(a, HUGEPAGE)
/* Return the smallest pagesize multiple that is >= s. */
#define HUGEPAGE_CEILING(s) \
(((s) + HUGEPAGE_MASK) & ~HUGEPAGE_MASK)

View File

@@ -88,6 +88,7 @@ typedef struct prof_recent_s prof_recent_t;
#define PROF_SAMPLE_ALIGNMENT PAGE
#define PROF_SAMPLE_ALIGNMENT_MASK PAGE_MASK
/* NOLINTNEXTLINE(performance-no-int-to-ptr) */
#define PROF_TCTX_SENTINEL ((prof_tctx_t *)((uintptr_t)1U))
#endif /* JEMALLOC_INTERNAL_PROF_TYPES_H */

View File

@@ -226,9 +226,11 @@ rtree_leaf_elm_bits_decode(uintptr_t bits) {
uintptr_t high_bit_mask = ((uintptr_t)1 << LG_VADDR) - 1;
/* Mask off metadata. */
uintptr_t mask = high_bit_mask & low_bit_mask;
/* NOLINTNEXTLINE(performance-no-int-to-ptr) */
contents.edata = (edata_t *)(bits & mask);
# else
/* Restore sign-extended high bits, mask metadata bits. */
/* NOLINTNEXTLINE(performance-no-int-to-ptr) */
contents.edata = (edata_t *)((uintptr_t)((intptr_t)(bits << RTREE_NHIB)
>> RTREE_NHIB) & low_bit_mask);
# endif
@@ -270,6 +272,7 @@ JEMALLOC_ALWAYS_INLINE void
rtree_contents_encode(rtree_contents_t contents, void **bits,
unsigned *additional) {
#ifdef RTREE_LEAF_COMPACT
/* NOLINTNEXTLINE(performance-no-int-to-ptr) */
*bits = (void *)rtree_leaf_elm_bits_encode(contents);
/* Suppress spurious warning from static analysis */
if (config_debug) {
@@ -320,8 +323,10 @@ rtree_leaf_elm_state_update(tsdn_t *tsdn, rtree_t *rtree,
/* dependent */ true);
bits &= ~RTREE_LEAF_STATE_MASK;
bits |= state << RTREE_LEAF_STATE_SHIFT;
/* NOLINTNEXTLINE(performance-no-int-to-ptr) */
atomic_store_p(&elm1->le_bits, (void *)bits, ATOMIC_RELEASE);
if (elm2 != NULL) {
/* NOLINTNEXTLINE(performance-no-int-to-ptr) */
atomic_store_p(&elm2->le_bits, (void *)bits, ATOMIC_RELEASE);
}
#else

View File

@@ -31,7 +31,7 @@ compute_redzone_end(const void *_ptr, size_t usize, size_t bumped_usize) {
const unsigned char *redzone_end = usize + REDZONE_SIZE < bumped_usize ?
&ptr[usize + REDZONE_SIZE] : &ptr[bumped_usize];
const unsigned char *page_end = (const unsigned char *)
ALIGNMENT_CEILING(((uintptr_t) (&ptr[usize])), os_page);
ALIGNMENT_ADDR2CEILING(&ptr[usize], os_page);
return redzone_end < page_end ? redzone_end : page_end;
}

View File

@@ -140,7 +140,7 @@ san_junk_ptr_locations(void *ptr, size_t usize, void **first, void **mid,
*first = ptr;
*mid = (void *)((uintptr_t)ptr + ((usize >> 1) & ~(ptr_sz - 1)));
*mid = (void *)((byte_t *)ptr + ((usize >> 1) & ~(ptr_sz - 1)));
assert(*first != *mid || usize == ptr_sz);
assert((uintptr_t)*first <= (uintptr_t)*mid);
@@ -151,7 +151,7 @@ san_junk_ptr_locations(void *ptr, size_t usize, void **first, void **mid,
* default the tcache only goes up to the 32K size class, and is usually
* tuned lower instead of higher, which makes it less of a concern.
*/
*last = (void *)((uintptr_t)ptr + usize - sizeof(uaf_detect_junk));
*last = (void *)((byte_t *)ptr + usize - sizeof(uaf_detect_junk));
assert(*first != *last || usize == ptr_sz);
assert(*mid != *last || usize <= ptr_sz * 2);
assert((uintptr_t)*mid <= (uintptr_t)*last);

View File

@@ -16,6 +16,7 @@ typedef struct tcaches_s tcaches_t;
#define TCACHE_ENABLED_ZERO_INITIALIZER false
/* Used for explicit tcache only. Means flushed but not destroyed. */
/* NOLINTNEXTLINE(performance-no-int-to-ptr) */
#define TCACHES_ELM_NEED_REINIT ((tcache_t *)(uintptr_t)1)
#define TCACHE_LG_MAXCLASS_LIMIT 23 /* tcache_maxclass = 8M */

View File

@@ -110,14 +110,14 @@ util_prefetch_write(void *ptr) {
JEMALLOC_ALWAYS_INLINE void
util_prefetch_read_range(void *ptr, size_t sz) {
for (size_t i = 0; i < sz; i += CACHELINE) {
util_prefetch_read((void *)((uintptr_t)ptr + i));
util_prefetch_read((void *)((byte_t *)ptr + i));
}
}
JEMALLOC_ALWAYS_INLINE void
util_prefetch_write_range(void *ptr, size_t sz) {
for (size_t i = 0; i < sz; i += CACHELINE) {
util_prefetch_write((void *)((uintptr_t)ptr + i));
util_prefetch_write((void *)((byte_t *)ptr + i));
}
}

View File

@@ -44,7 +44,8 @@ echo '-**/stdlib.h' > "$skipfile"
CC_ANALYZERS_FROM_PATH=1 CodeChecker analyze compile_commands.json --jobs "$(nproc)" \
--ctu --compile-uniqueing strict --output static_analysis_raw_results \
--analyzers clangsa clang-tidy --skip "$skipfile" \
--enable readability-inconsistent-declaration-parameter-name
--enable readability-inconsistent-declaration-parameter-name \
--enable performance-no-int-to-ptr
# `--enable` is additive, the vast majority of the checks we want are
# enabled by default.

View File

@@ -236,7 +236,7 @@ arena_slab_reg_alloc(edata_t *slab, const bin_info_t *bin_info) {
assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));
regind = bitmap_sfu(slab_data->bitmap, &bin_info->bitmap_info);
ret = (void *)((uintptr_t)edata_addr_get(slab) +
ret = (void *)((byte_t *)edata_addr_get(slab) +
(uintptr_t)(bin_info->reg_size * regind));
edata_nfree_dec(slab);
return ret;
@@ -280,6 +280,7 @@ arena_slab_reg_alloc_batch(edata_t *slab, const bin_info_t *bin_info,
while (pop--) {
size_t bit = cfs_lu(&g);
size_t regind = shift + bit;
/* NOLINTNEXTLINE(performance-no-int-to-ptr) */
*(ptrs + i) = (void *)(base + regsize * regind);
i++;

View File

@@ -368,6 +368,7 @@ check_background_thread_creation(tsd_t *tsd,
pre_reentrancy(tsd, NULL);
int err = background_thread_create_signals_masked(&info->thread,
/* NOLINTNEXTLINE(performance-no-int-to-ptr) */
NULL, background_thread_entry, (void *)(uintptr_t)i);
post_reentrancy(tsd);
@@ -540,6 +541,7 @@ background_thread_create_locked(tsd_t *tsd, unsigned arena_ind) {
* background threads with the underlying pthread_create.
*/
int err = background_thread_create_signals_masked(&info->thread, NULL,
/* NOLINTNEXTLINE(performance-no-int-to-ptr) */
background_thread_entry, (void *)thread_ind);
post_reentrancy(tsd);

View File

@@ -181,9 +181,9 @@ base_extent_bump_alloc_helper(edata_t *edata, size_t *gap_size, size_t size,
*gap_size = ALIGNMENT_CEILING((uintptr_t)edata_addr_get(edata),
alignment) - (uintptr_t)edata_addr_get(edata);
ret = (void *)((uintptr_t)edata_addr_get(edata) + *gap_size);
ret = (void *)((byte_t *)edata_addr_get(edata) + *gap_size);
assert(edata_bsize_get(edata) >= *gap_size + size);
edata_binit(edata, (void *)((uintptr_t)edata_addr_get(edata) +
edata_binit(edata, (void *)((byte_t *)edata_addr_get(edata) +
*gap_size + size), edata_bsize_get(edata) - *gap_size - size,
edata_sn_get(edata));
return ret;
@@ -291,7 +291,7 @@ base_block_alloc(tsdn_t *tsdn, base_t *base, ehooks_t *ehooks, unsigned ind,
block->next = NULL;
assert(block_size >= header_size);
base_edata_init(extent_sn_next, &block->edata,
(void *)((uintptr_t)block + header_size), block_size - header_size);
(void *)((byte_t *)block + header_size), block_size - header_size);
return block;
}

View File

@@ -50,7 +50,7 @@ cache_bin_preincrement(cache_bin_info_t *infos, szind_t ninfos, void *alloc,
assert(((uintptr_t)alloc & (computed_alignment - 1)) == 0);
}
*(uintptr_t *)((uintptr_t)alloc + *cur_offset) =
*(uintptr_t *)((byte_t *)alloc + *cur_offset) =
cache_bin_preceding_junk;
*cur_offset += sizeof(void *);
}
@@ -58,7 +58,7 @@ cache_bin_preincrement(cache_bin_info_t *infos, szind_t ninfos, void *alloc,
void
cache_bin_postincrement(cache_bin_info_t *infos, szind_t ninfos, void *alloc,
size_t *cur_offset) {
*(uintptr_t *)((uintptr_t)alloc + *cur_offset) =
*(uintptr_t *)((byte_t *)alloc + *cur_offset) =
cache_bin_trailing_junk;
*cur_offset += sizeof(void *);
}
@@ -71,12 +71,12 @@ cache_bin_init(cache_bin_t *bin, cache_bin_info_t *info, void *alloc,
* will access the slots toward higher addresses (for the benefit of
* adjacent prefetch).
*/
void *stack_cur = (void *)((uintptr_t)alloc + *cur_offset);
void *stack_cur = (void *)((byte_t *)alloc + *cur_offset);
void *full_position = stack_cur;
uint16_t bin_stack_size = info->ncached_max * sizeof(void *);
*cur_offset += bin_stack_size;
void *empty_position = (void *)((uintptr_t)alloc + *cur_offset);
void *empty_position = (void *)((byte_t *)alloc + *cur_offset);
/* Init to the empty position. */
bin->stack_head = (void **)empty_position;

View File

@@ -100,7 +100,7 @@ ehooks_default_destroy(extent_hooks_t *extent_hooks, void *addr, size_t size,
bool
ehooks_default_commit_impl(void *addr, size_t offset, size_t length) {
return pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset),
return pages_commit((void *)((byte_t *)addr + (uintptr_t)offset),
length);
}
@@ -112,7 +112,7 @@ ehooks_default_commit(extent_hooks_t *extent_hooks, void *addr, size_t size,
bool
ehooks_default_decommit_impl(void *addr, size_t offset, size_t length) {
return pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset),
return pages_decommit((void *)((byte_t *)addr + (uintptr_t)offset),
length);
}
@@ -125,7 +125,7 @@ ehooks_default_decommit(extent_hooks_t *extent_hooks, void *addr, size_t size,
#ifdef PAGES_CAN_PURGE_LAZY
bool
ehooks_default_purge_lazy_impl(void *addr, size_t offset, size_t length) {
return pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset),
return pages_purge_lazy((void *)((byte_t *)addr + (uintptr_t)offset),
length);
}
@@ -143,7 +143,7 @@ ehooks_default_purge_lazy(extent_hooks_t *extent_hooks, void *addr, size_t size,
#ifdef PAGES_CAN_PURGE_FORCED
bool
ehooks_default_purge_forced_impl(void *addr, size_t offset, size_t length) {
return pages_purge_forced((void *)((uintptr_t)addr +
return pages_purge_forced((void *)((byte_t *)addr +
(uintptr_t)offset), length);
}

View File

@@ -743,7 +743,7 @@ extent_grow_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
/* A successful commit should return zeroed memory. */
if (config_debug) {
void *addr = edata_addr_get(edata);
size_t *p = (size_t *)(uintptr_t)addr;
size_t *p = (size_t *)addr;
/* Check the first page only. */
for (size_t i = 0; i < PAGE / sizeof(size_t); i++) {
assert(p[i] == 0);
@@ -1199,7 +1199,7 @@ extent_split_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
}
edata_init(trail, edata_arena_ind_get(edata),
(void *)((uintptr_t)edata_base_get(edata) + size_a), size_b,
(void *)((byte_t *)edata_base_get(edata) + size_a), size_b,
/* slab */ false, SC_NSIZES, edata_sn_get(edata),
edata_state_get(edata), edata_zeroed_get(edata),
edata_committed_get(edata), EXTENT_PAI_PAC, EXTENT_NOT_HEAD);

View File

@@ -8,6 +8,7 @@
/******************************************************************************/
/* Data. */
/* NOLINTNEXTLINE(performance-no-int-to-ptr) */
#define SBRK_INVALID ((void *)-1)
const char *opt_dss = DSS_DEFAULT;
@@ -149,10 +150,10 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
* necessary to satisfy alignment. This space can be
* recycled for later use.
*/
void *gap_addr_page = (void *)(PAGE_CEILING(
(uintptr_t)max_cur));
void *ret = (void *)ALIGNMENT_CEILING(
(uintptr_t)gap_addr_page, alignment);
void *gap_addr_page = ALIGNMENT_ADDR2CEILING(max_cur,
PAGE);
void *ret = ALIGNMENT_ADDR2CEILING(
gap_addr_page, alignment);
size_t gap_size_page = (uintptr_t)ret -
(uintptr_t)gap_addr_page;
if (gap_size_page != 0) {
@@ -167,7 +168,7 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
* Compute the address just past the end of the desired
* allocation space.
*/
void *dss_next = (void *)((uintptr_t)ret + size);
void *dss_next = (void *)((byte_t *)ret + size);
if ((uintptr_t)ret < (uintptr_t)max_cur ||
(uintptr_t)dss_next < (uintptr_t)max_cur) {
goto label_oom; /* Wrap-around. */

View File

@@ -130,7 +130,7 @@ hpdata_reserve_alloc(hpdata_t *hpdata, size_t sz) {
hpdata_assert_consistent(hpdata);
return (void *)(
(uintptr_t)hpdata_addr_get(hpdata) + (result << LG_PAGE));
(byte_t *)hpdata_addr_get(hpdata) + (result << LG_PAGE));
}
void
@@ -277,7 +277,7 @@ hpdata_purge_next(hpdata_t *hpdata, hpdata_purge_state_t *purge_state,
}
*r_purge_addr = (void *)(
(uintptr_t)hpdata_addr_get(hpdata) + purge_begin * PAGE);
(byte_t *)hpdata_addr_get(hpdata) + purge_begin * PAGE);
*r_purge_size = purge_len * PAGE;
purge_state->next_purge_search_begin = purge_begin + purge_len;

View File

@@ -3446,7 +3446,7 @@ do_rallocx(void *ptr, size_t size, int flags, bool is_realloc) {
if (config_fill && unlikely(opt_junk_alloc) && usize > old_usize
&& !zero) {
size_t excess_len = usize - old_usize;
void *excess_start = (void *)((uintptr_t)p + old_usize);
void *excess_start = (void *)((byte_t *)p + old_usize);
junk_alloc_callback(excess_start, excess_len);
}
@@ -3716,7 +3716,7 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
if (config_fill && unlikely(opt_junk_alloc) && usize > old_usize &&
!zero) {
size_t excess_len = usize - old_usize;
void *excess_start = (void *)((uintptr_t)ptr + old_usize);
void *excess_start = (void *)((byte_t *)ptr + old_usize);
junk_alloc_callback(excess_start, excess_len);
}
label_not_resized:

View File

@@ -113,10 +113,10 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, edata_t *edata, size_t usize,
* of CACHELINE in [0 .. PAGE).
*/
void *zbase = (void *)
((uintptr_t)edata_addr_get(edata) + old_usize);
void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase +
((byte_t *)edata_addr_get(edata) + old_usize);
void *zpast = PAGE_ADDR2BASE((void *)((byte_t *)zbase +
PAGE));
size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
size_t nzero = (byte_t *)zpast - (byte_t *)zbase;
assert(nzero > 0);
memset(zbase, 0, nzero);
}

View File

@@ -197,7 +197,7 @@ os_pages_map(void *addr, size_t size, size_t alignment, bool *commit) {
static void *
os_pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size,
bool *commit) {
void *ret = (void *)((uintptr_t)addr + leadsize);
void *ret = (void *)((byte_t *)addr + leadsize);
assert(alloc_size >= leadsize + size);
#ifdef _WIN32
@@ -217,7 +217,7 @@ os_pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size,
os_pages_unmap(addr, leadsize);
}
if (trailsize != 0) {
os_pages_unmap((void *)((uintptr_t)ret + size), trailsize);
os_pages_unmap((void *)((byte_t *)ret + size), trailsize);
}
return ret;
#endif

View File

@@ -85,8 +85,10 @@ prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b) {
return ret;
}
/* NOLINTBEGIN(performance-no-int-to-ptr) */
rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t,
tctx_link, prof_tctx_comp)
/* NOLINTEND(performance-no-int-to-ptr) */
static int
prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) {
@@ -100,8 +102,10 @@ prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) {
return ret;
}
/* NOLINTBEGIN(performance-no-int-to-ptr) */
rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link,
prof_gctx_comp)
/* NOLINTEND(performance-no-int-to-ptr) */
static int
prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) {
@@ -119,8 +123,10 @@ prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) {
return ret;
}
/* NOLINTBEGIN(performance-no-int-to-ptr) */
rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link,
prof_tdata_comp)
/* NOLINTEND(performance-no-int-to-ptr) */
/******************************************************************************/
@@ -1141,7 +1147,7 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
return NULL;
}
tdata->vec = (void **)((uintptr_t)tdata + tdata_sz);
tdata->vec = (void **)((byte_t *)tdata + tdata_sz);
tdata->lock = prof_tdata_mutex_choose(thr_uid);
tdata->thr_uid = thr_uid;
tdata->thr_discrim = thr_discrim;

View File

@@ -20,43 +20,43 @@ ssize_t opt_lg_san_uaf_align = SAN_LG_UAF_ALIGN_DEFAULT;
uintptr_t san_cache_bin_nonfast_mask = SAN_CACHE_BIN_NONFAST_MASK_DEFAULT;
static inline void
san_find_guarded_addr(edata_t *edata, uintptr_t *guard1, uintptr_t *guard2,
uintptr_t *addr, size_t size, bool left, bool right) {
san_find_guarded_addr(edata_t *edata, void **guard1, void **guard2,
void **addr, size_t size, bool left, bool right) {
assert(!edata_guarded_get(edata));
assert(size % PAGE == 0);
*addr = (uintptr_t)edata_base_get(edata);
*addr = edata_base_get(edata);
if (left) {
*guard1 = *addr;
*addr += SAN_PAGE_GUARD;
*addr = ((byte_t *)*addr) + SAN_PAGE_GUARD;
} else {
*guard1 = 0;
*guard1 = NULL;
}
if (right) {
*guard2 = *addr + size;
*guard2 = ((byte_t *)*addr) + size;
} else {
*guard2 = 0;
*guard2 = NULL;
}
}
static inline void
san_find_unguarded_addr(edata_t *edata, uintptr_t *guard1, uintptr_t *guard2,
uintptr_t *addr, size_t size, bool left, bool right) {
san_find_unguarded_addr(edata_t *edata, void **guard1, void **guard2,
void **addr, size_t size, bool left, bool right) {
assert(edata_guarded_get(edata));
assert(size % PAGE == 0);
*addr = (uintptr_t)edata_base_get(edata);
*addr = edata_base_get(edata);
if (right) {
*guard2 = *addr + size;
*guard2 = ((byte_t *)*addr) + size;
} else {
*guard2 = 0;
*guard2 = NULL;
}
if (left) {
*guard1 = *addr - SAN_PAGE_GUARD;
assert(*guard1 != 0);
*guard1 = ((byte_t *)*addr) - SAN_PAGE_GUARD;
assert(*guard1 != NULL);
*addr = *guard1;
} else {
*guard1 = 0;
*guard1 = NULL;
}
}
@@ -73,16 +73,16 @@ san_guard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, emap_t *emap,
? san_two_side_unguarded_sz(size_with_guards)
: san_one_side_unguarded_sz(size_with_guards);
uintptr_t guard1, guard2, addr;
void *guard1, *guard2, *addr;
san_find_guarded_addr(edata, &guard1, &guard2, &addr, usize, left,
right);
assert(edata_state_get(edata) == extent_state_active);
ehooks_guard(tsdn, ehooks, (void *)guard1, (void *)guard2);
ehooks_guard(tsdn, ehooks, guard1, guard2);
/* Update the guarded addr and usable size of the edata. */
edata_size_set(edata, usize);
edata_addr_set(edata, (void *)addr);
edata_addr_set(edata, addr);
edata_guarded_set(edata, true);
if (remap) {
@@ -108,7 +108,7 @@ san_unguard_pages_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
? san_two_side_guarded_sz(size)
: san_one_side_guarded_sz(size);
uintptr_t guard1, guard2, addr;
void *guard1, *guard2, *addr;
san_find_unguarded_addr(edata, &guard1, &guard2, &addr, size, left,
right);

View File

@@ -768,9 +768,9 @@ tcache_create_explicit(tsd_t *tsd) {
if (mem == NULL) {
return NULL;
}
tcache_t *tcache = (void *)((uintptr_t)mem + tcache_bin_alloc_size);
tcache_t *tcache = (void *)((byte_t *)mem + tcache_bin_alloc_size);
tcache_slow_t *tcache_slow =
(void *)((uintptr_t)mem + tcache_bin_alloc_size + sizeof(tcache_t));
(void *)((byte_t *)mem + tcache_bin_alloc_size + sizeof(tcache_t));
tcache_init(tsd, tcache_slow, tcache, mem);
tcache_arena_associate(tsd_tsdn(tsd), tcache_slow, tcache,