Get rid of most of the various inline macros.

commit 4d2e4bf5eb
parent 7d86c92c61
committed by David Goldblatt
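The refactor replaces jemalloc's file-local inline macros with plain `static`, and renames the forced-inline variant from JEMALLOC_ALWAYS_INLINE_C to JEMALLOC_ALWAYS_INLINE. For orientation, here is a minimal sketch of what the retired macros plausibly expanded to. The exact definitions lived in jemalloc's internal headers and depended on the build configuration, so treat this as an illustrative assumption, not the verbatim originals:

/*
 * Illustrative reconstruction, not the verbatim jemalloc definitions.
 * Debug builds could drop the inline hint entirely to ease debugging.
 */
#ifdef JEMALLOC_DEBUG
#  define JEMALLOC_INLINE_C		static
#  define JEMALLOC_ALWAYS_INLINE_C	static
#else
#  define JEMALLOC_INLINE_C		static inline
#  define JEMALLOC_ALWAYS_INLINE_C	\
	static inline __attribute__((always_inline))
#endif

After this commit, helpers in .c files are declared plain `static` and inlining decisions are left to the compiler; only the always-inline hint survives, under the shorter JEMALLOC_ALWAYS_INLINE name, as the hunks below show.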
src/arena.c

@@ -359,7 +359,7 @@ arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
 	}
 }

-JEMALLOC_INLINE_C void *
+static void *
 arena_slab_reg_alloc(tsdn_t *tsdn, extent_t *slab,
     const arena_bin_info_t *bin_info) {
 	void *ret;
@@ -377,7 +377,7 @@ arena_slab_reg_alloc(tsdn_t *tsdn, extent_t *slab,
 }

 #ifndef JEMALLOC_JET
-JEMALLOC_INLINE_C
+static
 #endif
 size_t
 arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) {
@@ -414,7 +414,7 @@ arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) {
 	return regind;
 }

-JEMALLOC_INLINE_C void
+static void
 arena_slab_reg_dalloc(tsdn_t *tsdn, extent_t *slab,
     arena_slab_data_t *slab_data, void *ptr) {
 	szind_t binind = extent_szind_get(slab);
src/ckh.c (12 changed lines)

@@ -54,7 +54,7 @@ static void ckh_shrink(tsd_t *tsd, ckh_t *ckh);
  * Search bucket for key and return the cell number if found; SIZE_T_MAX
  * otherwise.
  */
-JEMALLOC_INLINE_C size_t
+static size_t
 ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key) {
 	ckhc_t *cell;
 	unsigned i;
@@ -72,7 +72,7 @@ ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key) {
 /*
  * Search table for key and return cell number if found; SIZE_T_MAX otherwise.
  */
-JEMALLOC_INLINE_C size_t
+static size_t
 ckh_isearch(ckh_t *ckh, const void *key) {
 	size_t hashes[2], bucket, cell;

@@ -93,7 +93,7 @@ ckh_isearch(ckh_t *ckh, const void *key) {
 	return cell;
 }

-JEMALLOC_INLINE_C bool
+static bool
 ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
     const void *data) {
 	ckhc_t *cell;
@@ -125,7 +125,7 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
  * eviction/relocation procedure until either success or detection of an
  * eviction/relocation bucket cycle.
  */
-JEMALLOC_INLINE_C bool
+static bool
 ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
     void const **argdata) {
 	const void *key, *data, *tkey, *tdata;
@@ -196,7 +196,7 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
 	}
 }

-JEMALLOC_INLINE_C bool
+static bool
 ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) {
 	size_t hashes[2], bucket;
 	const void *key = *argkey;
@@ -226,7 +226,7 @@ ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) {
  * Try to rebuild the hash table from scratch by inserting all items from the
  * old table into the new.
  */
-JEMALLOC_INLINE_C bool
+static bool
 ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) {
 	size_t count, i, nins;
 	const void *key, *data;
src/ctl.c

@@ -21,19 +21,19 @@ static ctl_arenas_t *ctl_arenas;
 /******************************************************************************/
 /* Helpers for named and indexed nodes. */

-JEMALLOC_INLINE_C const ctl_named_node_t *
+static const ctl_named_node_t *
 ctl_named_node(const ctl_node_t *node) {
 	return ((node->named) ? (const ctl_named_node_t *)node : NULL);
 }

-JEMALLOC_INLINE_C const ctl_named_node_t *
+static const ctl_named_node_t *
 ctl_named_children(const ctl_named_node_t *node, size_t index) {
 	const ctl_named_node_t *children = ctl_named_node(node->children);

 	return (children ? &children[index] : NULL);
 }

-JEMALLOC_INLINE_C const ctl_indexed_node_t *
+static const ctl_indexed_node_t *
 ctl_indexed_node(const ctl_node_t *node) {
 	return (!node->named ? (const ctl_indexed_node_t *)node : NULL);
 }
src/jemalloc.c

@@ -288,7 +288,7 @@ malloc_initialized(void) {
 	return (malloc_init_state == malloc_init_initialized);
 }

-JEMALLOC_ALWAYS_INLINE_C bool
+JEMALLOC_ALWAYS_INLINE bool
 malloc_init_a0(void) {
 	if (unlikely(malloc_init_state == malloc_init_uninitialized)) {
 		return malloc_init_hard_a0();
@@ -296,7 +296,7 @@ malloc_init_a0(void) {
 	return false;
 }

-JEMALLOC_ALWAYS_INLINE_C bool
+JEMALLOC_ALWAYS_INLINE bool
 malloc_init(void) {
 	if (unlikely(!malloc_initialized()) && malloc_init_hard()) {
 		return true;
@@ -1490,7 +1490,7 @@ struct static_opts_s {
 	bool slow;
 };

-JEMALLOC_ALWAYS_INLINE_C void
+JEMALLOC_ALWAYS_INLINE void
 static_opts_init(static_opts_t *static_opts) {
 	static_opts->may_overflow = false;
 	static_opts->bump_empty_alloc = false;
@@ -1523,7 +1523,7 @@ struct dynamic_opts_s {
 	unsigned arena_ind;
 };

-JEMALLOC_ALWAYS_INLINE_C void
+JEMALLOC_ALWAYS_INLINE void
 dynamic_opts_init(dynamic_opts_t *dynamic_opts) {
 	dynamic_opts->result = NULL;
 	dynamic_opts->num_items = 0;
@@ -1535,7 +1535,7 @@ dynamic_opts_init(dynamic_opts_t *dynamic_opts) {
 }

 /* ind is ignored if dopts->alignment > 0. */
-JEMALLOC_ALWAYS_INLINE_C void *
+JEMALLOC_ALWAYS_INLINE void *
 imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
     size_t size, size_t usize, szind_t ind) {
 	tcache_t *tcache;
@@ -1577,7 +1577,7 @@ imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
 	    arena, sopts->slow);
 }

-JEMALLOC_ALWAYS_INLINE_C void *
+JEMALLOC_ALWAYS_INLINE void *
 imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
     size_t usize, szind_t ind) {
 	void *ret;
@@ -1611,7 +1611,7 @@ imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
  * Returns true if the allocation will overflow, and false otherwise. Sets
  * *size to the product either way.
  */
-JEMALLOC_ALWAYS_INLINE_C bool
+JEMALLOC_ALWAYS_INLINE bool
 compute_size_with_overflow(bool may_overflow, dynamic_opts_t *dopts,
     size_t *size) {
 	/*
@@ -1649,7 +1649,7 @@ compute_size_with_overflow(bool may_overflow, dynamic_opts_t *dopts,
 	return true;
 }

-JEMALLOC_ALWAYS_INLINE_C int
+JEMALLOC_ALWAYS_INLINE int
 imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
 	/* Where the actual allocated memory will live. */
 	void *allocation = NULL;
@@ -1850,7 +1850,7 @@ label_invalid_alignment:
 }

 /* Returns the errno-style error code of the allocation. */
-JEMALLOC_ALWAYS_INLINE_C int
+JEMALLOC_ALWAYS_INLINE int
 imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) {
 	if (unlikely(!malloc_initialized()) && unlikely(malloc_init())) {
 		if (config_xmalloc && unlikely(opt_xmalloc)) {
@@ -2011,7 +2011,7 @@ irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
 	return p;
 }

-JEMALLOC_ALWAYS_INLINE_C void *
+JEMALLOC_ALWAYS_INLINE void *
 irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
     alloc_ctx_t *alloc_ctx) {
 	void *p;
@@ -2036,7 +2036,7 @@ irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
 	return p;
 }

-JEMALLOC_ALWAYS_INLINE_C void
+JEMALLOC_ALWAYS_INLINE void
 ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
 	assert(slow_path || tsd_assert_fast(tsd));
 	if (tsd_reentrancy_level_get(tsd) == 0) {
@@ -2074,7 +2074,7 @@ ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
 	}
 }

-JEMALLOC_ALWAYS_INLINE_C void
+JEMALLOC_ALWAYS_INLINE void
 isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) {
 	assert(slow_path || tsd_assert_fast(tsd));
 	if (tsd_reentrancy_level_get(tsd) == 0) {
@@ -2403,7 +2403,7 @@ irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize,
 	return p;
 }

-JEMALLOC_ALWAYS_INLINE_C void *
+JEMALLOC_ALWAYS_INLINE void *
 irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
     size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
     arena_t *arena, alloc_ctx_t *alloc_ctx) {
@@ -2528,7 +2528,7 @@ label_oom:
 	return NULL;
 }

-JEMALLOC_ALWAYS_INLINE_C size_t
+JEMALLOC_ALWAYS_INLINE size_t
 ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
     size_t extra, size_t alignment, bool zero) {
 	size_t usize;
@@ -2555,7 +2555,7 @@ ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
 	return usize;
 }

-JEMALLOC_ALWAYS_INLINE_C size_t
+JEMALLOC_ALWAYS_INLINE size_t
 ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
     size_t extra, size_t alignment, bool zero, alloc_ctx_t *alloc_ctx) {
 	size_t usize_max, usize;
@@ -2727,7 +2727,7 @@ je_dallocx(void *ptr, int flags) {
 	witness_assert_lockless(tsd_tsdn(tsd));
 }

-JEMALLOC_ALWAYS_INLINE_C size_t
+JEMALLOC_ALWAYS_INLINE size_t
 inallocx(tsdn_t *tsdn, size_t size, int flags) {
 	witness_assert_lockless(tsdn);

src/jemalloc_cpp.cpp

@@ -40,7 +40,6 @@ void operator delete[](void *ptr, std::size_t size) noexcept;
 #endif

 template <bool IsNoExcept>
-JEMALLOC_INLINE
 void *
 newImpl(std::size_t size) noexcept(IsNoExcept) {
 	void *ptr = je_malloc(size);
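This is the one deletion with no replacement line: newImpl is a function template, and a function template already behaves like an inline function for one-definition-rule purposes, so the inline-hint macro was redundant there. A standalone sketch of that language rule, using a hypothetical allocImpl rather than jemalloc's own code:

#include <cstddef>
#include <new>

// Hypothetical example, not jemalloc code: a function template may be
// defined in a header and instantiated in many translation units without
// any `inline` keyword, because the language already permits the repeated
// definitions. No inline-hint macro is required.
template <bool IsNoExcept>
void *allocImpl(std::size_t size) noexcept(IsNoExcept) {
	return ::operator new(size, std::nothrow);
}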
src/prof.c (12 changed lines)

@@ -145,7 +145,7 @@ static char *prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name);
 /******************************************************************************/
 /* Red-black trees. */

-JEMALLOC_INLINE_C int
+static int
 prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b) {
 	uint64_t a_thr_uid = a->thr_uid;
 	uint64_t b_thr_uid = b->thr_uid;
@@ -168,7 +168,7 @@ prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b) {
 rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t,
     tctx_link, prof_tctx_comp)

-JEMALLOC_INLINE_C int
+static int
 prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) {
 	unsigned a_len = a->bt.len;
 	unsigned b_len = b->bt.len;
@@ -183,7 +183,7 @@ prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) {
 rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link,
     prof_gctx_comp)

-JEMALLOC_INLINE_C int
+static int
 prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) {
 	int ret;
 	uint64_t a_uid = a->thr_uid;
@@ -273,7 +273,7 @@ bt_init(prof_bt_t *bt, void **vec) {
 	bt->len = 0;
 }

-JEMALLOC_INLINE_C void
+static void
 prof_enter(tsd_t *tsd, prof_tdata_t *tdata) {
 	cassert(config_prof);
 	assert(tdata == prof_tdata_get(tsd, false));
@@ -286,7 +286,7 @@ prof_enter(tsd_t *tsd, prof_tdata_t *tdata) {
 	malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
 }

-JEMALLOC_INLINE_C void
+static void
 prof_leave(tsd_t *tsd, prof_tdata_t *tdata) {
 	cassert(config_prof);
 	assert(tdata == prof_tdata_get(tsd, false));
@@ -1884,7 +1884,7 @@ prof_bt_keycomp(const void *k1, const void *k2) {
 	return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
 }

-JEMALLOC_INLINE_C uint64_t
+static uint64_t
 prof_thr_uid_alloc(tsdn_t *tsdn) {
 	uint64_t thr_uid;
