Get rid of most of the various inline macros.

commit 4d2e4bf5eb (parent 7d86c92c61)
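This commit removes the two-part inline scheme from the internal headers: each *_inlines.h loses its #ifndef JEMALLOC_ENABLE_INLINE prototype block and the #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_FOO_C_)) wrapper around the bodies, and JEMALLOC_INLINE is spelled out as plain static inline. JEMALLOC_ALWAYS_INLINE is kept for hot paths (see jemalloc_internal_macros.h below). A schematic of the shape being removed versus the shape that replaces it — foo_t, foo_get, and JEMALLOC_FOO_C_ are placeholder names, not real jemalloc identifiers:

    /* Before: prototypes for builds with inlining disabled... */
    #ifndef JEMALLOC_ENABLE_INLINE
    unsigned foo_get(const foo_t *foo);
    #endif

    /* ...plus bodies compiled inline everywhere, or extern in exactly one
     * translation unit (the one defining JEMALLOC_FOO_C_). */
    #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_FOO_C_))
    JEMALLOC_INLINE unsigned
    foo_get(const foo_t *foo) {
        return foo->ind;
    }
    #endif

    /* After: one unconditional definition per header. */
    static inline unsigned
    foo_get(const foo_t *foo) {
        return foo->ind;
    }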
include/jemalloc/internal/arena_inlines_a.h
@@ -1,38 +1,27 @@
 #ifndef JEMALLOC_INTERNAL_ARENA_INLINES_A_H
 #define JEMALLOC_INTERNAL_ARENA_INLINES_A_H
 
-#ifndef JEMALLOC_ENABLE_INLINE
-unsigned arena_ind_get(const arena_t *arena);
-void arena_internal_add(arena_t *arena, size_t size);
-void arena_internal_sub(arena_t *arena, size_t size);
-size_t arena_internal_get(arena_t *arena);
-bool arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes);
-void percpu_arena_update(tsd_t *tsd, unsigned cpu);
-#endif /* JEMALLOC_ENABLE_INLINE */
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
-
-JEMALLOC_INLINE unsigned
+static inline unsigned
 arena_ind_get(const arena_t *arena) {
 	return base_ind_get(arena->base);
 }
 
-JEMALLOC_INLINE void
+static inline void
 arena_internal_add(arena_t *arena, size_t size) {
 	atomic_fetch_add_zu(&arena->stats.internal, size, ATOMIC_RELAXED);
 }
 
-JEMALLOC_INLINE void
+static inline void
 arena_internal_sub(arena_t *arena, size_t size) {
 	atomic_fetch_sub_zu(&arena->stats.internal, size, ATOMIC_RELAXED);
 }
 
-JEMALLOC_INLINE size_t
+static inline size_t
 arena_internal_get(arena_t *arena) {
 	return atomic_load_zu(&arena->stats.internal, ATOMIC_RELAXED);
 }
 
-JEMALLOC_INLINE bool
+static inline bool
 arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes) {
 	cassert(config_prof);
 
@@ -43,7 +32,7 @@ arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes) {
 	return prof_accum_add(tsdn, &arena->prof_accum, accumbytes);
 }
 
-JEMALLOC_INLINE void
+static inline void
 percpu_arena_update(tsd_t *tsd, unsigned cpu) {
 	assert(have_percpu_arena);
 	arena_t *oldarena = tsd_arena_get(tsd);
@@ -65,6 +54,4 @@ percpu_arena_update(tsd_t *tsd, unsigned cpu) {
 	}
 }
 
-#endif /* (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_)) */
-
 #endif /* JEMALLOC_INTERNAL_ARENA_INLINES_A_H */
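arena_internal_add(), arena_internal_sub(), and arena_internal_get() above maintain a pure statistics counter, so relaxed atomics are enough: the counter must not tear, but it orders nothing else. A self-contained C11 sketch of the same idiom — jemalloc goes through its own atomic_*_zu wrappers rather than <stdatomic.h> directly, and internal_bytes is an invented stand-in for arena->stats.internal:

    #include <stdatomic.h>
    #include <stddef.h>

    static atomic_size_t internal_bytes;  /* stand-in for arena->stats.internal */

    static inline void
    internal_add(size_t size) {
        /* Relaxed suffices: this is bookkeeping, not synchronization. */
        atomic_fetch_add_explicit(&internal_bytes, size, memory_order_relaxed);
    }

    static inline size_t
    internal_get(void) {
        return atomic_load_explicit(&internal_bytes, memory_order_relaxed);
    }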
include/jemalloc/internal/arena_inlines_b.h
@@ -3,30 +3,7 @@
 
 #include "jemalloc/internal/jemalloc_internal_types.h"
 
-#ifndef JEMALLOC_ENABLE_INLINE
-szind_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
-prof_tctx_t *arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr,
-    alloc_ctx_t *ctx);
-void arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
-    alloc_ctx_t *ctx, prof_tctx_t *tctx);
-void arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx);
-void arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks);
-void arena_decay_tick(tsdn_t *tsdn, arena_t *arena);
-void *arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
-    bool zero, tcache_t *tcache, bool slow_path);
-arena_t *arena_aalloc(tsdn_t *tsdn, const void *ptr);
-size_t arena_salloc(tsdn_t *tsdn, const void *ptr);
-size_t arena_vsalloc(tsdn_t *tsdn, const void *ptr);
-void arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr);
-void arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
-    alloc_ctx_t *alloc_ctx, bool slow_path);
-void arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size);
-void arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
-    alloc_ctx_t *alloc_ctx, bool slow_path);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
-JEMALLOC_INLINE szind_t
+static inline szind_t
 arena_bin_index(arena_t *arena, arena_bin_t *bin) {
 	szind_t binind = (szind_t)(bin - arena->bins);
 	assert(binind < NBINS);
@@ -71,7 +48,7 @@ arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
 	}
 }
 
-JEMALLOC_INLINE void
+static inline void
 arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) {
 	cassert(config_prof);
 	assert(ptr != NULL);
@@ -182,7 +159,7 @@ arena_vsalloc(tsdn_t *tsdn, const void *ptr) {
 	return index2size(szind);
 }
 
-JEMALLOC_INLINE void
+static inline void
 arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr) {
 	assert(ptr != NULL);
 
@@ -264,7 +241,7 @@ arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
 	}
 }
 
-JEMALLOC_INLINE void
+static inline void
 arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) {
 	assert(ptr != NULL);
 	assert(size <= LARGE_MAXCLASS);
@@ -376,5 +353,4 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
 	}
 }
 
-#endif /* (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_)) */
 #endif /* JEMALLOC_INTERNAL_ARENA_INLINES_B_H */
include/jemalloc/internal/base_inlines.h
@@ -1,15 +1,9 @@
 #ifndef JEMALLOC_INTERNAL_BASE_INLINES_H
 #define JEMALLOC_INTERNAL_BASE_INLINES_H
 
-#ifndef JEMALLOC_ENABLE_INLINE
-unsigned base_ind_get(const base_t *base);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_BASE_C_))
-JEMALLOC_INLINE unsigned
+static inline unsigned
 base_ind_get(const base_t *base) {
 	return base->ind;
 }
-#endif
 
 #endif /* JEMALLOC_INTERNAL_BASE_INLINES_H */
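base_ind_get() shows the deleted machinery at its smallest. How the old header compiled under each setting of the removed macros (a schematic expansion; the macro definitions being removed appear in jemalloc_internal_macros.h below):

    /* JEMALLOC_ENABLE_INLINE defined (optimized builds): JEMALLOC_INLINE was
     * "static inline", so every includer compiled its own copy. */
    static inline unsigned
    base_ind_get(const base_t *base) {
        return base->ind;
    }

    /* Inlining disabled (JEMALLOC_DEBUG / code coverage): JEMALLOC_INLINE
     * expanded to nothing, and the #if guard held only in the translation
     * unit defining JEMALLOC_BASE_C_, which emitted the one extern
     * definition; all other includers saw just the prototype. */
    unsigned
    base_ind_get(const base_t *base) {
        return base->ind;
    }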
include/jemalloc/internal/bitmap_inlines.h
@@ -3,18 +3,7 @@
 
 #include "jemalloc/internal/bit_util.h"
 
-#ifndef JEMALLOC_ENABLE_INLINE
-bool bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo);
-bool bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
-void bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
-size_t bitmap_ffu(const bitmap_t *bitmap, const bitmap_info_t *binfo,
-    size_t min_bit);
-size_t bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo);
-void bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_BITMAP_C_))
-JEMALLOC_INLINE bool
+static inline bool
 bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) {
 #ifdef BITMAP_USE_TREE
 	size_t rgoff = binfo->levels[binfo->nlevels].group_offset - 1;
@@ -33,7 +22,7 @@ bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) {
 #endif
 }
 
-JEMALLOC_INLINE bool
+static inline bool
 bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
 	size_t goff;
 	bitmap_t g;
@@ -44,7 +33,7 @@ bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
 	return !(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
 }
 
-JEMALLOC_INLINE void
+static inline void
 bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
 	size_t goff;
 	bitmap_t *gp;
@@ -80,7 +69,7 @@ bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
 }
 
 /* ffu: find first unset >= bit. */
-JEMALLOC_INLINE size_t
+static inline size_t
 bitmap_ffu(const bitmap_t *bitmap, const bitmap_info_t *binfo, size_t min_bit) {
 	assert(min_bit < binfo->nbits);
 
@@ -139,7 +128,7 @@ bitmap_ffu(const bitmap_t *bitmap, const bitmap_info_t *binfo, size_t min_bit) {
 }
 
 /* sfu: set first unset. */
-JEMALLOC_INLINE size_t
+static inline size_t
 bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) {
 	size_t bit;
 	bitmap_t g;
@@ -169,7 +158,7 @@ bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) {
 	return bit;
 }
 
-JEMALLOC_INLINE void
+static inline void
 bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
 	size_t goff;
 	bitmap_t *gp;
@@ -208,6 +197,4 @@ bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
 #endif /* BITMAP_USE_TREE */
 }
 
-#endif
-
 #endif /* JEMALLOC_INTERNAL_BITMAP_INLINES_H */
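bitmap_get() and bitmap_set() address a bit through two coordinates: a group word (goff) and a position within the group, masked out with bit & BITMAP_GROUP_NBITS_MASK. A self-contained sketch with fixed 64-bit groups — jemalloc derives the group width from LG_SIZEOF_BITMAP and optionally layers a tree on top (BITMAP_USE_TREE), both elided here; note too that bitmap_get() above returns the negated bit, since jemalloc stores these bits inverted:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define GROUP_NBITS      64                  /* assumed group width */
    #define GROUP_NBITS_MASK (GROUP_NBITS - 1)

    static inline bool
    bit_get(const uint64_t *bitmap, size_t bit) {
        uint64_t g = bitmap[bit / GROUP_NBITS];      /* the "goff" word */
        return (g >> (bit & GROUP_NBITS_MASK)) & 1;  /* raw, un-negated */
    }

    static inline void
    bit_set(uint64_t *bitmap, size_t bit) {
        bitmap[bit / GROUP_NBITS] |= UINT64_C(1) << (bit & GROUP_NBITS_MASK);
    }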
include/jemalloc/internal/extent_inlines.h
@@ -3,64 +3,7 @@
 
 #include "jemalloc/internal/ql.h"
 
-#ifndef JEMALLOC_ENABLE_INLINE
-arena_t *extent_arena_get(const extent_t *extent);
-szind_t extent_szind_get_maybe_invalid(const extent_t *extent);
-szind_t extent_szind_get(const extent_t *extent);
-size_t extent_usize_get(const extent_t *extent);
-size_t extent_sn_get(const extent_t *extent);
-extent_state_t extent_state_get(const extent_t *extent);
-bool extent_zeroed_get(const extent_t *extent);
-bool extent_committed_get(const extent_t *extent);
-bool extent_slab_get(const extent_t *extent);
-unsigned extent_nfree_get(const extent_t *extent);
-void *extent_base_get(const extent_t *extent);
-void *extent_addr_get(const extent_t *extent);
-size_t extent_size_get(const extent_t *extent);
-size_t extent_esn_get(const extent_t *extent);
-size_t extent_bsize_get(const extent_t *extent);
-void *extent_before_get(const extent_t *extent);
-void *extent_last_get(const extent_t *extent);
-void *extent_past_get(const extent_t *extent);
-arena_slab_data_t *extent_slab_data_get(extent_t *extent);
-const arena_slab_data_t *extent_slab_data_get_const(const extent_t *extent);
-prof_tctx_t *extent_prof_tctx_get(const extent_t *extent);
-void extent_arena_set(extent_t *extent, arena_t *arena);
-void extent_addr_set(extent_t *extent, void *addr);
-void extent_addr_randomize(tsdn_t *tsdn, extent_t *extent, size_t alignment);
-void extent_size_set(extent_t *extent, size_t size);
-void extent_esn_set(extent_t *extent, size_t esn);
-void extent_bsize_set(extent_t *extent, size_t bsize);
-void extent_szind_set(extent_t *extent, szind_t szind);
-void extent_nfree_set(extent_t *extent, unsigned nfree);
-void extent_nfree_inc(extent_t *extent);
-void extent_nfree_dec(extent_t *extent);
-void extent_sn_set(extent_t *extent, size_t sn);
-void extent_state_set(extent_t *extent, extent_state_t state);
-void extent_zeroed_set(extent_t *extent, bool zeroed);
-void extent_committed_set(extent_t *extent, bool committed);
-void extent_slab_set(extent_t *extent, bool slab);
-void extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx);
-void extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
-    bool slab, szind_t szind, size_t sn, extent_state_t state, bool zeroed,
-    bool committed);
-void extent_binit(extent_t *extent, void *addr, size_t size, size_t sn);
-void extent_list_init(extent_list_t *list);
-extent_t *extent_list_first(const extent_list_t *list);
-extent_t *extent_list_last(const extent_list_t *list);
-void extent_list_append(extent_list_t *list, extent_t *extent);
-void extent_list_replace(extent_list_t *list, extent_t *to_remove,
-    extent_t *to_insert);
-void extent_list_remove(extent_list_t *list, extent_t *extent);
-int extent_sn_comp(const extent_t *a, const extent_t *b);
-int extent_esn_comp(const extent_t *a, const extent_t *b);
-int extent_ad_comp(const extent_t *a, const extent_t *b);
-int extent_snad_comp(const extent_t *a, const extent_t *b);
-int extent_esnead_comp(const extent_t *a, const extent_t *b);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_EXTENT_C_))
-JEMALLOC_INLINE arena_t *
+static inline arena_t *
 extent_arena_get(const extent_t *extent) {
 	unsigned arena_ind = (unsigned)((extent->e_bits &
 	    EXTENT_BITS_ARENA_MASK) >> EXTENT_BITS_ARENA_SHIFT);
@@ -75,7 +18,7 @@ extent_arena_get(const extent_t *extent) {
 	return (arena_t *)atomic_load_p(&arenas[arena_ind], ATOMIC_ACQUIRE);
 }
 
-JEMALLOC_INLINE szind_t
+static inline szind_t
 extent_szind_get_maybe_invalid(const extent_t *extent) {
 	szind_t szind = (szind_t)((extent->e_bits & EXTENT_BITS_SZIND_MASK) >>
 	    EXTENT_BITS_SZIND_SHIFT);
@@ -83,120 +26,120 @@ extent_szind_get_maybe_invalid(const extent_t *extent) {
 	return szind;
 }
 
-JEMALLOC_INLINE szind_t
+static inline szind_t
 extent_szind_get(const extent_t *extent) {
 	szind_t szind = extent_szind_get_maybe_invalid(extent);
 	assert(szind < NSIZES); /* Never call when "invalid". */
 	return szind;
 }
 
-JEMALLOC_INLINE size_t
+static inline size_t
 extent_usize_get(const extent_t *extent) {
 	return index2size(extent_szind_get(extent));
 }
 
-JEMALLOC_INLINE size_t
+static inline size_t
 extent_sn_get(const extent_t *extent) {
 	return (size_t)((extent->e_bits & EXTENT_BITS_SN_MASK) >>
 	    EXTENT_BITS_SN_SHIFT);
 }
 
-JEMALLOC_INLINE extent_state_t
+static inline extent_state_t
 extent_state_get(const extent_t *extent) {
 	return (extent_state_t)((extent->e_bits & EXTENT_BITS_STATE_MASK) >>
 	    EXTENT_BITS_STATE_SHIFT);
 }
 
-JEMALLOC_INLINE bool
+static inline bool
 extent_zeroed_get(const extent_t *extent) {
 	return (bool)((extent->e_bits & EXTENT_BITS_ZEROED_MASK) >>
 	    EXTENT_BITS_ZEROED_SHIFT);
 }
 
-JEMALLOC_INLINE bool
+static inline bool
 extent_committed_get(const extent_t *extent) {
 	return (bool)((extent->e_bits & EXTENT_BITS_COMMITTED_MASK) >>
 	    EXTENT_BITS_COMMITTED_SHIFT);
 }
 
-JEMALLOC_INLINE bool
+static inline bool
 extent_slab_get(const extent_t *extent) {
 	return (bool)((extent->e_bits & EXTENT_BITS_SLAB_MASK) >>
 	    EXTENT_BITS_SLAB_SHIFT);
 }
 
-JEMALLOC_INLINE unsigned
+static inline unsigned
 extent_nfree_get(const extent_t *extent) {
 	assert(extent_slab_get(extent));
 	return (unsigned)((extent->e_bits & EXTENT_BITS_NFREE_MASK) >>
 	    EXTENT_BITS_NFREE_SHIFT);
 }
 
-JEMALLOC_INLINE void *
+static inline void *
 extent_base_get(const extent_t *extent) {
 	assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
 	    !extent_slab_get(extent));
 	return PAGE_ADDR2BASE(extent->e_addr);
 }
 
-JEMALLOC_INLINE void *
+static inline void *
 extent_addr_get(const extent_t *extent) {
 	assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
 	    !extent_slab_get(extent));
 	return extent->e_addr;
 }
 
-JEMALLOC_INLINE size_t
+static inline size_t
 extent_size_get(const extent_t *extent) {
 	return (extent->e_size_esn & EXTENT_SIZE_MASK);
 }
 
-JEMALLOC_INLINE size_t
+static inline size_t
 extent_esn_get(const extent_t *extent) {
 	return (extent->e_size_esn & EXTENT_ESN_MASK);
 }
 
-JEMALLOC_INLINE size_t
+static inline size_t
 extent_bsize_get(const extent_t *extent) {
 	return extent->e_bsize;
 }
 
-JEMALLOC_INLINE void *
+static inline void *
 extent_before_get(const extent_t *extent) {
 	return (void *)((uintptr_t)extent_base_get(extent) - PAGE);
 }
 
-JEMALLOC_INLINE void *
+static inline void *
 extent_last_get(const extent_t *extent) {
 	return (void *)((uintptr_t)extent_base_get(extent) +
 	    extent_size_get(extent) - PAGE);
 }
 
-JEMALLOC_INLINE void *
+static inline void *
 extent_past_get(const extent_t *extent) {
 	return (void *)((uintptr_t)extent_base_get(extent) +
 	    extent_size_get(extent));
 }
 
-JEMALLOC_INLINE arena_slab_data_t *
+static inline arena_slab_data_t *
 extent_slab_data_get(extent_t *extent) {
 	assert(extent_slab_get(extent));
 	return &extent->e_slab_data;
 }
 
-JEMALLOC_INLINE const arena_slab_data_t *
+static inline const arena_slab_data_t *
 extent_slab_data_get_const(const extent_t *extent) {
 	assert(extent_slab_get(extent));
 	return &extent->e_slab_data;
 }
 
-JEMALLOC_INLINE prof_tctx_t *
+static inline prof_tctx_t *
 extent_prof_tctx_get(const extent_t *extent) {
 	return (prof_tctx_t *)atomic_load_p(&extent->e_prof_tctx,
 	    ATOMIC_ACQUIRE);
 }
 
-JEMALLOC_INLINE void
+static inline void
 extent_arena_set(extent_t *extent, arena_t *arena) {
 	unsigned arena_ind = (arena != NULL) ? arena_ind_get(arena) : ((1U <<
 	    MALLOCX_ARENA_BITS) - 1);
@@ -204,12 +147,12 @@ extent_arena_set(extent_t *extent, arena_t *arena) {
 	    ((uint64_t)arena_ind << EXTENT_BITS_ARENA_SHIFT);
 }
 
-JEMALLOC_INLINE void
+static inline void
 extent_addr_set(extent_t *extent, void *addr) {
 	extent->e_addr = addr;
 }
 
-JEMALLOC_INLINE void
+static inline void
 extent_addr_randomize(tsdn_t *tsdn, extent_t *extent, size_t alignment) {
 	assert(extent_base_get(extent) == extent_addr_get(extent));
 
@@ -228,85 +171,85 @@ extent_addr_randomize(tsdn_t *tsdn, extent_t *extent, size_t alignment) {
 	}
 }
 
-JEMALLOC_INLINE void
+static inline void
 extent_size_set(extent_t *extent, size_t size) {
 	assert((size & ~EXTENT_SIZE_MASK) == 0);
 	extent->e_size_esn = size | (extent->e_size_esn & ~EXTENT_SIZE_MASK);
 }
 
-JEMALLOC_INLINE void
+static inline void
 extent_esn_set(extent_t *extent, size_t esn) {
 	extent->e_size_esn = (extent->e_size_esn & ~EXTENT_ESN_MASK) | (esn &
 	    EXTENT_ESN_MASK);
 }
 
-JEMALLOC_INLINE void
+static inline void
 extent_bsize_set(extent_t *extent, size_t bsize) {
 	extent->e_bsize = bsize;
 }
 
-JEMALLOC_INLINE void
+static inline void
 extent_szind_set(extent_t *extent, szind_t szind) {
 	assert(szind <= NSIZES); /* NSIZES means "invalid". */
 	extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SZIND_MASK) |
 	    ((uint64_t)szind << EXTENT_BITS_SZIND_SHIFT);
 }
 
-JEMALLOC_INLINE void
+static inline void
 extent_nfree_set(extent_t *extent, unsigned nfree) {
 	assert(extent_slab_get(extent));
 	extent->e_bits = (extent->e_bits & ~EXTENT_BITS_NFREE_MASK) |
 	    ((uint64_t)nfree << EXTENT_BITS_NFREE_SHIFT);
 }
 
-JEMALLOC_INLINE void
+static inline void
 extent_nfree_inc(extent_t *extent) {
 	assert(extent_slab_get(extent));
 	extent->e_bits += ((uint64_t)1U << EXTENT_BITS_NFREE_SHIFT);
 }
 
-JEMALLOC_INLINE void
+static inline void
 extent_nfree_dec(extent_t *extent) {
 	assert(extent_slab_get(extent));
 	extent->e_bits -= ((uint64_t)1U << EXTENT_BITS_NFREE_SHIFT);
 }
 
-JEMALLOC_INLINE void
+static inline void
 extent_sn_set(extent_t *extent, size_t sn) {
 	extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SN_MASK) |
 	    ((uint64_t)sn << EXTENT_BITS_SN_SHIFT);
 }
 
-JEMALLOC_INLINE void
+static inline void
 extent_state_set(extent_t *extent, extent_state_t state) {
 	extent->e_bits = (extent->e_bits & ~EXTENT_BITS_STATE_MASK) |
 	    ((uint64_t)state << EXTENT_BITS_STATE_SHIFT);
 }
 
-JEMALLOC_INLINE void
+static inline void
 extent_zeroed_set(extent_t *extent, bool zeroed) {
 	extent->e_bits = (extent->e_bits & ~EXTENT_BITS_ZEROED_MASK) |
 	    ((uint64_t)zeroed << EXTENT_BITS_ZEROED_SHIFT);
 }
 
-JEMALLOC_INLINE void
+static inline void
 extent_committed_set(extent_t *extent, bool committed) {
 	extent->e_bits = (extent->e_bits & ~EXTENT_BITS_COMMITTED_MASK) |
 	    ((uint64_t)committed << EXTENT_BITS_COMMITTED_SHIFT);
 }
 
-JEMALLOC_INLINE void
+static inline void
 extent_slab_set(extent_t *extent, bool slab) {
 	extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SLAB_MASK) |
 	    ((uint64_t)slab << EXTENT_BITS_SLAB_SHIFT);
}
 
-JEMALLOC_INLINE void
+static inline void
 extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx) {
 	atomic_store_p(&extent->e_prof_tctx, tctx, ATOMIC_RELEASE);
 }
 
-JEMALLOC_INLINE void
+static inline void
 extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
     bool slab, szind_t szind, size_t sn, extent_state_t state, bool zeroed,
     bool committed) {
@@ -327,7 +270,7 @@ extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
 	}
 }
 
-JEMALLOC_INLINE void
+static inline void
 extent_binit(extent_t *extent, void *addr, size_t bsize, size_t sn) {
 	extent_arena_set(extent, NULL);
 	extent_addr_set(extent, addr);
@@ -340,39 +283,39 @@ extent_binit(extent_t *extent, void *addr, size_t bsize, size_t sn) {
 	extent_committed_set(extent, true);
 }
 
-JEMALLOC_INLINE void
+static inline void
 extent_list_init(extent_list_t *list) {
 	ql_new(list);
 }
 
-JEMALLOC_INLINE extent_t *
+static inline extent_t *
 extent_list_first(const extent_list_t *list) {
 	return ql_first(list);
 }
 
-JEMALLOC_INLINE extent_t *
+static inline extent_t *
 extent_list_last(const extent_list_t *list) {
 	return ql_last(list, ql_link);
 }
 
-JEMALLOC_INLINE void
+static inline void
 extent_list_append(extent_list_t *list, extent_t *extent) {
 	ql_tail_insert(list, extent, ql_link);
 }
 
-JEMALLOC_INLINE void
+static inline void
 extent_list_replace(extent_list_t *list, extent_t *to_remove,
     extent_t *to_insert) {
 	ql_after_insert(to_remove, to_insert, ql_link);
 	ql_remove(list, to_remove, ql_link);
 }
 
-JEMALLOC_INLINE void
+static inline void
 extent_list_remove(extent_list_t *list, extent_t *extent) {
 	ql_remove(list, extent, ql_link);
 }
 
-JEMALLOC_INLINE int
+static inline int
 extent_sn_comp(const extent_t *a, const extent_t *b) {
 	size_t a_sn = extent_sn_get(a);
 	size_t b_sn = extent_sn_get(b);
@@ -380,7 +323,7 @@ extent_sn_comp(const extent_t *a, const extent_t *b) {
 	return (a_sn > b_sn) - (a_sn < b_sn);
 }
 
-JEMALLOC_INLINE int
+static inline int
 extent_esn_comp(const extent_t *a, const extent_t *b) {
 	size_t a_esn = extent_esn_get(a);
 	size_t b_esn = extent_esn_get(b);
@@ -388,7 +331,7 @@ extent_esn_comp(const extent_t *a, const extent_t *b) {
 	return (a_esn > b_esn) - (a_esn < b_esn);
 }
 
-JEMALLOC_INLINE int
+static inline int
 extent_ad_comp(const extent_t *a, const extent_t *b) {
 	uintptr_t a_addr = (uintptr_t)extent_addr_get(a);
 	uintptr_t b_addr = (uintptr_t)extent_addr_get(b);
@@ -396,7 +339,7 @@ extent_ad_comp(const extent_t *a, const extent_t *b) {
 	return (a_addr > b_addr) - (a_addr < b_addr);
 }
 
-JEMALLOC_INLINE int
+static inline int
 extent_ead_comp(const extent_t *a, const extent_t *b) {
 	uintptr_t a_eaddr = (uintptr_t)a;
 	uintptr_t b_eaddr = (uintptr_t)b;
@@ -404,7 +347,7 @@ extent_ead_comp(const extent_t *a, const extent_t *b) {
 	return (a_eaddr > b_eaddr) - (a_eaddr < b_eaddr);
 }
 
-JEMALLOC_INLINE int
+static inline int
 extent_snad_comp(const extent_t *a, const extent_t *b) {
 	int ret;
 
@@ -417,7 +360,7 @@ extent_snad_comp(const extent_t *a, const extent_t *b) {
 	return ret;
 }
 
-JEMALLOC_INLINE int
+static inline int
 extent_esnead_comp(const extent_t *a, const extent_t *b) {
 	int ret;
 
@@ -429,6 +372,5 @@ extent_esnead_comp(const extent_t *a, const extent_t *b) {
 	ret = extent_ead_comp(a, b);
 	return ret;
 }
-#endif
 
 #endif /* JEMALLOC_INTERNAL_EXTENT_INLINES_H */
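The extent getters and setters above all follow one packing discipline: several small fields share the 64-bit e_bits word, each owning a MASK/SHIFT pair. A read is mask-then-shift, a write is clear-then-or, and a counter field can be adjusted in place by adding or subtracting 1 << SHIFT, as extent_nfree_inc()/extent_nfree_dec() do. A sketch with an invented 6-bit field — the real EXTENT_BITS_* constants are defined alongside the extent_t layout, not here:

    #include <stdint.h>

    #define FIELD_SHIFT 10                               /* hypothetical */
    #define FIELD_MASK  (UINT64_C(0x3f) << FIELD_SHIFT)  /* 6 bits at 10..15 */

    static inline unsigned
    field_get(uint64_t bits) {
        return (unsigned)((bits & FIELD_MASK) >> FIELD_SHIFT);
    }

    static inline uint64_t
    field_set(uint64_t bits, unsigned v) {
        return (bits & ~FIELD_MASK) | ((uint64_t)v << FIELD_SHIFT);
    }

    static inline uint64_t
    field_inc(uint64_t bits) {
        /* Safe only while the field cannot overflow into its neighbor. */
        return bits + (UINT64_C(1) << FIELD_SHIFT);
    }

The comparators at the end of the file use the (a > b) - (a < b) idiom, which yields -1, 0, or 1 with no branches and no subtraction overflow.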
include/jemalloc/internal/hash_inlines.h
@@ -9,30 +9,19 @@
  * details.
  */
 
-#ifndef JEMALLOC_ENABLE_INLINE
-uint32_t hash_x86_32(const void *key, int len, uint32_t seed);
-void hash_x86_128(const void *key, const int len, uint32_t seed,
-    uint64_t r_out[2]);
-void hash_x64_128(const void *key, const int len, const uint32_t seed,
-    uint64_t r_out[2]);
-void hash(const void *key, size_t len, const uint32_t seed,
-    size_t r_hash[2]);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_HASH_C_))
 /******************************************************************************/
 /* Internal implementation. */
-JEMALLOC_INLINE uint32_t
+static inline uint32_t
 hash_rotl_32(uint32_t x, int8_t r) {
 	return ((x << r) | (x >> (32 - r)));
 }
 
-JEMALLOC_INLINE uint64_t
+static inline uint64_t
 hash_rotl_64(uint64_t x, int8_t r) {
 	return ((x << r) | (x >> (64 - r)));
 }
 
-JEMALLOC_INLINE uint32_t
+static inline uint32_t
 hash_get_block_32(const uint32_t *p, int i) {
 	/* Handle unaligned read. */
 	if (unlikely((uintptr_t)p & (sizeof(uint32_t)-1)) != 0) {
@@ -45,7 +34,7 @@ hash_get_block_32(const uint32_t *p, int i) {
 	return p[i];
 }
 
-JEMALLOC_INLINE uint64_t
+static inline uint64_t
 hash_get_block_64(const uint64_t *p, int i) {
 	/* Handle unaligned read. */
 	if (unlikely((uintptr_t)p & (sizeof(uint64_t)-1)) != 0) {
@@ -58,7 +47,7 @@ hash_get_block_64(const uint64_t *p, int i) {
 	return p[i];
 }
 
-JEMALLOC_INLINE uint32_t
+static inline uint32_t
 hash_fmix_32(uint32_t h) {
 	h ^= h >> 16;
 	h *= 0x85ebca6b;
@@ -69,7 +58,7 @@ hash_fmix_32(uint32_t h) {
 	return h;
 }
 
-JEMALLOC_INLINE uint64_t
+static inline uint64_t
 hash_fmix_64(uint64_t k) {
 	k ^= k >> 33;
 	k *= KQU(0xff51afd7ed558ccd);
@@ -80,7 +69,7 @@ hash_fmix_64(uint64_t k) {
 	return k;
 }
 
-JEMALLOC_INLINE uint32_t
+static inline uint32_t
 hash_x86_32(const void *key, int len, uint32_t seed) {
 	const uint8_t *data = (const uint8_t *) key;
 	const int nblocks = len / 4;
@@ -130,7 +119,7 @@ hash_x86_32(const void *key, int len, uint32_t seed) {
 	return h1;
 }
 
-UNUSED JEMALLOC_INLINE void
+UNUSED static inline void
 hash_x86_128(const void *key, const int len, uint32_t seed,
     uint64_t r_out[2]) {
 	const uint8_t * data = (const uint8_t *) key;
@@ -231,7 +220,7 @@ hash_x86_128(const void *key, const int len, uint32_t seed,
 	r_out[1] = (((uint64_t) h4) << 32) | h3;
 }
 
-UNUSED JEMALLOC_INLINE void
+UNUSED static inline void
 hash_x64_128(const void *key, const int len, const uint32_t seed,
     uint64_t r_out[2]) {
 	const uint8_t *data = (const uint8_t *) key;
@@ -310,7 +299,7 @@ hash_x64_128(const void *key, const int len, const uint32_t seed,
 
 /******************************************************************************/
 /* API. */
-JEMALLOC_INLINE void
+static inline void
 hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2]) {
 	assert(len <= INT_MAX); /* Unfortunate implementation limitation. */
 
@@ -325,6 +314,5 @@ hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2]) {
 	}
 #endif
 }
-#endif
 
 #endif /* JEMALLOC_INTERNAL_HASH_INLINES_H */
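hash_rotl_32()/hash_rotl_64() are the MurmurHash3 rotate primitives. As written, x >> (32 - r) is well defined only for r in [1, 31], which holds for every rotation constant the Murmur code uses; a variant that is defined for all counts masks r first (a standalone sketch, not jemalloc code):

    #include <stdint.h>

    static inline uint32_t
    rotl32_any(uint32_t x, unsigned r) {
        r &= 31;                                  /* defined even for r == 0 */
        return (x << r) | (x >> ((32 - r) & 31));
    }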
include/jemalloc/internal/jemalloc_internal_inlines_a.h
@@ -5,42 +5,6 @@
 #include "jemalloc/internal/bit_util.h"
 #include "jemalloc/internal/jemalloc_internal_types.h"
 
-#ifndef JEMALLOC_ENABLE_INLINE
-pszind_t psz2ind(size_t psz);
-size_t pind2sz_compute(pszind_t pind);
-size_t pind2sz_lookup(pszind_t pind);
-size_t pind2sz(pszind_t pind);
-size_t psz2u(size_t psz);
-szind_t size2index_compute(size_t size);
-szind_t size2index_lookup(size_t size);
-szind_t size2index(size_t size);
-size_t index2size_compute(szind_t index);
-size_t index2size_lookup(szind_t index);
-size_t index2size(szind_t index);
-size_t s2u_compute(size_t size);
-size_t s2u_lookup(size_t size);
-size_t s2u(size_t size);
-size_t sa2u(size_t size, size_t alignment);
-arena_t *arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal);
-arena_t *arena_choose(tsd_t *tsd, arena_t *arena);
-arena_t *arena_ichoose(tsd_t *tsd, arena_t *arena);
-bool arena_is_auto(arena_t *arena);
-arena_tdata_t *arena_tdata_get(tsd_t *tsd, unsigned ind,
-    bool refresh_if_missing);
-arena_t *arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing);
-ticker_t *decay_ticker_get(tsd_t *tsd, unsigned ind);
-bool tcache_available(tsd_t *tsd);
-tcache_bin_t *tcache_small_bin_get(tcache_t *tcache, szind_t binind);
-tcache_bin_t *tcache_large_bin_get(tcache_t *tcache, szind_t binind);
-tcache_t *tcache_get(tsd_t *tsd);
-malloc_cpuid_t malloc_getcpu(void);
-unsigned percpu_arena_choose(void);
-unsigned percpu_arena_ind_limit(void);
-void pre_reentrancy(tsd_t *tsd);
-void post_reentrancy(tsd_t *tsd);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
 JEMALLOC_ALWAYS_INLINE pszind_t
 psz2ind(size_t psz) {
 	if (unlikely(psz > LARGE_MAXCLASS)) {
@@ -64,7 +28,7 @@ psz2ind(size_t psz) {
 	}
 }
 
-JEMALLOC_INLINE size_t
+static inline size_t
 pind2sz_compute(pszind_t pind) {
 	if (unlikely(pind == NPSIZES)) {
 		return LARGE_MAXCLASS + PAGE;
@@ -86,20 +50,20 @@ pind2sz_compute(pszind_t pind) {
 	}
 }
 
-JEMALLOC_INLINE size_t
+static inline size_t
 pind2sz_lookup(pszind_t pind) {
 	size_t ret = (size_t)pind2sz_tab[pind];
 	assert(ret == pind2sz_compute(pind));
 	return ret;
 }
 
-JEMALLOC_INLINE size_t
+static inline size_t
 pind2sz(pszind_t pind) {
 	assert(pind < NPSIZES+1);
 	return pind2sz_lookup(pind);
 }
 
-JEMALLOC_INLINE size_t
+static inline size_t
 psz2u(size_t psz) {
 	if (unlikely(psz > LARGE_MAXCLASS)) {
 		return LARGE_MAXCLASS + PAGE;
@@ -115,7 +79,7 @@ psz2u(size_t psz) {
 	}
 }
 
-JEMALLOC_INLINE szind_t
+static inline szind_t
 size2index_compute(size_t size) {
 	if (unlikely(size > LARGE_MAXCLASS)) {
 		return NSIZES;
@@ -164,7 +128,7 @@ size2index(size_t size) {
 	return size2index_compute(size);
 }
 
-JEMALLOC_INLINE size_t
+static inline size_t
 index2size_compute(szind_t index) {
 #if (NTBINS > 0)
 	if (index < NTBINS) {
@@ -355,7 +319,7 @@ percpu_arena_ind_limit(void) {
 	}
 }
 
-JEMALLOC_INLINE arena_tdata_t *
+static inline arena_tdata_t *
 arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing) {
 	arena_tdata_t *tdata;
 	arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
@@ -380,7 +344,7 @@ arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing) {
 	return arena_tdata_get_hard(tsd, ind);
 }
 
-JEMALLOC_INLINE arena_t *
+static inline arena_t *
 arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing) {
 	arena_t *ret;
 
@@ -396,7 +360,7 @@ arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing) {
 	return ret;
 }
 
-JEMALLOC_INLINE ticker_t *
+static inline ticker_t *
 decay_ticker_get(tsd_t *tsd, unsigned ind) {
 	arena_tdata_t *tdata;
 
@@ -446,7 +410,7 @@ tcache_get(tsd_t *tsd) {
 	return tsd_tcachep_get(tsd);
 }
 
-JEMALLOC_INLINE void
+static inline void
 pre_reentrancy(tsd_t *tsd) {
 	bool fast = tsd_fast(tsd);
 	++*tsd_reentrancy_levelp_get(tsd);
@@ -457,7 +421,7 @@ pre_reentrancy(tsd_t *tsd) {
 	}
 }
 
-JEMALLOC_INLINE void
+static inline void
 post_reentrancy(tsd_t *tsd) {
 	int8_t *reentrancy_level = tsd_reentrancy_levelp_get(tsd);
 	assert(*reentrancy_level > 0);
@@ -466,6 +430,4 @@ post_reentrancy(tsd_t *tsd) {
 	}
 }
 
-#endif
-
 #endif /* JEMALLOC_INTERNAL_INLINES_A_H */
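pre_reentrancy()/post_reentrancy() bracket code that may call back into the allocator by bumping a per-thread nesting level; while the level is nonzero, allocation takes a more conservative path (the tsd fast/slow toggling visible above is elided here). A minimal analogue with a plain thread-local in place of jemalloc's tsd machinery:

    #include <assert.h>

    static _Thread_local int reentrancy_level;

    static inline void
    pre_reentrancy_sketch(void) {
        ++reentrancy_level;
    }

    static inline void
    post_reentrancy_sketch(void) {
        assert(reentrancy_level > 0);
        --reentrancy_level;
    }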
include/jemalloc/internal/jemalloc_internal_inlines_b.h
@@ -1,13 +1,8 @@
 #ifndef JEMALLOC_INTERNAL_INLINES_B_H
 #define JEMALLOC_INTERNAL_INLINES_B_H
 
-#ifndef JEMALLOC_ENABLE_INLINE
-extent_t *iealloc(tsdn_t *tsdn, const void *ptr);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
 /* Choose an arena based on a per-thread value. */
-JEMALLOC_INLINE arena_t *
+static inline arena_t *
 arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) {
 	arena_t *ret;
 
@@ -60,17 +55,17 @@ arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) {
 	return ret;
 }
 
-JEMALLOC_INLINE arena_t *
+static inline arena_t *
 arena_choose(tsd_t *tsd, arena_t *arena) {
 	return arena_choose_impl(tsd, arena, false);
 }
 
-JEMALLOC_INLINE arena_t *
+static inline arena_t *
 arena_ichoose(tsd_t *tsd, arena_t *arena) {
 	return arena_choose_impl(tsd, arena, true);
 }
 
-JEMALLOC_INLINE bool
+static inline bool
 arena_is_auto(arena_t *arena) {
 	assert(narenas_auto > 0);
 	return (arena_ind_get(arena) < narenas_auto);
@@ -84,6 +79,5 @@ iealloc(tsdn_t *tsdn, const void *ptr) {
 	return rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
 	    (uintptr_t)ptr, true);
 }
-#endif
 
 #endif /* JEMALLOC_INTERNAL_INLINES_B_H */
include/jemalloc/internal/jemalloc_internal_inlines_c.h
@@ -3,36 +3,6 @@
 
 #include "jemalloc/internal/jemalloc_internal_types.h"
 
-#ifndef JEMALLOC_ENABLE_INLINE
-arena_t *iaalloc(tsdn_t *tsdn, const void *ptr);
-size_t isalloc(tsdn_t *tsdn, const void *ptr);
-void *iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero,
-    tcache_t *tcache, bool is_internal, arena_t *arena, bool slow_path);
-void *ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero,
-    bool slow_path);
-void *ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
-    tcache_t *tcache, bool is_internal, arena_t *arena);
-void *ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
-    tcache_t *tcache, arena_t *arena);
-void *ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero);
-size_t ivsalloc(tsdn_t *tsdn, const void *ptr);
-void idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
-    alloc_ctx_t *alloc_ctx, bool is_internal, bool slow_path);
-void idalloc(tsd_t *tsd, void *ptr);
-void isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
-    alloc_ctx_t *alloc_ctx, bool slow_path);
-void *iralloct_realign(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
-    size_t extra, size_t alignment, bool zero, tcache_t *tcache,
-    arena_t *arena);
-void *iralloct(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
-    size_t alignment, bool zero, tcache_t *tcache, arena_t *arena);
-void *iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
-    size_t alignment, bool zero);
-bool ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra,
-    size_t alignment, bool zero);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
 JEMALLOC_ALWAYS_INLINE arena_t *
 iaalloc(tsdn_t *tsdn, const void *ptr) {
 	assert(ptr != NULL);
@@ -214,6 +184,5 @@ ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra,
 
 	return arena_ralloc_no_move(tsdn, ptr, oldsize, size, extra, zero);
 }
-#endif
 
 #endif /* JEMALLOC_INTERNAL_INLINES_C_H */
include/jemalloc/internal/jemalloc_internal_macros.h
@@ -1,37 +1,13 @@
 #ifndef JEMALLOC_INTERNAL_MACROS_H
 #define JEMALLOC_INTERNAL_MACROS_H
 
-/*
- * JEMALLOC_ALWAYS_INLINE and JEMALLOC_INLINE are used within header files for
- * functions that are static inline functions if inlining is enabled, and
- * single-definition library-private functions if inlining is disabled.
- *
- * JEMALLOC_ALWAYS_INLINE_C and JEMALLOC_INLINE_C are for use in .c files, in
- * which case the denoted functions are always static, regardless of whether
- * inlining is enabled.
- */
 #if defined(JEMALLOC_DEBUG) || defined(JEMALLOC_CODE_COVERAGE)
-/* Disable inlining to make debugging/profiling easier. */
-# define JEMALLOC_ALWAYS_INLINE
-# define JEMALLOC_ALWAYS_INLINE_C static
-# define JEMALLOC_INLINE
-# define JEMALLOC_INLINE_C static
+# define JEMALLOC_ALWAYS_INLINE static inline
 #else
-# define JEMALLOC_ENABLE_INLINE
-# ifdef JEMALLOC_HAVE_ATTR
-#  define JEMALLOC_ALWAYS_INLINE \
-	static inline JEMALLOC_ATTR(unused) JEMALLOC_ATTR(always_inline)
-#  define JEMALLOC_ALWAYS_INLINE_C \
-	static inline JEMALLOC_ATTR(always_inline)
-# else
-#  define JEMALLOC_ALWAYS_INLINE static inline
-#  define JEMALLOC_ALWAYS_INLINE_C static inline
-# endif
-# define JEMALLOC_INLINE static inline
-# define JEMALLOC_INLINE_C static inline
-# ifdef _MSC_VER
-#  define inline _inline
-# endif
+# define JEMALLOC_ALWAYS_INLINE JEMALLOC_ATTR(always_inline) static inline
+#endif
+#ifdef _MSC_VER
+# define inline _inline
 #endif
 
 #ifdef JEMALLOC_CC_SILENCE
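After this file's change, only two spellings remain: plain static inline for ordinary helpers, and JEMALLOC_ALWAYS_INLINE for hot paths — an always_inline attribute in optimized builds, degrading to plain static inline under JEMALLOC_DEBUG or code coverage so the functions stay steppable and instrumentable. A freestanding illustration of the same pattern; it keys on NDEBUG rather than jemalloc's configure-time symbols, and assumes a GCC/Clang-style compiler (JEMALLOC_ATTR(a) wraps __attribute__((a))):

    #ifdef NDEBUG
    # define ALWAYS_INLINE __attribute__((always_inline)) static inline
    #else
    # define ALWAYS_INLINE static inline   /* debuggable: compiler decides */
    #endif

    ALWAYS_INLINE int
    add_one(int x) {
        return x + 1;
    }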
@ -5,31 +5,19 @@
|
|||||||
|
|
||||||
void malloc_mutex_lock_slow(malloc_mutex_t *mutex);
|
void malloc_mutex_lock_slow(malloc_mutex_t *mutex);
|
||||||
|
|
||||||
#ifndef JEMALLOC_ENABLE_INLINE
|
static inline void
|
||||||
void malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex);
|
|
||||||
bool malloc_mutex_trylock(malloc_mutex_t *mutex);
|
|
||||||
void malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex);
|
|
||||||
void malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex);
|
|
||||||
void malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex);
|
|
||||||
void malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data,
|
|
||||||
malloc_mutex_t *mutex);
|
|
||||||
void malloc_mutex_prof_merge(mutex_prof_data_t *sum, mutex_prof_data_t *data);
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_))
|
|
||||||
JEMALLOC_INLINE void
|
|
||||||
malloc_mutex_lock_final(malloc_mutex_t *mutex) {
|
malloc_mutex_lock_final(malloc_mutex_t *mutex) {
|
||||||
MALLOC_MUTEX_LOCK(mutex);
|
MALLOC_MUTEX_LOCK(mutex);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Trylock: return false if the lock is successfully acquired. */
|
/* Trylock: return false if the lock is successfully acquired. */
|
||||||
JEMALLOC_INLINE bool
|
static inline bool
|
||||||
malloc_mutex_trylock(malloc_mutex_t *mutex) {
|
malloc_mutex_trylock(malloc_mutex_t *mutex) {
|
||||||
return MALLOC_MUTEX_TRYLOCK(mutex);
|
return MALLOC_MUTEX_TRYLOCK(mutex);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Aggregate lock prof data. */
|
/* Aggregate lock prof data. */
|
||||||
JEMALLOC_INLINE void
|
static inline void
|
||||||
malloc_mutex_prof_merge(mutex_prof_data_t *sum, mutex_prof_data_t *data) {
|
malloc_mutex_prof_merge(mutex_prof_data_t *sum, mutex_prof_data_t *data) {
|
||||||
nstime_add(&sum->tot_wait_time, &data->tot_wait_time);
|
nstime_add(&sum->tot_wait_time, &data->tot_wait_time);
|
||||||
if (nstime_compare(&sum->max_wait_time, &data->max_wait_time) < 0) {
|
if (nstime_compare(&sum->max_wait_time, &data->max_wait_time) < 0) {
|
||||||
@ -52,7 +40,7 @@ malloc_mutex_prof_merge(mutex_prof_data_t *sum, mutex_prof_data_t *data) {
|
|||||||
sum->n_lock_ops += data->n_lock_ops;
|
sum->n_lock_ops += data->n_lock_ops;
|
||||||
}
|
}
|
||||||
|
|
||||||
JEMALLOC_INLINE void
|
static inline void
|
||||||
malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
|
malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
|
||||||
witness_assert_not_owner(tsdn, &mutex->witness);
|
witness_assert_not_owner(tsdn, &mutex->witness);
|
||||||
if (isthreaded) {
|
if (isthreaded) {
|
||||||
@ -72,7 +60,7 @@ malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
|
|||||||
witness_lock(tsdn, &mutex->witness);
|
witness_lock(tsdn, &mutex->witness);
|
||||||
}
|
}
|
||||||
|
|
||||||
JEMALLOC_INLINE void
|
static inline void
|
||||||
malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
|
malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
|
||||||
witness_unlock(tsdn, &mutex->witness);
|
witness_unlock(tsdn, &mutex->witness);
|
||||||
if (isthreaded) {
|
if (isthreaded) {
|
||||||
@ -80,18 +68,18 @@ malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
JEMALLOC_INLINE void
|
static inline void
|
||||||
malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
|
malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
|
||||||
witness_assert_owner(tsdn, &mutex->witness);
|
witness_assert_owner(tsdn, &mutex->witness);
|
||||||
}
|
}
|
||||||
|
|
||||||
JEMALLOC_INLINE void
|
static inline void
|
||||||
malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
|
malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
|
||||||
witness_assert_not_owner(tsdn, &mutex->witness);
|
witness_assert_not_owner(tsdn, &mutex->witness);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Copy the prof data from mutex for processing. */
|
/* Copy the prof data from mutex for processing. */
|
||||||
JEMALLOC_INLINE void
|
static inline void
|
||||||
malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data,
|
malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data,
|
||||||
malloc_mutex_t *mutex) {
|
malloc_mutex_t *mutex) {
|
||||||
mutex_prof_data_t *source = &mutex->prof_data;
|
mutex_prof_data_t *source = &mutex->prof_data;
|
||||||
@ -108,6 +96,4 @@ malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data,
|
|||||||
atomic_store_u32(&data->n_waiting_thds, 0, ATOMIC_RELAXED);
|
atomic_store_u32(&data->n_waiting_thds, 0, ATOMIC_RELAXED);
|
||||||
}
|
}
|
||||||
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#endif /* JEMALLOC_INTERNAL_MUTEX_INLINES_H */
|
#endif /* JEMALLOC_INTERNAL_MUTEX_INLINES_H */
|
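The trylock comment kept above encodes the same inverted convention as pthreads: false (zero) means the lock was acquired. A self-contained analogue of that convention, for illustration only:

#include <pthread.h>
#include <stdio.h>

int
main(void) {
    pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;

    /* As with malloc_mutex_trylock(), zero/false means "acquired". */
    if (pthread_mutex_trylock(&mtx) == 0) {
        printf("acquired\n");
        pthread_mutex_unlock(&mtx);
    }
    return 0;
}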
@ -4,23 +4,6 @@
 #include "jemalloc/internal/atomic.h"
 #include "jemalloc/internal/bit_util.h"
 
-#ifndef JEMALLOC_ENABLE_INLINE
-uint32_t prng_state_next_u32(uint32_t state);
-uint64_t prng_state_next_u64(uint64_t state);
-size_t prng_state_next_zu(size_t state);
-
-uint32_t prng_lg_range_u32(atomic_u32_t *state, unsigned lg_range,
-    bool atomic);
-uint64_t prng_lg_range_u64(uint64_t *state, unsigned lg_range);
-size_t prng_lg_range_zu(atomic_zu_t *state, unsigned lg_range, bool atomic);
-
-uint32_t prng_range_u32(atomic_u32_t *state, uint32_t range,
-    bool atomic);
-uint64_t prng_range_u64(uint64_t *state, uint64_t range);
-size_t prng_range_zu(atomic_zu_t *state, size_t range, bool atomic);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PRNG_C_))
 JEMALLOC_ALWAYS_INLINE uint32_t
 prng_state_next_u32(uint32_t state) {
     return (state * PRNG_A_32) + PRNG_C_32;
@ -156,6 +139,5 @@ prng_range_zu(atomic_zu_t *state, size_t range, bool atomic) {
 
     return ret;
 }
-#endif
 
 #endif /* JEMALLOC_INTERNAL_PRNG_INLINES_H */
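The prng_state_next_u32() body retained above is a textbook linear congruential generator: state' = state * A + C mod 2^32, with the modulus supplied for free by unsigned wraparound. A runnable sketch with stand-in constants (the A/C values below are the classic POSIX example constants, not necessarily jemalloc's PRNG_A_32/PRNG_C_32):

#include <stdint.h>
#include <stdio.h>

#define A UINT32_C(1103515245) /* stand-in multiplier */
#define C UINT32_C(12345)      /* stand-in increment */

static inline uint32_t
state_next_u32(uint32_t state) {
    return (state * A) + C; /* wraps mod 2^32 by unsigned arithmetic */
}

int
main(void) {
    uint32_t state = 42;
    for (int i = 0; i < 4; i++) {
        state = state_next_u32(state);
        printf("%u\n", state);
    }
    return 0;
}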
@ -1,14 +1,7 @@
 #ifndef JEMALLOC_INTERNAL_PROF_INLINES_A_H
 #define JEMALLOC_INTERNAL_PROF_INLINES_A_H
 
-#ifndef JEMALLOC_ENABLE_INLINE
-bool prof_accum_add(tsdn_t *tsdn, prof_accum_t *prof_accum,
-    uint64_t accumbytes);
-void prof_accum_cancel(tsdn_t *tsdn, prof_accum_t *prof_accum, size_t usize);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PROF_C_))
-JEMALLOC_INLINE bool
+static inline bool
 prof_accum_add(tsdn_t *tsdn, prof_accum_t *prof_accum, uint64_t accumbytes) {
     cassert(config_prof);
 
@ -46,7 +39,7 @@ prof_accum_add(tsdn_t *tsdn, prof_accum_t *prof_accum, uint64_t accumbytes) {
     return overflow;
 }
 
-JEMALLOC_INLINE void
+static inline void
 prof_accum_cancel(tsdn_t *tsdn, prof_accum_t *prof_accum, size_t usize) {
     cassert(config_prof);
 
@ -73,6 +66,5 @@ prof_accum_cancel(tsdn_t *tsdn, prof_accum_t *prof_accum, size_t usize) {
     malloc_mutex_unlock(tsdn, &prof_accum->mtx);
 #endif
 }
-#endif
 
 #endif /* JEMALLOC_INTERNAL_PROF_INLINES_A_H */
@ -1,29 +1,6 @@
 #ifndef JEMALLOC_INTERNAL_PROF_INLINES_B_H
 #define JEMALLOC_INTERNAL_PROF_INLINES_B_H
 
-#ifndef JEMALLOC_ENABLE_INLINE
-bool prof_active_get_unlocked(void);
-bool prof_gdump_get_unlocked(void);
-prof_tdata_t *prof_tdata_get(tsd_t *tsd, bool create);
-prof_tctx_t *prof_tctx_get(tsdn_t *tsdn, const void *ptr,
-    alloc_ctx_t *alloc_ctx);
-void prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
-    alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx);
-void prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx);
-bool prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
-    prof_tdata_t **tdata_out);
-prof_tctx_t *prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active,
-    bool update);
-void prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize,
-    alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx);
-void prof_realloc(tsd_t *tsd, const void *ptr, size_t usize,
-    prof_tctx_t *tctx, bool prof_active, bool updated, const void *old_ptr,
-    size_t old_usize, prof_tctx_t *old_tctx);
-void prof_free(tsd_t *tsd, const void *ptr, size_t usize,
-    alloc_ctx_t *alloc_ctx);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PROF_C_))
 JEMALLOC_ALWAYS_INLINE bool
 prof_active_get_unlocked(void) {
     /*
@ -231,6 +208,5 @@ prof_free(tsd_t *tsd, const void *ptr, size_t usize, alloc_ctx_t *alloc_ctx) {
         prof_free_sampled_object(tsd, usize, tctx);
     }
 }
-#endif
 
 #endif /* JEMALLOC_INTERNAL_PROF_INLINES_B_H */
@ -3,59 +3,6 @@
 
 #include "jemalloc/internal/spin.h"
 
-#ifndef JEMALLOC_ENABLE_INLINE
-uintptr_t rtree_leafkey(uintptr_t key);
-uintptr_t rtree_subkey(uintptr_t key, unsigned level);
-#  ifdef RTREE_LEAF_COMPACT
-uintptr_t rtree_leaf_elm_bits_read(tsdn_t *tsdn, rtree_t *rtree,
-    rtree_leaf_elm_t *elm, bool acquired, bool dependent);
-extent_t *rtree_leaf_elm_bits_extent_get(uintptr_t bits);
-szind_t rtree_leaf_elm_bits_szind_get(uintptr_t bits);
-bool rtree_leaf_elm_bits_slab_get(uintptr_t bits);
-bool rtree_leaf_elm_bits_locked_get(uintptr_t bits);
-#  endif
-extent_t *rtree_leaf_elm_extent_read(tsdn_t *tsdn, rtree_t *rtree,
-    rtree_leaf_elm_t *elm, bool acquired, bool dependent);
-szind_t rtree_leaf_elm_szind_read(tsdn_t *tsdn, rtree_t *rtree,
-    rtree_leaf_elm_t *elm, bool acquired, bool dependent);
-bool rtree_leaf_elm_slab_read(tsdn_t *tsdn, rtree_t *rtree,
-    rtree_leaf_elm_t *elm, bool acquired, bool dependent);
-void rtree_leaf_elm_extent_write(tsdn_t *tsdn, rtree_t *rtree,
-    rtree_leaf_elm_t *elm, bool acquired, extent_t *extent);
-void rtree_leaf_elm_szind_write(tsdn_t *tsdn, rtree_t *rtree,
-    rtree_leaf_elm_t *elm, bool acquired, szind_t szind);
-void rtree_leaf_elm_slab_write(tsdn_t *tsdn, rtree_t *rtree,
-    rtree_leaf_elm_t *elm, bool acquired, bool slab);
-void rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
-    bool acquired, extent_t *extent, szind_t szind, bool slab);
-void rtree_leaf_elm_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree,
-    rtree_leaf_elm_t *elm, szind_t szind, bool slab);
-rtree_leaf_elm_t *rtree_leaf_elm_lookup(tsdn_t *tsdn, rtree_t *rtree,
-    rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, bool init_missing);
-bool rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
-    uintptr_t key, extent_t *extent, szind_t szind, bool slab);
-rtree_leaf_elm_t *rtree_read(tsdn_t *tsdn, rtree_t *rtree,
-    rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent);
-extent_t *rtree_extent_read(tsdn_t *tsdn, rtree_t *rtree,
-    rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent);
-szind_t rtree_szind_read(tsdn_t *tsdn, rtree_t *rtree,
-    rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent);
-bool rtree_extent_szind_read(tsdn_t *tsdn, rtree_t *rtree,
-    rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, extent_t **r_extent,
-    szind_t *r_szind);
-bool rtree_szind_slab_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
-    uintptr_t key, bool dependent, szind_t *r_szind, bool *r_slab);
-rtree_leaf_elm_t *rtree_leaf_elm_acquire(tsdn_t *tsdn, rtree_t *rtree,
-    rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, bool init_missing);
-void rtree_leaf_elm_release(tsdn_t *tsdn, rtree_t *rtree,
-    rtree_leaf_elm_t *elm);
-void rtree_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree,
-    rtree_ctx_t *rtree_ctx, uintptr_t key, szind_t szind, bool slab);
-void rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
-    uintptr_t key);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
 JEMALLOC_ALWAYS_INLINE uintptr_t
 rtree_leafkey(uintptr_t key) {
     unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
@ -194,7 +141,7 @@ rtree_leaf_elm_slab_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
 #endif
 }
 
-JEMALLOC_INLINE void
+static inline void
 rtree_leaf_elm_extent_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
     bool acquired, extent_t *extent) {
     if (config_debug && acquired) {
@ -219,7 +166,7 @@ rtree_leaf_elm_extent_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
 #endif
 }
 
-JEMALLOC_INLINE void
+static inline void
 rtree_leaf_elm_szind_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
     bool acquired, szind_t szind) {
     if (config_debug && acquired) {
@ -241,7 +188,7 @@ rtree_leaf_elm_szind_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
 #endif
 }
 
-JEMALLOC_INLINE void
+static inline void
 rtree_leaf_elm_slab_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
     bool acquired, bool slab) {
     if (config_debug && acquired) {
@ -261,7 +208,7 @@ rtree_leaf_elm_slab_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
 #endif
 }
 
-JEMALLOC_INLINE void
+static inline void
 rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
     bool acquired, extent_t *extent, szind_t szind, bool slab) {
     if (config_debug && acquired) {
@ -287,7 +234,7 @@ rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
 #endif
 }
 
-JEMALLOC_INLINE void
+static inline void
 rtree_leaf_elm_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree,
     rtree_leaf_elm_t *elm, szind_t szind, bool slab) {
     assert(!slab || szind < NBINS);
@ -384,7 +331,7 @@ rtree_leaf_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
     dependent, init_missing);
 }
 
-JEMALLOC_INLINE bool
+static inline bool
 rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
     extent_t *extent, szind_t szind, bool slab) {
     /* Use rtree_clear() to set the extent to NULL. */
@ -471,7 +418,7 @@ rtree_szind_slab_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
     return false;
 }
 
-JEMALLOC_INLINE rtree_leaf_elm_t *
+static inline rtree_leaf_elm_t *
 rtree_leaf_elm_acquire(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
     uintptr_t key, bool dependent, bool init_missing) {
     rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
@ -511,7 +458,7 @@ rtree_leaf_elm_acquire(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
     return elm;
 }
 
-JEMALLOC_INLINE void
+static inline void
 rtree_leaf_elm_release(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm) {
     extent_t *extent = rtree_leaf_elm_extent_read(tsdn, rtree, elm, true,
     true);
@ -521,7 +468,7 @@ rtree_leaf_elm_release(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm) {
     }
 }
 
-JEMALLOC_INLINE void
+static inline void
 rtree_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
     uintptr_t key, szind_t szind, bool slab) {
     assert(!slab || szind < NBINS);
@ -530,7 +477,7 @@ rtree_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
     rtree_leaf_elm_szind_slab_update(tsdn, rtree, elm, szind, slab);
 }
 
-JEMALLOC_INLINE void
+static inline void
 rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
     uintptr_t key) {
     rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true);
@ -538,6 +485,5 @@ rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
     NULL);
     rtree_leaf_elm_write(tsdn, rtree, elm, false, NULL, NSIZES, false);
 }
-#endif
 
 #endif /* JEMALLOC_INTERNAL_RTREE_INLINES_H */
@ -4,30 +4,12 @@
 #include "jemalloc/internal/jemalloc_internal_types.h"
 #include "jemalloc/internal/util.h"
 
-#ifndef JEMALLOC_ENABLE_INLINE
-void tcache_event(tsd_t *tsd, tcache_t *tcache);
-bool tcache_enabled_get(tsd_t *tsd);
-tcache_t *tcache_get(tsd_t *tsd);
-void tcache_enabled_set(tsd_t *tsd, bool enabled);
-void *tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success);
-void *tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
-    size_t size, szind_t ind, bool zero, bool slow_path);
-void *tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
-    size_t size, szind_t ind, bool zero, bool slow_path);
-void tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr,
-    szind_t binind, bool slow_path);
-void tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr,
-    szind_t binind, bool slow_path);
-tcache_t *tcaches_get(tsd_t *tsd, unsigned ind);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_))
-JEMALLOC_INLINE bool
+static inline bool
 tcache_enabled_get(tsd_t *tsd) {
     return tsd_tcache_enabled_get(tsd);
 }
 
-JEMALLOC_INLINE void
+static inline void
 tcache_enabled_set(tsd_t *tsd, bool enabled) {
     bool was_enabled = tsd_tcache_enabled_get(tsd);
 
@ -261,6 +243,5 @@ tcaches_get(tsd_t *tsd, unsigned ind) {
     }
     return elm->tcache;
 }
-#endif
 
 #endif /* JEMALLOC_INTERNAL_TCACHE_INLINES_H */
@ -1,32 +1,23 @@
 #ifndef JEMALLOC_INTERNAL_TICKER_INLINES_H
 #define JEMALLOC_INTERNAL_TICKER_INLINES_H
 
-#ifndef JEMALLOC_ENABLE_INLINE
-void ticker_init(ticker_t *ticker, int32_t nticks);
-void ticker_copy(ticker_t *ticker, const ticker_t *other);
-int32_t ticker_read(const ticker_t *ticker);
-bool ticker_ticks(ticker_t *ticker, int32_t nticks);
-bool ticker_tick(ticker_t *ticker);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TICKER_C_))
-JEMALLOC_INLINE void
+static inline void
 ticker_init(ticker_t *ticker, int32_t nticks) {
     ticker->tick = nticks;
     ticker->nticks = nticks;
 }
 
-JEMALLOC_INLINE void
+static inline void
 ticker_copy(ticker_t *ticker, const ticker_t *other) {
     *ticker = *other;
 }
 
-JEMALLOC_INLINE int32_t
+static inline int32_t
 ticker_read(const ticker_t *ticker) {
     return ticker->tick;
 }
 
-JEMALLOC_INLINE bool
+static inline bool
 ticker_ticks(ticker_t *ticker, int32_t nticks) {
     if (unlikely(ticker->tick < nticks)) {
         ticker->tick = ticker->nticks;
@ -36,10 +27,9 @@ ticker_ticks(ticker_t *ticker, int32_t nticks) {
     return(false);
 }
 
-JEMALLOC_INLINE bool
+static inline bool
 ticker_tick(ticker_t *ticker) {
     return ticker_ticks(ticker, 1);
 }
-#endif
 
 #endif /* JEMALLOC_INTERNAL_TICKER_INLINES_H */
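For context, a ticker counts a budget down and reports (and rearms) when it crosses zero; jemalloc uses it to amortize periodic work such as tcache events. A self-contained restatement of the logic shown above (the decrement path is inferred from the reset path, as a sketch):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    int32_t tick;
    int32_t nticks;
} ticker_t;

static inline bool
ticker_ticks(ticker_t *ticker, int32_t nticks) {
    if (ticker->tick < nticks) {
        ticker->tick = ticker->nticks; /* rearm */
        return true;
    }
    ticker->tick -= nticks;
    return false;
}

int
main(void) {
    ticker_t t = {64, 64};
    int fired = 0;
    for (int i = 0; i < 1000; i++) {
        fired += ticker_ticks(&t, 1);
    }
    printf("fired %d times\n", fired); /* roughly once per 65 calls */
    return 0;
}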
@ -1,29 +1,6 @@
 #ifndef JEMALLOC_INTERNAL_TSD_INLINES_H
 #define JEMALLOC_INTERNAL_TSD_INLINES_H
 
-#ifndef JEMALLOC_ENABLE_INLINE
-malloc_tsd_protos(JEMALLOC_ATTR(unused), , tsd_t)
-
-tsd_t *tsd_fetch_impl(bool init);
-tsd_t *tsd_fetch(void);
-tsdn_t *tsd_tsdn(tsd_t *tsd);
-bool tsd_nominal(tsd_t *tsd);
-#define O(n, t, gs, i, c) \
-t *tsd_##n##p_get(tsd_t *tsd); \
-t tsd_##n##_get(tsd_t *tsd); \
-void tsd_##n##_set(tsd_t *tsd, t n);
-MALLOC_TSD
-#undef O
-tsdn_t *tsdn_fetch(void);
-bool tsdn_null(const tsdn_t *tsdn);
-tsd_t *tsdn_tsd(tsdn_t *tsdn);
-rtree_ctx_t *tsd_rtree_ctx(tsd_t *tsd);
-rtree_ctx_t *tsdn_rtree_ctx(tsdn_t *tsdn, rtree_ctx_t *fallback);
-bool tsd_fast(tsd_t *tsd);
-bool tsd_assert_fast(tsd_t *tsd);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TSD_C_))
 malloc_tsd_externs(, tsd_t)
 malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, , tsd_t, tsd_initializer, tsd_cleanup)
 
@ -97,7 +74,7 @@ tsd_tsdn(tsd_t *tsd) {
     return (tsdn_t *)tsd;
 }
 
-JEMALLOC_INLINE bool
+static inline bool
 tsd_nominal(tsd_t *tsd) {
     return (tsd->state <= tsd_state_nominal_max);
 }
@ -140,6 +117,5 @@ tsdn_rtree_ctx(tsdn_t *tsdn, rtree_ctx_t *fallback) {
     }
     return tsd_rtree_ctx(tsdn_tsd(tsdn));
 }
-#endif
 
 #endif /* JEMALLOC_INTERNAL_TSD_INLINES_H */
@ -3,21 +3,8 @@
 
 #include "jemalloc/internal/ql.h"
 
-#ifndef JEMALLOC_ENABLE_INLINE
-bool witness_owner(tsd_t *tsd, const witness_t *witness);
-void witness_assert_owner(tsdn_t *tsdn, const witness_t *witness);
-void witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness);
-void witness_assert_depth_to_rank(tsdn_t *tsdn, witness_rank_t rank_inclusive,
-    unsigned depth);
-void witness_assert_depth(tsdn_t *tsdn, unsigned depth);
-void witness_assert_lockless(tsdn_t *tsdn);
-void witness_lock(tsdn_t *tsdn, witness_t *witness);
-void witness_unlock(tsdn_t *tsdn, witness_t *witness);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_))
 /* Helper, not intended for direct use. */
-JEMALLOC_INLINE bool
+static inline bool
 witness_owner(tsd_t *tsd, const witness_t *witness) {
     witness_list_t *witnesses;
     witness_t *w;
@ -34,7 +21,7 @@ witness_owner(tsd_t *tsd, const witness_t *witness) {
     return false;
 }
 
-JEMALLOC_INLINE void
+static inline void
 witness_assert_owner(tsdn_t *tsdn, const witness_t *witness) {
     tsd_t *tsd;
 
@ -56,7 +43,7 @@ witness_assert_owner(tsdn_t *tsdn, const witness_t *witness) {
     witness_owner_error(witness);
 }
 
-JEMALLOC_INLINE void
+static inline void
 witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness) {
     tsd_t *tsd;
     witness_list_t *witnesses;
@ -82,7 +69,7 @@ witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness) {
     }
 }
 
-JEMALLOC_INLINE void
+static inline void
 witness_assert_depth_to_rank(tsdn_t *tsdn, witness_rank_t rank_inclusive,
     unsigned depth) {
     tsd_t *tsd;
@ -115,17 +102,17 @@ witness_assert_depth_to_rank(tsdn_t *tsdn, witness_rank_t rank_inclusive,
     }
 }
 
-JEMALLOC_INLINE void
+static inline void
 witness_assert_depth(tsdn_t *tsdn, unsigned depth) {
     witness_assert_depth_to_rank(tsdn, WITNESS_RANK_MIN, depth);
 }
 
-JEMALLOC_INLINE void
+static inline void
 witness_assert_lockless(tsdn_t *tsdn) {
     witness_assert_depth(tsdn, 0);
 }
 
-JEMALLOC_INLINE void
+static inline void
 witness_lock(tsdn_t *tsdn, witness_t *witness) {
     tsd_t *tsd;
     witness_list_t *witnesses;
@ -168,7 +155,7 @@ witness_lock(tsdn_t *tsdn, witness_t *witness) {
     ql_tail_insert(witnesses, witness, link);
 }
 
-JEMALLOC_INLINE void
+static inline void
 witness_unlock(tsdn_t *tsdn, witness_t *witness) {
     tsd_t *tsd;
     witness_list_t *witnesses;
@ -197,6 +184,5 @@ witness_unlock(tsdn_t *tsdn, witness_t *witness) {
         witness_assert_owner(tsdn, witness);
     }
 }
-#endif
 
 #endif /* JEMALLOC_INTERNAL_WITNESS_INLINES_H */
@ -359,7 +359,7 @@ arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
     }
 }
 
-JEMALLOC_INLINE_C void *
+static void *
 arena_slab_reg_alloc(tsdn_t *tsdn, extent_t *slab,
     const arena_bin_info_t *bin_info) {
     void *ret;
@ -377,7 +377,7 @@ arena_slab_reg_alloc(tsdn_t *tsdn, extent_t *slab,
 }
 
 #ifndef JEMALLOC_JET
-JEMALLOC_INLINE_C
+static
 #endif
 size_t
 arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) {
@ -414,7 +414,7 @@ arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) {
     return regind;
 }
 
-JEMALLOC_INLINE_C void
+static void
 arena_slab_reg_dalloc(tsdn_t *tsdn, extent_t *slab,
     arena_slab_data_t *slab_data, void *ptr) {
     szind_t binind = extent_szind_get(slab);
src/ckh.c
@ -54,7 +54,7 @@ static void ckh_shrink(tsd_t *tsd, ckh_t *ckh);
  * Search bucket for key and return the cell number if found; SIZE_T_MAX
  * otherwise.
  */
-JEMALLOC_INLINE_C size_t
+static size_t
 ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key) {
     ckhc_t *cell;
     unsigned i;
@ -72,7 +72,7 @@ ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key) {
 /*
  * Search table for key and return cell number if found; SIZE_T_MAX otherwise.
  */
-JEMALLOC_INLINE_C size_t
+static size_t
 ckh_isearch(ckh_t *ckh, const void *key) {
     size_t hashes[2], bucket, cell;
 
@ -93,7 +93,7 @@ ckh_isearch(ckh_t *ckh, const void *key) {
     return cell;
 }
 
-JEMALLOC_INLINE_C bool
+static bool
 ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
     const void *data) {
     ckhc_t *cell;
@ -125,7 +125,7 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
  * eviction/relocation procedure until either success or detection of an
  * eviction/relocation bucket cycle.
  */
-JEMALLOC_INLINE_C bool
+static bool
 ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
     void const **argdata) {
     const void *key, *data, *tkey, *tdata;
@ -196,7 +196,7 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
     }
 }
 
-JEMALLOC_INLINE_C bool
+static bool
 ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) {
     size_t hashes[2], bucket;
     const void *key = *argkey;
@ -226,7 +226,7 @@ ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) {
 /*
  * Try to rebuild the hash table from scratch by inserting all items from the
  * old table into the new.
  */
-JEMALLOC_INLINE_C bool
+static bool
 ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) {
     size_t count, i, nins;
     const void *key, *data;
|
|||||||
/******************************************************************************/
|
/******************************************************************************/
|
||||||
/* Helpers for named and indexed nodes. */
|
/* Helpers for named and indexed nodes. */
|
||||||
|
|
||||||
JEMALLOC_INLINE_C const ctl_named_node_t *
|
static const ctl_named_node_t *
|
||||||
ctl_named_node(const ctl_node_t *node) {
|
ctl_named_node(const ctl_node_t *node) {
|
||||||
return ((node->named) ? (const ctl_named_node_t *)node : NULL);
|
return ((node->named) ? (const ctl_named_node_t *)node : NULL);
|
||||||
}
|
}
|
||||||
|
|
||||||
JEMALLOC_INLINE_C const ctl_named_node_t *
|
static const ctl_named_node_t *
|
||||||
ctl_named_children(const ctl_named_node_t *node, size_t index) {
|
ctl_named_children(const ctl_named_node_t *node, size_t index) {
|
||||||
const ctl_named_node_t *children = ctl_named_node(node->children);
|
const ctl_named_node_t *children = ctl_named_node(node->children);
|
||||||
|
|
||||||
return (children ? &children[index] : NULL);
|
return (children ? &children[index] : NULL);
|
||||||
}
|
}
|
||||||
|
|
||||||
JEMALLOC_INLINE_C const ctl_indexed_node_t *
|
static const ctl_indexed_node_t *
|
||||||
ctl_indexed_node(const ctl_node_t *node) {
|
ctl_indexed_node(const ctl_node_t *node) {
|
||||||
return (!node->named ? (const ctl_indexed_node_t *)node : NULL);
|
return (!node->named ? (const ctl_indexed_node_t *)node : NULL);
|
||||||
}
|
}
|
||||||
|
@ -288,7 +288,7 @@ malloc_initialized(void) {
     return (malloc_init_state == malloc_init_initialized);
 }
 
-JEMALLOC_ALWAYS_INLINE_C bool
+JEMALLOC_ALWAYS_INLINE bool
 malloc_init_a0(void) {
     if (unlikely(malloc_init_state == malloc_init_uninitialized)) {
         return malloc_init_hard_a0();
@ -296,7 +296,7 @@ malloc_init_a0(void) {
     return false;
 }
 
-JEMALLOC_ALWAYS_INLINE_C bool
+JEMALLOC_ALWAYS_INLINE bool
 malloc_init(void) {
     if (unlikely(!malloc_initialized()) && malloc_init_hard()) {
         return true;
@ -1490,7 +1490,7 @@ struct static_opts_s {
     bool slow;
 };
 
-JEMALLOC_ALWAYS_INLINE_C void
+JEMALLOC_ALWAYS_INLINE void
 static_opts_init(static_opts_t *static_opts) {
     static_opts->may_overflow = false;
     static_opts->bump_empty_alloc = false;
@ -1523,7 +1523,7 @@ struct dynamic_opts_s {
     unsigned arena_ind;
 };
 
-JEMALLOC_ALWAYS_INLINE_C void
+JEMALLOC_ALWAYS_INLINE void
 dynamic_opts_init(dynamic_opts_t *dynamic_opts) {
     dynamic_opts->result = NULL;
     dynamic_opts->num_items = 0;
@ -1535,7 +1535,7 @@ dynamic_opts_init(dynamic_opts_t *dynamic_opts) {
 }
 
 /* ind is ignored if dopts->alignment > 0. */
-JEMALLOC_ALWAYS_INLINE_C void *
+JEMALLOC_ALWAYS_INLINE void *
 imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
     size_t size, size_t usize, szind_t ind) {
     tcache_t *tcache;
@ -1577,7 +1577,7 @@ imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
     arena, sopts->slow);
 }
 
-JEMALLOC_ALWAYS_INLINE_C void *
+JEMALLOC_ALWAYS_INLINE void *
 imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
     size_t usize, szind_t ind) {
     void *ret;
@ -1611,7 +1611,7 @@ imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
  * Returns true if the allocation will overflow, and false otherwise.  Sets
  * *size to the product either way.
  */
-JEMALLOC_ALWAYS_INLINE_C bool
+JEMALLOC_ALWAYS_INLINE bool
 compute_size_with_overflow(bool may_overflow, dynamic_opts_t *dopts,
     size_t *size) {
     /*
@ -1649,7 +1649,7 @@ compute_size_with_overflow(bool may_overflow, dynamic_opts_t *dopts,
     return true;
 }
 
-JEMALLOC_ALWAYS_INLINE_C int
+JEMALLOC_ALWAYS_INLINE int
 imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
     /* Where the actual allocated memory will live. */
     void *allocation = NULL;
@ -1850,7 +1850,7 @@ label_invalid_alignment:
 }
 
 /* Returns the errno-style error code of the allocation. */
-JEMALLOC_ALWAYS_INLINE_C int
+JEMALLOC_ALWAYS_INLINE int
 imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) {
     if (unlikely(!malloc_initialized()) && unlikely(malloc_init())) {
         if (config_xmalloc && unlikely(opt_xmalloc)) {
@ -2011,7 +2011,7 @@ irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
     return p;
 }
 
-JEMALLOC_ALWAYS_INLINE_C void *
+JEMALLOC_ALWAYS_INLINE void *
 irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
     alloc_ctx_t *alloc_ctx) {
     void *p;
@ -2036,7 +2036,7 @@ irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
     return p;
 }
 
-JEMALLOC_ALWAYS_INLINE_C void
+JEMALLOC_ALWAYS_INLINE void
 ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
     assert(slow_path || tsd_assert_fast(tsd));
     if (tsd_reentrancy_level_get(tsd) == 0) {
@ -2074,7 +2074,7 @@ ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
     }
 }
 
-JEMALLOC_ALWAYS_INLINE_C void
+JEMALLOC_ALWAYS_INLINE void
 isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) {
     assert(slow_path || tsd_assert_fast(tsd));
     if (tsd_reentrancy_level_get(tsd) == 0) {
@ -2403,7 +2403,7 @@ irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize,
     return p;
 }
 
-JEMALLOC_ALWAYS_INLINE_C void *
+JEMALLOC_ALWAYS_INLINE void *
 irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
     size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
     arena_t *arena, alloc_ctx_t *alloc_ctx) {
@ -2528,7 +2528,7 @@ label_oom:
     return NULL;
 }
 
-JEMALLOC_ALWAYS_INLINE_C size_t
+JEMALLOC_ALWAYS_INLINE size_t
 ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
     size_t extra, size_t alignment, bool zero) {
     size_t usize;
@ -2555,7 +2555,7 @@ ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
     return usize;
 }
 
-JEMALLOC_ALWAYS_INLINE_C size_t
+JEMALLOC_ALWAYS_INLINE size_t
 ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
     size_t extra, size_t alignment, bool zero, alloc_ctx_t *alloc_ctx) {
     size_t usize_max, usize;
@ -2727,7 +2727,7 @@ je_dallocx(void *ptr, int flags) {
     witness_assert_lockless(tsd_tsdn(tsd));
 }
 
-JEMALLOC_ALWAYS_INLINE_C size_t
+JEMALLOC_ALWAYS_INLINE size_t
 inallocx(tsdn_t *tsdn, size_t size, int flags) {
     witness_assert_lockless(tsdn);
 
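The comment above compute_size_with_overflow() describes the usual contract for sized multiplication: report whether num * item_size wraps, while still storing the (possibly wrapped) product. A hedged, self-contained sketch of that check using division (jemalloc's real version takes dopts and has extra fast paths keyed off may_overflow):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool
size_mul_overflows(size_t num, size_t item_size, size_t *size) {
    *size = num * item_size; /* product, possibly wrapped */
    if (num == 0 || item_size == 0) {
        return false;
    }
    return (*size / item_size != num);
}

int
main(void) {
    size_t sz;
    printf("%d\n", size_mul_overflows((size_t)-1, 8, &sz)); /* 1: wraps */
    printf("%d\n", size_mul_overflows(16, 8, &sz));         /* 0: 128 fits */
    return 0;
}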
@ -40,7 +40,6 @@ void operator delete[](void *ptr, std::size_t size) noexcept;
 #endif
 
 template <bool IsNoExcept>
-JEMALLOC_INLINE
 void *
 newImpl(std::size_t size) noexcept(IsNoExcept) {
     void *ptr = je_malloc(size);
src/prof.c
@ -145,7 +145,7 @@ static char *prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name);
 /******************************************************************************/
 /* Red-black trees. */
 
-JEMALLOC_INLINE_C int
+static int
 prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b) {
     uint64_t a_thr_uid = a->thr_uid;
     uint64_t b_thr_uid = b->thr_uid;
@ -168,7 +168,7 @@ prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b) {
 rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t,
     tctx_link, prof_tctx_comp)
 
-JEMALLOC_INLINE_C int
+static int
 prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) {
     unsigned a_len = a->bt.len;
     unsigned b_len = b->bt.len;
@ -183,7 +183,7 @@ prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) {
 rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link,
     prof_gctx_comp)
 
-JEMALLOC_INLINE_C int
+static int
 prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) {
     int ret;
     uint64_t a_uid = a->thr_uid;
@ -273,7 +273,7 @@ bt_init(prof_bt_t *bt, void **vec) {
     bt->len = 0;
 }
 
-JEMALLOC_INLINE_C void
+static void
 prof_enter(tsd_t *tsd, prof_tdata_t *tdata) {
     cassert(config_prof);
     assert(tdata == prof_tdata_get(tsd, false));
@ -286,7 +286,7 @@ prof_enter(tsd_t *tsd, prof_tdata_t *tdata) {
     malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
 }
 
-JEMALLOC_INLINE_C void
+static void
 prof_leave(tsd_t *tsd, prof_tdata_t *tdata) {
     cassert(config_prof);
     assert(tdata == prof_tdata_get(tsd, false));
@ -1884,7 +1884,7 @@ prof_bt_keycomp(const void *k1, const void *k2) {
     return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
 }
 
-JEMALLOC_INLINE_C uint64_t
+static uint64_t
 prof_thr_uid_alloc(tsdn_t *tsdn) {
     uint64_t thr_uid;
 
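The prof comparators renamed above follow the usual three-way contract expected by rb_gen(): negative, zero, or positive. The branchless comparison idiom commonly used for unsigned ids, shown in isolation:

#include <stdint.h>
#include <stdio.h>

static int
u64_comp(uint64_t a, uint64_t b) {
    return (a > b) - (a < b); /* -1, 0, or 1; no subtraction overflow */
}

int
main(void) {
    printf("%d %d %d\n", u64_comp(1, 2), u64_comp(2, 2), u64_comp(3, 2));
    return 0;
}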
@ -33,8 +33,8 @@
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 /**
  * @file  SFMT-alti.h
  *
  * @brief SIMD oriented Fast Mersenne Twister(SFMT)
  * pseudorandom number generator
@ -95,7 +95,7 @@ vector unsigned int vec_recursion(vector unsigned int a,
  * This function fills the internal state array with pseudorandom
  * integers.
  */
-JEMALLOC_INLINE void gen_rand_all(sfmt_t *ctx) {
+static inline void gen_rand_all(sfmt_t *ctx) {
     int i;
     vector unsigned int r, r1, r2;
 
@ -119,10 +119,10 @@ JEMALLOC_INLINE void gen_rand_all(sfmt_t *ctx) {
  * This function fills the user-specified array with pseudorandom
  * integers.
  *
  * @param array an 128-bit array to be filled by pseudorandom numbers.
  * @param size number of 128-bit pesudorandom numbers to be generated.
  */
-JEMALLOC_INLINE void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) {
+static inline void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) {
     int i, j;
    vector unsigned int r, r1, r2;
 
@ -173,7 +173,7 @@ JEMALLOC_INLINE void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) {
  * @param array an 128-bit array to be swaped.
  * @param size size of 128-bit array.
  */
-JEMALLOC_INLINE void swap(w128_t *array, int size) {
+static inline void swap(w128_t *array, int size) {
     int i;
     const vector unsigned char perm = ALTI_SWAP;
 
@ -33,7 +33,7 @@
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 /**
  * @file SFMT-sse2.h
  * @brief SIMD oriented Fast Mersenne Twister(SFMT) for Intel SSE2
  *
@ -60,10 +60,10 @@
  * @param mask 128-bit mask
  * @return output
  */
 JEMALLOC_ALWAYS_INLINE __m128i mm_recursion(__m128i *a, __m128i *b,
     __m128i c, __m128i d, __m128i mask) {
     __m128i v, x, y, z;
 
     x = _mm_load_si128(a);
     y = _mm_srli_epi32(*b, SR1);
     z = _mm_srli_si128(c, SR2);
@ -81,7 +81,7 @@ JEMALLOC_ALWAYS_INLINE __m128i mm_recursion(__m128i *a, __m128i *b,
  * This function fills the internal state array with pseudorandom
  * integers.
  */
-JEMALLOC_INLINE void gen_rand_all(sfmt_t *ctx) {
+static inline void gen_rand_all(sfmt_t *ctx) {
     int i;
     __m128i r, r1, r2, mask;
     mask = _mm_set_epi32(MSK4, MSK3, MSK2, MSK1);
@ -108,10 +108,10 @@ JEMALLOC_INLINE void gen_rand_all(sfmt_t *ctx) {
  * This function fills the user-specified array with pseudorandom
  * integers.
  *
  * @param array an 128-bit array to be filled by pseudorandom numbers.
  * @param size number of 128-bit pesudorandom numbers to be generated.
  */
-JEMALLOC_INLINE void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) {
+static inline void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) {
     int i, j;
     __m128i r, r1, r2, mask;
     mask = _mm_set_epi32(MSK4, MSK3, MSK2, MSK1);
@ -81,76 +81,62 @@ const char *get_idstring(void);
|
|||||||
int get_min_array_size32(void);
|
int get_min_array_size32(void);
|
||||||
int get_min_array_size64(void);
|
int get_min_array_size64(void);
|
||||||
|
|
||||||
#ifndef JEMALLOC_ENABLE_INLINE
|
|
||||||
double to_real1(uint32_t v);
|
|
||||||
double genrand_real1(sfmt_t *ctx);
|
|
||||||
double to_real2(uint32_t v);
|
|
||||||
double genrand_real2(sfmt_t *ctx);
|
|
||||||
double to_real3(uint32_t v);
|
|
||||||
double genrand_real3(sfmt_t *ctx);
|
|
||||||
double to_res53(uint64_t v);
|
|
||||||
double to_res53_mix(uint32_t x, uint32_t y);
|
|
||||||
double genrand_res53(sfmt_t *ctx);
|
|
||||||
double genrand_res53_mix(sfmt_t *ctx);
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(SFMT_C_))
|
|
||||||
/* These real versions are due to Isaku Wada */
|
/* These real versions are due to Isaku Wada */
|
||||||
/** generates a random number on [0,1]-real-interval */
|
/** generates a random number on [0,1]-real-interval */
|
||||||
JEMALLOC_INLINE double to_real1(uint32_t v) {
|
static inline double to_real1(uint32_t v) {
|
||||||
return v * (1.0/4294967295.0);
|
return v * (1.0/4294967295.0);
|
||||||
/* divided by 2^32-1 */
|
/* divided by 2^32-1 */
|
||||||
}
|
}
|
||||||
|
|
||||||
/** generates a random number on [0,1]-real-interval */
|
/** generates a random number on [0,1]-real-interval */
|
||||||
JEMALLOC_INLINE double genrand_real1(sfmt_t *ctx) {
|
static inline double genrand_real1(sfmt_t *ctx) {
|
||||||
return to_real1(gen_rand32(ctx));
|
return to_real1(gen_rand32(ctx));
|
||||||
}
|
}
|
||||||
|
|
||||||
/** generates a random number on [0,1)-real-interval */
|
/** generates a random number on [0,1)-real-interval */
|
||||||
JEMALLOC_INLINE double to_real2(uint32_t v) {
|
static inline double to_real2(uint32_t v) {
|
||||||
return v * (1.0/4294967296.0);
|
return v * (1.0/4294967296.0);
|
||||||
/* divided by 2^32 */
|
/* divided by 2^32 */
|
||||||
}
|
}
|
||||||
|
|
||||||
/** generates a random number on [0,1)-real-interval */
|
/** generates a random number on [0,1)-real-interval */
|
||||||
JEMALLOC_INLINE double genrand_real2(sfmt_t *ctx) {
|
static inline double genrand_real2(sfmt_t *ctx) {
|
||||||
return to_real2(gen_rand32(ctx));
|
return to_real2(gen_rand32(ctx));
|
||||||
}
|
}
|
||||||
|
|
||||||
/** generates a random number on (0,1)-real-interval */
|
/** generates a random number on (0,1)-real-interval */
|
||||||
JEMALLOC_INLINE double to_real3(uint32_t v) {
|
static inline double to_real3(uint32_t v) {
|
||||||
return (((double)v) + 0.5)*(1.0/4294967296.0);
|
return (((double)v) + 0.5)*(1.0/4294967296.0);
|
||||||
/* divided by 2^32 */
|
/* divided by 2^32 */
|
||||||
}
|
}
|
||||||
|
|
||||||
/** generates a random number on (0,1)-real-interval */
|
/** generates a random number on (0,1)-real-interval */
|
||||||
JEMALLOC_INLINE double genrand_real3(sfmt_t *ctx) {
|
static inline double genrand_real3(sfmt_t *ctx) {
|
||||||
return to_real3(gen_rand32(ctx));
|
return to_real3(gen_rand32(ctx));
|
||||||
}
|
}
|
||||||
 /** These real versions are due to Isaku Wada */
 
 /** generates a random number on [0,1) with 53-bit resolution*/
-JEMALLOC_INLINE double to_res53(uint64_t v) {
+static inline double to_res53(uint64_t v) {
     return v * (1.0/18446744073709551616.0L);
 }
 
 /** generates a random number on [0,1) with 53-bit resolution from two
  * 32 bit integers */
-JEMALLOC_INLINE double to_res53_mix(uint32_t x, uint32_t y) {
+static inline double to_res53_mix(uint32_t x, uint32_t y) {
     return to_res53(x | ((uint64_t)y << 32));
 }
 
 /** generates a random number on [0,1) with 53-bit resolution
  */
-JEMALLOC_INLINE double genrand_res53(sfmt_t *ctx) {
+static inline double genrand_res53(sfmt_t *ctx) {
     return to_res53(gen_rand64(ctx));
 }
 
 /** generates a random number on [0,1) with 53-bit resolution
     using 32bit integer.
  */
-JEMALLOC_INLINE double genrand_res53_mix(sfmt_t *ctx) {
+static inline double genrand_res53_mix(sfmt_t *ctx) {
     uint32_t x, y;
 
     x = gen_rand32(ctx);
@@ -158,4 +144,3 @@ JEMALLOC_INLINE double genrand_res53_mix(sfmt_t *ctx) {
     return to_res53_mix(x, y);
 }
 #endif
-#endif
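An IEEE-754 double carries a 53-bit significand, so dividing a 64-bit integer by 2^64 yields a value in [0,1) with at most 53 significant bits, which is where the "53-bit resolution" naming comes from; to_res53_mix() merely packs two 32-bit draws into the 64-bit input, x in the low half and y in the high half. A standalone sketch with fixed stand-in draws:

/*
 * Standalone sketch of the 53-bit-resolution conversion; fixed inputs
 * stand in for gen_rand32()/gen_rand64() output.
 */
#include <stdint.h>
#include <stdio.h>

static double to_res53(uint64_t v) {
    /* v / 2^64 always lies in [0,1); the double's 53-bit significand
     * limits how many of v's bits survive the conversion. */
    return v * (1.0/18446744073709551616.0L);
}

int main(void) {
    uint32_t x = 0x89abcdefU, y = 0x01234567U;  /* two 32-bit "draws" */

    /* to_res53_mix() packs x into the low half, y into the high half. */
    printf("%.17g\n", to_res53(x | ((uint64_t)y << 32)));
    printf("%.17g\n", to_res53(UINT64_MAX));    /* largest input, still < 1 */
    return 0;
}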
@@ -1,12 +1,3 @@
-#ifndef JEMALLOC_ENABLE_INLINE
-double ln_gamma(double x);
-double i_gamma(double x, double p, double ln_gamma_p);
-double pt_norm(double p);
-double pt_chi2(double p, double df, double ln_gamma_df_2);
-double pt_gamma(double p, double shape, double scale, double ln_gamma_shape);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(MATH_C_))
 /*
  * Compute the natural log of Gamma(x), accurate to 10 decimal places.
  *
@@ -15,7 +6,7 @@ double pt_gamma(double p, double shape, double scale, double ln_gamma_shape);
  * Pike, M.C., I.D. Hill (1966) Algorithm 291: Logarithm of Gamma function
  * [S14]. Communications of the ACM 9(9):684.
  */
-JEMALLOC_INLINE double
+static inline double
 ln_gamma(double x) {
     double f, z;
 
@@ -50,7 +41,7 @@ ln_gamma(double x) {
  * Bhattacharjee, G.P. (1970) Algorithm AS 32: The incomplete Gamma integral.
  * Applied Statistics 19:285-287.
  */
-JEMALLOC_INLINE double
+static inline double
 i_gamma(double x, double p, double ln_gamma_p) {
     double acu, factor, oflo, gin, term, rn, a, b, an, dif;
     double pn[6];
@@ -134,7 +125,7 @@ i_gamma(double x, double p, double ln_gamma_p) {
  * Wichura, M.J. (1988) Algorithm AS 241: The percentage points of the normal
  * distribution. Applied Statistics 37(3):477-484.
  */
-JEMALLOC_INLINE double
+static inline double
 pt_norm(double p) {
     double q, r, ret;
 
@@ -222,7 +213,7 @@ pt_norm(double p) {
  * Shea, B.L. (1991) Algorithm AS R85: A remark on AS 91: The percentage
  * points of the Chi^2 distribution. Applied Statistics 40(1):233-235.
  */
-JEMALLOC_INLINE double
+static inline double
 pt_chi2(double p, double df, double ln_gamma_df_2) {
     double e, aa, xx, c, ch, a, q, p1, p2, t, x, b, s1, s2, s3, s4, s5, s6;
     unsigned i;
@@ -309,8 +300,7 @@ pt_chi2(double p, double df, double ln_gamma_df_2) {
  * compute the upper limit on the definite integral from [0..z] that satisfies
  * p.
  */
-JEMALLOC_INLINE double
+static inline double
 pt_gamma(double p, double shape, double scale, double ln_gamma_shape) {
     return pt_chi2(p, shape * 2.0, ln_gamma_shape) * 0.5 * scale;
 }
-#endif
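pt_gamma() is a one-liner because of a standard identity: if X ~ Gamma(shape, scale), then 2*X/scale ~ Chi^2(2*shape), so a Gamma quantile is half the corresponding Chi^2 quantile times the scale. A usage sketch follows; gamma_median() is hypothetical, and the declarations are repeated so it compiles standalone (in the tree they come from the test suite's math header):

/* If X ~ Gamma(shape, scale), then 2*X/scale ~ Chi^2(2*shape), which is
 * exactly the reduction pt_gamma() performs. */
double ln_gamma(double x);
double pt_gamma(double p, double shape, double scale, double ln_gamma_shape);

/* Median of a Gamma(shape, scale) distribution. ln_gamma(shape) is passed
 * separately so callers can cache it across calls; for shape = 1.0 and
 * scale = 1.0 (the standard exponential) the result is ln(2) ~= 0.693. */
double gamma_median(double shape, double scale) {
    return pt_gamma(0.5, shape, scale, ln_gamma(shape));
}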
@@ -33,7 +33,7 @@
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 /**
  * @file SFMT.c
  * @brief SIMD oriented Fast Mersenne Twister(SFMT)
  *
@@ -108,7 +108,7 @@ struct sfmt_s {
 
 /*--------------------------------------
   FILE GLOBAL VARIABLES
   internal state, index counter and flag
   --------------------------------------*/
 
 /** a parity check vector which certificate the period of 2^{MEXP} */
@@ -117,18 +117,18 @@ static uint32_t parity[4] = {PARITY1, PARITY2, PARITY3, PARITY4};
 /*----------------
   STATIC FUNCTIONS
   ----------------*/
-JEMALLOC_INLINE_C int idxof(int i);
+static inline int idxof(int i);
 #if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2))
-JEMALLOC_INLINE_C void rshift128(w128_t *out, w128_t const *in, int shift);
+static inline void rshift128(w128_t *out, w128_t const *in, int shift);
-JEMALLOC_INLINE_C void lshift128(w128_t *out, w128_t const *in, int shift);
+static inline void lshift128(w128_t *out, w128_t const *in, int shift);
 #endif
-JEMALLOC_INLINE_C void gen_rand_all(sfmt_t *ctx);
+static inline void gen_rand_all(sfmt_t *ctx);
-JEMALLOC_INLINE_C void gen_rand_array(sfmt_t *ctx, w128_t *array, int size);
+static inline void gen_rand_array(sfmt_t *ctx, w128_t *array, int size);
-JEMALLOC_INLINE_C uint32_t func1(uint32_t x);
+static inline uint32_t func1(uint32_t x);
-JEMALLOC_INLINE_C uint32_t func2(uint32_t x);
+static inline uint32_t func2(uint32_t x);
 static void period_certification(sfmt_t *ctx);
 #if defined(BIG_ENDIAN64) && !defined(ONLY64)
-JEMALLOC_INLINE_C void swap(w128_t *array, int size);
+static inline void swap(w128_t *array, int size);
 #endif
 
 #if defined(HAVE_ALTIVEC)
@@ -138,15 +138,15 @@ JEMALLOC_INLINE_C void swap(w128_t *array, int size);
 #endif
 
 /**
  * This function simulate a 64-bit index of LITTLE ENDIAN
  * in BIG ENDIAN machine.
  */
 #ifdef ONLY64
-JEMALLOC_INLINE_C int idxof(int i) {
+static inline int idxof(int i) {
     return i ^ 1;
 }
 #else
-JEMALLOC_INLINE_C int idxof(int i) {
+static inline int idxof(int i) {
     return i;
 }
 #endif
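Under ONLY64 on a big-endian machine, idxof() flips the low index bit so that 32-bit accesses into the state array land on the same words a little-endian layout would produce: adjacent 32-bit words within each 64-bit unit swap places. A throwaway sketch of the mapping:

/* Sketch of the ONLY64 index mapping: 0,1,2,3,... -> 1,0,3,2,... */
#include <stdio.h>

static int idxof_only64(int i) {
    return i ^ 1;
}

int main(void) {
    for (int i = 0; i < 4; i++) {
        printf("%d -> %d\n", i, idxof_only64(i));
    }
    return 0;
}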
@@ -160,7 +160,7 @@ JEMALLOC_INLINE_C int idxof(int i) {
  */
 #if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2))
 #ifdef ONLY64
-JEMALLOC_INLINE_C void rshift128(w128_t *out, w128_t const *in, int shift) {
+static inline void rshift128(w128_t *out, w128_t const *in, int shift) {
     uint64_t th, tl, oh, ol;
 
     th = ((uint64_t)in->u[2] << 32) | ((uint64_t)in->u[3]);
@@ -175,7 +175,7 @@ JEMALLOC_INLINE_C void rshift128(w128_t *out, w128_t const *in, int shift) {
     out->u[3] = (uint32_t)oh;
 }
 #else
-JEMALLOC_INLINE_C void rshift128(w128_t *out, w128_t const *in, int shift) {
+static inline void rshift128(w128_t *out, w128_t const *in, int shift) {
     uint64_t th, tl, oh, ol;
 
     th = ((uint64_t)in->u[3] << 32) | ((uint64_t)in->u[2]);
@@ -199,7 +199,7 @@ JEMALLOC_INLINE_C void rshift128(w128_t *out, w128_t const *in, int shift) {
  * @param shift the shift value
  */
 #ifdef ONLY64
-JEMALLOC_INLINE_C void lshift128(w128_t *out, w128_t const *in, int shift) {
+static inline void lshift128(w128_t *out, w128_t const *in, int shift) {
     uint64_t th, tl, oh, ol;
 
     th = ((uint64_t)in->u[2] << 32) | ((uint64_t)in->u[3]);
@@ -214,7 +214,7 @@ JEMALLOC_INLINE_C void lshift128(w128_t *out, w128_t const *in, int shift) {
     out->u[3] = (uint32_t)oh;
 }
 #else
-JEMALLOC_INLINE_C void lshift128(w128_t *out, w128_t const *in, int shift) {
+static inline void lshift128(w128_t *out, w128_t const *in, int shift) {
     uint64_t th, tl, oh, ol;
 
     th = ((uint64_t)in->u[3] << 32) | ((uint64_t)in->u[2]);
@@ -241,37 +241,37 @@ JEMALLOC_INLINE_C void lshift128(w128_t *out, w128_t const *in, int shift) {
  */
 #if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2))
 #ifdef ONLY64
-JEMALLOC_INLINE_C void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c,
+static inline void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c,
     w128_t *d) {
     w128_t x;
     w128_t y;
 
     lshift128(&x, a, SL2);
     rshift128(&y, c, SR2);
     r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK2) ^ y.u[0]
         ^ (d->u[0] << SL1);
     r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK1) ^ y.u[1]
         ^ (d->u[1] << SL1);
     r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK4) ^ y.u[2]
         ^ (d->u[2] << SL1);
     r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK3) ^ y.u[3]
         ^ (d->u[3] << SL1);
 }
 #else
-JEMALLOC_INLINE_C void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c,
+static inline void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c,
     w128_t *d) {
     w128_t x;
     w128_t y;
 
     lshift128(&x, a, SL2);
     rshift128(&y, c, SR2);
     r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK1) ^ y.u[0]
         ^ (d->u[0] << SL1);
     r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK2) ^ y.u[1]
         ^ (d->u[1] << SL1);
     r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK3) ^ y.u[2]
         ^ (d->u[2] << SL1);
     r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK4) ^ y.u[3]
         ^ (d->u[3] << SL1);
 }
 #endif
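rshift128() and lshift128() emulate a shift of one 128-bit value using two 64-bit halves: bits shifted out of one half are OR-ed into the other. A generic standalone sketch of the right-shift case (in the SFMT sources the shift argument is a byte count, hence the shift * 8 in the real code):

/*
 * Generic sketch of the technique rshift128() uses: a 128-bit value held
 * as two 64-bit halves, with high-half bits carried into the low half.
 */
#include <stdint.h>
#include <stdio.h>

static void rshift128_bits(uint64_t hi, uint64_t lo, int nbits,
    uint64_t *out_hi, uint64_t *out_lo) {
    /* Assumes 0 < nbits < 64, as in SFMT where nbits = shift * 8. */
    *out_hi = hi >> nbits;
    *out_lo = (lo >> nbits) | (hi << (64 - nbits));
}

int main(void) {
    uint64_t hi, lo;

    rshift128_bits(0x0123456789abcdefULL, 0xfedcba9876543210ULL, 8,
        &hi, &lo);
    /* Expect 000123456789abcd effedcba98765432. */
    printf("%016llx %016llx\n", (unsigned long long)hi,
        (unsigned long long)lo);
    return 0;
}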
@@ -282,7 +282,7 @@ JEMALLOC_INLINE_C void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c,
  * This function fills the internal state array with pseudorandom
  * integers.
  */
-JEMALLOC_INLINE_C void gen_rand_all(sfmt_t *ctx) {
+static inline void gen_rand_all(sfmt_t *ctx) {
     int i;
     w128_t *r1, *r2;
 
@@ -306,10 +306,10 @@ JEMALLOC_INLINE_C void gen_rand_all(sfmt_t *ctx) {
  * This function fills the user-specified array with pseudorandom
  * integers.
  *
  * @param array an 128-bit array to be filled by pseudorandom numbers.
  * @param size number of 128-bit pseudorandom numbers to be generated.
  */
-JEMALLOC_INLINE_C void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) {
+static inline void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) {
     int i, j;
     w128_t *r1, *r2;
 
@@ -343,7 +343,7 @@ JEMALLOC_INLINE_C void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) {
 #endif
 
 #if defined(BIG_ENDIAN64) && !defined(ONLY64) && !defined(HAVE_ALTIVEC)
-JEMALLOC_INLINE_C void swap(w128_t *array, int size) {
+static inline void swap(w128_t *array, int size) {
     int i;
     uint32_t x, y;
 
@@ -476,7 +476,7 @@ uint32_t gen_rand32_range(sfmt_t *ctx, uint32_t limit) {
  * This function generates and returns 64-bit pseudorandom number.
  * init_gen_rand or init_by_array must be called before this function.
  * The function gen_rand64 should not be called after gen_rand32,
  * unless an initialization is again executed.
  * @return 64-bit pseudorandom number
  */
 uint64_t gen_rand64(sfmt_t *ctx) {
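The comment above is an API contract: once a context has produced 32-bit output, it must be re-initialized before 64-bit extraction, because the internal index may sit in the middle of a 64-bit word. A usage sketch honoring that contract, assuming the test harness header (as in the last file of this commit) provides the SFMT declarations; fini_gen_rand() is assumed here to be the matching teardown call:

#include "test/jemalloc_test.h"

static void
example_u64_stream(void) {
    sfmt_t *ctx = init_gen_rand(12345); /* seed a fresh context */
    uint64_t a = gen_rand64(ctx);       /* 64-bit draws only from now on; */
    uint64_t b = gen_rand64(ctx);       /* mixing in gen_rand32() first
                                         * would require re-initialization */

    (void)a; (void)b;
    fini_gen_rand(ctx);                 /* assumed teardown call */
}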
@@ -618,7 +618,7 @@ sfmt_t *init_gen_rand(uint32_t seed) {
 
     psfmt32[idxof(0)] = seed;
     for (i = 1; i < N32; i++) {
         psfmt32[idxof(i)] = 1812433253UL * (psfmt32[idxof(i - 1)]
             ^ (psfmt32[idxof(i - 1)] >> 30))
             + i;
     }
@@ -668,7 +668,7 @@ sfmt_t *init_by_array(uint32_t *init_key, int key_length) {
     } else {
         count = N32;
     }
     r = func1(psfmt32[idxof(0)] ^ psfmt32[idxof(mid)]
         ^ psfmt32[idxof(N32 - 1)]);
     psfmt32[idxof(mid)] += r;
     r += key_length;
@@ -677,7 +677,7 @@ sfmt_t *init_by_array(uint32_t *init_key, int key_length) {
 
     count--;
     for (i = 1, j = 0; (j < count) && (j < key_length); j++) {
         r = func1(psfmt32[idxof(i)] ^ psfmt32[idxof((i + mid) % N32)]
             ^ psfmt32[idxof((i + N32 - 1) % N32)]);
         psfmt32[idxof((i + mid) % N32)] += r;
         r += init_key[j] + i;
@@ -686,7 +686,7 @@ sfmt_t *init_by_array(uint32_t *init_key, int key_length) {
         i = (i + 1) % N32;
     }
     for (; j < count; j++) {
         r = func1(psfmt32[idxof(i)] ^ psfmt32[idxof((i + mid) % N32)]
             ^ psfmt32[idxof((i + N32 - 1) % N32)]);
         psfmt32[idxof((i + mid) % N32)] += r;
         r += i;
@@ -695,7 +695,7 @@ sfmt_t *init_by_array(uint32_t *init_key, int key_length) {
         i = (i + 1) % N32;
     }
     for (j = 0; j < N32; j++) {
         r = func2(psfmt32[idxof(i)] + psfmt32[idxof((i + mid) % N32)]
             + psfmt32[idxof((i + N32 - 1) % N32)]);
         psfmt32[idxof((i + mid) % N32)] ^= r;
         r -= i;
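The 1812433253 multiplier in init_gen_rand() is the same initialization constant MT19937's init_genrand() uses: each state word is a multiplicative step over the previous word folded with its own top bits, so seeds differing in any single bit diverge quickly. A scalar sketch of the recurrence (N is an arbitrary stand-in for N32, which depends on the configured MEXP):

/* Scalar sketch of the seeding recurrence used by init_gen_rand(). */
#include <inttypes.h>
#include <stdio.h>

#define N 8 /* stand-in for N32 */

int main(void) {
    uint32_t s[N];
    int i;

    s[0] = 12345;   /* the caller's seed */
    for (i = 1; i < N; i++) {
        /* Mix the previous word with its own top bits; 1812433253 is the
         * multiplier MT19937 uses for the same purpose. */
        s[i] = 1812433253UL * (s[i - 1] ^ (s[i - 1] >> 30)) + i;
    }
    for (i = 0; i < N; i++) {
        printf("s[%d] = 0x%08" PRIx32 "\n", i, s[i]);
    }
    return 0;
}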
@@ -1,6 +1,6 @@
 #include "test/jemalloc_test.h"
 
-JEMALLOC_INLINE_C void
+static inline void
 time_func(timedelta_t *timer, uint64_t nwarmup, uint64_t niter,
     void (*func)(void)) {
     uint64_t i;
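The body of time_func() is truncated in this view. A harness with this signature conventionally runs nwarmup untimed calls before timing niter calls; the sketch below shows that shape and is an assumption rather than the diffed body (timer_start()/timer_stop() are taken to be the test suite's timer hooks):

/* Hypothetical warmup-then-measure harness matching time_func()'s
 * signature; not the actual diffed body. */
static inline void
time_func_sketch(timedelta_t *timer, uint64_t nwarmup, uint64_t niter,
    void (*func)(void)) {
    uint64_t i;

    for (i = 0; i < nwarmup; i++) {     /* untimed warmup iterations */
        func();
    }
    timer_start(timer);
    for (i = 0; i < niter; i++) {       /* measured iterations */
        func();
    }
    timer_stop(timer);
}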