@@ -1,16 +1,16 @@
#ifndef JEMALLOC_INTERNAL_ARENA_TYPES_H
#define JEMALLOC_INTERNAL_ARENA_TYPES_H

-#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS)
+#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS)

/* Maximum number of regions in one slab. */
-#define LG_SLAB_MAXREGS (LG_PAGE - LG_TINY_MIN)
-#define SLAB_MAXREGS (1U << LG_SLAB_MAXREGS)
+#define LG_SLAB_MAXREGS (LG_PAGE - LG_TINY_MIN)
+#define SLAB_MAXREGS (1U << LG_SLAB_MAXREGS)

/* Default decay time in seconds. */
-#define DECAY_TIME_DEFAULT 10
+#define DECAY_TIME_DEFAULT 10
/* Number of event ticks between time checks. */
-#define DECAY_NTICKS_PER_UPDATE 1000
+#define DECAY_NTICKS_PER_UPDATE 1000

typedef struct arena_slab_data_s arena_slab_data_t;
typedef struct arena_bin_info_s arena_bin_info_t;

@@ -3,7 +3,7 @@
 * assertion failure.
 */
#ifndef assert
-#define assert(e) do { \
+#define assert(e) do { \
if (unlikely(config_debug && !(e))) { \
malloc_printf( \
"<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \
@@ -14,7 +14,7 @@
#endif

#ifndef not_reached
-#define not_reached() do { \
+#define not_reached() do { \
if (config_debug) { \
malloc_printf( \
"<jemalloc>: %s:%d: Unreachable code reached\n", \
@@ -26,7 +26,7 @@
#endif

#ifndef not_implemented
-#define not_implemented() do { \
+#define not_implemented() do { \
if (config_debug) { \
malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \
__FILE__, __LINE__); \
@@ -36,7 +36,7 @@
#endif

#ifndef assert_not_implemented
-#define assert_not_implemented(e) do { \
+#define assert_not_implemented(e) do { \
if (unlikely(config_debug && !(e))) { \
not_implemented(); \
} \

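The pattern in these hunks is jemalloc's debug-gated assertion: when the debug flag is a compile-time false, the whole check is dead code and disappears. A minimal standalone analogue (the config_debug constant and demo_ name are illustrative, not jemalloc's):

    #include <stdio.h>
    #include <stdlib.h>

    /* Debug-gated assert: with config_debug known false at compile time,
     * the compiler removes the entire do/while block. */
    static const int config_debug = 1;

    #define demo_assert(e) do { \
        if (config_debug && !(e)) { \
            fprintf(stderr, "%s:%d: Failed assertion: \"%s\"\n", \
                __FILE__, __LINE__, #e); \
            abort(); \
        } \
    } while (0)

    int main(void) {
        demo_assert(1 + 1 == 2); /* passes silently */
        puts("ok");
        return 0;
    }
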
@@ -2,11 +2,11 @@
#define JEMALLOC_INTERNAL_ATOMIC_EXTERNS_H

#if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
-#define atomic_read_u64(p) atomic_add_u64(p, 0)
+#define atomic_read_u64(p) atomic_add_u64(p, 0)
#endif
-#define atomic_read_u32(p) atomic_add_u32(p, 0)
-#define atomic_read_p(p) atomic_add_p(p, NULL)
-#define atomic_read_zu(p) atomic_add_zu(p, 0)
-#define atomic_read_u(p) atomic_add_u(p, 0)
+#define atomic_read_u32(p) atomic_add_u32(p, 0)
+#define atomic_read_p(p) atomic_add_p(p, NULL)
+#define atomic_read_zu(p) atomic_add_zu(p, 0)
+#define atomic_read_u(p) atomic_add_u(p, 0)

#endif /* JEMALLOC_INTERNAL_ATOMIC_EXTERNS_H */

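These wrappers read atomically by adding zero: atomic_add_u64(p, 0) returns the current value of *p without changing it. A self-contained C11 sketch of the same trick, with an illustrative demo_ name rather than jemalloc's own:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Reading via fetch-and-add of zero: atomic_fetch_add() returns the
     * value the object held immediately before the (no-op) addition. */
    static uint32_t
    demo_atomic_read_u32(_Atomic uint32_t *p) {
        return atomic_fetch_add(p, 0);
    }

    int main(void) {
        _Atomic uint32_t x = 42;
        printf("%u\n", demo_atomic_read_u32(&x)); /* 42 */
        return 0;
    }
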
@@ -2,18 +2,18 @@
#define JEMALLOC_INTERNAL_BITMAP_TYPES_H

/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */
-#define LG_BITMAP_MAXBITS LG_SLAB_MAXREGS
-#define BITMAP_MAXBITS (ZU(1) << LG_BITMAP_MAXBITS)
+#define LG_BITMAP_MAXBITS LG_SLAB_MAXREGS
+#define BITMAP_MAXBITS (ZU(1) << LG_BITMAP_MAXBITS)

typedef struct bitmap_level_s bitmap_level_t;
typedef struct bitmap_info_s bitmap_info_t;
typedef unsigned long bitmap_t;
-#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG
+#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG

/* Number of bits per group. */
-#define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3)
-#define BITMAP_GROUP_NBITS (1U << LG_BITMAP_GROUP_NBITS)
-#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1)
+#define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3)
+#define BITMAP_GROUP_NBITS (1U << LG_BITMAP_GROUP_NBITS)
+#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1)

/*
 * Do some analysis on how big the bitmap is before we use a tree. For a brute
@@ -25,22 +25,22 @@ typedef unsigned long bitmap_t;
#endif

/* Number of groups required to store a given number of bits. */
-#define BITMAP_BITS2GROUPS(nbits) \
+#define BITMAP_BITS2GROUPS(nbits) \
(((nbits) + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS)

/*
 * Number of groups required at a particular level for a given number of bits.
 */
-#define BITMAP_GROUPS_L0(nbits) \
+#define BITMAP_GROUPS_L0(nbits) \
BITMAP_BITS2GROUPS(nbits)
-#define BITMAP_GROUPS_L1(nbits) \
+#define BITMAP_GROUPS_L1(nbits) \
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(nbits))
-#define BITMAP_GROUPS_L2(nbits) \
+#define BITMAP_GROUPS_L2(nbits) \
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits))))
-#define BITMAP_GROUPS_L3(nbits) \
+#define BITMAP_GROUPS_L3(nbits) \
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \
BITMAP_BITS2GROUPS((nbits)))))
-#define BITMAP_GROUPS_L4(nbits) \
+#define BITMAP_GROUPS_L4(nbits) \
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits))))))

@@ -48,15 +48,15 @@ typedef unsigned long bitmap_t;
 * Assuming the number of levels, number of groups required for a given number
 * of bits.
 */
-#define BITMAP_GROUPS_1_LEVEL(nbits) \
+#define BITMAP_GROUPS_1_LEVEL(nbits) \
BITMAP_GROUPS_L0(nbits)
-#define BITMAP_GROUPS_2_LEVEL(nbits) \
+#define BITMAP_GROUPS_2_LEVEL(nbits) \
(BITMAP_GROUPS_1_LEVEL(nbits) + BITMAP_GROUPS_L1(nbits))
-#define BITMAP_GROUPS_3_LEVEL(nbits) \
+#define BITMAP_GROUPS_3_LEVEL(nbits) \
(BITMAP_GROUPS_2_LEVEL(nbits) + BITMAP_GROUPS_L2(nbits))
-#define BITMAP_GROUPS_4_LEVEL(nbits) \
+#define BITMAP_GROUPS_4_LEVEL(nbits) \
(BITMAP_GROUPS_3_LEVEL(nbits) + BITMAP_GROUPS_L3(nbits))
-#define BITMAP_GROUPS_5_LEVEL(nbits) \
+#define BITMAP_GROUPS_5_LEVEL(nbits) \
(BITMAP_GROUPS_4_LEVEL(nbits) + BITMAP_GROUPS_L4(nbits))

/*
@@ -92,9 +92,9 @@ typedef unsigned long bitmap_t;
 * unused trailing entries in bitmap_info_t structures; the bitmaps themselves
 * are not impacted.
 */
-#define BITMAP_MAX_LEVELS 5
+#define BITMAP_MAX_LEVELS 5

-#define BITMAP_INFO_INITIALIZER(nbits) { \
+#define BITMAP_INFO_INITIALIZER(nbits) { \
/* nbits. */ \
nbits, \
/* nlevels. */ \
@@ -119,9 +119,9 @@ typedef unsigned long bitmap_t;

#else /* BITMAP_USE_TREE */

-#define BITMAP_GROUPS_MAX BITMAP_BITS2GROUPS(BITMAP_MAXBITS)
+#define BITMAP_GROUPS_MAX BITMAP_BITS2GROUPS(BITMAP_MAXBITS)

-#define BITMAP_INFO_INITIALIZER(nbits) { \
+#define BITMAP_INFO_INITIALIZER(nbits) { \
/* nbits. */ \
nbits, \
/* ngroups. */ \

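BITMAP_BITS2GROUPS() is a ceiling division by the group width. A standalone check with a 64-bit group (LG_BITMAP_GROUP_NBITS == 6, as on LP64 where bitmap_t is unsigned long; local macro names are restated for the demo):

    #include <assert.h>
    #include <stdio.h>

    /* ceil(nbits / 64) via mask-and-shift, mirroring BITMAP_BITS2GROUPS. */
    #define LG_GROUP_NBITS 6
    #define GROUP_NBITS (1U << LG_GROUP_NBITS)
    #define GROUP_NBITS_MASK (GROUP_NBITS - 1)
    #define BITS2GROUPS(nbits) \
        (((nbits) + GROUP_NBITS_MASK) >> LG_GROUP_NBITS)

    int main(void) {
        assert(BITS2GROUPS(1) == 1);
        assert(BITS2GROUPS(64) == 1);
        assert(BITS2GROUPS(65) == 2);
        printf("512 bits -> %u groups\n", BITS2GROUPS(512)); /* 8 */
        return 0;
    }
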
@@ -9,14 +9,14 @@ typedef void ckh_hash_t (const void *, size_t[2]);
typedef bool ckh_keycomp_t (const void *, const void *);

/* Maintain counters used to get an idea of performance. */
-/* #define CKH_COUNT */
+/* #define CKH_COUNT */
/* Print counter values in ckh_delete() (requires CKH_COUNT). */
-/* #define CKH_VERBOSE */
+/* #define CKH_VERBOSE */

/*
 * There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket. Try to fit
 * one bucket per L1 cache line.
 */
-#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1)
+#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1)

#endif /* JEMALLOC_INTERNAL_CKH_TYPES_H */

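The bucket sizing works out neatly on LP64: LG_CACHELINE = 6 (64-byte lines) and LG_SIZEOF_PTR = 3, and the extra -1 accounts for each cuckoo cell holding a key/data pointer pair (16 bytes), so a bucket is 2^2 = 4 cells, exactly one cache line:

    #include <stdio.h>

    /* LG_CACHELINE - LG_SIZEOF_PTR - 1, with LP64 values plugged in. */
    #define LG_CKH_BUCKET_CELLS (6 - 3 - 1)

    int main(void) {
        unsigned cells = 1U << LG_CKH_BUCKET_CELLS;
        printf("%u cells x 16 B = %u B per bucket\n", cells, cells * 16);
        /* 4 cells x 16 B = 64 B: one L1 cache line. */
        return 0;
    }
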
@@ -13,7 +13,7 @@ void ctl_prefork(tsdn_t *tsdn);
void ctl_postfork_parent(tsdn_t *tsdn);
void ctl_postfork_child(tsdn_t *tsdn);

-#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \
+#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \
if (je_mallctl(name, oldp, oldlenp, newp, newlen) \
!= 0) { \
malloc_printf( \
@@ -23,7 +23,7 @@ void ctl_postfork_child(tsdn_t *tsdn);
} \
} while (0)

-#define xmallctlnametomib(name, mibp, miblenp) do { \
+#define xmallctlnametomib(name, mibp, miblenp) do { \
if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \
malloc_printf("<jemalloc>: Failure in " \
"xmallctlnametomib(\"%s\", ...)\n", name); \
@@ -31,7 +31,7 @@ void ctl_postfork_child(tsdn_t *tsdn);
} \
} while (0)

-#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \
+#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \
if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, \
newlen) != 0) { \
malloc_write( \

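xmallctl() and friends are abort-on-failure wrappers around the public mallctl() namespace. A sketch of the underlying call from application code (assumes a jemalloc build with statistics enabled and no API prefix; with a prefix the symbol is je_mallctl):

    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int main(void) {
        size_t allocated, sz = sizeof(allocated);
        /* Read an internal statistic; newp/newlen are NULL/0 for a pure read. */
        if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0) {
            printf("allocated: %zu bytes\n", allocated);
        }
        return 0;
    }
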
@@ -8,7 +8,7 @@ typedef enum {

dss_prec_limit = 3
} dss_prec_t;
-#define DSS_PREC_DEFAULT dss_prec_secondary
-#define DSS_DEFAULT "secondary"
+#define DSS_PREC_DEFAULT dss_prec_secondary
+#define DSS_DEFAULT "secondary"

#endif /* JEMALLOC_INTERNAL_EXTENT_DSS_TYPES_H */

@@ -3,6 +3,6 @@

typedef struct extent_s extent_t;

-#define EXTENT_HOOKS_INITIALIZER NULL
+#define EXTENT_HOOKS_INITIALIZER NULL

#endif /* JEMALLOC_INTERNAL_EXTENT_TYPES_H */

@@ -1,5 +1,5 @@
#ifndef JEMALLOC_INTERNAL_H
-#define JEMALLOC_INTERNAL_H
+#define JEMALLOC_INTERNAL_H

#ifdef __cplusplus
extern "C" {
@@ -12,7 +12,7 @@ extern "C" {
#include <sys/ktrace.h>
#endif

-#define JEMALLOC_NO_DEMANGLE
+#define JEMALLOC_NO_DEMANGLE
#ifdef JEMALLOC_JET
# define JEMALLOC_N(n) jet_##n
# include "jemalloc/internal/public_namespace.h"
@@ -166,7 +166,7 @@ static const bool have_thp =

#include "jemalloc/internal/ph.h"
#ifndef __PGI
-#define RB_COMPACT
+#define RB_COMPACT
#endif
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
@@ -224,34 +224,34 @@ typedef unsigned szind_t;
 *
 *   aaaaaaaa aaaatttt tttttttt 0znnnnnn
 */
-#define MALLOCX_ARENA_BITS 12
-#define MALLOCX_TCACHE_BITS 12
-#define MALLOCX_LG_ALIGN_BITS 6
-#define MALLOCX_ARENA_SHIFT 20
-#define MALLOCX_TCACHE_SHIFT 8
-#define MALLOCX_ARENA_MASK \
+#define MALLOCX_ARENA_BITS 12
+#define MALLOCX_TCACHE_BITS 12
+#define MALLOCX_LG_ALIGN_BITS 6
+#define MALLOCX_ARENA_SHIFT 20
+#define MALLOCX_TCACHE_SHIFT 8
+#define MALLOCX_ARENA_MASK \
(((1 << MALLOCX_ARENA_BITS) - 1) << MALLOCX_ARENA_SHIFT)
/* NB: Arena index bias decreases the maximum number of arenas by 1. */
-#define MALLOCX_ARENA_MAX ((1 << MALLOCX_ARENA_BITS) - 2)
-#define MALLOCX_TCACHE_MASK \
+#define MALLOCX_ARENA_MAX ((1 << MALLOCX_ARENA_BITS) - 2)
+#define MALLOCX_TCACHE_MASK \
(((1 << MALLOCX_TCACHE_BITS) - 1) << MALLOCX_TCACHE_SHIFT)
-#define MALLOCX_TCACHE_MAX ((1 << MALLOCX_TCACHE_BITS) - 3)
-#define MALLOCX_LG_ALIGN_MASK ((1 << MALLOCX_LG_ALIGN_BITS) - 1)
+#define MALLOCX_TCACHE_MAX ((1 << MALLOCX_TCACHE_BITS) - 3)
+#define MALLOCX_LG_ALIGN_MASK ((1 << MALLOCX_LG_ALIGN_BITS) - 1)
/* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */
-#define MALLOCX_ALIGN_GET_SPECIFIED(flags) \
+#define MALLOCX_ALIGN_GET_SPECIFIED(flags) \
(ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK))
-#define MALLOCX_ALIGN_GET(flags) \
+#define MALLOCX_ALIGN_GET(flags) \
(MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1))
-#define MALLOCX_ZERO_GET(flags) \
+#define MALLOCX_ZERO_GET(flags) \
((bool)(flags & MALLOCX_ZERO))

-#define MALLOCX_TCACHE_GET(flags) \
+#define MALLOCX_TCACHE_GET(flags) \
(((unsigned)((flags & MALLOCX_TCACHE_MASK) >> MALLOCX_TCACHE_SHIFT)) - 2)
-#define MALLOCX_ARENA_GET(flags) \
+#define MALLOCX_ARENA_GET(flags) \
(((unsigned)(((unsigned)flags) >> MALLOCX_ARENA_SHIFT)) - 1)

/* Smallest size class to support. */
-#define TINY_MIN (1U << LG_TINY_MIN)
+#define TINY_MIN (1U << LG_TINY_MIN)

/*
 * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
@@ -312,25 +312,25 @@ typedef unsigned szind_t;
# endif
#endif

-#define QUANTUM ((size_t)(1U << LG_QUANTUM))
-#define QUANTUM_MASK (QUANTUM - 1)
+#define QUANTUM ((size_t)(1U << LG_QUANTUM))
+#define QUANTUM_MASK (QUANTUM - 1)

/* Return the smallest quantum multiple that is >= a. */
-#define QUANTUM_CEILING(a) \
+#define QUANTUM_CEILING(a) \
(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)

-#define LONG ((size_t)(1U << LG_SIZEOF_LONG))
-#define LONG_MASK (LONG - 1)
+#define LONG ((size_t)(1U << LG_SIZEOF_LONG))
+#define LONG_MASK (LONG - 1)

/* Return the smallest long multiple that is >= a. */
-#define LONG_CEILING(a) \
+#define LONG_CEILING(a) \
(((a) + LONG_MASK) & ~LONG_MASK)

-#define SIZEOF_PTR (1U << LG_SIZEOF_PTR)
-#define PTR_MASK (SIZEOF_PTR - 1)
+#define SIZEOF_PTR (1U << LG_SIZEOF_PTR)
+#define PTR_MASK (SIZEOF_PTR - 1)

/* Return the smallest (void *) multiple that is >= a. */
-#define PTR_CEILING(a) \
+#define PTR_CEILING(a) \
(((a) + PTR_MASK) & ~PTR_MASK)

/*
@@ -340,24 +340,24 @@ typedef unsigned szind_t;
 * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
 * only handle raw constants.
 */
-#define LG_CACHELINE 6
-#define CACHELINE 64
-#define CACHELINE_MASK (CACHELINE - 1)
+#define LG_CACHELINE 6
+#define CACHELINE 64
+#define CACHELINE_MASK (CACHELINE - 1)

/* Return the smallest cacheline multiple that is >= s. */
-#define CACHELINE_CEILING(s) \
+#define CACHELINE_CEILING(s) \
(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)

/* Return the nearest aligned address at or below a. */
-#define ALIGNMENT_ADDR2BASE(a, alignment) \
+#define ALIGNMENT_ADDR2BASE(a, alignment) \
((void *)((uintptr_t)(a) & ((~(alignment)) + 1)))

/* Return the offset between a and the nearest aligned address at or below a. */
-#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
+#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
((size_t)((uintptr_t)(a) & (alignment - 1)))

/* Return the smallest alignment multiple that is >= s. */
-#define ALIGNMENT_CEILING(s, alignment) \
+#define ALIGNMENT_CEILING(s, alignment) \
(((s) + (alignment - 1)) & ((~(alignment)) + 1))

/* Declare a variable-length array. */

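The MALLOCX flag word packs lg(alignment), a zero flag, a biased tcache index, and a biased arena index into one int, per the layout comment above (aaaaaaaa aaaatttt tttttttt 0znnnnnn). A standalone round-trip of the arena and alignment fields, with the relevant constants restated locally:

    #include <stdio.h>

    #define LG_ALIGN_MASK ((1 << 6) - 1) /* MALLOCX_LG_ALIGN_MASK */
    #define ZERO_BIT (1 << 6)            /* MALLOCX_ZERO */
    #define ARENA_SHIFT 20               /* MALLOCX_ARENA_SHIFT */

    int main(void) {
        /* Encode: 8-byte alignment (lg = 3), zeroed memory, arena 5 (bias +1). */
        int flags = 3 | ZERO_BIT | ((5 + 1) << ARENA_SHIFT);
        /* Decode, mirroring the MALLOCX_*_GET() macros. */
        printf("align=%zu zero=%d arena=%u\n",
            (size_t)1 << (flags & LG_ALIGN_MASK),
            (flags & ZERO_BIT) != 0,
            ((unsigned)flags >> ARENA_SHIFT) - 1); /* align=8 zero=1 arena=5 */
        return 0;
    }
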
@@ -1,5 +1,5 @@
#ifndef JEMALLOC_INTERNAL_DECLS_H
-#define JEMALLOC_INTERNAL_DECLS_H
+#define JEMALLOC_INTERNAL_DECLS_H

#include <math.h>
#ifdef _WIN32

@@ -1,5 +1,5 @@
#ifndef JEMALLOC_INTERNAL_DEFS_H_
-#define JEMALLOC_INTERNAL_DEFS_H_
+#define JEMALLOC_INTERNAL_DEFS_H_
/*
 * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
 * public APIs to be prefixed. This makes it possible, with some care, to use

@@ -41,15 +41,15 @@
# define UNUSED
#endif

-#define ZU(z) ((size_t)z)
-#define ZI(z) ((ssize_t)z)
-#define QU(q) ((uint64_t)q)
-#define QI(q) ((int64_t)q)
+#define ZU(z) ((size_t)z)
+#define ZI(z) ((ssize_t)z)
+#define QU(q) ((uint64_t)q)
+#define QI(q) ((int64_t)q)

-#define KZU(z) ZU(z##ULL)
-#define KZI(z) ZI(z##LL)
-#define KQU(q) QU(q##ULL)
-#define KQI(q) QI(q##LL)
+#define KZU(z) ZU(z##ULL)
+#define KZI(z) ZI(z##LL)
+#define KQU(q) QU(q##ULL)
+#define KQI(q) QI(q##LL)

#ifndef __DECONST
# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))

@@ -4,6 +4,6 @@
typedef struct nstime_s nstime_t;

/* Maximum supported number of seconds (~584 years). */
-#define NSTIME_SEC_MAX KQU(18446744072)
+#define NSTIME_SEC_MAX KQU(18446744072)

#endif /* JEMALLOC_INTERNAL_NSTIME_TYPES_H */

@@ -5,23 +5,23 @@
#ifdef PAGE_MASK
# undef PAGE_MASK
#endif
-#define PAGE ((size_t)(1U << LG_PAGE))
-#define PAGE_MASK ((size_t)(PAGE - 1))
+#define PAGE ((size_t)(1U << LG_PAGE))
+#define PAGE_MASK ((size_t)(PAGE - 1))
/* Return the page base address for the page containing address a. */
-#define PAGE_ADDR2BASE(a) \
+#define PAGE_ADDR2BASE(a) \
((void *)((uintptr_t)(a) & ~PAGE_MASK))
/* Return the smallest pagesize multiple that is >= s. */
-#define PAGE_CEILING(s) \
+#define PAGE_CEILING(s) \
(((s) + PAGE_MASK) & ~PAGE_MASK)

/* Huge page size. LG_HUGEPAGE is determined by the configure script. */
-#define HUGEPAGE ((size_t)(1U << LG_HUGEPAGE))
-#define HUGEPAGE_MASK ((size_t)(HUGEPAGE - 1))
+#define HUGEPAGE ((size_t)(1U << LG_HUGEPAGE))
+#define HUGEPAGE_MASK ((size_t)(HUGEPAGE - 1))
/* Return the huge page base address for the huge page containing address a. */
-#define HUGEPAGE_ADDR2BASE(a) \
+#define HUGEPAGE_ADDR2BASE(a) \
((void *)((uintptr_t)(a) & ~HUGEPAGE_MASK))
/* Return the smallest pagesize multiple that is >= s. */
-#define HUGEPAGE_CEILING(s) \
+#define HUGEPAGE_CEILING(s) \
(((s) + HUGEPAGE_MASK) & ~HUGEPAGE_MASK)

/* PAGES_CAN_PURGE_LAZY is defined if lazy purging is supported. */

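PAGE_CEILING() and HUGEPAGE_CEILING() both round up with add-mask-then-clear arithmetic, which is exact for power-of-two sizes. A worked instance with 4 KiB pages (LG_PAGE == 12, a common configuration):

    #include <stddef.h>
    #include <stdio.h>

    #define LG_PAGE 12
    #define PAGE ((size_t)(1U << LG_PAGE))
    #define PAGE_MASK ((size_t)(PAGE - 1))
    #define PAGE_CEILING(s) (((s) + PAGE_MASK) & ~PAGE_MASK)

    int main(void) {
        printf("%zu %zu %zu\n",
            PAGE_CEILING((size_t)1),     /* 4096 */
            PAGE_CEILING((size_t)4096),  /* 4096 */
            PAGE_CEILING((size_t)4097)); /* 8192 */
        return 0;
    }
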
@@ -13,10 +13,10 @@
 */

#ifndef PH_H_
-#define PH_H_
+#define PH_H_

/* Node structure. */
-#define phn(a_type) \
+#define phn(a_type) \
struct { \
a_type *phn_prev; \
a_type *phn_next; \
@@ -24,31 +24,31 @@ struct { \
}

/* Root structure. */
-#define ph(a_type) \
+#define ph(a_type) \
struct { \
a_type *ph_root; \
}

/* Internal utility macros. */
-#define phn_lchild_get(a_type, a_field, a_phn) \
+#define phn_lchild_get(a_type, a_field, a_phn) \
(a_phn->a_field.phn_lchild)
-#define phn_lchild_set(a_type, a_field, a_phn, a_lchild) do { \
+#define phn_lchild_set(a_type, a_field, a_phn, a_lchild) do { \
a_phn->a_field.phn_lchild = a_lchild; \
} while (0)

-#define phn_next_get(a_type, a_field, a_phn) \
+#define phn_next_get(a_type, a_field, a_phn) \
(a_phn->a_field.phn_next)
-#define phn_prev_set(a_type, a_field, a_phn, a_prev) do { \
+#define phn_prev_set(a_type, a_field, a_phn, a_prev) do { \
a_phn->a_field.phn_prev = a_prev; \
} while (0)

-#define phn_prev_get(a_type, a_field, a_phn) \
+#define phn_prev_get(a_type, a_field, a_phn) \
(a_phn->a_field.phn_prev)
-#define phn_next_set(a_type, a_field, a_phn, a_next) do { \
+#define phn_next_set(a_type, a_field, a_phn, a_next) do { \
a_phn->a_field.phn_next = a_next; \
} while (0)

-#define phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, a_cmp) do { \
+#define phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, a_cmp) do { \
a_type *phn0child; \
\
assert(a_phn0 != NULL); \
@@ -64,7 +64,7 @@ struct { \
phn_lchild_set(a_type, a_field, a_phn0, a_phn1); \
} while (0)

-#define phn_merge(a_type, a_field, a_phn0, a_phn1, a_cmp, r_phn) do { \
+#define phn_merge(a_type, a_field, a_phn0, a_phn1, a_cmp, r_phn) do { \
if (a_phn0 == NULL) { \
r_phn = a_phn1; \
} else if (a_phn1 == NULL) { \
@@ -80,7 +80,7 @@ struct { \
} \
} while (0)

-#define ph_merge_siblings(a_type, a_field, a_phn, a_cmp, r_phn) do { \
+#define ph_merge_siblings(a_type, a_field, a_phn, a_cmp, r_phn) do { \
a_type *head = NULL; \
a_type *tail = NULL; \
a_type *phn0 = a_phn; \
@@ -167,7 +167,7 @@ struct { \
r_phn = phn0; \
} while (0)

-#define ph_merge_aux(a_type, a_field, a_ph, a_cmp) do { \
+#define ph_merge_aux(a_type, a_field, a_ph, a_cmp) do { \
a_type *phn = phn_next_get(a_type, a_field, a_ph->ph_root); \
if (phn != NULL) { \
phn_prev_set(a_type, a_field, a_ph->ph_root, NULL); \
@@ -180,7 +180,7 @@ struct { \
} \
} while (0)

-#define ph_merge_children(a_type, a_field, a_phn, a_cmp, r_phn) do { \
+#define ph_merge_children(a_type, a_field, a_phn, a_cmp, r_phn) do { \
a_type *lchild = phn_lchild_get(a_type, a_field, a_phn); \
if (lchild == NULL) { \
r_phn = NULL; \
@@ -194,7 +194,7 @@ struct { \
 * The ph_proto() macro generates function prototypes that correspond to the
 * functions generated by an equivalently parameterized call to ph_gen().
 */
-#define ph_proto(a_attr, a_prefix, a_ph_type, a_type) \
+#define ph_proto(a_attr, a_prefix, a_ph_type, a_type) \
a_attr void a_prefix##new(a_ph_type *ph); \
a_attr bool a_prefix##empty(a_ph_type *ph); \
a_attr a_type *a_prefix##first(a_ph_type *ph); \
@@ -206,7 +206,7 @@ a_attr void a_prefix##remove(a_ph_type *ph, a_type *phn);
 * The ph_gen() macro generates a type-specific pairing heap implementation,
 * based on the above cpp macros.
 */
-#define ph_gen(a_attr, a_prefix, a_ph_type, a_type, a_field, a_cmp) \
+#define ph_gen(a_attr, a_prefix, a_ph_type, a_type, a_field, a_cmp) \
a_attr void \
a_prefix##new(a_ph_type *ph) { \
memset(ph, 0, sizeof(ph(a_type))); \

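ph_gen() stamps out a full pairing-heap implementation for one node type. A usage sketch — it assumes jemalloc's internal ph.h can be included on its own with assert/memset available, and all heap_/node_ names below are illustrative:

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <string.h>
    #include "jemalloc/internal/ph.h"

    typedef struct node_s node_t;
    struct node_s {
        int key;
        phn(node_t) link; /* intrusive heap linkage */
    };
    typedef ph(node_t) heap_t;

    static int
    node_cmp(const node_t *a, const node_t *b) {
        return (a->key > b->key) - (a->key < b->key);
    }

    /* Generates static heap_new(), heap_empty(), heap_first(), heap_insert(),
     * heap_remove_first() and heap_remove() for heap_t/node_t. */
    ph_gen(static, heap_, heap_t, node_t, link, node_cmp)

    int main(void) {
        heap_t h;
        node_t a = {7, {NULL, NULL, NULL}}, b = {3, {NULL, NULL, NULL}};
        heap_new(&h);
        heap_insert(&h, &a);
        heap_insert(&h, &b);
        assert(heap_first(&h)->key == 3); /* min element under node_cmp */
        return 0;
    }
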
@@ -1,5 +1,5 @@
#!/bin/sh

for symbol in `cat $1` ; do
-echo "#define ${symbol} JEMALLOC_N(${symbol})"
+echo "#define ${symbol} JEMALLOC_N(${symbol})"
done

@@ -20,10 +20,10 @@
 * bits.
 */

-#define PRNG_A_32 UINT32_C(1103515241)
-#define PRNG_C_32 UINT32_C(12347)
+#define PRNG_A_32 UINT32_C(1103515241)
+#define PRNG_C_32 UINT32_C(12347)

-#define PRNG_A_64 UINT64_C(6364136223846793005)
-#define PRNG_C_64 UINT64_C(1442695040888963407)
+#define PRNG_A_64 UINT64_C(6364136223846793005)
+#define PRNG_C_64 UINT64_C(1442695040888963407)

#endif /* JEMALLOC_INTERNAL_PRNG_TYPES_H */

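These are multiplier/increment pairs for a linear congruential generator, state' = a*state + c mod 2^n, where the mod comes free from fixed-width unsigned arithmetic. A standalone demo of the 64-bit variant:

    #include <stdint.h>
    #include <stdio.h>

    #define PRNG_A_64 UINT64_C(6364136223846793005)
    #define PRNG_C_64 UINT64_C(1442695040888963407)

    int main(void) {
        uint64_t state = 42;
        for (int i = 0; i < 3; i++) {
            /* One LCG step; uint64_t wraparound is the "mod 2^64". */
            state = state * PRNG_A_64 + PRNG_C_64;
            printf("%016llx\n", (unsigned long long)state);
        }
        return 0;
    }
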
@@ -13,43 +13,43 @@ typedef struct prof_tdata_s prof_tdata_t;
#else
# define PROF_PREFIX_DEFAULT ""
#endif
-#define LG_PROF_SAMPLE_DEFAULT 19
-#define LG_PROF_INTERVAL_DEFAULT -1
+#define LG_PROF_SAMPLE_DEFAULT 19
+#define LG_PROF_INTERVAL_DEFAULT -1

/*
 * Hard limit on stack backtrace depth. The version of prof_backtrace() that
 * is based on __builtin_return_address() necessarily has a hard-coded number
 * of backtrace frame handlers, and should be kept in sync with this setting.
 */
-#define PROF_BT_MAX 128
+#define PROF_BT_MAX 128

/* Initial hash table size. */
-#define PROF_CKH_MINITEMS 64
+#define PROF_CKH_MINITEMS 64

/* Size of memory buffer to use when writing dump files. */
-#define PROF_DUMP_BUFSIZE 65536
+#define PROF_DUMP_BUFSIZE 65536

/* Size of stack-allocated buffer used by prof_printf(). */
-#define PROF_PRINTF_BUFSIZE 128
+#define PROF_PRINTF_BUFSIZE 128

/*
 * Number of mutexes shared among all gctx's. No space is allocated for these
 * unless profiling is enabled, so it's okay to over-provision.
 */
-#define PROF_NCTX_LOCKS 1024
+#define PROF_NCTX_LOCKS 1024

/*
 * Number of mutexes shared among all tdata's. No space is allocated for these
 * unless profiling is enabled, so it's okay to over-provision.
 */
-#define PROF_NTDATA_LOCKS 256
+#define PROF_NTDATA_LOCKS 256

/*
 * prof_tdata pointers close to NULL are used to encode state information that
 * is used for cleaning up during thread shutdown.
 */
-#define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1)
-#define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2)
-#define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY
+#define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1)
+#define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2)
+#define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY

#endif /* JEMALLOC_INTERNAL_PROF_TYPES_H */

@@ -2,5 +2,5 @@

for nm in `cat $1` ; do
n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
-echo "#define je_${n} JEMALLOC_N(${n})"
+echo "#define je_${n} JEMALLOC_N(${n})"
done

@@ -2,61 +2,61 @@
#define JEMALLOC_INTERNAL_QL_H

/* List definitions. */
-#define ql_head(a_type) \
+#define ql_head(a_type) \
struct { \
a_type *qlh_first; \
}

-#define ql_head_initializer(a_head) {NULL}
+#define ql_head_initializer(a_head) {NULL}

-#define ql_elm(a_type) qr(a_type)
+#define ql_elm(a_type) qr(a_type)

/* List functions. */
-#define ql_new(a_head) do { \
+#define ql_new(a_head) do { \
(a_head)->qlh_first = NULL; \
} while (0)

-#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field)
+#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field)

-#define ql_first(a_head) ((a_head)->qlh_first)
+#define ql_first(a_head) ((a_head)->qlh_first)

-#define ql_last(a_head, a_field) \
+#define ql_last(a_head, a_field) \
((ql_first(a_head) != NULL) \
? qr_prev(ql_first(a_head), a_field) : NULL)

-#define ql_next(a_head, a_elm, a_field) \
+#define ql_next(a_head, a_elm, a_field) \
((ql_last(a_head, a_field) != (a_elm)) \
? qr_next((a_elm), a_field) : NULL)

-#define ql_prev(a_head, a_elm, a_field) \
+#define ql_prev(a_head, a_elm, a_field) \
((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field) \
: NULL)

-#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \
+#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \
qr_before_insert((a_qlelm), (a_elm), a_field); \
if (ql_first(a_head) == (a_qlelm)) { \
ql_first(a_head) = (a_elm); \
} \
} while (0)

-#define ql_after_insert(a_qlelm, a_elm, a_field) \
+#define ql_after_insert(a_qlelm, a_elm, a_field) \
qr_after_insert((a_qlelm), (a_elm), a_field)

-#define ql_head_insert(a_head, a_elm, a_field) do { \
+#define ql_head_insert(a_head, a_elm, a_field) do { \
if (ql_first(a_head) != NULL) { \
qr_before_insert(ql_first(a_head), (a_elm), a_field); \
} \
ql_first(a_head) = (a_elm); \
} while (0)

-#define ql_tail_insert(a_head, a_elm, a_field) do { \
+#define ql_tail_insert(a_head, a_elm, a_field) do { \
if (ql_first(a_head) != NULL) { \
qr_before_insert(ql_first(a_head), (a_elm), a_field); \
} \
ql_first(a_head) = qr_next((a_elm), a_field); \
} while (0)

-#define ql_remove(a_head, a_elm, a_field) do { \
+#define ql_remove(a_head, a_elm, a_field) do { \
if (ql_first(a_head) == (a_elm)) { \
ql_first(a_head) = qr_next(ql_first(a_head), a_field); \
} \
@@ -67,20 +67,20 @@ struct { \
} \
} while (0)

-#define ql_head_remove(a_head, a_type, a_field) do { \
+#define ql_head_remove(a_head, a_type, a_field) do { \
a_type *t = ql_first(a_head); \
ql_remove((a_head), t, a_field); \
} while (0)

-#define ql_tail_remove(a_head, a_type, a_field) do { \
+#define ql_tail_remove(a_head, a_type, a_field) do { \
a_type *t = ql_last(a_head, a_field); \
ql_remove((a_head), t, a_field); \
} while (0)

-#define ql_foreach(a_var, a_head, a_field) \
+#define ql_foreach(a_var, a_head, a_field) \
qr_foreach((a_var), ql_first(a_head), a_field)

-#define ql_reverse_foreach(a_var, a_head, a_field) \
+#define ql_reverse_foreach(a_var, a_head, a_field) \
qr_reverse_foreach((a_var), ql_first(a_head), a_field)

#endif /* JEMALLOC_INTERNAL_QL_H */

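ql is a thin veneer over the qr ring below: the head only remembers the first element, and ql_last() is just the first element's ring predecessor. A usage sketch (assumes the internal headers are includable standalone; widget names are illustrative):

    #include <stddef.h>
    #include <stdio.h>
    #include "jemalloc/internal/qr.h" /* ql is layered on qr */
    #include "jemalloc/internal/ql.h"

    typedef struct widget_s widget_t;
    struct widget_s {
        int id;
        ql_elm(widget_t) link; /* intrusive list linkage */
    };

    int main(void) {
        ql_head(widget_t) head;
        widget_t a = {1}, b = {2};
        widget_t *w;
        ql_new(&head);
        ql_elm_new(&a, link);
        ql_elm_new(&b, link);
        ql_tail_insert(&head, &a, link);
        ql_tail_insert(&head, &b, link);
        ql_foreach(w, &head, link) {
            printf("widget %d\n", w->id); /* 1, then 2 */
        }
        return 0;
    }
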
@@ -2,37 +2,37 @@
#define JEMALLOC_INTERNAL_QR_H

/* Ring definitions. */
-#define qr(a_type) \
+#define qr(a_type) \
struct { \
a_type *qre_next; \
a_type *qre_prev; \
}

/* Ring functions. */
-#define qr_new(a_qr, a_field) do { \
+#define qr_new(a_qr, a_field) do { \
(a_qr)->a_field.qre_next = (a_qr); \
(a_qr)->a_field.qre_prev = (a_qr); \
} while (0)

-#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next)
+#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next)

-#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev)
+#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev)

-#define qr_before_insert(a_qrelm, a_qr, a_field) do { \
+#define qr_before_insert(a_qrelm, a_qr, a_field) do { \
(a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \
(a_qr)->a_field.qre_next = (a_qrelm); \
(a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \
(a_qrelm)->a_field.qre_prev = (a_qr); \
} while (0)

-#define qr_after_insert(a_qrelm, a_qr, a_field) do { \
+#define qr_after_insert(a_qrelm, a_qr, a_field) do { \
(a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \
(a_qr)->a_field.qre_prev = (a_qrelm); \
(a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \
(a_qrelm)->a_field.qre_next = (a_qr); \
} while (0)

-#define qr_meld(a_qr_a, a_qr_b, a_type, a_field) do { \
+#define qr_meld(a_qr_a, a_qr_b, a_type, a_field) do { \
a_type *t; \
(a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \
(a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \
@@ -45,10 +45,10 @@ struct { \
 * qr_meld() and qr_split() are functionally equivalent, so there's no need to
 * have two copies of the code.
 */
-#define qr_split(a_qr_a, a_qr_b, a_type, a_field) \
+#define qr_split(a_qr_a, a_qr_b, a_type, a_field) \
qr_meld((a_qr_a), (a_qr_b), a_type, a_field)

-#define qr_remove(a_qr, a_field) do { \
+#define qr_remove(a_qr, a_field) do { \
(a_qr)->a_field.qre_prev->a_field.qre_next \
= (a_qr)->a_field.qre_next; \
(a_qr)->a_field.qre_next->a_field.qre_prev \
@@ -57,13 +57,13 @@ struct { \
(a_qr)->a_field.qre_prev = (a_qr); \
} while (0)

-#define qr_foreach(var, a_qr, a_field) \
+#define qr_foreach(var, a_qr, a_field) \
for ((var) = (a_qr); \
(var) != NULL; \
(var) = (((var)->a_field.qre_next != (a_qr)) \
? (var)->a_field.qre_next : NULL))

-#define qr_reverse_foreach(var, a_qr, a_field) \
+#define qr_reverse_foreach(var, a_qr, a_field) \
for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \
(var) != NULL; \
(var) = (((var) != (a_qr)) \

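The qr macros maintain an intrusive circular doubly-linked ring with no sentinel allocation: a fresh element points at itself, and inserts splice in constant time. A self-contained usage sketch (assumes the header is includable on its own):

    #include <stddef.h>
    #include <stdio.h>
    #include "jemalloc/internal/qr.h"

    typedef struct elm_s elm_t;
    struct elm_s {
        int val;
        qr(elm_t) link; /* intrusive ring linkage */
    };

    int main(void) {
        elm_t a = {1}, b = {2}, c = {3};
        elm_t *it;
        qr_new(&a, link);              /* a is a one-element ring */
        qr_after_insert(&a, &b, link); /* ring: a -> b */
        qr_after_insert(&b, &c, link); /* ring: a -> b -> c */
        qr_foreach(it, &a, link) {
            printf("%d\n", it->val);   /* 1 2 3 */
        }
        return 0;
    }
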
@@ -20,17 +20,17 @@
 */

#ifndef RB_H_
-#define RB_H_
+#define RB_H_

#ifdef RB_COMPACT
/* Node structure. */
-#define rb_node(a_type) \
+#define rb_node(a_type) \
struct { \
a_type *rbn_left; \
a_type *rbn_right_red; \
}
#else
-#define rb_node(a_type) \
+#define rb_node(a_type) \
struct { \
a_type *rbn_left; \
a_type *rbn_right; \
@@ -39,48 +39,48 @@ struct { \
#endif

/* Root structure. */
-#define rb_tree(a_type) \
+#define rb_tree(a_type) \
struct { \
a_type *rbt_root; \
}

/* Left accessors. */
-#define rbtn_left_get(a_type, a_field, a_node) \
+#define rbtn_left_get(a_type, a_field, a_node) \
((a_node)->a_field.rbn_left)
-#define rbtn_left_set(a_type, a_field, a_node, a_left) do { \
+#define rbtn_left_set(a_type, a_field, a_node, a_left) do { \
(a_node)->a_field.rbn_left = a_left; \
} while (0)

#ifdef RB_COMPACT
/* Right accessors. */
-#define rbtn_right_get(a_type, a_field, a_node) \
+#define rbtn_right_get(a_type, a_field, a_node) \
((a_type *) (((intptr_t) (a_node)->a_field.rbn_right_red) \
& ((ssize_t)-2)))
-#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \
+#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \
(a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) a_right) \
| (((uintptr_t) (a_node)->a_field.rbn_right_red) & ((size_t)1))); \
} while (0)

/* Color accessors. */
-#define rbtn_red_get(a_type, a_field, a_node) \
+#define rbtn_red_get(a_type, a_field, a_node) \
((bool) (((uintptr_t) (a_node)->a_field.rbn_right_red) \
& ((size_t)1)))
-#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \
+#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \
(a_node)->a_field.rbn_right_red = (a_type *) ((((intptr_t) \
(a_node)->a_field.rbn_right_red) & ((ssize_t)-2)) \
| ((ssize_t)a_red)); \
} while (0)
-#define rbtn_red_set(a_type, a_field, a_node) do { \
+#define rbtn_red_set(a_type, a_field, a_node) do { \
(a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) \
(a_node)->a_field.rbn_right_red) | ((size_t)1)); \
} while (0)
-#define rbtn_black_set(a_type, a_field, a_node) do { \
+#define rbtn_black_set(a_type, a_field, a_node) do { \
(a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t) \
(a_node)->a_field.rbn_right_red) & ((ssize_t)-2)); \
} while (0)

/* Node initializer. */
-#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \
+#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \
/* Bookkeeping bit cannot be used by node pointer. */ \
assert(((uintptr_t)(a_node) & 0x1) == 0); \
rbtn_left_set(a_type, a_field, (a_node), NULL); \
@@ -89,27 +89,27 @@ struct { \
} while (0)
#else
/* Right accessors. */
-#define rbtn_right_get(a_type, a_field, a_node) \
+#define rbtn_right_get(a_type, a_field, a_node) \
((a_node)->a_field.rbn_right)
-#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \
+#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \
(a_node)->a_field.rbn_right = a_right; \
} while (0)

/* Color accessors. */
-#define rbtn_red_get(a_type, a_field, a_node) \
+#define rbtn_red_get(a_type, a_field, a_node) \
((a_node)->a_field.rbn_red)
-#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \
+#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \
(a_node)->a_field.rbn_red = (a_red); \
} while (0)
-#define rbtn_red_set(a_type, a_field, a_node) do { \
+#define rbtn_red_set(a_type, a_field, a_node) do { \
(a_node)->a_field.rbn_red = true; \
} while (0)
-#define rbtn_black_set(a_type, a_field, a_node) do { \
+#define rbtn_black_set(a_type, a_field, a_node) do { \
(a_node)->a_field.rbn_red = false; \
} while (0)

/* Node initializer. */
-#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \
+#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \
rbtn_left_set(a_type, a_field, (a_node), NULL); \
rbtn_right_set(a_type, a_field, (a_node), NULL); \
rbtn_red_set(a_type, a_field, (a_node)); \
@@ -117,12 +117,12 @@ struct { \
#endif

/* Tree initializer. */
-#define rb_new(a_type, a_field, a_rbt) do { \
+#define rb_new(a_type, a_field, a_rbt) do { \
(a_rbt)->rbt_root = NULL; \
} while (0)

/* Internal utility macros. */
-#define rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do { \
+#define rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do { \
(r_node) = (a_root); \
if ((r_node) != NULL) { \
for (; \
@@ -132,7 +132,7 @@ struct { \
} \
} while (0)

-#define rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do { \
+#define rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do { \
(r_node) = (a_root); \
if ((r_node) != NULL) { \
for (; rbtn_right_get(a_type, a_field, (r_node)) != NULL; \
@@ -141,14 +141,14 @@ struct { \
} \
} while (0)

-#define rbtn_rotate_left(a_type, a_field, a_node, r_node) do { \
+#define rbtn_rotate_left(a_type, a_field, a_node, r_node) do { \
(r_node) = rbtn_right_get(a_type, a_field, (a_node)); \
rbtn_right_set(a_type, a_field, (a_node), \
rbtn_left_get(a_type, a_field, (r_node))); \
rbtn_left_set(a_type, a_field, (r_node), (a_node)); \
} while (0)

-#define rbtn_rotate_right(a_type, a_field, a_node, r_node) do { \
+#define rbtn_rotate_right(a_type, a_field, a_node, r_node) do { \
(r_node) = rbtn_left_get(a_type, a_field, (a_node)); \
rbtn_left_set(a_type, a_field, (a_node), \
rbtn_right_get(a_type, a_field, (r_node))); \
@@ -160,7 +160,7 @@ struct { \
 * functions generated by an equivalently parameterized call to rb_gen().
 */

-#define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \
+#define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \
a_attr void \
a_prefix##new(a_rbt_type *rbtree); \
a_attr bool \
@@ -335,7 +335,7 @@ a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
 * has begun.
 * arg : Opaque pointer passed to cb().
 */
-#define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \
+#define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \
a_attr void \
a_prefix##new(a_rbt_type *rbtree) { \
rb_new(a_type, a_field, rbtree); \

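rb_gen() emits a complete red-black tree implementation for one node type, mirroring ph_gen() above (with RB_COMPACT, the color bit hides in the low bit of the right-child pointer). A usage sketch — assumes rb.h is includable standalone; the tree_/node_ names are illustrative:

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include "jemalloc/internal/rb.h"

    typedef struct node_s node_t;
    struct node_s {
        int key;
        rb_node(node_t) link; /* intrusive tree linkage */
    };
    typedef rb_tree(node_t) tree_t;

    static int
    node_cmp(const node_t *a, const node_t *b) {
        return (a->key > b->key) - (a->key < b->key);
    }

    /* Generates static tree_new(), tree_insert(), tree_remove(),
     * tree_search(), tree_first(), iterators, etc. for tree_t/node_t. */
    rb_gen(static, tree_, tree_t, node_t, link, node_cmp)

    int main(void) {
        tree_t t;
        node_t a = {7, {NULL, NULL}}, b = {3, {NULL, NULL}};
        tree_new(&t);
        tree_insert(&t, &a);
        tree_insert(&t, &b);
        assert(tree_first(&t)->key == 3); /* smallest key under node_cmp */
        return 0;
    }
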
@@ -225,9 +225,9 @@ rtree_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
start_level, dependent);
}

-#define RTREE_GET_BIAS (RTREE_HEIGHT_MAX - rtree->height)
+#define RTREE_GET_BIAS (RTREE_HEIGHT_MAX - rtree->height)
switch (start_level + RTREE_GET_BIAS) {
-#define RTREE_GET_SUBTREE(level) \
+#define RTREE_GET_SUBTREE(level) \
case level: \
assert(level < (RTREE_HEIGHT_MAX-1)); \
if (!dependent && unlikely(!rtree_node_valid(node))) { \
@@ -246,7 +246,7 @@ rtree_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
node; \
} \
/* Fall through. */
-#define RTREE_GET_LEAF(level) \
+#define RTREE_GET_LEAF(level) \
case level: \
assert(level == (RTREE_HEIGHT_MAX-1)); \
if (!dependent && unlikely(!rtree_node_valid(node))) { \

@@ -19,13 +19,13 @@ typedef struct rtree_s rtree_t;
 * RTREE_BITS_PER_LEVEL must be a power of two that is no larger than the
 * machine address width.
 */
-#define LG_RTREE_BITS_PER_LEVEL 4
-#define RTREE_BITS_PER_LEVEL (1U << LG_RTREE_BITS_PER_LEVEL)
+#define LG_RTREE_BITS_PER_LEVEL 4
+#define RTREE_BITS_PER_LEVEL (1U << LG_RTREE_BITS_PER_LEVEL)
/* Maximum rtree height. */
-#define RTREE_HEIGHT_MAX \
+#define RTREE_HEIGHT_MAX \
((1U << (LG_SIZEOF_PTR+3)) / RTREE_BITS_PER_LEVEL)

-#define RTREE_CTX_INITIALIZER { \
+#define RTREE_CTX_INITIALIZER { \
false, \
0, \
0, \
@@ -38,15 +38,15 @@ typedef struct rtree_s rtree_t;
 * have a witness_t directly embedded, but that would dramatically bloat the
 * tree. This must contain enough entries to e.g. coalesce two extents.
 */
-#define RTREE_ELM_ACQUIRE_MAX 4
+#define RTREE_ELM_ACQUIRE_MAX 4

/* Initializers for rtree_elm_witness_tsd_t. */
-#define RTREE_ELM_WITNESS_INITIALIZER { \
+#define RTREE_ELM_WITNESS_INITIALIZER { \
NULL, \
WITNESS_INITIALIZER("rtree_elm", WITNESS_RANK_RTREE_ELM) \
}

-#define RTREE_ELM_WITNESS_TSD_INITIALIZER { \
+#define RTREE_ELM_WITNESS_TSD_INITIALIZER { \
{ \
RTREE_ELM_WITNESS_INITIALIZER, \
RTREE_ELM_WITNESS_INITIALIZER, \

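The height formula divides the address width by the per-level fan-out. A worked instance for LP64 (LG_SIZEOF_PTR == 3), where each level consumes 16 address bits:

    #include <stdio.h>

    #define LG_SIZEOF_PTR 3 /* 8-byte pointers */
    #define LG_RTREE_BITS_PER_LEVEL 4
    #define RTREE_BITS_PER_LEVEL (1U << LG_RTREE_BITS_PER_LEVEL)
    #define RTREE_HEIGHT_MAX \
        ((1U << (LG_SIZEOF_PTR+3)) / RTREE_BITS_PER_LEVEL)

    int main(void) {
        printf("bits/level=%u height=%u\n",
            RTREE_BITS_PER_LEVEL, RTREE_HEIGHT_MAX); /* 16, 4 */
        return 0;
    }
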
@@ -150,7 +150,7 @@ size_classes() {
pow2 $((${lg_z} + 3)); ptr_bits=${pow2_result}
pow2 ${lg_g}; g=${pow2_result}

-echo "#define SIZE_CLASSES \\"
+echo "#define SIZE_CLASSES \\"
echo " /* index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup */ \\"

ntbins=0
@@ -294,7 +294,7 @@ cat <<EOF
 * LARGE_MAXCLASS: Maximum (large) size class.
 */

-#define LG_SIZE_CLASS_GROUP ${lg_g}
+#define LG_SIZE_CLASS_GROUP ${lg_g}

EOF

@@ -306,17 +306,17 @@
for lg_p in ${lg_parr} ; do
echo "#if (LG_SIZEOF_PTR == ${lg_z} && LG_TINY_MIN == ${lg_t} && LG_QUANTUM == ${lg_q} && LG_PAGE == ${lg_p})"
size_classes ${lg_z} ${lg_q} ${lg_t} ${lg_p} ${lg_g}
-echo "#define SIZE_CLASSES_DEFINED"
-echo "#define NTBINS ${ntbins}"
-echo "#define NLBINS ${nlbins}"
-echo "#define NBINS ${nbins}"
-echo "#define NSIZES ${nsizes}"
-echo "#define NPSIZES ${npsizes}"
-echo "#define LG_TINY_MAXCLASS ${lg_tiny_maxclass}"
-echo "#define LOOKUP_MAXCLASS ${lookup_maxclass}"
-echo "#define SMALL_MAXCLASS ${small_maxclass}"
-echo "#define LG_LARGE_MINCLASS ${lg_large_minclass}"
-echo "#define LARGE_MAXCLASS ${large_maxclass}"
+echo "#define SIZE_CLASSES_DEFINED"
+echo "#define NTBINS ${ntbins}"
+echo "#define NLBINS ${nlbins}"
+echo "#define NBINS ${nbins}"
+echo "#define NSIZES ${nsizes}"
+echo "#define NPSIZES ${npsizes}"
+echo "#define LG_TINY_MAXCLASS ${lg_tiny_maxclass}"
+echo "#define LOOKUP_MAXCLASS ${lookup_maxclass}"
+echo "#define SMALL_MAXCLASS ${small_maxclass}"
+echo "#define LG_LARGE_MINCLASS ${lg_large_minclass}"
+echo "#define LARGE_MAXCLASS ${large_maxclass}"
echo "#endif"
echo
done

@@ -23,10 +23,10 @@
 * smootheststep(x) = -20x^7 + 70x^6 - 84x^5 + 35x^4
 */

-#define SMOOTHSTEP_VARIANT "smoother"
-#define SMOOTHSTEP_NSTEPS 200
-#define SMOOTHSTEP_BFP 24
-#define SMOOTHSTEP \
+#define SMOOTHSTEP_VARIANT "smoother"
+#define SMOOTHSTEP_NSTEPS 200
+#define SMOOTHSTEP_BFP 24
+#define SMOOTHSTEP \
/* STEP(step, h, x, y) */ \
STEP( 1, UINT64_C(0x0000000000000014), 0.005, 0.000001240643750) \
STEP( 2, UINT64_C(0x00000000000000a5), 0.010, 0.000009850600000) \

@@ -79,10 +79,10 @@ cat <<EOF
 * smootheststep(x) = -20x^7 + 70x^6 - 84x^5 + 35x^4
 */

-#define SMOOTHSTEP_VARIANT "${variant}"
-#define SMOOTHSTEP_NSTEPS ${nsteps}
-#define SMOOTHSTEP_BFP ${bfp}
-#define SMOOTHSTEP \\
+#define SMOOTHSTEP_VARIANT "${variant}"
+#define SMOOTHSTEP_NSTEPS ${nsteps}
+#define SMOOTHSTEP_BFP ${bfp}
+#define SMOOTHSTEP \\
/* STEP(step, h, x, y) */ \\
EOF

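Each STEP() entry stores y = smootherstep(x) as 24-bit binary fixed point (SMOOTHSTEP_BFP == 24). Recomputing the first table entry from the polynomial confirms the encoding:

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        double x = 1.0 / 200.0; /* step 1 of SMOOTHSTEP_NSTEPS == 200 */
        /* smootherstep(x) = 6x^5 - 15x^4 + 10x^3 */
        double y = 6*pow(x, 5) - 15*pow(x, 4) + 10*pow(x, 3);
        uint64_t h = (uint64_t)(y * (1 << 24)); /* SMOOTHSTEP_BFP fixed point */
        printf("y=%.15f h=0x%02llx\n", y, (unsigned long long)h);
        /* y ~ 0.000001240643750, h = 0x14: matches STEP(1, ...) above. */
        return 0;
    }
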
@@ -11,15 +11,15 @@ typedef struct tcaches_s tcaches_t;
 * used for two purposes: preventing thread caching on a per thread basis and
 * cleaning up during thread shutdown.
 */
-#define TCACHE_STATE_DISABLED ((tcache_t *)(uintptr_t)1)
-#define TCACHE_STATE_REINCARNATED ((tcache_t *)(uintptr_t)2)
-#define TCACHE_STATE_PURGATORY ((tcache_t *)(uintptr_t)3)
-#define TCACHE_STATE_MAX TCACHE_STATE_PURGATORY
+#define TCACHE_STATE_DISABLED ((tcache_t *)(uintptr_t)1)
+#define TCACHE_STATE_REINCARNATED ((tcache_t *)(uintptr_t)2)
+#define TCACHE_STATE_PURGATORY ((tcache_t *)(uintptr_t)3)
+#define TCACHE_STATE_MAX TCACHE_STATE_PURGATORY

/*
 * Absolute minimum number of cache slots for each small bin.
 */
-#define TCACHE_NSLOTS_SMALL_MIN 20
+#define TCACHE_NSLOTS_SMALL_MIN 20

/*
 * Absolute maximum number of cache slots for each small bin in the thread
@@ -28,23 +28,23 @@ typedef struct tcaches_s tcaches_t;
 *
 * This constant must be an even number.
 */
-#define TCACHE_NSLOTS_SMALL_MAX 200
+#define TCACHE_NSLOTS_SMALL_MAX 200

/* Number of cache slots for large size classes. */
-#define TCACHE_NSLOTS_LARGE 20
+#define TCACHE_NSLOTS_LARGE 20

/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */
-#define LG_TCACHE_MAXCLASS_DEFAULT 15
+#define LG_TCACHE_MAXCLASS_DEFAULT 15

/*
 * TCACHE_GC_SWEEP is the approximate number of allocation events between
 * full GC sweeps. Integer rounding may cause the actual number to be
 * slightly higher, since GC is performed incrementally.
 */
-#define TCACHE_GC_SWEEP 8192
+#define TCACHE_GC_SWEEP 8192

/* Number of tcache allocation/deallocation events between incremental GCs. */
-#define TCACHE_GC_INCR \
+#define TCACHE_GC_INCR \
((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1))

#endif /* JEMALLOC_INTERNAL_TCACHE_TYPES_H */

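TCACHE_GC_INCR spreads one full sweep across all bins, adding one event whenever the quotient is nonzero so that a sweep completes at least as fast as TCACHE_GC_SWEEP suggests. A worked instance (NBINS is generated per configuration; 36 here is a hypothetical value):

    #include <stdio.h>

    #define TCACHE_GC_SWEEP 8192
    #define NBINS 36 /* hypothetical; the real value comes from size_classes.sh */
    #define TCACHE_GC_INCR \
        ((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1))

    int main(void) {
        printf("%d events per incremental GC\n", TCACHE_GC_INCR);
        /* 8192/36 = 227 (truncated), +1 -> 228. */
        return 0;
    }
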
@@ -8,7 +8,7 @@ tsd_t *tsd_fetch_impl(bool init);
tsd_t *tsd_fetch(void);
tsdn_t *tsd_tsdn(tsd_t *tsd);
bool tsd_nominal(tsd_t *tsd);
-#define O(n, t, c) \
+#define O(n, t, c) \
t *tsd_##n##p_get(tsd_t *tsd); \
t tsd_##n##_get(tsd_t *tsd); \
void tsd_##n##_set(tsd_t *tsd, t n);
@@ -64,7 +64,7 @@ tsd_nominal(tsd_t *tsd) {
return (tsd->state == tsd_state_nominal);
}

-#define O(n, t, c) \
+#define O(n, t, c) \
JEMALLOC_ALWAYS_INLINE t * \
tsd_##n##p_get(tsd_t *tsd) { \
return &tsd->n; \

@@ -14,7 +14,7 @@ struct tsd_init_head_s {
};
#endif

-#define MALLOC_TSD \
+#define MALLOC_TSD \
/* O(name, type, cleanup) */ \
O(tcache, tcache_t *, yes) \
O(thread_allocated, uint64_t, no) \
@@ -31,7 +31,7 @@ struct tsd_init_head_s {
O(rtree_elm_witnesses, rtree_elm_witness_tsd_t,no) \
O(witness_fork, bool, no) \

-#define TSD_INITIALIZER { \
+#define TSD_INITIALIZER { \
tsd_state_uninitialized, \
NULL, \
0, \
@@ -51,7 +51,7 @@ struct tsd_init_head_s {

struct tsd_s {
tsd_state_t state;
-#define O(n, t, c) \
+#define O(n, t, c) \
t n;
MALLOC_TSD
#undef O

@@ -2,7 +2,7 @@
#define JEMALLOC_INTERNAL_TSD_TYPES_H

/* Maximum number of malloc_tsd users with cleanup functions. */
-#define MALLOC_TSD_CLEANUPS_MAX 2
+#define MALLOC_TSD_CLEANUPS_MAX 2

typedef bool (*malloc_tsd_cleanup_t)(void);

@@ -15,7 +15,7 @@ typedef struct tsd_init_head_s tsd_init_head_t;
typedef struct tsd_s tsd_t;
typedef struct tsdn_s tsdn_t;

-#define TSDN_NULL ((tsdn_t *)0)
+#define TSDN_NULL ((tsdn_t *)0)

typedef enum {
tsd_state_uninitialized,
@@ -77,17 +77,17 @@ typedef enum {

/* malloc_tsd_types(). */
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
-#define malloc_tsd_types(a_name, a_type)
+#define malloc_tsd_types(a_name, a_type)
#elif (defined(JEMALLOC_TLS))
-#define malloc_tsd_types(a_name, a_type)
+#define malloc_tsd_types(a_name, a_type)
#elif (defined(_WIN32))
-#define malloc_tsd_types(a_name, a_type) \
+#define malloc_tsd_types(a_name, a_type) \
typedef struct { \
bool initialized; \
a_type val; \
} a_name##tsd_wrapper_t;
#else
-#define malloc_tsd_types(a_name, a_type) \
+#define malloc_tsd_types(a_name, a_type) \
typedef struct { \
bool initialized; \
a_type val; \
@@ -95,7 +95,7 @@ typedef struct { \
#endif

/* malloc_tsd_protos(). */
-#define malloc_tsd_protos(a_attr, a_name, a_type) \
+#define malloc_tsd_protos(a_attr, a_name, a_type) \
a_attr bool \
a_name##tsd_boot0(void); \
a_attr void \
@@ -111,22 +111,22 @@ a_name##tsd_set(a_type *val);

/* malloc_tsd_externs(). */
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
-#define malloc_tsd_externs(a_name, a_type) \
+#define malloc_tsd_externs(a_name, a_type) \
extern __thread a_type a_name##tsd_tls; \
extern __thread bool a_name##tsd_initialized; \
extern bool a_name##tsd_booted;
#elif (defined(JEMALLOC_TLS))
-#define malloc_tsd_externs(a_name, a_type) \
+#define malloc_tsd_externs(a_name, a_type) \
extern __thread a_type a_name##tsd_tls; \
extern pthread_key_t a_name##tsd_tsd; \
extern bool a_name##tsd_booted;
#elif (defined(_WIN32))
-#define malloc_tsd_externs(a_name, a_type) \
+#define malloc_tsd_externs(a_name, a_type) \
extern DWORD a_name##tsd_tsd; \
extern a_name##tsd_wrapper_t a_name##tsd_boot_wrapper; \
extern bool a_name##tsd_booted;
#else
-#define malloc_tsd_externs(a_name, a_type) \
+#define malloc_tsd_externs(a_name, a_type) \
extern pthread_key_t a_name##tsd_tsd; \
extern tsd_init_head_t a_name##tsd_init_head; \
extern a_name##tsd_wrapper_t a_name##tsd_boot_wrapper; \
@@ -135,20 +135,20 @@ extern bool a_name##tsd_booted;

/* malloc_tsd_data(). */
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
-#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
+#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr __thread a_type JEMALLOC_TLS_MODEL \
a_name##tsd_tls = a_initializer; \
a_attr __thread bool JEMALLOC_TLS_MODEL \
a_name##tsd_initialized = false; \
a_attr bool a_name##tsd_booted = false;
#elif (defined(JEMALLOC_TLS))
-#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
+#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr __thread a_type JEMALLOC_TLS_MODEL \
a_name##tsd_tls = a_initializer; \
a_attr pthread_key_t a_name##tsd_tsd; \
a_attr bool a_name##tsd_booted = false;
#elif (defined(_WIN32))
-#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
+#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr DWORD a_name##tsd_tsd; \
a_attr a_name##tsd_wrapper_t a_name##tsd_boot_wrapper = { \
false, \
@@ -156,7 +156,7 @@ a_attr a_name##tsd_wrapper_t a_name##tsd_boot_wrapper = { \
}; \
a_attr bool a_name##tsd_booted = false;
#else
-#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
+#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr pthread_key_t a_name##tsd_tsd; \
a_attr tsd_init_head_t a_name##tsd_init_head = { \
ql_head_initializer(blocks), \
@@ -171,7 +171,7 @@ a_attr bool a_name##tsd_booted = false;

/* malloc_tsd_funcs(). */
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
-#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
+#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
a_cleanup) \
/* Initialization/cleanup. */ \
a_attr bool \
@@ -224,7 +224,7 @@ a_name##tsd_set(a_type *val) { \
} \
}
#elif (defined(JEMALLOC_TLS))
-#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
+#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
a_cleanup) \
/* Initialization/cleanup. */ \
a_attr bool \
@@ -278,7 +278,7 @@ a_name##tsd_set(a_type *val) { \
} \
}
#elif (defined(_WIN32))
-#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
+#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
a_cleanup) \
/* Initialization/cleanup. */ \
a_attr bool \
@@ -403,7 +403,7 @@ a_name##tsd_set(a_type *val) { \
} \
}
#else
-#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
+#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
a_cleanup) \
/* Initialization/cleanup. */ \
a_attr void \

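The non-TLS fallback above keys a {initialized, val} wrapper struct off pthread_getspecific(). A stripped-down analogue of that wrapper scheme (illustrative and single-threaded for brevity; jemalloc's version adds booting, cleanup, and locking):

    #include <pthread.h>
    #include <stdio.h>

    typedef struct {
        int initialized;
        long val;
    } tsd_wrapper_t;

    static pthread_key_t tsd_key;

    int main(void) {
        static tsd_wrapper_t wrapper = {0, 0};
        pthread_key_create(&tsd_key, NULL);
        wrapper.initialized = 1;
        wrapper.val = 7;
        pthread_setspecific(tsd_key, &wrapper);
        tsd_wrapper_t *w = pthread_getspecific(tsd_key);
        printf("%ld\n", w->val); /* 7 */
        return 0;
    }
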
@@ -32,13 +32,13 @@
#endif

/* Size of stack-allocated buffer passed to buferror(). */
-#define BUFERROR_BUF 64
+#define BUFERROR_BUF 64

/*
 * Size of stack-allocated buffer used by malloc_{,v,vc}printf(). This must be
 * large enough for all possible uses within jemalloc.
 */
-#define MALLOC_PRINTF_BUFSIZE 4096
+#define MALLOC_PRINTF_BUFSIZE 4096

/* Junk fill patterns. */
#ifndef JEMALLOC_ALLOC_JUNK
@@ -52,11 +52,11 @@
 * Wrap a cpp argument that contains commas such that it isn't broken up into
 * multiple arguments.
 */
-#define JEMALLOC_ARG_CONCAT(...) __VA_ARGS__
+#define JEMALLOC_ARG_CONCAT(...) __VA_ARGS__

/* cpp macro definition stringification. */
-#define STRINGIFY_HELPER(x) #x
-#define STRINGIFY(x) STRINGIFY_HELPER(x)
+#define STRINGIFY_HELPER(x) #x
+#define STRINGIFY(x) STRINGIFY_HELPER(x)

/*
 * Silence compiler warnings due to uninitialized values. This is used
@@ -86,7 +86,7 @@
#include "jemalloc/internal/assert.h"

/* Use to assert a particular configuration, e.g., cassert(config_debug). */
-#define cassert(c) do { \
+#define cassert(c) do { \
if (unlikely(!(c))) { \
not_reached(); \
} \

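The two-level STRINGIFY dance exists because # stringifies its argument before macro expansion; the helper forces one expansion pass first:

    #include <stdio.h>

    #define STRINGIFY_HELPER(x) #x
    #define STRINGIFY(x) STRINGIFY_HELPER(x)
    #define VERSION 42

    int main(void) {
        /* Direct # gets the macro's name; the two-level form gets its value. */
        printf("%s %s\n", STRINGIFY_HELPER(VERSION), STRINGIFY(VERSION));
        /* prints: VERSION 42 */
        return 0;
    }
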
@@ -11,36 +11,36 @@ typedef int witness_comp_t (const witness_t *, void *, const witness_t *,
 * Lock ranks. Witnesses with rank WITNESS_RANK_OMIT are completely ignored by
 * the witness machinery.
 */
-#define WITNESS_RANK_OMIT 0U
+#define WITNESS_RANK_OMIT 0U

-#define WITNESS_RANK_INIT 1U
-#define WITNESS_RANK_CTL 1U
-#define WITNESS_RANK_ARENAS 2U
+#define WITNESS_RANK_INIT 1U
+#define WITNESS_RANK_CTL 1U
+#define WITNESS_RANK_ARENAS 2U

-#define WITNESS_RANK_PROF_DUMP 3U
-#define WITNESS_RANK_PROF_BT2GCTX 4U
-#define WITNESS_RANK_PROF_TDATAS 5U
-#define WITNESS_RANK_PROF_TDATA 6U
-#define WITNESS_RANK_PROF_GCTX 7U
+#define WITNESS_RANK_PROF_DUMP 3U
+#define WITNESS_RANK_PROF_BT2GCTX 4U
+#define WITNESS_RANK_PROF_TDATAS 5U
+#define WITNESS_RANK_PROF_TDATA 6U
+#define WITNESS_RANK_PROF_GCTX 7U

-#define WITNESS_RANK_ARENA 8U
-#define WITNESS_RANK_ARENA_EXTENTS 9U
-#define WITNESS_RANK_ARENA_EXTENT_CACHE 10
+#define WITNESS_RANK_ARENA 8U
+#define WITNESS_RANK_ARENA_EXTENTS 9U
+#define WITNESS_RANK_ARENA_EXTENT_CACHE 10

-#define WITNESS_RANK_RTREE_ELM 11U
-#define WITNESS_RANK_RTREE 12U
-#define WITNESS_RANK_BASE 13U
+#define WITNESS_RANK_RTREE_ELM 11U
+#define WITNESS_RANK_RTREE 12U
+#define WITNESS_RANK_BASE 13U

-#define WITNESS_RANK_LEAF 0xffffffffU
-#define WITNESS_RANK_ARENA_BIN WITNESS_RANK_LEAF
-#define WITNESS_RANK_ARENA_LARGE WITNESS_RANK_LEAF
-#define WITNESS_RANK_DSS WITNESS_RANK_LEAF
-#define WITNESS_RANK_PROF_ACTIVE WITNESS_RANK_LEAF
-#define WITNESS_RANK_PROF_DUMP_SEQ WITNESS_RANK_LEAF
-#define WITNESS_RANK_PROF_GDUMP WITNESS_RANK_LEAF
-#define WITNESS_RANK_PROF_NEXT_THR_UID WITNESS_RANK_LEAF
-#define WITNESS_RANK_PROF_THREAD_ACTIVE_INIT WITNESS_RANK_LEAF
+#define WITNESS_RANK_LEAF 0xffffffffU
+#define WITNESS_RANK_ARENA_BIN WITNESS_RANK_LEAF
+#define WITNESS_RANK_ARENA_LARGE WITNESS_RANK_LEAF
+#define WITNESS_RANK_DSS WITNESS_RANK_LEAF
+#define WITNESS_RANK_PROF_ACTIVE WITNESS_RANK_LEAF
+#define WITNESS_RANK_PROF_DUMP_SEQ WITNESS_RANK_LEAF
+#define WITNESS_RANK_PROF_GDUMP WITNESS_RANK_LEAF
+#define WITNESS_RANK_PROF_NEXT_THR_UID WITNESS_RANK_LEAF
+#define WITNESS_RANK_PROF_THREAD_ACTIVE_INIT WITNESS_RANK_LEAF

-#define WITNESS_INITIALIZER(name, rank) {name, rank, NULL, NULL, {NULL, NULL}}
+#define WITNESS_INITIALIZER(name, rank) {name, rank, NULL, NULL, {NULL, NULL}}

#endif /* JEMALLOC_INTERNAL_WITNESS_TYPES_H */