/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

/* Maximum number of malloc_tsd users with cleanup functions. */
#define MALLOC_TSD_CLEANUPS_MAX 2

typedef bool (*malloc_tsd_cleanup_t)(void);

#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
    !defined(_WIN32))
typedef struct tsd_init_block_s tsd_init_block_t;
typedef struct tsd_init_head_s tsd_init_head_t;
#endif

typedef struct tsd_s tsd_t;
typedef struct tsdn_s tsdn_t;

#define TSDN_NULL ((tsdn_t *)0)

typedef enum {
	tsd_state_uninitialized,
	tsd_state_nominal,
	tsd_state_purgatory,
	tsd_state_reincarnated
} tsd_state_t;

/*
 * TLS/TSD-agnostic macro-based implementation of thread-specific data.  There
 * are five macros that support (at least) three use cases: file-private,
 * library-private, and library-private inlined.  Following is an example
 * library-private tsd variable:
 *
 * In example.h:
 *   typedef struct {
 *           int x;
 *           int y;
 *   } example_t;
 *   #define EX_INITIALIZER JEMALLOC_CONCAT({0, 0})
 *   malloc_tsd_types(example_, example_t)
 *   malloc_tsd_protos(, example_, example_t)
 *   malloc_tsd_externs(example_, example_t)
 * In example.c:
 *   malloc_tsd_data(, example_, example_t, EX_INITIALIZER)
 *   malloc_tsd_funcs(, example_, example_t, EX_INITIALIZER,
 *       example_tsd_cleanup)
 *
 * The result is a set of generated functions, e.g.:
 *
 *   bool example_tsd_boot(void) {...}
 *   bool example_tsd_booted_get(void) {...}
 *   example_t *example_tsd_get(bool init) {...}
 *   void example_tsd_set(example_t *val) {...}
 *
 * Note that all of the functions deal in terms of (a_type *) rather than
 * (a_type) so that it is possible to support non-pointer types (unlike
 * pthreads TSD).  example_tsd_cleanup() is passed an (a_type *) pointer that is
 * cast to (void *).  This means that the cleanup function needs to cast the
 * function argument to (a_type *), then dereference the resulting pointer to
 * access fields, e.g.
 *
 *   void
 *   example_tsd_cleanup(void *arg)
 *   {
 *           example_t *example = (example_t *)arg;
 *
 *           example->x = 42;
 *           [...]
 *           if ([want the cleanup function to be called again])
 *                   example_tsd_set(example);
 *   }
 *
 * If example_tsd_set() is called within example_tsd_cleanup(), it will be
 * called again.  This is similar to how pthreads TSD destruction works, except
 * that pthreads only calls the cleanup function again if the value was set to
 * non-NULL.
 */
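
/*
 * Illustrative usage sketch (an editorial addition, reusing the hypothetical
 * example_ names from the comment above; not code generated by these macros):
 *
 *   if (example_tsd_boot())
 *           [handle bootstrap failure];
 *   example_t *ex = example_tsd_get(true);
 *   ex->x = 42;
 *   example_tsd_set(ex);   [marks the value initialized, so that
 *                           example_tsd_cleanup() runs at thread exit]
 */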

/* malloc_tsd_types(). */
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
#define malloc_tsd_types(a_name, a_type)
#elif (defined(JEMALLOC_TLS))
#define malloc_tsd_types(a_name, a_type)
#elif (defined(_WIN32))
#define malloc_tsd_types(a_name, a_type) \
typedef struct { \
	bool initialized; \
	a_type val; \
} a_name##tsd_wrapper_t;
#else
#define malloc_tsd_types(a_name, a_type) \
typedef struct { \
	bool initialized; \
	a_type val; \
} a_name##tsd_wrapper_t;
#endif
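
/*
 * Implementation note (annotation): for the _WIN32 and default (pthreads)
 * backends, the wrapper pairs the value with an initialized flag; the cleanup
 * wrappers defined below use that flag to detect whether a cleanup function
 * re-set the value and therefore must be called again.
 */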

/* malloc_tsd_protos(). */
#define malloc_tsd_protos(a_attr, a_name, a_type) \
a_attr bool \
a_name##tsd_boot0(void); \
a_attr void \
a_name##tsd_boot1(void); \
a_attr bool \
a_name##tsd_boot(void); \
a_attr bool \
a_name##tsd_booted_get(void); \
a_attr a_type * \
a_name##tsd_get(bool init); \
a_attr void \
a_name##tsd_set(a_type *val);

/* malloc_tsd_externs(). */
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
#define malloc_tsd_externs(a_name, a_type) \
extern __thread a_type a_name##tsd_tls; \
extern __thread bool a_name##tsd_initialized; \
extern bool a_name##tsd_booted;
#elif (defined(JEMALLOC_TLS))
#define malloc_tsd_externs(a_name, a_type) \
extern __thread a_type a_name##tsd_tls; \
extern pthread_key_t a_name##tsd_tsd; \
extern bool a_name##tsd_booted;
#elif (defined(_WIN32))
#define malloc_tsd_externs(a_name, a_type) \
extern DWORD a_name##tsd_tsd; \
extern a_name##tsd_wrapper_t a_name##tsd_boot_wrapper; \
extern bool a_name##tsd_booted;
#else
#define malloc_tsd_externs(a_name, a_type) \
extern pthread_key_t a_name##tsd_tsd; \
extern tsd_init_head_t a_name##tsd_init_head; \
extern a_name##tsd_wrapper_t a_name##tsd_boot_wrapper; \
extern bool a_name##tsd_booted;
#endif

/* malloc_tsd_data(). */
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr __thread a_type JEMALLOC_TLS_MODEL \
    a_name##tsd_tls = a_initializer; \
a_attr __thread bool JEMALLOC_TLS_MODEL \
    a_name##tsd_initialized = false; \
a_attr bool a_name##tsd_booted = false;
#elif (defined(JEMALLOC_TLS))
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr __thread a_type JEMALLOC_TLS_MODEL \
    a_name##tsd_tls = a_initializer; \
a_attr pthread_key_t a_name##tsd_tsd; \
a_attr bool a_name##tsd_booted = false;
#elif (defined(_WIN32))
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr DWORD a_name##tsd_tsd; \
a_attr a_name##tsd_wrapper_t a_name##tsd_boot_wrapper = { \
	false, \
	a_initializer \
}; \
a_attr bool a_name##tsd_booted = false;
#else
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr pthread_key_t a_name##tsd_tsd; \
a_attr tsd_init_head_t a_name##tsd_init_head = { \
	ql_head_initializer(blocks), \
	MALLOC_MUTEX_INITIALIZER \
}; \
a_attr a_name##tsd_wrapper_t a_name##tsd_boot_wrapper = { \
	false, \
	a_initializer \
}; \
a_attr bool a_name##tsd_booted = false;
#endif
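
/*
 * Implementation note (annotation): a_name##tsd_boot_wrapper is statically
 * allocated, so a tsd variable is usable between a_name##tsd_boot0() and the
 * point where dynamic allocation becomes safe; a_name##tsd_boot1() then
 * copies the boot wrapper into a wrapper freshly allocated via
 * malloc_tsd_malloc(), as the definitions below show.
 */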

/* malloc_tsd_funcs(). */
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
    a_cleanup) \
/* Initialization/cleanup. */ \
a_attr bool \
a_name##tsd_cleanup_wrapper(void) \
{ \
\
	if (a_name##tsd_initialized) { \
		a_name##tsd_initialized = false; \
		a_cleanup(&a_name##tsd_tls); \
	} \
	return (a_name##tsd_initialized); \
} \
a_attr bool \
a_name##tsd_boot0(void) \
{ \
\
	if (a_cleanup != malloc_tsd_no_cleanup) { \
		malloc_tsd_cleanup_register( \
		    &a_name##tsd_cleanup_wrapper); \
	} \
	a_name##tsd_booted = true; \
	return (false); \
} \
a_attr void \
a_name##tsd_boot1(void) \
{ \
\
	/* Do nothing. */ \
} \
a_attr bool \
a_name##tsd_boot(void) \
{ \
\
	return (a_name##tsd_boot0()); \
} \
a_attr bool \
a_name##tsd_booted_get(void) \
{ \
\
	return (a_name##tsd_booted); \
} \
a_attr bool \
a_name##tsd_get_allocates(void) \
{ \
\
	return (false); \
} \
/* Get/set. */ \
a_attr a_type * \
a_name##tsd_get(bool init) \
{ \
\
	assert(a_name##tsd_booted); \
	return (&a_name##tsd_tls); \
} \
a_attr void \
a_name##tsd_set(a_type *val) \
{ \
\
	assert(a_name##tsd_booted); \
	a_name##tsd_tls = (*val); \
	if (a_cleanup != malloc_tsd_no_cleanup) \
		a_name##tsd_initialized = true; \
}
#elif (defined(JEMALLOC_TLS))
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
    a_cleanup) \
/* Initialization/cleanup. */ \
a_attr bool \
a_name##tsd_boot0(void) \
{ \
\
	if (a_cleanup != malloc_tsd_no_cleanup) { \
		if (pthread_key_create(&a_name##tsd_tsd, a_cleanup) != \
		    0) \
			return (true); \
	} \
	a_name##tsd_booted = true; \
	return (false); \
} \
a_attr void \
a_name##tsd_boot1(void) \
{ \
\
	/* Do nothing. */ \
} \
a_attr bool \
a_name##tsd_boot(void) \
{ \
\
	return (a_name##tsd_boot0()); \
} \
a_attr bool \
a_name##tsd_booted_get(void) \
{ \
\
	return (a_name##tsd_booted); \
} \
a_attr bool \
a_name##tsd_get_allocates(void) \
{ \
\
	return (false); \
} \
/* Get/set. */ \
a_attr a_type * \
a_name##tsd_get(bool init) \
{ \
\
	assert(a_name##tsd_booted); \
	return (&a_name##tsd_tls); \
} \
a_attr void \
a_name##tsd_set(a_type *val) \
{ \
\
	assert(a_name##tsd_booted); \
	a_name##tsd_tls = (*val); \
	if (a_cleanup != malloc_tsd_no_cleanup) { \
		if (pthread_setspecific(a_name##tsd_tsd, \
		    (void *)(&a_name##tsd_tls))) { \
			malloc_write("<jemalloc>: Error" \
			    " setting TSD for "#a_name"\n"); \
			if (opt_abort) \
				abort(); \
		} \
	} \
}
#elif (defined(_WIN32))
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
    a_cleanup) \
/* Initialization/cleanup. */ \
a_attr bool \
a_name##tsd_cleanup_wrapper(void) \
{ \
	DWORD error = GetLastError(); \
	a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
	    TlsGetValue(a_name##tsd_tsd); \
	SetLastError(error); \
\
	if (wrapper == NULL) \
		return (false); \
	if (a_cleanup != malloc_tsd_no_cleanup && \
	    wrapper->initialized) { \
		wrapper->initialized = false; \
		a_cleanup(&wrapper->val); \
		if (wrapper->initialized) { \
			/* Trigger another cleanup round. */ \
			return (true); \
		} \
	} \
	malloc_tsd_dalloc(wrapper); \
	return (false); \
} \
a_attr void \
a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \
{ \
\
	if (!TlsSetValue(a_name##tsd_tsd, (void *)wrapper)) { \
		malloc_write("<jemalloc>: Error setting" \
		    " TSD for "#a_name"\n"); \
		abort(); \
	} \
} \
a_attr a_name##tsd_wrapper_t * \
a_name##tsd_wrapper_get(bool init) \
{ \
	DWORD error = GetLastError(); \
	a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
	    TlsGetValue(a_name##tsd_tsd); \
	SetLastError(error); \
\
	if (init && unlikely(wrapper == NULL)) { \
		wrapper = (a_name##tsd_wrapper_t *) \
		    malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
		if (wrapper == NULL) { \
			malloc_write("<jemalloc>: Error allocating" \
			    " TSD for "#a_name"\n"); \
			abort(); \
		} else { \
			wrapper->initialized = false; \
			wrapper->val = a_initializer; \
		} \
		a_name##tsd_wrapper_set(wrapper); \
	} \
	return (wrapper); \
} \
a_attr bool \
a_name##tsd_boot0(void) \
{ \
\
	a_name##tsd_tsd = TlsAlloc(); \
	if (a_name##tsd_tsd == TLS_OUT_OF_INDEXES) \
		return (true); \
	if (a_cleanup != malloc_tsd_no_cleanup) { \
		malloc_tsd_cleanup_register( \
		    &a_name##tsd_cleanup_wrapper); \
	} \
	a_name##tsd_wrapper_set(&a_name##tsd_boot_wrapper); \
	a_name##tsd_booted = true; \
	return (false); \
} \
a_attr void \
a_name##tsd_boot1(void) \
{ \
	a_name##tsd_wrapper_t *wrapper; \
	wrapper = (a_name##tsd_wrapper_t *) \
	    malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
	if (wrapper == NULL) { \
		malloc_write("<jemalloc>: Error allocating" \
		    " TSD for "#a_name"\n"); \
		abort(); \
	} \
	memcpy(wrapper, &a_name##tsd_boot_wrapper, \
	    sizeof(a_name##tsd_wrapper_t)); \
	a_name##tsd_wrapper_set(wrapper); \
} \
a_attr bool \
a_name##tsd_boot(void) \
{ \
\
	if (a_name##tsd_boot0()) \
		return (true); \
	a_name##tsd_boot1(); \
	return (false); \
} \
a_attr bool \
a_name##tsd_booted_get(void) \
{ \
\
	return (a_name##tsd_booted); \
} \
a_attr bool \
a_name##tsd_get_allocates(void) \
{ \
\
	return (true); \
} \
/* Get/set. */ \
a_attr a_type * \
a_name##tsd_get(bool init) \
{ \
	a_name##tsd_wrapper_t *wrapper; \
\
	assert(a_name##tsd_booted); \
	wrapper = a_name##tsd_wrapper_get(init); \
	if (a_name##tsd_get_allocates() && !init && wrapper == NULL) \
		return (NULL); \
	return (&wrapper->val); \
} \
a_attr void \
a_name##tsd_set(a_type *val) \
{ \
	a_name##tsd_wrapper_t *wrapper; \
\
	assert(a_name##tsd_booted); \
	wrapper = a_name##tsd_wrapper_get(true); \
	wrapper->val = *(val); \
	if (a_cleanup != malloc_tsd_no_cleanup) \
		wrapper->initialized = true; \
}
|
2012-03-22 09:33:03 +08:00
|
|
|
#else
|
|
|
|
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
|
|
|
|
a_cleanup) \
|
|
|
|
/* Initialization/cleanup. */ \
|
|
|
|
a_attr void \
|
2014-09-23 12:09:23 +08:00
|
|
|
a_name##tsd_cleanup_wrapper(void *arg) \
|
2012-03-22 09:33:03 +08:00
|
|
|
{ \
|
2014-09-23 12:09:23 +08:00
|
|
|
a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *)arg; \
|
2012-03-22 09:33:03 +08:00
|
|
|
\
|
|
|
|
if (a_cleanup != malloc_tsd_no_cleanup && \
|
|
|
|
wrapper->initialized) { \
|
|
|
|
wrapper->initialized = false; \
|
|
|
|
a_cleanup(&wrapper->val); \
|
|
|
|
if (wrapper->initialized) { \
|
|
|
|
/* Trigger another cleanup round. */ \
|
2014-09-23 12:09:23 +08:00
|
|
|
if (pthread_setspecific(a_name##tsd_tsd, \
|
2012-03-22 09:33:03 +08:00
|
|
|
(void *)wrapper)) { \
|
|
|
|
malloc_write("<jemalloc>: Error" \
|
|
|
|
" setting TSD for "#a_name"\n"); \
|
|
|
|
if (opt_abort) \
|
|
|
|
abort(); \
|
|
|
|
} \
|
|
|
|
return; \
|
|
|
|
} \
|
|
|
|
} \
|
2012-04-19 00:29:48 +08:00
|
|
|
malloc_tsd_dalloc(wrapper); \
|
2012-03-22 09:33:03 +08:00
|
|
|
} \
|
Refactor/fix arenas manipulation.
Abstract arenas access to use arena_get() (or a0get() where appropriate)
rather than directly reading e.g. arenas[ind]. Prior to the addition of
the arenas.extend mallctl, the worst possible outcome of directly
accessing arenas was a stale read, but arenas.extend may allocate and
assign a new array to arenas.
Add a tsd-based arenas_cache, which amortizes arenas reads. This
introduces some subtle bootstrapping issues, with tsd_boot() now being
split into tsd_boot[01]() to support tsd wrapper allocation
bootstrapping, as well as an arenas_cache_bypass tsd variable which
dynamically terminates allocation of arenas_cache itself.
Promote a0malloc(), a0calloc(), and a0free() to be generally useful for
internal allocation, and use them in several places (more may be
appropriate).
Abstract arena->nthreads management and fix a missing decrement during
thread destruction (recent tsd refactoring left arenas_cleanup()
unused).
Change arena_choose() to propagate OOM, and handle OOM in all callers.
This is important for providing consistent allocation behavior when the
MALLOCX_ARENA() flag is being used. Prior to this fix, it was possible
for an OOM to result in allocation silently allocating from a different
arena than the one specified.
2014-10-08 14:14:57 +08:00
|
|
|
a_attr void \
|
|
|
|
a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \
|
2012-03-22 09:33:03 +08:00
|
|
|
{ \
|
|
|
|
\
|
Refactor/fix arenas manipulation.
Abstract arenas access to use arena_get() (or a0get() where appropriate)
rather than directly reading e.g. arenas[ind]. Prior to the addition of
the arenas.extend mallctl, the worst possible outcome of directly
accessing arenas was a stale read, but arenas.extend may allocate and
assign a new array to arenas.
Add a tsd-based arenas_cache, which amortizes arenas reads. This
introduces some subtle bootstrapping issues, with tsd_boot() now being
split into tsd_boot[01]() to support tsd wrapper allocation
bootstrapping, as well as an arenas_cache_bypass tsd variable which
dynamically terminates allocation of arenas_cache itself.
Promote a0malloc(), a0calloc(), and a0free() to be generally useful for
internal allocation, and use them in several places (more may be
appropriate).
Abstract arena->nthreads management and fix a missing decrement during
thread destruction (recent tsd refactoring left arenas_cleanup()
unused).
Change arena_choose() to propagate OOM, and handle OOM in all callers.
This is important for providing consistent allocation behavior when the
MALLOCX_ARENA() flag is being used. Prior to this fix, it was possible
for an OOM to result in allocation silently allocating from a different
arena than the one specified.
2014-10-08 14:14:57 +08:00
|
|
|
if (pthread_setspecific(a_name##tsd_tsd, \
|
|
|
|
(void *)wrapper)) { \
|
|
|
|
malloc_write("<jemalloc>: Error setting" \
|
|
|
|
" TSD for "#a_name"\n"); \
|
|
|
|
abort(); \
|
|
|
|
} \
|
2012-03-22 09:33:03 +08:00
|
|
|
} \
|
2014-09-23 12:09:23 +08:00
|
|
|
a_attr a_name##tsd_wrapper_t * \
|
2016-10-21 14:59:12 +08:00
|
|
|
a_name##tsd_wrapper_get(bool init) \
|
2012-03-22 09:33:03 +08:00
|
|
|
{ \
|
2014-09-23 12:09:23 +08:00
|
|
|
a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
|
|
|
|
pthread_getspecific(a_name##tsd_tsd); \
|
2012-03-22 09:33:03 +08:00
|
|
|
\
|
2016-10-21 14:59:12 +08:00
|
|
|
if (init && unlikely(wrapper == NULL)) { \
|
2013-10-22 05:12:16 +08:00
|
|
|
tsd_init_block_t block; \
|
2016-12-14 05:38:11 +08:00
|
|
|
wrapper = (a_name##tsd_wrapper_t *) \
|
|
|
|
tsd_init_check_recursion(&a_name##tsd_init_head, \
|
|
|
|
&block); \
|
2013-10-22 05:12:16 +08:00
|
|
|
if (wrapper) \
|
2013-12-13 06:41:02 +08:00
|
|
|
return (wrapper); \
|
2014-09-23 12:09:23 +08:00
|
|
|
wrapper = (a_name##tsd_wrapper_t *) \
|
|
|
|
malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
|
2016-12-14 05:38:11 +08:00
|
|
|
block.data = (void *)wrapper; \
|
2012-03-22 09:33:03 +08:00
|
|
|
if (wrapper == NULL) { \
|
|
|
|
malloc_write("<jemalloc>: Error allocating" \
|
|
|
|
" TSD for "#a_name"\n"); \
|
2012-04-19 00:29:48 +08:00
|
|
|
abort(); \
|
2012-03-22 09:33:03 +08:00
|
|
|
} else { \
|
2012-04-19 00:29:47 +08:00
|
|
|
wrapper->initialized = false; \
|
2014-09-23 12:09:23 +08:00
|
|
|
wrapper->val = a_initializer; \
|
2012-03-22 09:33:03 +08:00
|
|
|
} \
|
Refactor/fix arenas manipulation.
Abstract arenas access to use arena_get() (or a0get() where appropriate)
rather than directly reading e.g. arenas[ind]. Prior to the addition of
the arenas.extend mallctl, the worst possible outcome of directly
accessing arenas was a stale read, but arenas.extend may allocate and
assign a new array to arenas.
Add a tsd-based arenas_cache, which amortizes arenas reads. This
introduces some subtle bootstrapping issues, with tsd_boot() now being
split into tsd_boot[01]() to support tsd wrapper allocation
bootstrapping, as well as an arenas_cache_bypass tsd variable which
dynamically terminates allocation of arenas_cache itself.
Promote a0malloc(), a0calloc(), and a0free() to be generally useful for
internal allocation, and use them in several places (more may be
appropriate).
Abstract arena->nthreads management and fix a missing decrement during
thread destruction (recent tsd refactoring left arenas_cleanup()
unused).
Change arena_choose() to propagate OOM, and handle OOM in all callers.
This is important for providing consistent allocation behavior when the
MALLOCX_ARENA() flag is being used. Prior to this fix, it was possible
for an OOM to result in allocation silently allocating from a different
arena than the one specified.
2014-10-08 14:14:57 +08:00
|
|
|
a_name##tsd_wrapper_set(wrapper); \
|
2014-09-23 12:09:23 +08:00
|
|
|
tsd_init_finish(&a_name##tsd_init_head, &block); \
|
2012-03-22 09:33:03 +08:00
|
|
|
} \
|
|
|
|
return (wrapper); \
|
|
|
|
} \
|
Refactor/fix arenas manipulation.
Abstract arenas access to use arena_get() (or a0get() where appropriate)
rather than directly reading e.g. arenas[ind]. Prior to the addition of
the arenas.extend mallctl, the worst possible outcome of directly
accessing arenas was a stale read, but arenas.extend may allocate and
assign a new array to arenas.
Add a tsd-based arenas_cache, which amortizes arenas reads. This
introduces some subtle bootstrapping issues, with tsd_boot() now being
split into tsd_boot[01]() to support tsd wrapper allocation
bootstrapping, as well as an arenas_cache_bypass tsd variable which
dynamically terminates allocation of arenas_cache itself.
Promote a0malloc(), a0calloc(), and a0free() to be generally useful for
internal allocation, and use them in several places (more may be
appropriate).
Abstract arena->nthreads management and fix a missing decrement during
thread destruction (recent tsd refactoring left arenas_cleanup()
unused).
Change arena_choose() to propagate OOM, and handle OOM in all callers.
This is important for providing consistent allocation behavior when the
MALLOCX_ARENA() flag is being used. Prior to this fix, it was possible
for an OOM to result in allocation silently allocating from a different
arena than the one specified.
2014-10-08 14:14:57 +08:00
|
|
|
a_attr bool \
|
|
|
|
a_name##tsd_boot0(void) \
|
|
|
|
{ \
|
|
|
|
\
|
|
|
|
if (pthread_key_create(&a_name##tsd_tsd, \
|
|
|
|
a_name##tsd_cleanup_wrapper) != 0) \
|
|
|
|
return (true); \
|
|
|
|
a_name##tsd_wrapper_set(&a_name##tsd_boot_wrapper); \
|
|
|
|
a_name##tsd_booted = true; \
|
|
|
|
return (false); \
|
|
|
|
} \
a_attr void								\
a_name##tsd_boot1(void)							\
{									\
	a_name##tsd_wrapper_t *wrapper;					\
	wrapper = (a_name##tsd_wrapper_t *)				\
	    malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t));		\
	if (wrapper == NULL) {						\
		malloc_write("<jemalloc>: Error allocating"		\
		    " TSD for "#a_name"\n");				\
		abort();						\
	}								\
	memcpy(wrapper, &a_name##tsd_boot_wrapper,			\
	    sizeof(a_name##tsd_wrapper_t));				\
	a_name##tsd_wrapper_set(wrapper);				\
}									\
a_attr bool								\
a_name##tsd_boot(void)							\
{									\
									\
	if (a_name##tsd_boot0())					\
		return (true);						\
	a_name##tsd_boot1();						\
	return (false);							\
}									\
a_attr bool								\
a_name##tsd_booted_get(void)						\
{									\
									\
	return (a_name##tsd_booted);					\
}									\
a_attr bool								\
a_name##tsd_get_allocates(void)						\
{									\
									\
	return (true);							\
}									\
/* Get/set. */								\
a_attr a_type *								\
a_name##tsd_get(bool init)						\
{									\
	a_name##tsd_wrapper_t *wrapper;					\
									\
	assert(a_name##tsd_booted);					\
	wrapper = a_name##tsd_wrapper_get(init);			\
	if (a_name##tsd_get_allocates() && !init && wrapper == NULL)	\
		return (NULL);						\
	return (&wrapper->val);						\
}									\
a_attr void								\
a_name##tsd_set(a_type *val)						\
{									\
	a_name##tsd_wrapper_t *wrapper;					\
									\
	assert(a_name##tsd_booted);					\
	wrapper = a_name##tsd_wrapper_get(true);			\
	wrapper->val = *(val);						\
	if (a_cleanup != malloc_tsd_no_cleanup)				\
		wrapper->initialized = true;				\
}
#endif
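
/*
 * Usage sketch (illustrative; "example_" and EX_INITIALIZER are hypothetical
 * names, not part of jemalloc): a consumer with per-thread data of type
 * example_t instantiates the machinery roughly as follows.
 *
 *   malloc_tsd_types(example_, example_t)
 *   malloc_tsd_protos(, example_, example_t)
 *   malloc_tsd_externs(example_, example_t)
 *   malloc_tsd_data(, example_, example_t, EX_INITIALIZER)
 *   malloc_tsd_funcs(, example_, example_t, EX_INITIALIZER,
 *       example_tsd_cleanup)
 *
 * The expansion then provides example_tsd_boot(), example_tsd_get(), and
 * example_tsd_set(), backed by whichever implementation (TLS, Win32 TSD, or
 * pthreads) was selected above.
 */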

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
    !defined(_WIN32))
struct tsd_init_block_s {
	ql_elm(tsd_init_block_t)	link;
	pthread_t			thread;
	void				*data;
};
struct tsd_init_head_s {
	ql_head(tsd_init_block_t)	blocks;
	malloc_mutex_t			lock;
};
#endif
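
/*
 * A sketch of the recursion guard these structures support (see
 * tsd_init_check_recursion()/tsd_init_finish(), declared below): a thread
 * entering TSD bootstrap links a tsd_init_block_t into head->blocks; if the
 * same thread re-enters the allocator while its wrapper is being allocated,
 * the in-progress block's data pointer is returned instead of recursing
 * indefinitely.  tsd_init_finish() unlinks the block once bootstrap is done.
 */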

#define MALLOC_TSD							\
/*  O(name,			type) */				\
    O(tcache,			tcache_t *)				\
    O(thread_allocated,		uint64_t)				\
    O(thread_deallocated,	uint64_t)				\
    O(prof_tdata,		prof_tdata_t *)				\
    O(iarena,			arena_t *)				\
    O(arena,			arena_t *)				\
    O(arenas_tdata,		arena_tdata_t *)			\
    O(narenas_tdata,		unsigned)				\
    O(arenas_tdata_bypass,	bool)					\
    O(tcache_enabled,		tcache_enabled_t)			\
    O(quarantine,		quarantine_t *)				\
    O(witnesses,		witness_list_t)				\
    O(witness_fork,		bool)					\

#define TSD_INITIALIZER {						\
    tsd_state_uninitialized,						\
    NULL,								\
    0,									\
    0,									\
    NULL,								\
    NULL,								\
    NULL,								\
    NULL,								\
    0,									\
    false,								\
    tcache_enabled_default,						\
    NULL,								\
    ql_head_initializer(witnesses),					\
    false								\
}
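
/*
 * TSD_INITIALIZER's entries are positional and must stay in sync with
 * MALLOC_TSD: tsd_state_uninitialized initializes the state field, followed
 * by one value per O() entry (tcache, thread_allocated, thread_deallocated,
 * prof_tdata, iarena, arena, arenas_tdata, narenas_tdata,
 * arenas_tdata_bypass, tcache_enabled, quarantine, witnesses, witness_fork).
 */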

struct tsd_s {
	tsd_state_t	state;
#define O(n, t)								\
	t		n;
MALLOC_TSD
#undef O
};
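
/*
 * Illustrative expansion (first few members only): the O() definition above
 * turns each MALLOC_TSD entry into a plain struct member, so tsd_s begins
 * roughly as
 *
 *   struct tsd_s {
 *           tsd_state_t state;
 *           tcache_t *tcache;
 *           uint64_t thread_allocated;
 *           uint64_t thread_deallocated;
 *           ...
 *   };
 */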

/*
 * Wrapper around tsd_t that makes it possible to avoid implicit conversion
 * between tsd_t and tsdn_t, where tsdn_t is "nullable" and has to be
 * explicitly converted to tsd_t, which is non-nullable.
 */
struct tsdn_s {
	tsd_t	tsd;
};
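
/*
 * Conversion sketch (via tsd_tsdn() and tsdn_tsd(), defined below): the two
 * types share a representation, so conversion is a cast/field access:
 *
 *   tsdn_t *tsdn = tsd_tsdn(tsd);     // Always valid; never NULL.
 *   if (!tsdn_null(tsdn))
 *           tsd = tsdn_tsd(tsdn);     // Only valid when non-NULL.
 */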

static const tsd_t tsd_initializer = TSD_INITIALIZER;

malloc_tsd_types(, tsd_t)

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

void	*malloc_tsd_malloc(size_t size);
void	malloc_tsd_dalloc(void *wrapper);
void	malloc_tsd_no_cleanup(void *arg);
void	malloc_tsd_cleanup_register(bool (*f)(void));
tsd_t	*malloc_tsd_boot0(void);
void	malloc_tsd_boot1(void);
#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
    !defined(_WIN32))
void	*tsd_init_check_recursion(tsd_init_head_t *head,
    tsd_init_block_t *block);
void	tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block);
#endif
void	tsd_cleanup(void *arg);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), , tsd_t)

tsd_t	*tsd_fetch_impl(bool init);
tsd_t	*tsd_fetch(void);
tsdn_t	*tsd_tsdn(tsd_t *tsd);
bool	tsd_nominal(tsd_t *tsd);
#define O(n, t)								\
t	*tsd_##n##p_get(tsd_t *tsd);					\
t	tsd_##n##_get(tsd_t *tsd);					\
void	tsd_##n##_set(tsd_t *tsd, t n);
MALLOC_TSD
#undef O
tsdn_t	*tsdn_fetch(void);
bool	tsdn_null(const tsdn_t *tsdn);
tsd_t	*tsdn_tsd(tsdn_t *tsdn);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TSD_C_))
malloc_tsd_externs(, tsd_t)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, , tsd_t, tsd_initializer, tsd_cleanup)
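
/*
 * The instantiation above generates the canonical tsd API used below:
 * tsd_boot0()/tsd_boot1(), tsd_get(), tsd_set(), tsd_booted_get(), and
 * tsd_get_allocates(), all operating on the composite tsd_t and registering
 * tsd_cleanup() as the destructor.
 */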

JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_fetch_impl(bool init)
{
	tsd_t *tsd = tsd_get(init);

	if (!init && tsd_get_allocates() && tsd == NULL)
		return (NULL);
	assert(tsd != NULL);

	if (unlikely(tsd->state != tsd_state_nominal)) {
		if (tsd->state == tsd_state_uninitialized) {
			tsd->state = tsd_state_nominal;
			/* Trigger cleanup handler registration. */
			tsd_set(tsd);
		} else if (tsd->state == tsd_state_purgatory) {
			tsd->state = tsd_state_reincarnated;
			tsd_set(tsd);
		} else
			assert(tsd->state == tsd_state_reincarnated);
	}

	return (tsd);
}

JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_fetch(void)
{

	return (tsd_fetch_impl(true));
}
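
/*
 * Typical call-site sketch (getters/setters are generated per MALLOC_TSD
 * field; usize here is a hypothetical increment):
 *
 *   tsd_t *tsd = tsd_fetch();
 *   uint64_t allocated = tsd_thread_allocated_get(tsd);
 *   tsd_thread_allocated_set(tsd, allocated + usize);
 */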

JEMALLOC_ALWAYS_INLINE tsdn_t *
tsd_tsdn(tsd_t *tsd)
{

	return ((tsdn_t *)tsd);
}

JEMALLOC_INLINE bool
tsd_nominal(tsd_t *tsd)
{

	return (tsd->state == tsd_state_nominal);
}

#define O(n, t)								\
JEMALLOC_ALWAYS_INLINE t *						\
tsd_##n##p_get(tsd_t *tsd)						\
{									\
									\
	return (&tsd->n);						\
}									\
									\
JEMALLOC_ALWAYS_INLINE t						\
tsd_##n##_get(tsd_t *tsd)						\
{									\
									\
	return (*tsd_##n##p_get(tsd));					\
}									\
									\
JEMALLOC_ALWAYS_INLINE void						\
tsd_##n##_set(tsd_t *tsd, t n)						\
{									\
									\
	assert(tsd->state == tsd_state_nominal);			\
	tsd->n = n;							\
}
MALLOC_TSD
#undef O
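
/*
 * Illustrative expansion for the tcache entry, O(tcache, tcache_t *):
 *
 *   tcache_t **tsd_tcachep_get(tsd_t *tsd) { return (&tsd->tcache); }
 *   tcache_t *tsd_tcache_get(tsd_t *tsd) { return (*tsd_tcachep_get(tsd)); }
 *   void tsd_tcache_set(tsd_t *tsd, tcache_t *tcache) { ... }
 */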

JEMALLOC_ALWAYS_INLINE tsdn_t *
tsdn_fetch(void)
{

	if (!tsd_booted_get())
		return (NULL);

	return (tsd_tsdn(tsd_fetch_impl(false)));
}

JEMALLOC_ALWAYS_INLINE bool
tsdn_null(const tsdn_t *tsdn)
{

	return (tsdn == NULL);
}

JEMALLOC_ALWAYS_INLINE tsd_t *
tsdn_tsd(tsdn_t *tsdn)
{

	assert(!tsdn_null(tsdn));

	return (&tsdn->tsd);
}
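
/*
 * Nullable-context sketch: tsdn_fetch() is safe to call before TSD is
 * booted, so callers branch on the NULL case rather than asserting:
 *
 *   tsdn_t *tsdn = tsdn_fetch();
 *   if (tsdn_null(tsdn))
 *           return;                    // Pre-boot: no thread state yet.
 *   tsd_t *tsd = tsdn_tsd(tsdn);
 */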
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/