#define JEMALLOC_C_

#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/buf_writer.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/emap.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/fxp.h"
#include "jemalloc/internal/guard.h"
#include "jemalloc/internal/hook.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/log.h"
#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/safety_check.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/spin.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/thread_event.h"
#include "jemalloc/internal/util.h"

/******************************************************************************/
/* Data. */

/* Runtime configuration options. */
const char *je_malloc_conf
#ifndef _WIN32
    JEMALLOC_ATTR(weak)
#endif
    ;
/*
 * The usual rule is that the closer to runtime you are, the higher priority
 * your configuration settings are (so the jemalloc config options get lower
 * priority than the per-binary setting, which gets lower priority than the
 * /etc setting, which gets lower priority than the environment settings).
 *
 * But it's a fairly common use case in some testing environments for a user to
 * be able to control the binary, but nothing else (e.g. a performance canary
 * uses the production OS and environment variables, but can run any binary in
 * those circumstances).  For these use cases, it's handy to have an in-binary
 * mechanism for overriding environment variable settings, with the idea that
 * if the results are positive they get promoted to the official settings, and
 * moved from the binary to the environment variable.
 *
 * We don't actually want this to be widespread, so we'll give it a silly name
 * and not mention it in headers or documentation.
 */
const char *je_malloc_conf_2_conf_harder
#ifndef _WIN32
    JEMALLOC_ATTR(weak)
#endif
    ;
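/*
 * Usage sketch (illustrative, not part of this translation unit): an
 * application that cannot change its environment can bake options into its
 * own binary by defining the public (possibly prefixed) symbol, which
 * overrides the weak definition above, e.g.
 *
 *     const char *malloc_conf = "narenas:4,abort_conf:true";
 *
 * The option names shown are examples only; the set of valid options depends
 * on how jemalloc was configured (see the MALLOC_CONF documentation).
 */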

bool opt_abort =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
bool opt_abort_conf =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
/* Intentionally default off, even with debug builds. */
bool opt_confirm_conf = false;
const char *opt_junk =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    "true"
#else
    "false"
#endif
    ;
bool opt_junk_alloc =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    true
#else
    false
#endif
    ;
bool opt_junk_free =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    true
#else
    false
#endif
    ;

/*
 * opt_trust_madvise: whether to trust the build-time detection that
 * madvise(..., MADV_DONTNEED) zeroes pages.  Some environments (notably qemu
 * user-mode emulation) accept the call but leave the old contents intact,
 * which breaks the assumption behind JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS.
 * When that macro is defined, the default is therefore to verify the behavior
 * at runtime rather than trust it.
 */
bool opt_trust_madvise =
#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS
    false
#else
    true
#endif
    ;
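/*
 * The property being verified, in miniature (adapted from the test program
 * that motivated this option; standalone sketch, not part of this file):
 *
 *     void *p = mmap(NULL, len, PROT_READ|PROT_WRITE,
 *         MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
 *     memset(p, 'A', len);
 *     if (madvise(p, len, MADV_DONTNEED) == 0) {
 *         assert(((unsigned char *)p)[0] == 0);
 *     }
 *
 * On a stock Linux kernel the assertion holds; under unpatched qemu user-mode
 * emulation the page may still read back as 'A'.
 */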

bool opt_cache_oblivious =
#ifdef JEMALLOC_CACHE_OBLIVIOUS
    true
#else
    false
#endif
    ;

zero_realloc_action_t opt_zero_realloc_action =
    zero_realloc_action_strict;

atomic_zu_t zero_realloc_count = ATOMIC_INIT(0);

const char *zero_realloc_mode_names[] = {
    "strict",
    "free",
    "abort",
};

/*
 * These are the documented values for junk fill debugging facilities -- see
 * the man page.
 */
static const uint8_t junk_alloc_byte = 0xa5;
static const uint8_t junk_free_byte = 0x5a;

static void default_junk_alloc(void *ptr, size_t usize) {
    memset(ptr, junk_alloc_byte, usize);
}

static void default_junk_free(void *ptr, size_t usize) {
    memset(ptr, junk_free_byte, usize);
}

void (*junk_alloc_callback)(void *ptr, size_t size) = &default_junk_alloc;
void (*junk_free_callback)(void *ptr, size_t size) = &default_junk_free;
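/*
 * Debugging note (informal): with junk filling enabled, memory that reads
 * back as repeated 0xa5 bytes was handed out by the allocator but never
 * written by the application, while repeated 0x5a bytes typically indicate a
 * read after free.  For example, a pointer-sized slot full of junk shows up
 * in a debugger as 0xa5a5a5a5a5a5a5a5 (freshly allocated) or
 * 0x5a5a5a5a5a5a5a5a (freed) on a 64-bit target.
 */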

bool opt_utrace = false;
bool opt_xmalloc = false;
bool opt_experimental_infallible_new = false;
bool opt_zero = false;
unsigned opt_narenas = 0;
fxp_t opt_narenas_ratio = FXP_INIT_INT(4);

unsigned ncpus;

/* Protects arenas initialization. */
malloc_mutex_t arenas_lock;

/* The global hpa, and whether it's on. */
bool opt_hpa = false;
hpa_shard_opts_t opt_hpa_opts = HPA_SHARD_OPTS_DEFAULT;
sec_opts_t opt_hpa_sec_opts = SEC_OPTS_DEFAULT;

/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 *
 * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
 * arenas.  arenas[narenas_auto..narenas_total) are only used if the application
 * takes some action to create them and allocate from them.
 *
 * Points to an arena_t.
 */
JEMALLOC_ALIGNED(CACHELINE)
atomic_p_t arenas[MALLOCX_ARENA_LIMIT];
static atomic_u_t narenas_total; /* Use narenas_total_*(). */
/* Below three are read-only after initialization. */
static arena_t *a0; /* arenas[0]. */
unsigned narenas_auto;
unsigned manual_arena_base;

malloc_init_t malloc_init_state = malloc_init_uninitialized;

/* False should be the common case.  Set to true to trigger initialization. */
bool malloc_slow = true;

/* When malloc_slow is true, set the corresponding bits for sanity check. */
enum {
    flag_opt_junk_alloc = (1U),
    flag_opt_junk_free  = (1U << 1),
    flag_opt_zero       = (1U << 2),
    flag_opt_utrace     = (1U << 3),
    flag_opt_xmalloc    = (1U << 4)
};
static uint8_t malloc_slow_flags;
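/*
 * How the flags are used (informal sketch; the actual computation lives in
 * the option-processing code later in this file): each enabled slow-path
 * option sets its bit in malloc_slow_flags, and malloc_slow is then derived
 * from the combined mask, e.g.
 *
 *     malloc_slow_flags |= (opt_zero ? flag_opt_zero : 0);
 *     malloc_slow = (malloc_slow_flags != 0);
 *
 * so the allocation fast paths only need a single boolean test.
 */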

#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
#  define NO_INITIALIZER ((unsigned long)0)
#  define INITIALIZER pthread_self()
#  define IS_INITIALIZER (malloc_initializer == pthread_self())
static pthread_t malloc_initializer = NO_INITIALIZER;
#else
#  define NO_INITIALIZER false
#  define INITIALIZER true
#  define IS_INITIALIZER malloc_initializer
static bool malloc_initializer = NO_INITIALIZER;
#endif

/* Used to avoid initialization races. */
#ifdef _WIN32
#if _WIN32_WINNT >= 0x0600
static malloc_mutex_t init_lock = SRWLOCK_INIT;
#else
static malloc_mutex_t init_lock;
static bool init_lock_initialized = false;

JEMALLOC_ATTR(constructor)
static void WINAPI
_init_init_lock(void) {
    /*
     * If another constructor in the same binary is using mallctl to e.g.
     * set up extent hooks, it may end up running before this one, and
     * malloc_init_hard will crash trying to lock the uninitialized lock. So
     * we force an initialization of the lock in malloc_init_hard as well.
     * We don't try to care about atomicity of the accesses to the
     * init_lock_initialized boolean, since it really only matters early in
     * the process creation, before any separate thread normally starts
     * doing anything.
     */
    if (!init_lock_initialized) {
        malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT,
            malloc_mutex_rank_exclusive);
    }
    init_lock_initialized = true;
}

#ifdef _MSC_VER
#  pragma section(".CRT$XCU", read)
JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
#endif
#endif
#else
static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;
#endif

typedef struct {
    void *p;  /* Input pointer (as in realloc(p, s)). */
    size_t s; /* Request size. */
    void *r;  /* Result pointer. */
} malloc_utrace_t;

#ifdef JEMALLOC_UTRACE
#  define UTRACE(a, b, c) do { \
    if (unlikely(opt_utrace)) { \
        int utrace_serrno = errno; \
        malloc_utrace_t ut; \
        ut.p = (a); \
        ut.s = (b); \
        ut.r = (c); \
        UTRACE_CALL(&ut, sizeof(ut)); \
        errno = utrace_serrno; \
    } \
} while (0)
#else
#  define UTRACE(a, b, c)
#endif
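/*
 * Typical call sites (informal): the public entry points wrap their results
 * with this macro, roughly
 *
 *     UTRACE(0, size, ret);     after malloc(size) returns ret
 *     UTRACE(ptr, size, ret);   after realloc(ptr, size) returns ret
 *     UTRACE(ptr, 0, 0);        after free(ptr)
 *
 * so a utrace(2)-style consumer sees the (input, size, result) triple for
 * every allocation event when opt_utrace is enabled.
 */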

/* Whether encountered any invalid config options. */
static bool had_conf_error = false;

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static bool malloc_init_hard_a0(void);
static bool malloc_init_hard(void);

/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

JEMALLOC_ALWAYS_INLINE bool
malloc_init_a0(void) {
    if (unlikely(malloc_init_state == malloc_init_uninitialized)) {
        return malloc_init_hard_a0();
    }
    return false;
}

JEMALLOC_ALWAYS_INLINE bool
malloc_init(void) {
    if (unlikely(!malloc_initialized()) && malloc_init_hard()) {
        return true;
    }
    return false;
}
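/*
 * Calling convention (informal): allocation entry points invoke malloc_init()
 * before touching allocator state and treat a true return as failure, e.g.
 *
 *     if (unlikely(malloc_init())) {
 *         return NULL;
 *     }
 *
 * The unlikely() hint keeps the already-initialized case on the fast path.
 */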

/*
 * The a0*() functions are used instead of i{d,}alloc() in situations that
 * cannot tolerate TLS variable access.
 */

static void *
a0ialloc(size_t size, bool zero, bool is_internal) {
    if (unlikely(malloc_init_a0())) {
        return NULL;
    }

    return iallocztm(TSDN_NULL, size, sz_size2index(size), zero, NULL,
        is_internal, arena_get(TSDN_NULL, 0, true), true);
}

static void
a0idalloc(void *ptr, bool is_internal) {
    idalloctm(TSDN_NULL, ptr, NULL, NULL, is_internal, true);
}

void *
a0malloc(size_t size) {
    return a0ialloc(size, false, true);
}

void
a0dalloc(void *ptr) {
    a0idalloc(ptr, true);
}

/*
 * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive
 * situations that cannot tolerate TLS variable access (TLS allocation and very
 * early internal data structure initialization).
 */

void *
bootstrap_malloc(size_t size) {
    if (unlikely(size == 0)) {
        size = 1;
    }

    return a0ialloc(size, false, false);
}

void *
bootstrap_calloc(size_t num, size_t size) {
    size_t num_size;

    num_size = num * size;
    if (unlikely(num_size == 0)) {
        assert(num == 0 || size == 0);
        num_size = 1;
    }

    return a0ialloc(num_size, true, false);
}

void
bootstrap_free(void *ptr) {
    if (unlikely(ptr == NULL)) {
        return;
    }

    a0idalloc(ptr, false);
}

void
arena_set(unsigned ind, arena_t *arena) {
    atomic_store_p(&arenas[ind], arena, ATOMIC_RELEASE);
}

static void
narenas_total_set(unsigned narenas) {
    atomic_store_u(&narenas_total, narenas, ATOMIC_RELEASE);
}

static void
narenas_total_inc(void) {
    atomic_fetch_add_u(&narenas_total, 1, ATOMIC_RELEASE);
}

unsigned
narenas_total_get(void) {
    return atomic_load_u(&narenas_total, ATOMIC_ACQUIRE);
}
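/*
 * Memory-ordering note (informal): the release stores above are intended to
 * pair with acquire loads on the reader side (narenas_total_get() here, and
 * the acquire-ordered read of arenas[ind] inside arena_get()), so a thread
 * that observes an updated count or arena pointer also observes the fully
 * initialized arena it refers to.
 */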

/* Create a new arena and insert it into the arenas array at index ind. */
static arena_t *
arena_init_locked(tsdn_t *tsdn, unsigned ind, const arena_config_t *config) {
    arena_t *arena;

    assert(ind <= narenas_total_get());
    if (ind >= MALLOCX_ARENA_LIMIT) {
        return NULL;
    }
    if (ind == narenas_total_get()) {
        narenas_total_inc();
    }

    /*
     * Another thread may have already initialized arenas[ind] if it's an
     * auto arena.
     */
    arena = arena_get(tsdn, ind, false);
    if (arena != NULL) {
        assert(arena_is_auto(arena));
        return arena;
    }

    /* Actually initialize the arena. */
    arena = arena_new(tsdn, ind, config);

    return arena;
}

static void
arena_new_create_background_thread(tsdn_t *tsdn, unsigned ind) {
    if (ind == 0) {
        return;
    }
    /*
     * Avoid creating a new background thread just for the huge arena, which
     * purges eagerly by default.
     */
    if (have_background_thread && !arena_is_huge(ind)) {
        if (background_thread_create(tsdn_tsd(tsdn), ind)) {
            malloc_printf("<jemalloc>: error in background thread "
                "creation for arena %u. Abort.\n", ind);
            abort();
        }
    }
}

arena_t *
arena_init(tsdn_t *tsdn, unsigned ind, const arena_config_t *config) {
    arena_t *arena;

    malloc_mutex_lock(tsdn, &arenas_lock);
    arena = arena_init_locked(tsdn, ind, config);
    malloc_mutex_unlock(tsdn, &arenas_lock);

    arena_new_create_background_thread(tsdn, ind);

    return arena;
}
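/*
 * Usage sketch (illustrative, not part of this file): manual arenas are
 * normally created from application code through the mallctl interface,
 * which eventually reaches arena_init(), e.g.
 *
 *     unsigned arena_ind;
 *     size_t sz = sizeof(arena_ind);
 *     mallctl("arenas.create", &arena_ind, &sz, NULL, 0);
 *     void *p = mallocx(4096, MALLOCX_ARENA(arena_ind));
 *
 * Error handling is omitted here for brevity.
 */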

static void
arena_bind(tsd_t *tsd, unsigned ind, bool internal) {
    arena_t *arena = arena_get(tsd_tsdn(tsd), ind, false);
    arena_nthreads_inc(arena, internal);

    if (internal) {
        tsd_iarena_set(tsd, arena);
    } else {
        tsd_arena_set(tsd, arena);
        unsigned shard = atomic_fetch_add_u(&arena->binshard_next, 1,
            ATOMIC_RELAXED);
        tsd_binshards_t *bins = tsd_binshardsp_get(tsd);
        for (unsigned i = 0; i < SC_NBINS; i++) {
            assert(bin_infos[i].n_shards > 0 &&
                bin_infos[i].n_shards <= BIN_SHARDS_MAX);
            bins->binshard[i] = shard % bin_infos[i].n_shards;
        }
    }
}
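/*
 * Bin-shard assignment, worked through (informal): each newly bound thread
 * atomically takes the next value of arena->binshard_next and maps it into
 * every size class's shard space by modulo.  With, say, n_shards == 4 for
 * some bin, successive threads binding to the arena get shards 0, 1, 2, 3,
 * 0, 1, ... for that bin, spreading bin lock contention round-robin without
 * any per-bin bookkeeping.
 */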

void
arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) {
    arena_t *oldarena, *newarena;

    oldarena = arena_get(tsd_tsdn(tsd), oldind, false);
    newarena = arena_get(tsd_tsdn(tsd), newind, false);
    arena_nthreads_dec(oldarena, false);
    arena_nthreads_inc(newarena, false);
    tsd_arena_set(tsd, newarena);
}

static void
arena_unbind(tsd_t *tsd, unsigned ind, bool internal) {
    arena_t *arena;

    arena = arena_get(tsd_tsdn(tsd), ind, false);
    arena_nthreads_dec(arena, internal);

    if (internal) {
        tsd_iarena_set(tsd, NULL);
    } else {
        tsd_arena_set(tsd, NULL);
    }
}

/* Slow path, called only by arena_choose(). */
arena_t *
arena_choose_hard(tsd_t *tsd, bool internal) {
    arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL);

    if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena)) {
        unsigned choose = percpu_arena_choose();
        ret = arena_get(tsd_tsdn(tsd), choose, true);
        assert(ret != NULL);
        arena_bind(tsd, arena_ind_get(ret), false);
        arena_bind(tsd, arena_ind_get(ret), true);

        return ret;
    }

    if (narenas_auto > 1) {
        unsigned i, j, choose[2], first_null;
        bool is_new_arena[2];

        /*
         * Determine binding for both non-internal and internal
         * allocation.
         *
         * choose[0]: For application allocation.
         * choose[1]: For internal metadata allocation.
         */
        for (j = 0; j < 2; j++) {
            choose[j] = 0;
            is_new_arena[j] = false;
        }

        first_null = narenas_auto;
        malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock);
        assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL);
        for (i = 1; i < narenas_auto; i++) {
            if (arena_get(tsd_tsdn(tsd), i, false) != NULL) {
                /*
                 * Choose the first arena that has the lowest
                 * number of threads assigned to it.
                 */
                for (j = 0; j < 2; j++) {
                    if (arena_nthreads_get(arena_get(
                        tsd_tsdn(tsd), i, false), !!j) <
                        arena_nthreads_get(arena_get(
                        tsd_tsdn(tsd), choose[j], false),
                        !!j)) {
                        choose[j] = i;
                    }
                }
            } else if (first_null == narenas_auto) {
                /*
                 * Record the index of the first uninitialized
                 * arena, in case all extant arenas are in use.
                 *
                 * NB: It is possible for there to be
                 * discontinuities in terms of initialized
                 * versus uninitialized arenas, due to the
                 * "thread.arena" mallctl.
                 */
                first_null = i;
            }
        }

        for (j = 0; j < 2; j++) {
            if (arena_nthreads_get(arena_get(tsd_tsdn(tsd),
                choose[j], false), !!j) == 0 || first_null ==
|
|
|
|
narenas_auto) {
|
2016-04-23 05:34:14 +08:00
|
|
|
/*
|
|
|
|
* Use an unloaded arena, or the least loaded
|
|
|
|
* arena if all arenas are already initialized.
|
|
|
|
*/
|
2016-05-11 13:21:10 +08:00
|
|
|
if (!!j == internal) {
|
|
|
|
ret = arena_get(tsd_tsdn(tsd),
|
|
|
|
choose[j], false);
|
|
|
|
}
|
2016-04-23 05:34:14 +08:00
|
|
|
} else {
|
|
|
|
arena_t *arena;
|
|
|
|
|
|
|
|
/* Initialize a new arena. */
|
|
|
|
choose[j] = first_null;
|
2016-05-11 13:21:10 +08:00
|
|
|
arena = arena_init_locked(tsd_tsdn(tsd),
|
2016-12-23 06:39:10 +08:00
|
|
|
choose[j],
|
2021-08-23 20:03:35 +08:00
|
|
|
&arena_config_default);
|
2016-04-23 05:34:14 +08:00
|
|
|
if (arena == NULL) {
|
2016-05-11 13:21:10 +08:00
|
|
|
malloc_mutex_unlock(tsd_tsdn(tsd),
|
|
|
|
&arenas_lock);
|
2017-01-20 10:15:45 +08:00
|
|
|
return NULL;
|
2016-04-23 05:34:14 +08:00
|
|
|
}
|
2017-06-08 06:49:09 +08:00
|
|
|
is_new_arena[j] = true;
|
2017-01-16 08:56:30 +08:00
|
|
|
if (!!j == internal) {
|
2016-04-23 05:34:14 +08:00
|
|
|
ret = arena;
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
Refactor/fix arenas manipulation.
Abstract arenas access to use arena_get() (or a0get() where appropriate)
rather than directly reading e.g. arenas[ind]. Prior to the addition of
the arenas.extend mallctl, the worst possible outcome of directly
accessing arenas was a stale read, but arenas.extend may allocate and
assign a new array to arenas.
Add a tsd-based arenas_cache, which amortizes arenas reads. This
introduces some subtle bootstrapping issues, with tsd_boot() now being
split into tsd_boot[01]() to support tsd wrapper allocation
bootstrapping, as well as an arenas_cache_bypass tsd variable which
dynamically terminates allocation of arenas_cache itself.
Promote a0malloc(), a0calloc(), and a0free() to be generally useful for
internal allocation, and use them in several places (more may be
appropriate).
Abstract arena->nthreads management and fix a missing decrement during
thread destruction (recent tsd refactoring left arenas_cleanup()
unused).
Change arena_choose() to propagate OOM, and handle OOM in all callers.
This is important for providing consistent allocation behavior when the
MALLOCX_ARENA() flag is being used. Prior to this fix, it was possible
for an OOM to result in allocation silently allocating from a different
arena than the one specified.
2014-10-08 14:14:57 +08:00
|
|
|
}
|
2016-04-23 05:34:14 +08:00
|
|
|
arena_bind(tsd, choose[j], !!j);
|
2011-03-19 04:41:33 +08:00
|
|
|
}
|
2016-05-11 13:21:10 +08:00
|
|
|
malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock);
|
2017-06-08 06:49:09 +08:00
|
|
|
|
|
|
|
for (j = 0; j < 2; j++) {
|
|
|
|
if (is_new_arena[j]) {
|
|
|
|
assert(choose[j] > 0);
|
|
|
|
arena_new_create_background_thread(
|
|
|
|
tsd_tsdn(tsd), choose[j]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-03-19 04:41:33 +08:00
|
|
|
} else {
|
2016-05-11 13:21:10 +08:00
|
|
|
ret = arena_get(tsd_tsdn(tsd), 0, false);
|
2016-04-23 05:34:14 +08:00
|
|
|
arena_bind(tsd, 0, false);
|
|
|
|
arena_bind(tsd, 0, true);
|
2011-03-19 04:41:33 +08:00
|
|
|
}
|
2009-12-29 16:09:15 +08:00
|
|
|
|
2017-01-20 10:15:45 +08:00
|
|
|
return ret;
|
2009-12-29 16:09:15 +08:00
|
|
|
}
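
/*
 * Editorial sketch (not part of jemalloc): arena_choose_hard() propagates OOM
 * by returning NULL when lazily initializing an arena fails, so a caller that
 * must bind to a specific arena is expected to check the result rather than
 * silently fall back to a different arena, along the lines of:
 *
 *	arena_t *a = arena_choose(tsd, NULL);
 *	if (a == NULL) {
 *		return NULL;
 *	}
 *
 * The actual caller-side handling lives in the allocation paths elsewhere in
 * this file.
 */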

void
iarena_cleanup(tsd_t *tsd) {
	arena_t *iarena;

	iarena = tsd_iarena_get(tsd);
	if (iarena != NULL) {
		arena_unbind(tsd, arena_ind_get(iarena), true);
	}
}

void
arena_cleanup(tsd_t *tsd) {
	arena_t *arena;

	arena = tsd_arena_get(tsd);
	if (arena != NULL) {
		arena_unbind(tsd, arena_ind_get(arena), false);
	}
}

static void
stats_print_atexit(void) {
	if (config_stats) {
		tsdn_t *tsdn;
		unsigned narenas, i;

		tsdn = tsdn_fetch();

		/*
		 * Merge stats from extant threads.  This is racy, since
		 * individual threads do not lock when recording tcache stats
		 * events.  As a consequence, the final stats may be slightly
		 * out of date by the time they are reported, if other threads
		 * continue to allocate.
		 */
		for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
			arena_t *arena = arena_get(tsdn, i, false);
			if (arena != NULL) {
				tcache_slow_t *tcache_slow;

				malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
				ql_foreach(tcache_slow, &arena->tcache_ql,
				    link) {
					tcache_stats_merge(tsdn,
					    tcache_slow->tcache, arena);
				}
				malloc_mutex_unlock(tsdn,
				    &arena->tcache_ql_mtx);
			}
		}
	}
	je_malloc_stats_print(NULL, NULL, opt_stats_print_opts);
}
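
/*
 * Editorial sketch (not part of jemalloc): stats_print_atexit() is the
 * handler registered when the stats_print option is enabled, so the usual way
 * to exercise it is simply
 *
 *	MALLOC_CONF="stats_print:true" ./app
 *
 * which dumps allocator statistics through malloc_message() (stderr by
 * default) when the process exits.
 */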

/*
 * Ensure that we don't hold any locks upon entry to or exit from allocator
 * code (in a "broad" sense that doesn't count a reentrant allocation as an
 * entrance or exit).
 */
JEMALLOC_ALWAYS_INLINE void
check_entry_exit_locking(tsdn_t *tsdn) {
	if (!config_debug) {
		return;
	}
	if (tsdn_null(tsdn)) {
		return;
	}
	tsd_t *tsd = tsdn_tsd(tsdn);
	/*
	 * It's possible we hold locks at entry/exit if we're in a nested
	 * allocation.
	 */
	int8_t reentrancy_level = tsd_reentrancy_level_get(tsd);
	if (reentrancy_level != 0) {
		return;
	}
	witness_assert_lockless(tsdn_witness_tsdp_get(tsdn));
}

/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

static char *
jemalloc_secure_getenv(const char *name) {
#ifdef JEMALLOC_HAVE_SECURE_GETENV
	return secure_getenv(name);
#else
#  ifdef JEMALLOC_HAVE_ISSETUGID
	if (issetugid() != 0) {
		return NULL;
	}
#  endif
	return getenv(name);
#endif
}

static unsigned
malloc_ncpus(void) {
	long result;

#ifdef _WIN32
	SYSTEM_INFO si;
	GetSystemInfo(&si);
	result = si.dwNumberOfProcessors;
#elif defined(CPU_COUNT)
	/*
	 * glibc >= 2.6 has the CPU_COUNT macro.
	 *
	 * glibc's sysconf() uses isspace().  glibc allocates for the first time
	 * *before* setting up the isspace tables.  Therefore we need a
	 * different method to get the number of CPUs.
	 *
	 * The getaffinity approach is also preferred when only a subset of CPUs
	 * is available, to avoid using more arenas than necessary.
	 */
	{
#  if defined(__FreeBSD__) || defined(__DragonFly__)
		cpuset_t set;
#  else
		cpu_set_t set;
#  endif
#  if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY)
		sched_getaffinity(0, sizeof(set), &set);
#  else
		pthread_getaffinity_np(pthread_self(), sizeof(set), &set);
#  endif
		result = CPU_COUNT(&set);
	}
#else
	result = sysconf(_SC_NPROCESSORS_ONLN);
#endif
	return ((result == -1) ? 1 : (unsigned)result);
}
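
/*
 * Editorial sketch (not part of jemalloc): on Linux the CPU_COUNT path counts
 * only the CPUs in the calling thread's affinity mask, so restricting the
 * process, e.g.
 *
 *	taskset -c 0-3 ./app
 *
 * yields 4 here even on a much larger machine, whereas the
 * sysconf(_SC_NPROCESSORS_ONLN) fallback would report every online CPU.
 */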

static void
init_opt_stats_opts(const char *v, size_t vlen, char *dest) {
	size_t opts_len = strlen(dest);
	assert(opts_len <= stats_print_tot_num_options);

	for (size_t i = 0; i < vlen; i++) {
		switch (v[i]) {
#define OPTION(o, v, d, s) case o: break;
			STATS_PRINT_OPTIONS
#undef OPTION
		default: continue;
		}

		if (strchr(dest, v[i]) != NULL) {
			/* Ignore repeated. */
			continue;
		}

		dest[opts_len++] = v[i];
		dest[opts_len] = '\0';
		assert(opts_len <= stats_print_tot_num_options);
	}
	assert(opts_len == strlen(dest));
}
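
/*
 * Editorial sketch (not part of jemalloc): the accepted characters are the
 * single-letter flags understood by malloc_stats_print(); anything not listed
 * in STATS_PRINT_OPTIONS is silently dropped.  For example, a JSON-formatted
 * end-of-run dump can be requested with:
 *
 *	MALLOC_CONF="stats_print:true,stats_print_opts:J" ./app
 */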

/* Reads the next size pair in a multi-sized option. */
static bool
malloc_conf_multi_sizes_next(const char **slab_size_segment_cur,
    size_t *vlen_left, size_t *slab_start, size_t *slab_end, size_t *new_size) {
	const char *cur = *slab_size_segment_cur;
	char *end;
	uintmax_t um;

	set_errno(0);

	/* First number, then '-' */
	um = malloc_strtoumax(cur, &end, 0);
	if (get_errno() != 0 || *end != '-') {
		return true;
	}
	*slab_start = (size_t)um;
	cur = end + 1;

	/* Second number, then ':' */
	um = malloc_strtoumax(cur, &end, 0);
	if (get_errno() != 0 || *end != ':') {
		return true;
	}
	*slab_end = (size_t)um;
	cur = end + 1;

	/* Last number */
	um = malloc_strtoumax(cur, &end, 0);
	if (get_errno() != 0) {
		return true;
	}
	*new_size = (size_t)um;

	/* Consume the separator if there is one. */
	if (*end == '|') {
		end++;
	}

	*vlen_left -= end - *slab_size_segment_cur;
	*slab_size_segment_cur = end;

	return false;
}
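
/*
 * Editorial sketch (not part of jemalloc): each segment parsed above has the
 * form "<start>-<end>:<value>", and segments are chained with '|'.  This is
 * the grammar used by the multi-sized options handled further down, e.g.
 *
 *	MALLOC_CONF="bin_shards:1-160:8|161-4096:4" ./app
 *
 * requests 8 shards for bins up to 160 bytes and 4 shards for bins of
 * 161-4096 bytes (the size ranges here are only an example).
 */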

static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p) {
	bool accept;
	const char *opts = *opts_p;

	*k_p = opts;

	for (accept = false; !accept;) {
		switch (*opts) {
		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
		case 'Y': case 'Z':
		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
		case 'y': case 'z':
		case '0': case '1': case '2': case '3': case '4': case '5':
		case '6': case '7': case '8': case '9':
		case '_':
			opts++;
			break;
		case ':':
			opts++;
			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
			*v_p = opts;
			accept = true;
			break;
		case '\0':
			if (opts != *opts_p) {
				malloc_write("<jemalloc>: Conf string ends "
				    "with key\n");
			}
			return true;
		default:
			malloc_write("<jemalloc>: Malformed conf string\n");
			return true;
		}
	}

	for (accept = false; !accept;) {
		switch (*opts) {
		case ',':
			opts++;
			/*
			 * Look ahead one character here, because the next time
			 * this function is called, it will assume that end of
			 * input has been cleanly reached if no input remains,
			 * but we have optimistically already consumed the
			 * comma if one exists.
			 */
			if (*opts == '\0') {
				malloc_write("<jemalloc>: Conf string ends "
				    "with comma\n");
			}
			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
			accept = true;
			break;
		case '\0':
			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
			accept = true;
			break;
		default:
			opts++;
			break;
		}
	}

	*opts_p = opts;
	return false;
}
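
/*
 * Editorial sketch (not part of jemalloc): the grammar accepted above is a
 * comma-separated list of "key:value" pairs, with keys restricted to
 * [A-Za-z0-9_].  Parsing
 *
 *	"abort_conf:true,narenas:4"
 *
 * yields (k="abort_conf", v="true") on the first call and (k="narenas",
 * v="4") on the second; a key with no ':' terminates parsing with the
 * "Conf string ends with key" warning, and a trailing comma is flagged as
 * well.
 */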

static void
malloc_abort_invalid_conf(void) {
	assert(opt_abort_conf);
	malloc_printf("<jemalloc>: Abort (abort_conf:true) on invalid conf "
	    "value (see above).\n");
	abort();
}
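
/*
 * Editorial sketch (not part of jemalloc): with abort_conf enabled, any
 * invalid or out-of-range option turns the warning into a hard failure once
 * all options have been processed, e.g.
 *
 *	MALLOC_CONF="abort_conf:true,not_a_real_option:1" ./app
 *
 * prints the diagnostic for the bogus pair and then aborts here instead of
 * continuing with defaults.
 */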

static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen) {
	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
	    (int)vlen, v);
	/* If abort_conf is set, error out after processing all options. */
	const char *experimental = "experimental_";
	if (strncmp(k, experimental, strlen(experimental)) == 0) {
		/* However, tolerate experimental features. */
		return;
	}
	had_conf_error = true;
}

static void
malloc_slow_flag_init(void) {
	/*
	 * Combine the runtime options into malloc_slow for fast path.  Called
	 * after processing all the options.
	 */
	malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0)
	    | (opt_junk_free ? flag_opt_junk_free : 0)
	    | (opt_zero ? flag_opt_zero : 0)
	    | (opt_utrace ? flag_opt_utrace : 0)
	    | (opt_xmalloc ? flag_opt_xmalloc : 0);

	malloc_slow = (malloc_slow_flags != 0);
}
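
/*
 * Editorial sketch (not part of jemalloc): enabling any of the options folded
 * in above (junk filling, zeroing, utrace, xmalloc) sets malloc_slow and
 * routes allocations through the slower, fully checked path, so e.g.
 *
 *	MALLOC_CONF="junk:true" ./app
 *
 * trades fast-path performance for the extra debugging behavior.
 */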

/* Number of sources for initializing malloc_conf */
#define MALLOC_CONF_NSOURCES 5

static const char *
obtain_malloc_conf(unsigned which_source, char buf[PATH_MAX + 1]) {
	if (config_debug) {
		static unsigned read_source = 0;
		/*
		 * Each source should only be read once, to minimize # of
		 * syscalls on init.
		 */
		assert(read_source++ == which_source);
	}
	assert(which_source < MALLOC_CONF_NSOURCES);

	const char *ret;
	switch (which_source) {
	case 0:
		ret = config_malloc_conf;
		break;
	case 1:
		if (je_malloc_conf != NULL) {
			/* Use options that were compiled into the program. */
			ret = je_malloc_conf;
		} else {
			/* No configuration specified. */
			ret = NULL;
		}
		break;
	case 2: {
		ssize_t linklen = 0;
#ifndef _WIN32
		int saved_errno = errno;
		const char *linkname =
#  ifdef JEMALLOC_PREFIX
		    "/etc/"JEMALLOC_PREFIX"malloc.conf"
#  else
		    "/etc/malloc.conf"
#  endif
		    ;

		/*
		 * Try to use the contents of the "/etc/malloc.conf" symbolic
		 * link's name.
		 */
#ifndef JEMALLOC_READLINKAT
		linklen = readlink(linkname, buf, PATH_MAX);
#else
		linklen = readlinkat(AT_FDCWD, linkname, buf, PATH_MAX);
#endif
		if (linklen == -1) {
			/* No configuration specified. */
			linklen = 0;
			/* Restore errno. */
			set_errno(saved_errno);
		}
#endif
		buf[linklen] = '\0';
		ret = buf;
		break;
	} case 3: {
		const char *envname =
#ifdef JEMALLOC_PREFIX
		    JEMALLOC_CPREFIX"MALLOC_CONF"
#else
		    "MALLOC_CONF"
#endif
		    ;

		if ((ret = jemalloc_secure_getenv(envname)) != NULL) {
			/*
			 * Do nothing; opts is already initialized to the value
			 * of the MALLOC_CONF environment variable.
			 */
		} else {
			/* No configuration specified. */
			ret = NULL;
		}
		break;
	} case 4: {
		ret = je_malloc_conf_2_conf_harder;
		break;
	} default:
		not_reached();
		ret = NULL;
	}
	return ret;
}
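
/*
 * Editorial sketch (not part of jemalloc): the five sources are read in the
 * order above (the --with-malloc-conf string, the malloc_conf global, the
 * /etc/malloc.conf symlink name, the MALLOC_CONF environment variable, and
 * malloc_conf_2_conf_harder), and a source parsed later overrides earlier
 * ones on conflict.  To see which source set each value during startup, run
 * with:
 *
 *	MALLOC_CONF="confirm_conf:true" ./app
 */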

static void
malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
    bool initial_call, const char *opts_cache[MALLOC_CONF_NSOURCES],
    char buf[PATH_MAX + 1]) {
	static const char *opts_explain[MALLOC_CONF_NSOURCES] = {
		"string specified via --with-malloc-conf",
		"string pointed to by the global variable malloc_conf",
		"\"name\" of the file referenced by the symbolic link named "
		    "/etc/malloc.conf",
		"value of the environment variable MALLOC_CONF",
		"string pointed to by the global variable "
		    "malloc_conf_2_conf_harder",
	};
	unsigned i;
	const char *opts, *k, *v;
	size_t klen, vlen;

	for (i = 0; i < MALLOC_CONF_NSOURCES; i++) {
		/* Get runtime configuration. */
		if (initial_call) {
			opts_cache[i] = obtain_malloc_conf(i, buf);
		}
		opts = opts_cache[i];
		if (!initial_call && opt_confirm_conf) {
			malloc_printf(
			    "<jemalloc>: malloc_conf #%u (%s): \"%s\"\n",
			    i + 1, opts_explain[i], opts != NULL ? opts : "");
		}
		if (opts == NULL) {
			continue;
		}

		while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v,
		    &vlen)) {

#define CONF_ERROR(msg, k, klen, v, vlen) \
			if (!initial_call) { \
				malloc_conf_error( \
				    msg, k, klen, v, vlen); \
				cur_opt_valid = false; \
			}
#define CONF_CONTINUE	{ \
				if (!initial_call && opt_confirm_conf \
				    && cur_opt_valid) { \
					malloc_printf("<jemalloc>: -- " \
					    "Set conf value: %.*s:%.*s" \
					    "\n", (int)klen, k, \
					    (int)vlen, v); \
				} \
				continue; \
			}
#define CONF_MATCH(n) \
	(sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
#define CONF_MATCH_VALUE(n) \
	(sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
#define CONF_HANDLE_BOOL(o, n) \
			if (CONF_MATCH(n)) { \
				if (CONF_MATCH_VALUE("true")) { \
					o = true; \
				} else if (CONF_MATCH_VALUE("false")) { \
					o = false; \
				} else { \
					CONF_ERROR("Invalid conf value",\
					    k, klen, v, vlen); \
				} \
				CONF_CONTINUE; \
			}
			/*
			 * One of the CONF_MIN macros below expands, in one of
			 * the use points, to "unsigned integer < 0", which is
			 * always false, triggering the GCC -Wtype-limits
			 * warning, which we disable here and re-enable below.
			 */
			JEMALLOC_DIAGNOSTIC_PUSH
			JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS

#define CONF_DONT_CHECK_MIN(um, min)	false
#define CONF_CHECK_MIN(um, min)	((um) < (min))
#define CONF_DONT_CHECK_MAX(um, max)	false
#define CONF_CHECK_MAX(um, max)	((um) > (max))

#define CONF_VALUE_READ(max_t, result) \
	char *end; \
	set_errno(0); \
	result = (max_t)malloc_strtoumax(v, &end, 0);
#define CONF_VALUE_READ_FAIL() \
	(get_errno() != 0 || (uintptr_t)end - (uintptr_t)v != vlen)

#define CONF_HANDLE_T(t, max_t, o, n, min, max, check_min, check_max, clip) \
			if (CONF_MATCH(n)) { \
				max_t mv; \
				CONF_VALUE_READ(max_t, mv) \
				if (CONF_VALUE_READ_FAIL()) { \
					CONF_ERROR("Invalid conf value",\
					    k, klen, v, vlen); \
				} else if (clip) { \
					if (check_min(mv, (t)(min))) { \
						o = (t)(min); \
					} else if ( \
					    check_max(mv, (t)(max))) { \
						o = (t)(max); \
					} else { \
						o = (t)mv; \
					} \
				} else { \
					if (check_min(mv, (t)(min)) || \
					    check_max(mv, (t)(max))) { \
						CONF_ERROR( \
						    "Out-of-range " \
						    "conf value", \
						    k, klen, v, vlen); \
					} else { \
						o = (t)mv; \
					} \
				} \
				CONF_CONTINUE; \
			}
#define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \
	CONF_HANDLE_T(t, uintmax_t, o, n, min, max, check_min, \
	    check_max, clip)
#define CONF_HANDLE_T_SIGNED(t, o, n, min, max, check_min, check_max, clip)\
	CONF_HANDLE_T(t, intmax_t, o, n, min, max, check_min, \
	    check_max, clip)

#define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, \
    clip) \
			CONF_HANDLE_T_U(unsigned, o, n, min, max, \
			    check_min, check_max, clip)
#define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip) \
			CONF_HANDLE_T_U(size_t, o, n, min, max, \
			    check_min, check_max, clip)
#define CONF_HANDLE_INT64_T(o, n, min, max, check_min, check_max, clip) \
			CONF_HANDLE_T_SIGNED(int64_t, o, n, min, max, \
			    check_min, check_max, clip)
#define CONF_HANDLE_UINT64_T(o, n, min, max, check_min, check_max, clip)\
			CONF_HANDLE_T_U(uint64_t, o, n, min, max, \
			    check_min, check_max, clip)
#define CONF_HANDLE_SSIZE_T(o, n, min, max) \
			CONF_HANDLE_T_SIGNED(ssize_t, o, n, min, max, \
			    CONF_CHECK_MIN, CONF_CHECK_MAX, false)
#define CONF_HANDLE_CHAR_P(o, n, d) \
			if (CONF_MATCH(n)) { \
				size_t cpylen = (vlen <= \
				    sizeof(o)-1) ? vlen : \
				    sizeof(o)-1; \
				strncpy(o, v, cpylen); \
				o[cpylen] = '\0'; \
				CONF_CONTINUE; \
			}
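
			/*
			 * Editorial sketch (not part of jemalloc): each
			 * CONF_HANDLE_* use below is a self-contained "match
			 * key, parse value, validate, assign, continue" step.
			 * For instance, CONF_HANDLE_BOOL(opt_abort, "abort")
			 * expands roughly to:
			 *
			 *	if (CONF_MATCH("abort")) {
			 *		if (CONF_MATCH_VALUE("true")) {
			 *			opt_abort = true;
			 *		} else if (CONF_MATCH_VALUE("false")) {
			 *			opt_abort = false;
			 *		} else {
			 *			CONF_ERROR("Invalid conf value",
			 *			    k, klen, v, vlen);
			 *		}
			 *		CONF_CONTINUE;
			 *	}
			 *
			 * so unmatched keys simply fall through to the next
			 * handler.
			 */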

			bool cur_opt_valid = true;

			CONF_HANDLE_BOOL(opt_confirm_conf, "confirm_conf")
			if (initial_call) {
				continue;
			}

			CONF_HANDLE_BOOL(opt_abort, "abort")
			CONF_HANDLE_BOOL(opt_abort_conf, "abort_conf")
			CONF_HANDLE_BOOL(opt_trust_madvise, "trust_madvise")
			if (strncmp("metadata_thp", k, klen) == 0) {
				int i;
				bool match = false;
				for (i = 0; i < metadata_thp_mode_limit; i++) {
					if (strncmp(metadata_thp_mode_names[i],
					    v, vlen) == 0) {
						opt_metadata_thp = i;
						match = true;
						break;
					}
				}
				if (!match) {
					CONF_ERROR("Invalid conf value",
					    k, klen, v, vlen);
				}
				CONF_CONTINUE;
			}
			CONF_HANDLE_BOOL(opt_retain, "retain")
			if (strncmp("dss", k, klen) == 0) {
				int i;
				bool match = false;
				for (i = 0; i < dss_prec_limit; i++) {
					if (strncmp(dss_prec_names[i], v, vlen)
					    == 0) {
						if (extent_dss_prec_set(i)) {
							CONF_ERROR(
							    "Error setting dss",
							    k, klen, v, vlen);
						} else {
							opt_dss =
							    dss_prec_names[i];
							match = true;
							break;
						}
					}
				}
				if (!match) {
					CONF_ERROR("Invalid conf value",
					    k, klen, v, vlen);
				}
				CONF_CONTINUE;
			}
			if (CONF_MATCH("narenas")) {
				if (CONF_MATCH_VALUE("default")) {
					opt_narenas = 0;
					CONF_CONTINUE;
				} else {
					CONF_HANDLE_UNSIGNED(opt_narenas,
					    "narenas", 1, UINT_MAX,
					    CONF_CHECK_MIN, CONF_DONT_CHECK_MAX,
					    /* clip */ false)
				}
			}
			if (CONF_MATCH("narenas_ratio")) {
				char *end;
				bool err = fxp_parse(&opt_narenas_ratio, v,
				    &end);
				if (err || (size_t)(end - v) != vlen) {
					CONF_ERROR("Invalid conf value",
					    k, klen, v, vlen);
				}
				CONF_CONTINUE;
			}
			if (CONF_MATCH("bin_shards")) {
				const char *bin_shards_segment_cur = v;
				size_t vlen_left = vlen;
				do {
					size_t size_start;
					size_t size_end;
					size_t nshards;
					bool err = malloc_conf_multi_sizes_next(
					    &bin_shards_segment_cur, &vlen_left,
					    &size_start, &size_end, &nshards);
					if (err || bin_update_shard_size(
					    bin_shard_sizes, size_start,
					    size_end, nshards)) {
						CONF_ERROR(
						    "Invalid settings for "
						    "bin_shards", k, klen, v,
						    vlen);
						break;
					}
				} while (vlen_left > 0);
				CONF_CONTINUE;
			}
			CONF_HANDLE_INT64_T(opt_mutex_max_spin,
			    "mutex_max_spin", -1, INT64_MAX, CONF_CHECK_MIN,
			    CONF_DONT_CHECK_MAX, false);
			CONF_HANDLE_SSIZE_T(opt_dirty_decay_ms,
			    "dirty_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) <
			    QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) :
			    SSIZE_MAX);
			CONF_HANDLE_SSIZE_T(opt_muzzy_decay_ms,
			    "muzzy_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) <
			    QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) :
			    SSIZE_MAX);
			CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
			if (CONF_MATCH("stats_print_opts")) {
				init_opt_stats_opts(v, vlen,
				    opt_stats_print_opts);
				CONF_CONTINUE;
			}
			CONF_HANDLE_INT64_T(opt_stats_interval,
			    "stats_interval", -1, INT64_MAX,
			    CONF_CHECK_MIN, CONF_DONT_CHECK_MAX, false)
			if (CONF_MATCH("stats_interval_opts")) {
				init_opt_stats_opts(v, vlen,
				    opt_stats_interval_opts);
				CONF_CONTINUE;
			}
			if (config_fill) {
				if (CONF_MATCH("junk")) {
					if (CONF_MATCH_VALUE("true")) {
						opt_junk = "true";
						opt_junk_alloc = opt_junk_free =
						    true;
					} else if (CONF_MATCH_VALUE("false")) {
						opt_junk = "false";
						opt_junk_alloc = opt_junk_free =
						    false;
					} else if (CONF_MATCH_VALUE("alloc")) {
						opt_junk = "alloc";
						opt_junk_alloc = true;
						opt_junk_free = false;
					} else if (CONF_MATCH_VALUE("free")) {
						opt_junk = "free";
						opt_junk_alloc = false;
						opt_junk_free = true;
					} else {
						CONF_ERROR(
						    "Invalid conf value",
						    k, klen, v, vlen);
					}
					CONF_CONTINUE;
				}
				CONF_HANDLE_BOOL(opt_zero, "zero")
			}
			if (config_utrace) {
				CONF_HANDLE_BOOL(opt_utrace, "utrace")
			}
			if (config_xmalloc) {
				CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
			}
			if (config_enable_cxx) {
				CONF_HANDLE_BOOL(
				    opt_experimental_infallible_new,
				    "experimental_infallible_new")
			}

			CONF_HANDLE_BOOL(opt_tcache, "tcache")
			CONF_HANDLE_SIZE_T(opt_tcache_max, "tcache_max",
			    0, TCACHE_MAXCLASS_LIMIT, CONF_DONT_CHECK_MIN,
			    CONF_CHECK_MAX, /* clip */ true)
			if (CONF_MATCH("lg_tcache_max")) {
				size_t m;
				CONF_VALUE_READ(size_t, m)
				if (CONF_VALUE_READ_FAIL()) {
					CONF_ERROR("Invalid conf value",
					    k, klen, v, vlen);
				} else {
					/* clip if necessary */
					if (m > TCACHE_LG_MAXCLASS_LIMIT) {
						m = TCACHE_LG_MAXCLASS_LIMIT;
					}
					opt_tcache_max = (size_t)1 << m;
				}
				CONF_CONTINUE;
			}
			/*
			 * Anyone trying to set a value outside -16 to 16 is
			 * deeply confused.
			 */
			CONF_HANDLE_SSIZE_T(opt_lg_tcache_nslots_mul,
			    "lg_tcache_nslots_mul", -16, 16)
			/* Ditto with values past 2048. */
			CONF_HANDLE_UNSIGNED(opt_tcache_nslots_small_min,
			    "tcache_nslots_small_min", 1, 2048,
			    CONF_CHECK_MIN, CONF_CHECK_MAX, /* clip */ true)
			CONF_HANDLE_UNSIGNED(opt_tcache_nslots_small_max,
			    "tcache_nslots_small_max", 1, 2048,
			    CONF_CHECK_MIN, CONF_CHECK_MAX, /* clip */ true)
			CONF_HANDLE_UNSIGNED(opt_tcache_nslots_large,
			    "tcache_nslots_large", 1, 2048,
			    CONF_CHECK_MIN, CONF_CHECK_MAX, /* clip */ true)
			CONF_HANDLE_SIZE_T(opt_tcache_gc_incr_bytes,
			    "tcache_gc_incr_bytes", 1024, SIZE_T_MAX,
			    CONF_CHECK_MIN, CONF_DONT_CHECK_MAX,
			    /* clip */ true)
			CONF_HANDLE_SIZE_T(opt_tcache_gc_delay_bytes,
			    "tcache_gc_delay_bytes", 0, SIZE_T_MAX,
			    CONF_DONT_CHECK_MIN, CONF_DONT_CHECK_MAX,
			    /* clip */ false)
			CONF_HANDLE_UNSIGNED(opt_lg_tcache_flush_small_div,
			    "lg_tcache_flush_small_div", 1, 16,
			    CONF_CHECK_MIN, CONF_CHECK_MAX, /* clip */ true)
			CONF_HANDLE_UNSIGNED(opt_lg_tcache_flush_large_div,
			    "lg_tcache_flush_large_div", 1, 16,
			    CONF_CHECK_MIN, CONF_CHECK_MAX, /* clip */ true)
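
			/*
			 * Editorial sketch (not part of jemalloc): the tcache
			 * options above are the knobs for trading per-thread
			 * cache memory against trips to the shared arenas,
			 * e.g. capping both the cached object size and the
			 * small-bin cache depth:
			 *
			 *	MALLOC_CONF="tcache_max:4096,tcache_nslots_small_max:100" ./app
			 *
			 * Handlers marked "clip" above clamp out-of-range
			 * values to their stated bounds instead of rejecting
			 * them.
			 */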

			/*
			 * The runtime option of oversize_threshold remains
			 * undocumented.  It may be tweaked in the next major
			 * release (6.0).  The default value 8M is rather
			 * conservative / safe.  Tuning it further down may
			 * improve fragmentation a bit more, but may also cause
			 * contention on the huge arena.
			 */
			CONF_HANDLE_SIZE_T(opt_oversize_threshold,
			    "oversize_threshold", 0, SC_LARGE_MAXCLASS,
			    CONF_DONT_CHECK_MIN, CONF_CHECK_MAX, false)
			CONF_HANDLE_SIZE_T(opt_lg_extent_max_active_fit,
			    "lg_extent_max_active_fit", 0,
			    (sizeof(size_t) << 3), CONF_DONT_CHECK_MIN,
			    CONF_CHECK_MAX, false)

			if (strncmp("percpu_arena", k, klen) == 0) {
				bool match = false;
				for (int i = percpu_arena_mode_names_base; i <
				    percpu_arena_mode_names_limit; i++) {
					if (strncmp(percpu_arena_mode_names[i],
					    v, vlen) == 0) {
						if (!have_percpu_arena) {
							CONF_ERROR(
							    "No getcpu support",
							    k, klen, v, vlen);
						}
						opt_percpu_arena = i;
						match = true;
						break;
					}
				}
				if (!match) {
					CONF_ERROR("Invalid conf value",
					    k, klen, v, vlen);
				}
				CONF_CONTINUE;
			}
|
2017-03-18 03:42:33 +08:00
|
|
|
CONF_HANDLE_BOOL(opt_background_thread,
|
|
|
|
"background_thread");
|
2018-03-30 03:58:13 +08:00
|
|
|
CONF_HANDLE_SIZE_T(opt_max_background_threads,
|
|
|
|
"max_background_threads", 1,
|
2019-05-03 07:22:10 +08:00
|
|
|
opt_max_background_threads,
|
|
|
|
CONF_CHECK_MIN, CONF_CHECK_MAX,
|
2018-03-30 03:58:13 +08:00
|
|
|
true);
|
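/*
 * Illustrative usage of the two options handled above:
 *   MALLOC_CONF="background_thread:true,max_background_threads:4"
 * enables background purging threads and caps their count at 4.
 */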
2020-08-15 04:36:41 +08:00
|
|
|
CONF_HANDLE_BOOL(opt_hpa, "hpa")
|
2020-12-09 08:33:39 +08:00
|
|
|
CONF_HANDLE_SIZE_T(opt_hpa_opts.slab_max_alloc,
|
|
|
|
"hpa_slab_max_alloc", PAGE, HUGEPAGE,
|
2020-10-17 04:14:59 +08:00
|
|
|
CONF_CHECK_MIN, CONF_CHECK_MAX, true);
|
|
|
|
|
2020-12-10 05:52:29 +08:00
|
|
|
/*
|
|
|
|
* Accept either a ratio-based or an exact hugification
|
|
|
|
* threshold.
|
|
|
|
*/
|
|
|
|
CONF_HANDLE_SIZE_T(opt_hpa_opts.hugification_threshold,
|
|
|
|
"hpa_hugification_threshold", PAGE, HUGEPAGE,
|
|
|
|
CONF_CHECK_MIN, CONF_CHECK_MAX, true);
|
|
|
|
if (CONF_MATCH("hpa_hugification_threshold_ratio")) {
|
|
|
|
fxp_t ratio;
|
|
|
|
char *end;
|
|
|
|
bool err = fxp_parse(&ratio, v,
|
|
|
|
&end);
|
|
|
|
if (err || (size_t)(end - v) != vlen
|
|
|
|
|| ratio > FXP_INIT_INT(1)) {
|
|
|
|
CONF_ERROR("Invalid conf value",
|
|
|
|
k, klen, v, vlen);
|
|
|
|
} else {
|
|
|
|
opt_hpa_opts.hugification_threshold =
|
|
|
|
fxp_mul_frac(HUGEPAGE, ratio);
|
|
|
|
}
|
|
|
|
CONF_CONTINUE;
|
|
|
|
}
|
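/*
 * Illustrative usage (assuming fxp_parse() accepts decimal ratios, as the
 * comparison against FXP_INIT_INT(1) above suggests):
 *   MALLOC_CONF="hpa_hugification_threshold_ratio:0.95"
 * is equivalent to setting hpa_hugification_threshold to
 * fxp_mul_frac(HUGEPAGE, 0.95), i.e. roughly 95% of a hugepage's bytes must
 * be active before it becomes a hugification candidate (interpretation
 * assumed from the option name).
 */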
|
|
|
|
2021-08-19 03:22:43 +08:00
|
|
|
CONF_HANDLE_UINT64_T(
|
2021-06-15 05:53:23 +08:00
|
|
|
opt_hpa_opts.hugify_delay_ms, "hpa_hugify_delay_ms",
|
2021-08-19 03:22:43 +08:00
|
|
|
0, 0, CONF_DONT_CHECK_MIN, CONF_DONT_CHECK_MAX,
|
|
|
|
false);
|
|
|
|
|
|
|
|
CONF_HANDLE_UINT64_T(
|
|
|
|
opt_hpa_opts.min_purge_interval_ms,
|
|
|
|
"hpa_min_purge_interval_ms", 0, 0,
|
|
|
|
CONF_DONT_CHECK_MIN, CONF_DONT_CHECK_MAX, false);
|
2020-12-10 06:42:05 +08:00
|
|
|
|
2020-12-10 07:55:17 +08:00
|
|
|
if (CONF_MATCH("hpa_dirty_mult")) {
|
|
|
|
if (CONF_MATCH_VALUE("-1")) {
|
|
|
|
opt_hpa_opts.dirty_mult = (fxp_t)-1;
|
|
|
|
CONF_CONTINUE;
|
|
|
|
}
|
|
|
|
fxp_t ratio;
|
|
|
|
char *end;
|
|
|
|
bool err = fxp_parse(&ratio, v,
|
|
|
|
&end);
|
|
|
|
if (err || (size_t)(end - v) != vlen) {
|
|
|
|
CONF_ERROR("Invalid conf value",
|
|
|
|
k, klen, v, vlen);
|
|
|
|
} else {
|
|
|
|
opt_hpa_opts.dirty_mult = ratio;
|
|
|
|
}
|
|
|
|
CONF_CONTINUE;
|
|
|
|
}
|
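/*
 * Illustrative usage: MALLOC_CONF="hpa_dirty_mult:0.25" stores the parsed
 * fixed-point ratio directly, while the special value "-1" (handled above)
 * stores (fxp_t)-1; treating that sentinel as "no dirty-page ratio limit"
 * is an assumption based on the special-casing here.
 */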
|
|
|
|
2021-01-27 10:35:18 +08:00
|
|
|
CONF_HANDLE_SIZE_T(opt_hpa_sec_opts.nshards,
|
|
|
|
"hpa_sec_nshards", 0, 0, CONF_CHECK_MIN,
|
|
|
|
CONF_DONT_CHECK_MAX, true);
|
|
|
|
CONF_HANDLE_SIZE_T(opt_hpa_sec_opts.max_alloc,
|
|
|
|
"hpa_sec_max_alloc", PAGE, 0, CONF_CHECK_MIN,
|
|
|
|
CONF_DONT_CHECK_MAX, true);
|
|
|
|
CONF_HANDLE_SIZE_T(opt_hpa_sec_opts.max_bytes,
|
|
|
|
"hpa_sec_max_bytes", PAGE, 0, CONF_CHECK_MIN,
|
|
|
|
CONF_DONT_CHECK_MAX, true);
|
|
|
|
CONF_HANDLE_SIZE_T(opt_hpa_sec_opts.bytes_after_flush,
|
|
|
|
"hpa_sec_bytes_after_flush", PAGE, 0,
|
|
|
|
CONF_CHECK_MIN, CONF_DONT_CHECK_MAX, true);
|
|
|
|
CONF_HANDLE_SIZE_T(opt_hpa_sec_opts.batch_fill_extra,
|
2021-06-04 08:14:43 +08:00
|
|
|
"hpa_sec_batch_fill_extra", 0, HUGEPAGE_PAGES,
|
|
|
|
CONF_CHECK_MIN, CONF_CHECK_MAX, true);
|
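/*
 * Illustrative usage for the small extent cache (SEC) sitting in front of
 * the HPA, using the option names handled above:
 *   MALLOC_CONF="hpa:true,hpa_sec_nshards:4,hpa_sec_max_alloc:32768"
 */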
2020-09-05 06:22:47 +08:00
|
|
|
|
2018-04-21 12:11:03 +08:00
|
|
|
if (CONF_MATCH("slab_sizes")) {
|
2020-10-06 08:39:01 +08:00
|
|
|
if (CONF_MATCH_VALUE("default")) {
|
|
|
|
sc_data_init(sc_data);
|
|
|
|
CONF_CONTINUE;
|
|
|
|
}
|
2018-04-21 12:11:03 +08:00
|
|
|
bool err;
|
|
|
|
const char *slab_size_segment_cur = v;
|
|
|
|
size_t vlen_left = vlen;
|
|
|
|
do {
|
|
|
|
size_t slab_start;
|
|
|
|
size_t slab_end;
|
|
|
|
size_t pgs;
|
2018-11-21 05:51:32 +08:00
|
|
|
err = malloc_conf_multi_sizes_next(
|
2018-04-21 12:11:03 +08:00
|
|
|
&slab_size_segment_cur,
|
|
|
|
&vlen_left, &slab_start, &slab_end,
|
|
|
|
&pgs);
|
|
|
|
if (!err) {
|
|
|
|
sc_data_update_slab_size(
|
2018-07-20 08:08:10 +08:00
|
|
|
sc_data, slab_start,
|
2018-04-21 12:11:03 +08:00
|
|
|
slab_end, (int)pgs);
|
|
|
|
} else {
|
2019-05-01 04:54:00 +08:00
|
|
|
CONF_ERROR("Invalid settings "
|
|
|
|
"for slab_sizes",
|
|
|
|
k, klen, v, vlen);
|
2018-04-21 12:11:03 +08:00
|
|
|
}
|
|
|
|
} while (!err && vlen_left > 0);
|
2019-05-01 04:54:00 +08:00
|
|
|
CONF_CONTINUE;
|
2018-04-21 12:11:03 +08:00
|
|
|
}
|
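/*
 * Illustrative usage (segment syntax per the opt.slab_sizes documentation;
 * the exact separator handling lives in malloc_conf_multi_sizes_next()):
 *   MALLOC_CONF="slab_sizes:9-16:4|129-512:3"
 * puts 9..16-byte classes on 4-page slabs and 129..512-byte classes on
 * 3-page slabs; "slab_sizes:default" (handled above) restores the built-in
 * slab geometry.
 */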
2012-02-11 12:22:09 +08:00
|
|
|
if (config_prof) {
|
2017-04-25 08:28:55 +08:00
|
|
|
CONF_HANDLE_BOOL(opt_prof, "prof")
|
2012-04-21 12:39:14 +08:00
|
|
|
CONF_HANDLE_CHAR_P(opt_prof_prefix,
|
|
|
|
"prof_prefix", "jeprof")
|
2017-04-25 08:28:55 +08:00
|
|
|
CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
|
2014-10-04 14:25:30 +08:00
|
|
|
CONF_HANDLE_BOOL(opt_prof_thread_active_init,
|
2017-04-25 08:28:55 +08:00
|
|
|
"prof_thread_active_init")
|
2014-08-19 07:22:13 +08:00
|
|
|
CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
|
2016-11-17 10:28:38 +08:00
|
|
|
"lg_prof_sample", 0, (sizeof(uint64_t) << 3)
|
2019-05-03 07:22:10 +08:00
|
|
|
- 1, CONF_DONT_CHECK_MIN, CONF_CHECK_MAX,
|
|
|
|
true)
|
2017-04-25 08:28:55 +08:00
|
|
|
CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
|
2012-03-07 06:57:45 +08:00
|
|
|
CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
|
2012-04-21 12:39:14 +08:00
|
|
|
"lg_prof_interval", -1,
|
2012-02-11 12:22:09 +08:00
|
|
|
(sizeof(uint64_t) << 3) - 1)
|
2017-04-25 08:28:55 +08:00
|
|
|
CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
|
|
|
|
CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
|
|
|
|
CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
|
2018-07-06 01:56:33 +08:00
|
|
|
CONF_HANDLE_BOOL(opt_prof_log, "prof_log")
|
2019-12-19 05:38:14 +08:00
|
|
|
CONF_HANDLE_SSIZE_T(opt_prof_recent_alloc_max,
|
|
|
|
"prof_recent_alloc_max", -1, SSIZE_MAX)
|
2020-12-19 09:14:59 +08:00
|
|
|
CONF_HANDLE_BOOL(opt_prof_stats, "prof_stats")
|
2020-06-20 06:16:53 +08:00
|
|
|
CONF_HANDLE_BOOL(opt_prof_sys_thread_name,
|
|
|
|
"prof_sys_thread_name")
|
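/*
 * Illustrative profiling setup using options handled in this block
 * (requires a build with profiling enabled):
 *   MALLOC_CONF="prof:true,lg_prof_sample:19,prof_final:true"
 * samples roughly every 2^19 bytes of allocation activity and dumps a
 * final profile at exit.
 */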
2020-06-02 21:42:44 +08:00
|
|
|
if (CONF_MATCH("prof_time_resolution")) {
|
|
|
|
if (CONF_MATCH_VALUE("default")) {
|
|
|
|
opt_prof_time_res =
|
|
|
|
prof_time_res_default;
|
|
|
|
} else if (CONF_MATCH_VALUE("high")) {
|
|
|
|
if (!config_high_res_timer) {
|
|
|
|
CONF_ERROR(
|
|
|
|
"No high resolution"
|
|
|
|
" timer support",
|
|
|
|
k, klen, v, vlen);
|
|
|
|
} else {
|
|
|
|
opt_prof_time_res =
|
|
|
|
prof_time_res_high;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
CONF_ERROR("Invalid conf value",
|
|
|
|
k, klen, v, vlen);
|
|
|
|
}
|
2020-06-18 06:20:51 +08:00
|
|
|
CONF_CONTINUE;
|
2020-06-02 21:42:44 +08:00
|
|
|
}
|
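/*
 * Illustrative usage: MALLOC_CONF="prof_time_resolution:high" selects
 * prof_time_res_high, and is rejected above when the build lacks
 * config_high_res_timer.
 */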
2020-08-04 04:05:34 +08:00
|
|
|
/*
|
|
|
|
* Undocumented. When set to false, don't
|
|
|
|
* correct for an unbiasing bug in jeprof
|
|
|
|
* attribution. This can be handy if you want
|
|
|
|
* to get consistent numbers from your binary
|
|
|
|
* across different jemalloc versions, even if
|
|
|
|
* those numbers are incorrect. The default is
|
|
|
|
* true.
|
|
|
|
*/
|
|
|
|
CONF_HANDLE_BOOL(opt_prof_unbias, "prof_unbias")
|
2012-02-11 12:22:09 +08:00
|
|
|
}
|
2017-07-20 07:36:46 +08:00
|
|
|
if (config_log) {
|
2017-10-03 08:48:03 +08:00
|
|
|
if (CONF_MATCH("log")) {
|
2017-07-20 07:36:46 +08:00
|
|
|
size_t cpylen = (
|
|
|
|
vlen < sizeof(log_var_names) ?
|
|
|
|
vlen : sizeof(log_var_names) - 1);
|
|
|
|
strncpy(log_var_names, v, cpylen);
|
|
|
|
log_var_names[cpylen] = '\0';
|
2019-05-01 04:54:00 +08:00
|
|
|
CONF_CONTINUE;
|
2017-07-20 07:36:46 +08:00
|
|
|
}
|
|
|
|
}
|
2018-02-17 06:19:19 +08:00
|
|
|
if (CONF_MATCH("thp")) {
|
|
|
|
bool match = false;
|
|
|
|
for (int i = 0; i < thp_mode_names_limit; i++) {
|
|
|
|
if (strncmp(thp_mode_names[i], v, vlen)
|
|
|
|
== 0) {
|
2020-07-04 23:09:27 +08:00
|
|
|
if (!have_madvise_huge && !have_memcntl) {
|
2019-05-01 04:54:00 +08:00
|
|
|
CONF_ERROR(
|
2018-02-17 06:19:19 +08:00
|
|
|
"No THP support",
|
|
|
|
k, klen, v, vlen);
|
|
|
|
}
|
|
|
|
opt_thp = i;
|
|
|
|
match = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (!match) {
|
2019-05-01 04:54:00 +08:00
|
|
|
CONF_ERROR("Invalid conf value",
|
2018-02-17 06:19:19 +08:00
|
|
|
k, klen, v, vlen);
|
|
|
|
}
|
2019-05-01 04:54:00 +08:00
|
|
|
CONF_CONTINUE;
|
2018-02-17 06:19:19 +08:00
|
|
|
}
|
2019-09-24 08:56:19 +08:00
|
|
|
if (CONF_MATCH("zero_realloc")) {
|
|
|
|
if (CONF_MATCH_VALUE("strict")) {
|
|
|
|
opt_zero_realloc_action
|
|
|
|
= zero_realloc_action_strict;
|
|
|
|
} else if (CONF_MATCH_VALUE("free")) {
|
|
|
|
opt_zero_realloc_action
|
|
|
|
= zero_realloc_action_free;
|
|
|
|
} else if (CONF_MATCH_VALUE("abort")) {
|
|
|
|
opt_zero_realloc_action
|
|
|
|
= zero_realloc_action_abort;
|
|
|
|
} else {
|
|
|
|
CONF_ERROR("Invalid conf value",
|
|
|
|
k, klen, v, vlen);
|
|
|
|
}
|
|
|
|
CONF_CONTINUE;
|
|
|
|
}
|
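/*
 * Illustrative usage: MALLOC_CONF="zero_realloc:free" selects
 * zero_realloc_action_free, i.e. realloc(ptr, 0) behaves like free();
 * "strict" and "abort" are the other actions handled above.
 */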
2021-04-27 05:22:25 +08:00
|
|
|
|
|
|
|
CONF_HANDLE_SIZE_T(opt_san_guard_small,
|
|
|
|
"san_guard_small", 0, SIZE_T_MAX,
|
|
|
|
CONF_DONT_CHECK_MIN, CONF_DONT_CHECK_MAX, false)
|
|
|
|
CONF_HANDLE_SIZE_T(opt_san_guard_large,
|
|
|
|
"san_guard_large", 0, SIZE_T_MAX,
|
|
|
|
CONF_DONT_CHECK_MIN, CONF_DONT_CHECK_MAX, false)
|
|
|
|
|
2019-05-01 04:54:00 +08:00
|
|
|
CONF_ERROR("Invalid conf pair", k, klen, v, vlen);
|
|
|
|
#undef CONF_ERROR
|
|
|
|
#undef CONF_CONTINUE
|
2014-04-16 07:35:08 +08:00
|
|
|
#undef CONF_MATCH
|
2016-11-17 10:28:38 +08:00
|
|
|
#undef CONF_MATCH_VALUE
|
2010-10-24 09:37:06 +08:00
|
|
|
#undef CONF_HANDLE_BOOL
|
2019-05-03 07:22:10 +08:00
|
|
|
#undef CONF_DONT_CHECK_MIN
|
|
|
|
#undef CONF_CHECK_MIN
|
|
|
|
#undef CONF_DONT_CHECK_MAX
|
|
|
|
#undef CONF_CHECK_MAX
|
2020-01-14 14:29:17 +08:00
|
|
|
#undef CONF_HANDLE_T
|
2016-11-17 10:28:38 +08:00
|
|
|
#undef CONF_HANDLE_T_U
|
2020-01-14 14:29:17 +08:00
|
|
|
#undef CONF_HANDLE_T_SIGNED
|
2016-11-17 10:28:38 +08:00
|
|
|
#undef CONF_HANDLE_UNSIGNED
|
2010-10-24 09:37:06 +08:00
|
|
|
#undef CONF_HANDLE_SIZE_T
|
|
|
|
#undef CONF_HANDLE_SSIZE_T
|
|
|
|
#undef CONF_HANDLE_CHAR_P
|
2018-05-03 17:40:53 +08:00
|
|
|
/* Re-enable diagnostic "-Wtype-limits" */
|
|
|
|
JEMALLOC_DIAGNOSTIC_POP
|
2010-10-24 09:37:06 +08:00
|
|
|
}
|
2018-04-18 05:26:26 +08:00
|
|
|
if (opt_abort_conf && had_conf_error) {
|
|
|
|
malloc_abort_invalid_conf();
|
|
|
|
}
|
2009-06-23 03:08:42 +08:00
|
|
|
}
|
2017-07-20 07:36:46 +08:00
|
|
|
atomic_store_b(&log_init_done, true, ATOMIC_RELEASE);
|
2010-10-24 09:37:06 +08:00
|
|
|
}
|
|
|
|
|
2019-05-01 04:54:00 +08:00
|
|
|
static void
|
|
|
|
malloc_conf_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) {
|
2020-03-30 01:41:23 +08:00
|
|
|
const char *opts_cache[MALLOC_CONF_NSOURCES] = {NULL, NULL, NULL, NULL,
|
|
|
|
NULL};
|
2019-05-01 04:54:00 +08:00
|
|
|
char buf[PATH_MAX + 1];
|
|
|
|
|
|
|
|
/* The first call only set the confirm_conf option and opts_cache */
|
|
|
|
malloc_conf_init_helper(NULL, NULL, true, opts_cache, buf);
|
|
|
|
malloc_conf_init_helper(sc_data, bin_shard_sizes, false, opts_cache,
|
|
|
|
NULL);
|
|
|
|
}
|
|
|
|
|
|
|
|
#undef MALLOC_CONF_NSOURCES
|
|
|
|
|
2010-10-24 09:37:06 +08:00
|
|
|
static bool
|
2017-01-16 08:56:30 +08:00
|
|
|
malloc_init_hard_needed(void) {
|
2015-01-21 07:37:51 +08:00
|
|
|
if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state ==
|
|
|
|
malloc_init_recursible)) {
|
2010-10-24 09:37:06 +08:00
|
|
|
/*
|
|
|
|
* Another thread initialized the allocator before this one
|
|
|
|
* acquired init_lock, or this thread is the initializing
|
|
|
|
* thread, and it is recursively allocating.
|
|
|
|
*/
|
2017-01-20 10:15:45 +08:00
|
|
|
return false;
|
2010-10-24 09:37:06 +08:00
|
|
|
}
|
2012-02-03 14:04:57 +08:00
|
|
|
#ifdef JEMALLOC_THREADED_INIT
|
2014-10-04 01:16:09 +08:00
|
|
|
if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
|
2010-10-24 09:37:06 +08:00
|
|
|
/* Busy-wait until the initializing thread completes. */
|
2017-02-06 15:57:16 +08:00
|
|
|
spin_t spinner = SPIN_INITIALIZER;
|
2010-10-24 09:37:06 +08:00
|
|
|
do {
|
2016-05-13 12:07:08 +08:00
|
|
|
malloc_mutex_unlock(TSDN_NULL, &init_lock);
|
2016-10-14 05:47:50 +08:00
|
|
|
spin_adaptive(&spinner);
|
2016-05-13 12:07:08 +08:00
|
|
|
malloc_mutex_lock(TSDN_NULL, &init_lock);
|
2015-01-21 07:37:51 +08:00
|
|
|
} while (!malloc_initialized());
|
2017-01-20 10:15:45 +08:00
|
|
|
return false;
|
2010-10-24 09:37:06 +08:00
|
|
|
}
|
2012-02-03 14:04:57 +08:00
|
|
|
#endif
|
2017-01-20 10:15:45 +08:00
|
|
|
return true;
|
2015-01-21 07:37:51 +08:00
|
|
|
}
|
2010-10-24 09:37:06 +08:00
|
|
|
|
2015-01-21 07:37:51 +08:00
|
|
|
static bool
|
2017-01-16 08:56:30 +08:00
|
|
|
malloc_init_hard_a0_locked() {
|
2015-01-21 07:37:51 +08:00
|
|
|
malloc_initializer = INITIALIZER;
|
2014-09-23 12:09:23 +08:00
|
|
|
|
2018-07-20 08:08:10 +08:00
|
|
|
JEMALLOC_DIAGNOSTIC_PUSH
|
|
|
|
JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
|
|
|
|
sc_data_t sc_data = {0};
|
|
|
|
JEMALLOC_DIAGNOSTIC_POP
|
|
|
|
|
2018-04-21 10:12:45 +08:00
|
|
|
/*
|
|
|
|
* Ordering here is somewhat tricky; we need sc_boot() first, since that
|
|
|
|
* determines what the size classes will be, and then
|
|
|
|
* malloc_conf_init(), since any slab size tweaking will need to be done
|
2019-09-21 07:18:41 +08:00
|
|
|
* before sz_boot and bin_info_boot, which assume that the values they
|
|
|
|
* read out of sc_data_global are final.
|
2018-04-21 10:12:45 +08:00
|
|
|
*/
|
2018-07-20 08:08:10 +08:00
|
|
|
sc_boot(&sc_data);
|
2018-11-21 05:51:32 +08:00
|
|
|
unsigned bin_shard_sizes[SC_NBINS];
|
|
|
|
bin_shard_sizes_boot(bin_shard_sizes);
|
2018-09-13 06:32:16 +08:00
|
|
|
/*
|
|
|
|
* prof_boot0 only initializes opt_prof_prefix. We need to do it before
|
|
|
|
* we parse malloc_conf options, in case malloc_conf parsing overwrites
|
|
|
|
* it.
|
|
|
|
*/
|
|
|
|
if (config_prof) {
|
|
|
|
prof_boot0();
|
|
|
|
}
|
2018-11-21 05:51:32 +08:00
|
|
|
malloc_conf_init(&sc_data, bin_shard_sizes);
|
2021-02-10 14:24:35 +08:00
|
|
|
sz_boot(&sc_data, opt_cache_oblivious);
|
2019-09-21 07:18:41 +08:00
|
|
|
bin_info_boot(&sc_data, bin_shard_sizes);
|
2017-12-15 04:46:39 +08:00
|
|
|
|
2010-01-04 04:10:42 +08:00
|
|
|
if (opt_stats_print) {
|
2009-06-23 03:08:42 +08:00
|
|
|
/* Print statistics at exit. */
|
2010-01-30 06:30:41 +08:00
|
|
|
if (atexit(stats_print_atexit) != 0) {
|
2010-03-04 09:45:38 +08:00
|
|
|
malloc_write("<jemalloc>: Error in atexit()\n");
|
2017-01-16 08:56:30 +08:00
|
|
|
if (opt_abort) {
|
2010-01-30 06:30:41 +08:00
|
|
|
abort();
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
2010-01-30 06:30:41 +08:00
|
|
|
}
|
2009-06-23 03:08:42 +08:00
|
|
|
}
|
2020-01-14 14:29:17 +08:00
|
|
|
|
|
|
|
if (stats_boot()) {
|
|
|
|
return true;
|
|
|
|
}
|
2017-04-17 07:23:32 +08:00
|
|
|
if (pages_boot()) {
|
|
|
|
return true;
|
|
|
|
}
|
2017-01-16 08:56:30 +08:00
|
|
|
if (base_boot(TSDN_NULL)) {
|
2017-01-20 10:15:45 +08:00
|
|
|
return true;
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
2020-02-18 04:24:09 +08:00
|
|
|
/* emap_global is static, hence zeroed. */
|
2020-03-15 01:49:34 +08:00
|
|
|
if (emap_init(&arena_emap_global, b0get(), /* zeroed */ true)) {
|
2020-01-28 05:55:46 +08:00
|
|
|
return true;
|
|
|
|
}
|
2017-01-16 08:56:30 +08:00
|
|
|
if (extent_boot()) {
|
2017-01-20 10:15:45 +08:00
|
|
|
return true;
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
|
|
|
if (ctl_boot()) {
|
2017-01-20 10:15:45 +08:00
|
|
|
return true;
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
|
|
|
if (config_prof) {
|
2012-02-11 12:22:09 +08:00
|
|
|
prof_boot1();
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
2021-05-08 04:54:26 +08:00
|
|
|
if (opt_hpa && !hpa_supported()) {
|
|
|
|
malloc_printf("<jemalloc>: HPA not supported in the current "
|
|
|
|
"configuration; %s.",
|
|
|
|
opt_abort_conf ? "aborting" : "disabling");
|
|
|
|
if (opt_abort_conf) {
|
|
|
|
malloc_abort_invalid_conf();
|
|
|
|
} else {
|
|
|
|
opt_hpa = false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (arena_boot(&sc_data, b0get(), opt_hpa)) {
|
|
|
|
return true;
|
|
|
|
}
|
2020-02-18 06:09:29 +08:00
|
|
|
if (tcache_boot(TSDN_NULL, b0get())) {
|
2017-01-20 10:15:45 +08:00
|
|
|
return true;
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
2017-05-16 06:38:15 +08:00
|
|
|
if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS,
|
|
|
|
malloc_mutex_rank_exclusive)) {
|
2017-01-20 10:15:45 +08:00
|
|
|
return true;
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
2018-04-10 10:11:46 +08:00
|
|
|
hook_boot();
|
2009-06-24 10:01:18 +08:00
|
|
|
/*
|
|
|
|
* Create enough scaffolding to allow recursive allocation in
|
|
|
|
* malloc_ncpus().
|
|
|
|
*/
|
2016-02-25 15:58:10 +08:00
|
|
|
narenas_auto = 1;
|
2018-06-02 06:06:36 +08:00
|
|
|
manual_arena_base = narenas_auto + 1;
|
2012-10-12 04:53:15 +08:00
|
|
|
memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
|
2009-06-24 10:01:18 +08:00
|
|
|
/*
|
|
|
|
* Initialize one arena here. The rest are lazily created in
|
2014-10-08 14:14:57 +08:00
|
|
|
* arena_choose_hard().
|
2009-06-24 10:01:18 +08:00
|
|
|
*/
|
2021-08-23 20:03:35 +08:00
|
|
|
if (arena_init(TSDN_NULL, 0, &arena_config_default) == NULL) {
|
2017-01-20 10:15:45 +08:00
|
|
|
return true;
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
2017-02-03 09:02:05 +08:00
|
|
|
a0 = arena_get(TSDN_NULL, 0, false);
|
2020-08-15 04:36:41 +08:00
|
|
|
|
2020-11-10 05:49:30 +08:00
|
|
|
if (opt_hpa && !hpa_supported()) {
|
|
|
|
malloc_printf("<jemalloc>: HPA not supported in the current "
|
|
|
|
"configuration; %s.",
|
|
|
|
opt_abort_conf ? "aborting" : "disabling");
|
2020-08-15 04:36:41 +08:00
|
|
|
if (opt_abort_conf) {
|
2020-11-10 05:49:30 +08:00
|
|
|
malloc_abort_invalid_conf();
|
2020-08-15 04:36:41 +08:00
|
|
|
} else {
|
|
|
|
opt_hpa = false;
|
|
|
|
}
|
|
|
|
} else if (opt_hpa) {
|
2021-06-05 07:07:27 +08:00
|
|
|
hpa_shard_opts_t hpa_shard_opts = opt_hpa_opts;
|
|
|
|
hpa_shard_opts.deferral_allowed = background_thread_enabled();
|
|
|
|
if (pa_shard_enable_hpa(TSDN_NULL, &a0->pa_shard,
|
2021-05-08 04:54:26 +08:00
|
|
|
&hpa_shard_opts, &opt_hpa_sec_opts)) {
|
2020-08-15 04:36:41 +08:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-01-21 07:37:51 +08:00
|
|
|
malloc_init_state = malloc_init_a0_initialized;
|
2016-05-08 03:42:31 +08:00
|
|
|
|
2017-01-20 10:15:45 +08:00
|
|
|
return false;
|
2015-01-21 07:37:51 +08:00
|
|
|
}
|
2009-06-24 10:01:18 +08:00
|
|
|
|
2015-01-21 07:37:51 +08:00
|
|
|
static bool
|
2017-01-16 08:56:30 +08:00
|
|
|
malloc_init_hard_a0(void) {
|
2015-01-21 07:37:51 +08:00
|
|
|
bool ret;
|
2012-03-22 09:33:03 +08:00
|
|
|
|
2016-05-11 13:21:10 +08:00
|
|
|
malloc_mutex_lock(TSDN_NULL, &init_lock);
|
|
|
|
ret = malloc_init_hard_a0_locked();
|
|
|
|
malloc_mutex_unlock(TSDN_NULL, &init_lock);
|
2017-01-20 10:15:45 +08:00
|
|
|
return ret;
|
2015-01-21 07:37:51 +08:00
|
|
|
}
|
|
|
|
|
2016-04-14 14:36:15 +08:00
|
|
|
/* Initialize data structures which may trigger recursive allocation. */
|
2016-01-12 03:05:00 +08:00
|
|
|
static bool
|
2017-01-16 08:56:30 +08:00
|
|
|
malloc_init_hard_recursible(void) {
|
2015-01-21 07:37:51 +08:00
|
|
|
malloc_init_state = malloc_init_recursible;
|
2016-01-12 03:05:00 +08:00
|
|
|
|
2009-06-24 10:01:18 +08:00
|
|
|
ncpus = malloc_ncpus();
|
2013-10-22 05:11:09 +08:00
|
|
|
|
2016-11-18 07:14:57 +08:00
|
|
|
#if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \
|
|
|
|
&& !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \
|
|
|
|
!defined(__native_client__))
|
2016-01-12 03:05:00 +08:00
|
|
|
/* LinuxThreads' pthread_atfork() allocates. */
|
2013-10-22 05:11:09 +08:00
|
|
|
if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
|
|
|
|
jemalloc_postfork_child) != 0) {
|
|
|
|
malloc_write("<jemalloc>: Error in pthread_atfork()\n");
|
2017-01-16 08:56:30 +08:00
|
|
|
if (opt_abort) {
|
2013-10-22 05:11:09 +08:00
|
|
|
abort();
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
2017-01-20 10:15:45 +08:00
|
|
|
return true;
|
2013-10-22 05:11:09 +08:00
|
|
|
}
|
|
|
|
#endif
|
2016-01-12 03:05:00 +08:00
|
|
|
|
2017-06-01 07:45:14 +08:00
|
|
|
if (background_thread_boot0()) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2017-01-20 10:15:45 +08:00
|
|
|
return false;
|
2015-01-21 07:37:51 +08:00
|
|
|
}
|
2009-06-24 10:01:18 +08:00
|
|
|
|
2017-02-03 09:02:05 +08:00
|
|
|
static unsigned
|
|
|
|
malloc_narenas_default(void) {
|
|
|
|
assert(ncpus > 0);
|
|
|
|
/*
|
|
|
|
* For SMP systems, create more than one arena per CPU by
|
|
|
|
* default.
|
|
|
|
*/
|
|
|
|
if (ncpus > 1) {
|
2020-12-02 05:13:55 +08:00
|
|
|
fxp_t fxp_ncpus = FXP_INIT_INT(ncpus);
|
|
|
|
fxp_t goal = fxp_mul(fxp_ncpus, opt_narenas_ratio);
|
|
|
|
uint32_t int_goal = fxp_round_nearest(goal);
|
|
|
|
if (int_goal == 0) {
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
return int_goal;
|
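/*
 * Worked example (assuming the default opt_narenas_ratio of 4): on an
 * 8-CPU machine, goal = fxp_mul(FXP_INIT_INT(8), FXP_INIT_INT(4)), and
 * fxp_round_nearest(goal) yields 32 arenas.
 */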
2017-02-03 09:02:05 +08:00
|
|
|
} else {
|
|
|
|
return 1;
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
2017-02-03 09:02:05 +08:00
|
|
|
}
|
2012-04-03 23:47:07 +08:00
|
|
|
|
2017-06-01 07:45:14 +08:00
|
|
|
static percpu_arena_mode_t
|
|
|
|
percpu_arena_as_initialized(percpu_arena_mode_t mode) {
|
|
|
|
assert(!malloc_initialized());
|
|
|
|
assert(mode <= percpu_arena_disabled);
|
|
|
|
|
|
|
|
if (mode != percpu_arena_disabled) {
|
|
|
|
mode += percpu_arena_mode_enabled_base;
|
|
|
|
}
|
|
|
|
|
|
|
|
return mode;
|
|
|
|
}
|
|
|
|
|
2017-02-03 09:02:05 +08:00
|
|
|
static bool
|
|
|
|
malloc_init_narenas(void) {
|
|
|
|
assert(ncpus > 0);
|
|
|
|
|
2017-06-01 07:45:14 +08:00
|
|
|
if (opt_percpu_arena != percpu_arena_disabled) {
|
2017-02-03 09:02:05 +08:00
|
|
|
if (!have_percpu_arena || malloc_getcpu() < 0) {
|
2017-06-01 07:45:14 +08:00
|
|
|
opt_percpu_arena = percpu_arena_disabled;
|
2017-02-03 09:02:05 +08:00
|
|
|
malloc_printf("<jemalloc>: perCPU arena getcpu() not "
|
|
|
|
"available. Setting narenas to %u.\n", opt_narenas ?
|
|
|
|
opt_narenas : malloc_narenas_default());
|
|
|
|
if (opt_abort) {
|
|
|
|
abort();
|
|
|
|
}
|
2017-01-16 08:56:30 +08:00
|
|
|
} else {
|
2017-05-14 06:20:48 +08:00
|
|
|
if (ncpus >= MALLOCX_ARENA_LIMIT) {
|
2017-02-03 09:02:05 +08:00
|
|
|
malloc_printf("<jemalloc>: narenas w/ percpu"
|
|
|
|
"arena beyond limit (%d)\n", ncpus);
|
|
|
|
if (opt_abort) {
|
|
|
|
abort();
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
2017-06-01 07:45:14 +08:00
|
|
|
/* NB: opt_percpu_arena isn't fully initialized yet. */
|
|
|
|
if (percpu_arena_as_initialized(opt_percpu_arena) ==
|
|
|
|
per_phycpu_arena && ncpus % 2 != 0) {
|
2017-02-03 09:02:05 +08:00
|
|
|
malloc_printf("<jemalloc>: invalid "
|
|
|
|
"configuration -- per physical CPU arena "
|
|
|
|
"with odd number (%u) of CPUs (no hyper "
|
|
|
|
"threading?).\n", ncpus);
|
|
|
|
if (opt_abort)
|
|
|
|
abort();
|
|
|
|
}
|
2017-06-01 07:45:14 +08:00
|
|
|
unsigned n = percpu_arena_ind_limit(
|
|
|
|
percpu_arena_as_initialized(opt_percpu_arena));
|
2017-02-03 09:02:05 +08:00
|
|
|
if (opt_narenas < n) {
|
|
|
|
/*
|
|
|
|
* If narenas is specified with percpu_arena
|
|
|
|
* enabled, actual narenas is set as the greater
|
|
|
|
* of the two. percpu_arena_choose will be free
|
|
|
|
* to use any of the arenas based on CPU
|
|
|
|
* id. This is conservative (at a small cost)
|
|
|
|
* but ensures correctness.
|
|
|
|
*
|
|
|
|
* If for some reason the ncpus determined at
|
|
|
|
* boot is not the actual number (e.g. because
|
|
|
|
* of affinity setting from numactl), reserving
|
|
|
|
* narenas this way provides a workaround for
|
|
|
|
* percpu_arena.
|
|
|
|
*/
|
|
|
|
opt_narenas = n;
|
|
|
|
}
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
2009-06-23 03:08:42 +08:00
|
|
|
}
|
2017-02-03 09:02:05 +08:00
|
|
|
if (opt_narenas == 0) {
|
|
|
|
opt_narenas = malloc_narenas_default();
|
|
|
|
}
|
|
|
|
assert(opt_narenas > 0);
|
|
|
|
|
2012-10-12 04:53:15 +08:00
|
|
|
narenas_auto = opt_narenas;
|
2010-10-24 09:37:06 +08:00
|
|
|
/*
|
2016-02-25 15:58:10 +08:00
|
|
|
* Limit the number of arenas to the indexing range of MALLOCX_ARENA().
|
2010-10-24 09:37:06 +08:00
|
|
|
*/
|
2017-05-14 06:20:48 +08:00
|
|
|
if (narenas_auto >= MALLOCX_ARENA_LIMIT) {
|
|
|
|
narenas_auto = MALLOCX_ARENA_LIMIT - 1;
|
2012-03-07 06:57:45 +08:00
|
|
|
malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
|
2012-10-12 04:53:15 +08:00
|
|
|
narenas_auto);
|
2009-06-23 03:08:42 +08:00
|
|
|
}
|
2016-02-25 15:58:10 +08:00
|
|
|
narenas_total_set(narenas_auto);
|
2018-05-22 04:33:48 +08:00
|
|
|
if (arena_init_huge()) {
|
|
|
|
narenas_total_inc();
|
|
|
|
}
|
2018-06-02 06:06:36 +08:00
|
|
|
manual_arena_base = narenas_total_get();
|
2009-06-23 03:08:42 +08:00
|
|
|
|
2017-02-03 09:02:05 +08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2017-06-01 07:45:14 +08:00
|
|
|
static void
|
|
|
|
malloc_init_percpu(void) {
|
|
|
|
opt_percpu_arena = percpu_arena_as_initialized(opt_percpu_arena);
|
2017-03-18 03:42:33 +08:00
|
|
|
}
|
|
|
|
|
2017-02-03 09:02:05 +08:00
|
|
|
static bool
|
|
|
|
malloc_init_hard_finish(void) {
|
2017-06-01 07:45:14 +08:00
|
|
|
if (malloc_mutex_boot()) {
|
2017-01-20 10:15:45 +08:00
|
|
|
return true;
|
2017-06-01 07:45:14 +08:00
|
|
|
}
|
2015-01-21 07:37:51 +08:00
|
|
|
|
|
|
|
malloc_init_state = malloc_init_initialized;
|
2015-10-28 06:12:10 +08:00
|
|
|
malloc_slow_flag_init();
|
|
|
|
|
2017-01-20 10:15:45 +08:00
|
|
|
return false;
|
2015-01-21 07:37:51 +08:00
|
|
|
}
|
|
|
|
|
2017-06-01 06:21:10 +08:00
|
|
|
static void
|
|
|
|
malloc_init_hard_cleanup(tsdn_t *tsdn, bool reentrancy_set) {
|
|
|
|
malloc_mutex_assert_owner(tsdn, &init_lock);
|
|
|
|
malloc_mutex_unlock(tsdn, &init_lock);
|
|
|
|
if (reentrancy_set) {
|
|
|
|
assert(!tsdn_null(tsdn));
|
|
|
|
tsd_t *tsd = tsdn_tsd(tsdn);
|
|
|
|
assert(tsd_reentrancy_level_get(tsd) > 0);
|
|
|
|
post_reentrancy(tsd);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-01-21 07:37:51 +08:00
|
|
|
static bool
|
2017-01-16 08:56:30 +08:00
|
|
|
malloc_init_hard(void) {
|
2016-05-11 13:21:10 +08:00
|
|
|
tsd_t *tsd;
|
2015-01-21 07:37:51 +08:00
|
|
|
|
2015-09-03 14:48:48 +08:00
|
|
|
#if defined(_WIN32) && _WIN32_WINNT < 0x0600
|
|
|
|
_init_init_lock();
|
|
|
|
#endif
|
2016-05-11 13:21:10 +08:00
|
|
|
malloc_mutex_lock(TSDN_NULL, &init_lock);
|
2017-06-01 06:21:10 +08:00
|
|
|
|
|
|
|
#define UNLOCK_RETURN(tsdn, ret, reentrancy) \
|
|
|
|
malloc_init_hard_cleanup(tsdn, reentrancy); \
|
|
|
|
return ret;
|
|
|
|
|
2015-01-21 07:37:51 +08:00
|
|
|
if (!malloc_init_hard_needed()) {
|
2017-06-01 06:21:10 +08:00
|
|
|
UNLOCK_RETURN(TSDN_NULL, false, false)
|
2015-01-21 07:37:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (malloc_init_state != malloc_init_a0_initialized &&
|
2016-05-11 13:21:10 +08:00
|
|
|
malloc_init_hard_a0_locked()) {
|
2017-06-01 06:21:10 +08:00
|
|
|
UNLOCK_RETURN(TSDN_NULL, true, false)
|
2015-01-21 07:37:51 +08:00
|
|
|
}
|
2016-01-12 03:05:00 +08:00
|
|
|
|
2016-05-11 13:21:10 +08:00
|
|
|
malloc_mutex_unlock(TSDN_NULL, &init_lock);
|
|
|
|
/* Recursive allocation relies on functional tsd. */
|
|
|
|
tsd = malloc_tsd_boot0();
|
2017-01-16 08:56:30 +08:00
|
|
|
if (tsd == NULL) {
|
2017-01-20 10:15:45 +08:00
|
|
|
return true;
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
|
|
|
if (malloc_init_hard_recursible()) {
|
2017-01-20 10:15:45 +08:00
|
|
|
return true;
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
2016-01-12 03:05:00 +08:00
|
|
|
|
2017-06-01 06:21:10 +08:00
|
|
|
malloc_mutex_lock(tsd_tsdn(tsd), &init_lock);
|
|
|
|
/* Set reentrancy level to 1 during init. */
|
2017-06-23 07:18:30 +08:00
|
|
|
pre_reentrancy(tsd, NULL);
|
2017-03-18 03:42:33 +08:00
|
|
|
/* Initialize narenas before prof_boot2 (for allocation). */
|
2020-02-18 06:13:38 +08:00
|
|
|
if (malloc_init_narenas()
|
|
|
|
|| background_thread_boot1(tsd_tsdn(tsd), b0get())) {
|
2017-06-01 06:21:10 +08:00
|
|
|
UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
|
2017-02-03 09:02:05 +08:00
|
|
|
}
|
2020-02-18 06:09:29 +08:00
|
|
|
if (config_prof && prof_boot2(tsd, b0get())) {
|
2017-06-01 06:21:10 +08:00
|
|
|
UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
|
2015-01-21 07:37:51 +08:00
|
|
|
}
|
|
|
|
|
2017-06-01 07:45:14 +08:00
|
|
|
malloc_init_percpu();
|
|
|
|
|
2017-02-03 09:02:05 +08:00
|
|
|
if (malloc_init_hard_finish()) {
|
2017-06-01 06:21:10 +08:00
|
|
|
UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
|
2015-01-21 07:37:51 +08:00
|
|
|
}
|
2017-06-01 06:21:10 +08:00
|
|
|
post_reentrancy(tsd);
|
2016-05-11 13:21:10 +08:00
|
|
|
malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
|
2017-03-18 03:42:33 +08:00
|
|
|
|
2017-10-05 07:39:33 +08:00
|
|
|
witness_assert_lockless(witness_tsd_tsdn(
|
|
|
|
tsd_witness_tsdp_get_unsafe(tsd)));
|
2017-06-01 06:21:10 +08:00
|
|
|
malloc_tsd_boot1();
|
2017-03-18 03:42:33 +08:00
|
|
|
/* Update TSD after tsd_boot1. */
|
|
|
|
tsd = tsd_fetch();
|
|
|
|
if (opt_background_thread) {
|
|
|
|
assert(have_background_thread);
|
|
|
|
/*
|
|
|
|
* Need to finish init & unlock first before creating background
|
2017-10-05 07:39:33 +08:00
|
|
|
* threads (pthread_create depends on malloc). ctl_init (which
|
|
|
|
* sets isthreaded) needs to be called without holding any lock.
|
2017-03-18 03:42:33 +08:00
|
|
|
*/
|
2017-10-05 07:39:33 +08:00
|
|
|
background_thread_ctl_init(tsd_tsdn(tsd));
|
2019-01-15 06:16:09 +08:00
|
|
|
if (background_thread_create(tsd, 0)) {
|
2017-03-18 03:42:33 +08:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
2017-06-01 06:21:10 +08:00
|
|
|
#undef UNLOCK_RETURN
|
2017-01-20 10:15:45 +08:00
|
|
|
return false;
|
2009-06-23 03:08:42 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2010-01-17 01:53:50 +08:00
|
|
|
* End initialization functions.
|
2009-06-23 03:08:42 +08:00
|
|
|
*/
|
|
|
|
/******************************************************************************/
|
|
|
|
/*
|
2017-01-19 06:04:24 +08:00
|
|
|
* Begin allocation-path internal functions and data structures.
|
2009-06-23 03:08:42 +08:00
|
|
|
*/
|
|
|
|
|
2017-01-19 06:04:24 +08:00
|
|
|
/*
|
|
|
|
* Settings determined by the documented behavior of the allocation functions.
|
|
|
|
*/
|
|
|
|
typedef struct static_opts_s static_opts_t;
|
|
|
|
struct static_opts_s {
|
2017-02-04 07:33:37 +08:00
|
|
|
/* Whether or not allocation size may overflow. */
|
|
|
|
bool may_overflow;
|
2018-10-09 01:13:02 +08:00
|
|
|
|
2019-07-17 05:35:53 +08:00
|
|
|
/*
|
|
|
|
* Whether or not allocations (with alignment) of size 0 should be
|
|
|
|
* treated as size 1.
|
|
|
|
*/
|
|
|
|
bool bump_empty_aligned_alloc;
|
2017-01-19 06:04:24 +08:00
|
|
|
/*
|
|
|
|
* Whether to assert that allocations are not of size 0 (after any
|
|
|
|
* bumping).
|
|
|
|
*/
|
|
|
|
bool assert_nonempty_alloc;
|
2014-01-13 07:05:44 +08:00
|
|
|
|
2017-01-19 06:04:24 +08:00
|
|
|
/*
|
|
|
|
* Whether or not to modify the 'result' argument to malloc in case of
|
|
|
|
* error.
|
|
|
|
*/
|
|
|
|
bool null_out_result_on_error;
|
|
|
|
/* Whether to set errno when we encounter an error condition. */
|
|
|
|
bool set_errno_on_error;
|
2014-01-13 07:05:44 +08:00
|
|
|
|
2017-01-19 06:04:24 +08:00
|
|
|
/*
|
|
|
|
* The minimum valid alignment for functions requesting aligned storage.
|
|
|
|
*/
|
|
|
|
size_t min_alignment;
|
2014-01-13 07:05:44 +08:00
|
|
|
|
2017-01-19 06:04:24 +08:00
|
|
|
/* The error string to use if we oom. */
|
|
|
|
const char *oom_string;
|
|
|
|
/* The error string to use if the passed-in alignment is invalid. */
|
|
|
|
const char *invalid_alignment_string;
|
2014-01-13 07:05:44 +08:00
|
|
|
|
2017-01-19 06:04:24 +08:00
|
|
|
/*
|
|
|
|
* False if we're configured to skip some time-consuming operations.
|
|
|
|
*
|
|
|
|
* This isn't really a malloc "behavior", but it acts as a useful
|
|
|
|
* summary of several other static (or at least, static after program
|
|
|
|
* initialization) options.
|
|
|
|
*/
|
|
|
|
bool slow;
|
2017-11-16 01:26:49 +08:00
|
|
|
/*
|
2018-10-05 19:11:21 +08:00
|
|
|
* Return size.
|
2017-11-16 01:26:49 +08:00
|
|
|
*/
|
|
|
|
bool usize;
|
2017-01-19 06:04:24 +08:00
|
|
|
};
|
2014-01-13 07:05:44 +08:00
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
JEMALLOC_ALWAYS_INLINE void
|
2017-01-19 06:04:24 +08:00
|
|
|
static_opts_init(static_opts_t *static_opts) {
|
2017-02-04 07:33:37 +08:00
|
|
|
static_opts->may_overflow = false;
|
2019-07-17 05:35:53 +08:00
|
|
|
static_opts->bump_empty_aligned_alloc = false;
|
2017-01-19 06:04:24 +08:00
|
|
|
static_opts->assert_nonempty_alloc = false;
|
|
|
|
static_opts->null_out_result_on_error = false;
|
|
|
|
static_opts->set_errno_on_error = false;
|
|
|
|
static_opts->min_alignment = 0;
|
|
|
|
static_opts->oom_string = "";
|
|
|
|
static_opts->invalid_alignment_string = "";
|
|
|
|
static_opts->slow = false;
|
2017-11-16 01:26:49 +08:00
|
|
|
static_opts->usize = false;
|
2014-01-13 07:05:44 +08:00
|
|
|
}
|
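/*
 * Sketch of how an entry point might fill these in (hypothetical values,
 * not the actual per-API settings elsewhere in this file): an
 * aligned-allocation API would set min_alignment to sizeof(void *), point
 * oom_string/invalid_alignment_string at its own messages, and leave
 * set_errno_on_error false if it reports errors via return codes rather
 * than errno.
 */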
|
|
|
|
2016-05-07 03:16:00 +08:00
|
|
|
/*
|
2017-01-19 06:04:24 +08:00
|
|
|
* These correspond to the macros in jemalloc/jemalloc_macros.h. Broadly, we
|
|
|
|
* should have one constant here per magic value there. Note however that the
|
|
|
|
* representations need not be related.
|
2016-05-07 03:16:00 +08:00
|
|
|
*/
|
2017-01-19 06:04:24 +08:00
|
|
|
#define TCACHE_IND_NONE ((unsigned)-1)
|
|
|
|
#define TCACHE_IND_AUTOMATIC ((unsigned)-2)
|
|
|
|
#define ARENA_IND_AUTOMATIC ((unsigned)-1)
|
|
|
|
|
|
|
|
typedef struct dynamic_opts_s dynamic_opts_t;
|
|
|
|
struct dynamic_opts_s {
|
|
|
|
void **result;
|
2017-11-16 01:26:49 +08:00
|
|
|
size_t usize;
|
2017-01-19 06:04:24 +08:00
|
|
|
size_t num_items;
|
|
|
|
size_t item_size;
|
|
|
|
size_t alignment;
|
|
|
|
bool zero;
|
|
|
|
unsigned tcache_ind;
|
|
|
|
unsigned arena_ind;
|
|
|
|
};
|
2014-04-23 09:41:15 +08:00
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
JEMALLOC_ALWAYS_INLINE void
|
2017-01-19 06:04:24 +08:00
|
|
|
dynamic_opts_init(dynamic_opts_t *dynamic_opts) {
|
|
|
|
dynamic_opts->result = NULL;
|
2017-11-16 01:26:49 +08:00
|
|
|
dynamic_opts->usize = 0;
|
2017-01-19 06:04:24 +08:00
|
|
|
dynamic_opts->num_items = 0;
|
|
|
|
dynamic_opts->item_size = 0;
|
|
|
|
dynamic_opts->alignment = 0;
|
|
|
|
dynamic_opts->zero = false;
|
|
|
|
dynamic_opts->tcache_ind = TCACHE_IND_AUTOMATIC;
|
|
|
|
dynamic_opts->arena_ind = ARENA_IND_AUTOMATIC;
|
|
|
|
}
|
2016-04-14 14:36:15 +08:00
|
|
|
|
2020-05-14 02:16:07 +08:00
|
|
|
/*
|
|
|
|
* ind parameter is optional and is only checked and filled if alignment == 0;
|
|
|
|
* return true if result is out of range.
|
|
|
|
*/
|
|
|
|
JEMALLOC_ALWAYS_INLINE bool
|
|
|
|
aligned_usize_get(size_t size, size_t alignment, size_t *usize, szind_t *ind,
|
|
|
|
bool bump_empty_aligned_alloc) {
|
|
|
|
assert(usize != NULL);
|
|
|
|
if (alignment == 0) {
|
|
|
|
if (ind != NULL) {
|
|
|
|
*ind = sz_size2index(size);
|
|
|
|
if (unlikely(*ind >= SC_NSIZES)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
*usize = sz_index2size(*ind);
|
|
|
|
assert(*usize > 0 && *usize <= SC_LARGE_MAXCLASS);
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
*usize = sz_s2u(size);
|
|
|
|
} else {
|
|
|
|
if (bump_empty_aligned_alloc && unlikely(size == 0)) {
|
|
|
|
size = 1;
|
|
|
|
}
|
|
|
|
*usize = sz_sa2u(size, alignment);
|
|
|
|
}
|
|
|
|
if (unlikely(*usize == 0 || *usize > SC_LARGE_MAXCLASS)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2020-05-14 02:19:09 +08:00
|
|
|
JEMALLOC_ALWAYS_INLINE bool
|
|
|
|
zero_get(bool guarantee, bool slow) {
|
|
|
|
if (config_fill && slow && unlikely(opt_zero)) {
|
|
|
|
return true;
|
|
|
|
} else {
|
|
|
|
return guarantee;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-05-14 05:06:43 +08:00
|
|
|
JEMALLOC_ALWAYS_INLINE tcache_t *
|
|
|
|
tcache_get_from_ind(tsd_t *tsd, unsigned tcache_ind, bool slow, bool is_alloc) {
|
2017-01-19 06:04:24 +08:00
|
|
|
tcache_t *tcache;
|
2020-05-14 05:06:43 +08:00
|
|
|
if (tcache_ind == TCACHE_IND_AUTOMATIC) {
|
|
|
|
if (likely(!slow)) {
|
2017-04-12 14:13:45 +08:00
|
|
|
/* Getting tcache ptr unconditionally. */
|
|
|
|
tcache = tsd_tcachep_get(tsd);
|
|
|
|
assert(tcache == tcache_get(tsd));
|
2020-05-14 05:06:43 +08:00
|
|
|
} else if (is_alloc ||
|
|
|
|
likely(tsd_reentrancy_level_get(tsd) == 0)) {
|
2017-04-12 14:13:45 +08:00
|
|
|
tcache = tcache_get(tsd);
|
2020-05-14 05:06:43 +08:00
|
|
|
} else {
|
|
|
|
tcache = NULL;
|
2017-04-12 14:13:45 +08:00
|
|
|
}
|
2017-01-19 06:04:24 +08:00
|
|
|
} else {
|
2020-05-14 05:06:43 +08:00
|
|
|
/*
|
|
|
|
* Should not specify tcache on deallocation path when being
|
|
|
|
* reentrant.
|
|
|
|
*/
|
|
|
|
assert(is_alloc || tsd_reentrancy_level_get(tsd) == 0 ||
|
|
|
|
tsd_state_nocleanup(tsd));
|
|
|
|
if (tcache_ind == TCACHE_IND_NONE) {
|
|
|
|
tcache = NULL;
|
|
|
|
} else {
|
|
|
|
tcache = tcaches_get(tsd, tcache_ind);
|
|
|
|
}
|
2017-01-19 06:04:24 +08:00
|
|
|
}
|
2020-05-14 05:06:43 +08:00
|
|
|
return tcache;
|
|
|
|
}
|
|
|
|
|
2020-05-14 05:49:41 +08:00
|
|
|
/* Return true if a manual arena is specified and arena_get() OOMs. */
|
|
|
|
JEMALLOC_ALWAYS_INLINE bool
|
|
|
|
arena_get_from_ind(tsd_t *tsd, unsigned arena_ind, arena_t **arena_p) {
|
|
|
|
if (arena_ind == ARENA_IND_AUTOMATIC) {
|
|
|
|
/*
|
|
|
|
* In case of automatic arena management, we defer arena
|
|
|
|
* computation until as late as we can, hoping to fill the
|
|
|
|
* allocation out of the tcache.
|
|
|
|
*/
|
|
|
|
*arena_p = NULL;
|
|
|
|
} else {
|
|
|
|
*arena_p = arena_get(tsd_tsdn(tsd), arena_ind, true);
|
|
|
|
if (unlikely(*arena_p == NULL) && arena_ind >= narenas_auto) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2020-05-14 05:06:43 +08:00
|
|
|
/* ind is ignored if dopts->alignment > 0. */
|
|
|
|
JEMALLOC_ALWAYS_INLINE void *
|
|
|
|
imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
|
|
|
|
size_t size, size_t usize, szind_t ind) {
|
|
|
|
/* Fill in the tcache. */
|
|
|
|
tcache_t *tcache = tcache_get_from_ind(tsd, dopts->tcache_ind,
|
|
|
|
sopts->slow, /* is_alloc */ true);
|
2015-10-28 06:12:10 +08:00
|
|
|
|
2017-01-19 06:04:24 +08:00
|
|
|
/* Fill in the arena. */
|
2020-05-14 05:49:41 +08:00
|
|
|
arena_t *arena;
|
|
|
|
if (arena_get_from_ind(tsd, dopts->arena_ind, &arena)) {
|
|
|
|
return NULL;
|
2015-10-28 06:12:10 +08:00
|
|
|
}
|
2014-04-23 09:41:15 +08:00
|
|
|
|
2017-01-19 06:04:24 +08:00
|
|
|
if (unlikely(dopts->alignment != 0)) {
|
|
|
|
return ipalloct(tsd_tsdn(tsd), usize, dopts->alignment,
|
|
|
|
dopts->zero, tcache, arena);
|
|
|
|
}
|
2014-04-23 09:41:15 +08:00
|
|
|
|
2017-01-19 06:04:24 +08:00
|
|
|
return iallocztm(tsd_tsdn(tsd), size, ind, dopts->zero, tcache, false,
|
|
|
|
arena, sopts->slow);
|
2015-10-28 06:12:10 +08:00
|
|
|
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
JEMALLOC_ALWAYS_INLINE void *
|
2017-01-19 06:04:24 +08:00
|
|
|
imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
|
|
|
|
size_t usize, szind_t ind) {
|
|
|
|
void *ret;
|
2016-05-11 13:21:10 +08:00
|
|
|
|
2017-01-19 06:04:24 +08:00
|
|
|
/*
|
|
|
|
* For small allocations, sampling bumps the usize. If so, we allocate
|
|
|
|
* from the ind_large bucket.
|
|
|
|
*/
|
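/*
 * Note: the bump below keeps sampled allocations backed by page-aligned
 * extents -- see prof_sample_align() above and the prof_sample_aligned()
 * assertion at the end -- while arena_prof_promote() records the
 * originally requested usize for the promoted allocation.
 */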
|
|
|
szind_t ind_large;
|
|
|
|
size_t bumped_usize = usize;
|
|
|
|
|
2020-01-29 09:32:45 +08:00
|
|
|
dopts->alignment = prof_sample_align(dopts->alignment);
|
2018-07-12 07:05:58 +08:00
|
|
|
if (usize <= SC_SMALL_MAXCLASS) {
|
2017-12-15 04:46:39 +08:00
|
|
|
assert(((dopts->alignment == 0) ?
|
2018-07-12 07:05:58 +08:00
|
|
|
sz_s2u(SC_LARGE_MINCLASS) :
|
|
|
|
sz_sa2u(SC_LARGE_MINCLASS, dopts->alignment))
|
|
|
|
== SC_LARGE_MINCLASS);
|
|
|
|
ind_large = sz_size2index(SC_LARGE_MINCLASS);
|
|
|
|
bumped_usize = sz_s2u(SC_LARGE_MINCLASS);
|
2017-01-19 06:04:24 +08:00
|
|
|
ret = imalloc_no_sample(sopts, dopts, tsd, bumped_usize,
|
|
|
|
bumped_usize, ind_large);
|
|
|
|
if (unlikely(ret == NULL)) {
|
|
|
|
return NULL;
|
2015-10-28 06:12:10 +08:00
|
|
|
}
|
2017-03-21 02:00:07 +08:00
|
|
|
arena_prof_promote(tsd_tsdn(tsd), ret, usize);
|
2017-01-19 06:04:24 +08:00
|
|
|
} else {
|
|
|
|
ret = imalloc_no_sample(sopts, dopts, tsd, usize, usize, ind);
|
2015-10-28 06:12:10 +08:00
|
|
|
}
|
2020-01-29 09:32:45 +08:00
|
|
|
assert(prof_sample_aligned(ret));
|
2017-01-19 06:04:24 +08:00
|
|
|
|
|
|
|
return ret;
|
2014-04-23 09:41:15 +08:00
|
|
|
}
|
2014-01-13 07:05:44 +08:00
|
|
|
|
2017-01-19 06:04:24 +08:00
|
|
|
/*
|
|
|
|
* Returns true if the allocation will overflow, and false otherwise. Sets
|
|
|
|
* *size to the product either way.
|
|
|
|
*/
|
2017-04-22 00:37:34 +08:00
|
|
|
JEMALLOC_ALWAYS_INLINE bool
|
2017-02-04 07:33:37 +08:00
|
|
|
compute_size_with_overflow(bool may_overflow, dynamic_opts_t *dopts,
|
|
|
|
size_t *size) {
|
2017-01-19 06:04:24 +08:00
|
|
|
/*
|
2017-02-04 07:33:37 +08:00
|
|
|
* This function is just num_items * item_size, except that we may have
|
|
|
|
* to check for overflow.
|
2017-01-19 06:04:24 +08:00
|
|
|
*/
|
2009-06-23 03:08:42 +08:00
|
|
|
|
2017-02-04 07:33:37 +08:00
|
|
|
if (!may_overflow) {
|
|
|
|
assert(dopts->num_items == 1);
|
|
|
|
*size = dopts->item_size;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2017-01-19 06:04:24 +08:00
|
|
|
/* A size_t with its high-half bits all set to 1. */
|
2017-09-19 05:36:43 +08:00
|
|
|
static const size_t high_bits = SIZE_T_MAX << (sizeof(size_t) * 8 / 2);
|
2009-06-23 03:08:42 +08:00
|
|
|
|
2017-01-19 06:04:24 +08:00
|
|
|
*size = dopts->item_size * dopts->num_items;
|
|
|
|
|
|
|
|
if (unlikely(*size == 0)) {
|
|
|
|
return (dopts->num_items != 0 && dopts->item_size != 0);
|
2010-10-21 08:39:18 +08:00
|
|
|
}
|
2015-10-28 06:12:10 +08:00
|
|
|
|
2017-01-19 06:04:24 +08:00
|
|
|
/*
|
|
|
|
* We got a non-zero size, but we don't know if we overflowed to get
|
|
|
|
* there. To avoid having to do a divide, we'll be clever and note that
|
|
|
|
* if both A and B can be represented in N/2 bits, then their product
|
|
|
|
* can be represented in N bits (without the possibility of overflow).
|
|
|
|
*/
|
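/*
 * Concrete case: with a 64-bit size_t, if num_items and item_size each fit
 * in 32 bits, their product fits in 64 bits, so the division check below is
 * only needed when the combined high bits are non-zero.
 */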
|
|
|
if (likely((high_bits & (dopts->num_items | dopts->item_size)) == 0)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if (likely(*size / dopts->item_size == dopts->num_items)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return true;
|
2009-06-23 03:08:42 +08:00
|
|
|
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
JEMALLOC_ALWAYS_INLINE int
|
2017-04-12 14:13:45 +08:00
|
|
|
imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
|
2017-01-19 06:04:24 +08:00
|
|
|
/* Where the actual allocated memory will live. */
|
|
|
|
void *allocation = NULL;
|
|
|
|
/* Filled in by compute_size_with_overflow below. */
|
|
|
|
size_t size = 0;
|
|
|
|
/*
|
2019-08-23 06:56:47 +08:00
|
|
|
* The zero initialization for ind is actually a dead store, in that its
|
|
|
|
* value is reset before any branch on its value is taken. Sometimes
|
|
|
|
* though, it's convenient to pass it as arguments before this point.
|
|
|
|
* To avoid undefined behavior then, we initialize it with dummy stores.
|
2017-01-19 06:04:24 +08:00
|
|
|
*/
|
|
|
|
szind_t ind = 0;
|
2019-08-23 06:56:47 +08:00
|
|
|
/* usize will always be properly initialized. */
|
|
|
|
size_t usize;
|
2014-01-13 07:05:44 +08:00
|
|
|
|
2017-04-13 07:16:27 +08:00
|
|
|
/* Reentrancy is only checked on slow path. */
|
|
|
|
int8_t reentrancy_level;
|
2017-04-01 10:59:45 +08:00
|
|
|
|
2017-01-19 06:04:24 +08:00
|
|
|
/* Compute the amount of memory the user wants. */
|
2017-02-04 07:33:37 +08:00
|
|
|
if (unlikely(compute_size_with_overflow(sopts->may_overflow, dopts,
|
|
|
|
&size))) {
|
2017-01-19 06:04:24 +08:00
|
|
|
goto label_oom;
|
|
|
|
}
|
2014-01-13 07:05:44 +08:00
|
|
|
|
2017-01-19 06:04:24 +08:00
|
|
|
if (unlikely(dopts->alignment < sopts->min_alignment
|
|
|
|
|| (dopts->alignment & (dopts->alignment - 1)) != 0)) {
|
|
|
|
goto label_invalid_alignment;
|
|
|
|
}
|
2009-06-23 03:08:42 +08:00
|
|
|
|
2017-01-19 06:04:24 +08:00
|
|
|
/* This is the beginning of the "core" algorithm. */
|
2020-05-14 02:19:09 +08:00
|
|
|
dopts->zero = zero_get(dopts->zero, sopts->slow);
|
2020-05-14 02:16:07 +08:00
|
|
|
if (aligned_usize_get(size, dopts->alignment, &usize, &ind,
|
|
|
|
sopts->bump_empty_aligned_alloc)) {
|
|
|
|
goto label_oom;
|
2015-06-23 09:48:58 +08:00
|
|
|
}
|
2020-05-14 02:16:07 +08:00
|
|
|
dopts->usize = usize;
|
2019-07-17 05:35:53 +08:00
|
|
|
/* Validate the user input. */
|
|
|
|
if (sopts->assert_nonempty_alloc) {
|
|
|
|
assert (size != 0);
|
|
|
|
}
|
2017-01-19 06:04:24 +08:00
|
|
|
|
2017-06-01 00:43:43 +08:00
|
|
|
check_entry_exit_locking(tsd_tsdn(tsd));
|
2017-04-01 10:59:45 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If we need to handle reentrancy, we can do it out of a
|
|
|
|
* known-initialized arena (i.e. arena 0).
|
|
|
|
*/
|
2017-04-13 07:16:27 +08:00
|
|
|
reentrancy_level = tsd_reentrancy_level_get(tsd);
|
|
|
|
if (sopts->slow && unlikely(reentrancy_level > 0)) {
|
2017-04-01 10:59:45 +08:00
|
|
|
/*
|
|
|
|
* We should never specify particular arenas or tcaches from
|
|
|
|
* within our internal allocations.
|
|
|
|
*/
|
2017-06-07 12:44:39 +08:00
|
|
|
assert(dopts->tcache_ind == TCACHE_IND_AUTOMATIC ||
|
|
|
|
dopts->tcache_ind == TCACHE_IND_NONE);
|
2017-06-24 01:40:02 +08:00
|
|
|
assert(dopts->arena_ind == ARENA_IND_AUTOMATIC);
|
2017-04-01 10:59:45 +08:00
|
|
|
dopts->tcache_ind = TCACHE_IND_NONE;
|
|
|
|
/* We know that arena 0 has already been initialized. */
|
|
|
|
dopts->arena_ind = 0;
|
|
|
|
}
|
2009-06-23 03:08:42 +08:00
|
|
|
|
2019-08-23 06:56:47 +08:00
|
|
|
/*
|
|
|
|
* If dopts->alignment > 0, then ind is still 0, but usize was computed
|
|
|
|
* in the previous if statement. Down the positive alignment path,
|
|
|
|
* imalloc_no_sample and imalloc_sample will ignore ind.
|
|
|
|
*/
|
|
|
|
|
2017-01-19 06:04:24 +08:00
|
|
|
/* If profiling is on, get our profiling context. */
|
|
|
|
if (config_prof && opt_prof) {
|
2020-03-10 06:49:15 +08:00
|
|
|
bool prof_active = prof_active_get_unlocked();
|
2020-03-11 05:21:05 +08:00
|
|
|
bool sample_event = te_prof_sample_event_lookahead(tsd, usize);
|
|
|
|
prof_tctx_t *tctx = prof_alloc_prep(tsd, prof_active,
|
|
|
|
sample_event);
|
2017-04-12 09:13:10 +08:00
|
|
|
|
2020-02-07 05:45:04 +08:00
|
|
|
emap_alloc_ctx_t alloc_ctx;
|
2017-01-19 06:04:24 +08:00
|
|
|
if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
|
2019-08-23 06:56:47 +08:00
|
|
|
alloc_ctx.slab = (usize <= SC_SMALL_MAXCLASS);
|
2017-01-19 06:04:24 +08:00
|
|
|
allocation = imalloc_no_sample(
|
|
|
|
sopts, dopts, tsd, usize, usize, ind);
|
|
|
|
} else if ((uintptr_t)tctx > (uintptr_t)1U) {
|
|
|
|
allocation = imalloc_sample(
|
|
|
|
sopts, dopts, tsd, usize, ind);
|
2017-04-12 09:13:10 +08:00
|
|
|
alloc_ctx.slab = false;
|
2017-01-19 06:04:24 +08:00
|
|
|
} else {
|
|
|
|
allocation = NULL;
|
2011-03-23 15:37:29 +08:00
|
|
|
}
|
|
|
|
|
2017-01-19 06:04:24 +08:00
|
|
|
if (unlikely(allocation == NULL)) {
|
2020-03-10 06:49:15 +08:00
|
|
|
prof_alloc_rollback(tsd, tctx);
|
2017-01-19 06:04:24 +08:00
|
|
|
goto label_oom;
|
|
|
|
}
|
2020-01-10 02:20:34 +08:00
|
|
|
prof_malloc(tsd, allocation, size, usize, &alloc_ctx, tctx);
|
2017-01-19 06:04:24 +08:00
|
|
|
} else {
|
2019-08-27 05:41:32 +08:00
|
|
|
assert(!opt_prof);
|
2017-01-26 07:50:59 +08:00
|
|
|
allocation = imalloc_no_sample(sopts, dopts, tsd, size, usize,
|
2017-01-19 06:04:24 +08:00
|
|
|
ind);
|
|
|
|
if (unlikely(allocation == NULL)) {
|
|
|
|
goto label_oom;
|
|
|
|
}
|
2009-06-23 03:08:42 +08:00
|
|
|
}
|
|
|
|
|
2017-01-19 06:04:24 +08:00
|
|
|
/*
|
|
|
|
* Allocation has been done at this point. We still have some
|
|
|
|
* post-allocation work to do though.
|
|
|
|
*/
|
2020-03-10 06:49:15 +08:00
|
|
|
|
|
|
|
thread_alloc_event(tsd, usize);
|
|
|
|
|
2017-01-19 06:04:24 +08:00
|
|
|
assert(dopts->alignment == 0
|
|
|
|
|| ((uintptr_t)allocation & (dopts->alignment - 1)) == ZU(0));
|
2015-06-23 09:48:58 +08:00
|
|
|
|
2019-08-23 06:56:47 +08:00
|
|
|
assert(usize == isalloc(tsd_tsdn(tsd), allocation));
|
2017-01-19 06:04:24 +08:00
|
|
|
|
2020-04-30 00:05:57 +08:00
|
|
|
if (config_fill && sopts->slow && !dopts->zero
|
|
|
|
&& unlikely(opt_junk_alloc)) {
|
|
|
|
junk_alloc_callback(allocation, usize);
|
2020-02-29 03:37:39 +08:00
|
|
|
}
|
|
|
|
|
2017-01-19 06:04:24 +08:00
|
|
|
if (sopts->slow) {
|
|
|
|
UTRACE(0, size, allocation);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Success! */
|
2017-06-01 00:43:43 +08:00
|
|
|
check_entry_exit_locking(tsd_tsdn(tsd));
|
2017-01-19 06:04:24 +08:00
|
|
|
*dopts->result = allocation;
|
|
|
|
return 0;
|
|
|
|
|
2014-01-13 07:05:44 +08:00
|
|
|
label_oom:
|
2017-01-19 06:04:24 +08:00
|
|
|
if (unlikely(sopts->slow) && config_xmalloc && unlikely(opt_xmalloc)) {
|
|
|
|
malloc_write(sopts->oom_string);
|
|
|
|
abort();
|
|
|
|
}
|
|
|
|
|
|
|
|
if (sopts->slow) {
|
|
|
|
UTRACE(NULL, size, NULL);
|
|
|
|
}
|
|
|
|
|
2017-06-01 00:43:43 +08:00
|
|
|
check_entry_exit_locking(tsd_tsdn(tsd));
|
2017-01-19 06:04:24 +08:00
|
|
|
|
|
|
|
if (sopts->set_errno_on_error) {
|
|
|
|
set_errno(ENOMEM);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (sopts->null_out_result_on_error) {
|
|
|
|
*dopts->result = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ENOMEM;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This label is only jumped to by one goto; we move it out of line
|
|
|
|
* anyways to avoid obscuring the non-error paths, and for symmetry with
|
|
|
|
* the oom case.
|
|
|
|
*/
|
|
|
|
label_invalid_alignment:
|
2014-09-12 07:20:44 +08:00
|
|
|
if (config_xmalloc && unlikely(opt_xmalloc)) {
|
2017-01-19 06:04:24 +08:00
|
|
|
malloc_write(sopts->invalid_alignment_string);
|
2014-01-13 07:05:44 +08:00
|
|
|
abort();
|
|
|
|
}
|
2017-01-19 06:04:24 +08:00
|
|
|
|
|
|
|
if (sopts->set_errno_on_error) {
|
|
|
|
set_errno(EINVAL);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (sopts->slow) {
|
|
|
|
UTRACE(NULL, size, NULL);
|
|
|
|
}
|
|
|
|
|
2017-06-01 00:43:43 +08:00
|
|
|
check_entry_exit_locking(tsd_tsdn(tsd));
|
2017-01-19 06:04:24 +08:00
|
|
|
|
|
|
|
if (sopts->null_out_result_on_error) {
|
|
|
|
*dopts->result = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return EINVAL;
|
|
|
|
}

JEMALLOC_ALWAYS_INLINE bool
imalloc_init_check(static_opts_t *sopts, dynamic_opts_t *dopts) {
	if (unlikely(!malloc_initialized()) && unlikely(malloc_init())) {
		if (config_xmalloc && unlikely(opt_xmalloc)) {
			malloc_write(sopts->oom_string);
			abort();
		}
		UTRACE(NULL, dopts->num_items * dopts->item_size, NULL);
		set_errno(ENOMEM);
		*dopts->result = NULL;

		return false;
	}

	return true;
}

/* Returns the errno-style error code of the allocation. */
JEMALLOC_ALWAYS_INLINE int
imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) {
	if (tsd_get_allocates() && !imalloc_init_check(sopts, dopts)) {
		return ENOMEM;
	}

	/* We always need the tsd.  Let's grab it right away. */
	tsd_t *tsd = tsd_fetch();
	assert(tsd);
	if (likely(tsd_fast(tsd))) {
		/* Fast and common path. */
		tsd_assert_fast(tsd);
		sopts->slow = false;
		return imalloc_body(sopts, dopts, tsd);
	} else {
		if (!tsd_get_allocates() && !imalloc_init_check(sopts, dopts)) {
			return ENOMEM;
		}

		sopts->slow = true;
		return imalloc_body(sopts, dopts, tsd);
	}
}

JEMALLOC_NOINLINE
void *
malloc_default(size_t size) {
	void *ret;
	static_opts_t sopts;
	dynamic_opts_t dopts;

	/*
	 * This variant has a logging hook on exit but not on entry.  It's
	 * called only by je_malloc, below, which emits the entry one for us
	 * (and, if it calls us, does so only via tail call).
	 */

	static_opts_init(&sopts);
	dynamic_opts_init(&dopts);

	sopts.null_out_result_on_error = true;
	sopts.set_errno_on_error = true;
	sopts.oom_string = "<jemalloc>: Error in malloc(): out of memory\n";

	dopts.result = &ret;
	dopts.num_items = 1;
	dopts.item_size = size;

	imalloc(&sopts, &dopts);
	/*
	 * Note that this branch gets optimized away -- it immediately follows
	 * the check on tsd_fast that sets sopts.slow.
	 */
	if (sopts.slow) {
		uintptr_t args[3] = {size};
		hook_invoke_alloc(hook_alloc_malloc, ret, (uintptr_t)ret, args);
	}

	LOG("core.malloc.exit", "result: %p", ret);

	return ret;
}

/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */

JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
je_malloc(size_t size) {
	return imalloc_fastpath(size, &malloc_default);
}

JEMALLOC_EXPORT int JEMALLOC_NOTHROW
JEMALLOC_ATTR(nonnull(1))
je_posix_memalign(void **memptr, size_t alignment, size_t size) {
	int ret;
	static_opts_t sopts;
	dynamic_opts_t dopts;

	LOG("core.posix_memalign.entry", "mem ptr: %p, alignment: %zu, "
	    "size: %zu", memptr, alignment, size);

	static_opts_init(&sopts);
	dynamic_opts_init(&dopts);

	sopts.bump_empty_aligned_alloc = true;
	sopts.min_alignment = sizeof(void *);
	sopts.oom_string =
	    "<jemalloc>: Error allocating aligned memory: out of memory\n";
	sopts.invalid_alignment_string =
	    "<jemalloc>: Error allocating aligned memory: invalid alignment\n";

	dopts.result = memptr;
	dopts.num_items = 1;
	dopts.item_size = size;
	dopts.alignment = alignment;

	ret = imalloc(&sopts, &dopts);
	if (sopts.slow) {
		uintptr_t args[3] = {(uintptr_t)memptr, (uintptr_t)alignment,
		    (uintptr_t)size};
		hook_invoke_alloc(hook_alloc_posix_memalign, *memptr,
		    (uintptr_t)ret, args);
	}

	LOG("core.posix_memalign.exit", "result: %d, alloc ptr: %p", ret,
	    *memptr);

	return ret;
}
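
/*
 * Usage sketch (illustrative, not part of jemalloc): posix_memalign() requires
 * an alignment that is a power of two and at least sizeof(void *); the
 * min_alignment setting above plus the power-of-two check in imalloc_body()
 * enforce that, returning EINVAL otherwise and ENOMEM on allocation failure.
 *
 *	void *buf;
 *	if (posix_memalign(&buf, 64, 1024) == 0) {
 *		... use the 64-byte-aligned buf ...
 *		free(buf);
 *	}
 */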

JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2)
je_aligned_alloc(size_t alignment, size_t size) {
	void *ret;

	static_opts_t sopts;
	dynamic_opts_t dopts;

	LOG("core.aligned_alloc.entry", "alignment: %zu, size: %zu\n",
	    alignment, size);

	static_opts_init(&sopts);
	dynamic_opts_init(&dopts);

	sopts.bump_empty_aligned_alloc = true;
	sopts.null_out_result_on_error = true;
	sopts.set_errno_on_error = true;
	sopts.min_alignment = 1;
	sopts.oom_string =
	    "<jemalloc>: Error allocating aligned memory: out of memory\n";
	sopts.invalid_alignment_string =
	    "<jemalloc>: Error allocating aligned memory: invalid alignment\n";

	dopts.result = &ret;
	dopts.num_items = 1;
	dopts.item_size = size;
	dopts.alignment = alignment;

	imalloc(&sopts, &dopts);
	if (sopts.slow) {
		uintptr_t args[3] = {(uintptr_t)alignment, (uintptr_t)size};
		hook_invoke_alloc(hook_alloc_aligned_alloc, ret,
		    (uintptr_t)ret, args);
	}

	LOG("core.aligned_alloc.exit", "result: %p", ret);

	return ret;
}

JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
je_calloc(size_t num, size_t size) {
	void *ret;
	static_opts_t sopts;
	dynamic_opts_t dopts;

	LOG("core.calloc.entry", "num: %zu, size: %zu\n", num, size);

	static_opts_init(&sopts);
	dynamic_opts_init(&dopts);

	sopts.may_overflow = true;
	sopts.null_out_result_on_error = true;
	sopts.set_errno_on_error = true;
	sopts.oom_string = "<jemalloc>: Error in calloc(): out of memory\n";

	dopts.result = &ret;
	dopts.num_items = num;
	dopts.item_size = size;
	dopts.zero = true;

	imalloc(&sopts, &dopts);
	if (sopts.slow) {
		uintptr_t args[3] = {(uintptr_t)num, (uintptr_t)size};
		hook_invoke_alloc(hook_alloc_calloc, ret, (uintptr_t)ret, args);
	}

	LOG("core.calloc.exit", "result: %p", ret);

	return ret;
}

JEMALLOC_ALWAYS_INLINE void
ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
	if (!slow_path) {
		tsd_assert_fast(tsd);
	}
	check_entry_exit_locking(tsd_tsdn(tsd));
	if (tsd_reentrancy_level_get(tsd) != 0) {
		assert(slow_path);
	}

	assert(ptr != NULL);
	assert(malloc_initialized() || IS_INITIALIZER);

	emap_alloc_ctx_t alloc_ctx;
	emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr,
	    &alloc_ctx);
	assert(alloc_ctx.szind != SC_NSIZES);

	size_t usize = sz_index2size(alloc_ctx.szind);
	if (config_prof && opt_prof) {
		prof_free(tsd, ptr, usize, &alloc_ctx);
	}

	if (likely(!slow_path)) {
		idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false,
		    false);
	} else {
		if (config_fill && slow_path && opt_junk_free) {
			junk_free_callback(ptr, usize);
		}
		idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false,
		    true);
	}
	thread_dalloc_event(tsd, usize);
}

JEMALLOC_ALWAYS_INLINE bool
maybe_check_alloc_ctx(tsd_t *tsd, void *ptr, emap_alloc_ctx_t *alloc_ctx) {
	if (config_opt_size_checks) {
		emap_alloc_ctx_t dbg_ctx;
		emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr,
		    &dbg_ctx);
		if (alloc_ctx->szind != dbg_ctx.szind) {
			safety_check_fail_sized_dealloc(
			    /* current_dealloc */ true, ptr,
			    /* true_size */ sz_index2size(dbg_ctx.szind),
			    /* input_size */ sz_index2size(alloc_ctx->szind));
			return true;
		}
		if (alloc_ctx->slab != dbg_ctx.slab) {
			safety_check_fail(
			    "Internal heap corruption detected: "
			    "mismatch in slab bit");
			return true;
		}
	}
	return false;
}

JEMALLOC_ALWAYS_INLINE void
isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) {
	if (!slow_path) {
		tsd_assert_fast(tsd);
	}
	check_entry_exit_locking(tsd_tsdn(tsd));
	if (tsd_reentrancy_level_get(tsd) != 0) {
		assert(slow_path);
	}

	assert(ptr != NULL);
	assert(malloc_initialized() || IS_INITIALIZER);

	emap_alloc_ctx_t alloc_ctx;
	if (!config_prof) {
		alloc_ctx.szind = sz_size2index(usize);
		alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS);
	} else {
		if (likely(!prof_sample_aligned(ptr))) {
			/*
			 * When the ptr is not page aligned, it was not sampled.
			 * usize can be trusted to determine szind and slab.
			 */
			alloc_ctx.szind = sz_size2index(usize);
			alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS);
		} else if (opt_prof) {
			emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global,
			    ptr, &alloc_ctx);

			if (config_opt_safety_checks) {
				/* Small alloc may have !slab (sampled). */
				if (unlikely(alloc_ctx.szind !=
				    sz_size2index(usize))) {
					safety_check_fail_sized_dealloc(
					    /* current_dealloc */ true, ptr,
					    /* true_size */ sz_index2size(
					    alloc_ctx.szind),
					    /* input_size */ usize);
				}
			}
		} else {
			alloc_ctx.szind = sz_size2index(usize);
			alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS);
		}
	}
	bool fail = maybe_check_alloc_ctx(tsd, ptr, &alloc_ctx);
	if (fail) {
		/*
		 * This is a heap corruption bug.  In real life we'll crash; for
		 * the unit test we just want to avoid breaking anything too
		 * badly to get a test result out.  Let's leak instead of trying
		 * to free.
		 */
		return;
	}

	if (config_prof && opt_prof) {
		prof_free(tsd, ptr, usize, &alloc_ctx);
	}
	if (likely(!slow_path)) {
		isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, &alloc_ctx,
		    false);
	} else {
		if (config_fill && slow_path && opt_junk_free) {
			junk_free_callback(ptr, usize);
		}
		isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, &alloc_ctx,
		    true);
	}
	thread_dalloc_event(tsd, usize);
}

JEMALLOC_NOINLINE
void
free_default(void *ptr) {
	UTRACE(ptr, 0, 0);
	if (likely(ptr != NULL)) {
		/*
		 * We avoid setting up tsd fully (e.g. tcache, arena binding)
		 * based on only free() calls -- other activities trigger the
		 * minimal to full transition.  This is because free() may
		 * happen during thread shutdown after tls deallocation: if a
		 * thread never had any malloc activities until then, a
		 * fully-setup tsd won't be destructed properly.
		 */
		tsd_t *tsd = tsd_fetch_min();
		check_entry_exit_locking(tsd_tsdn(tsd));

		if (likely(tsd_fast(tsd))) {
			tcache_t *tcache = tcache_get_from_ind(tsd,
			    TCACHE_IND_AUTOMATIC, /* slow */ false,
			    /* is_alloc */ false);
			ifree(tsd, ptr, tcache, /* slow */ false);
		} else {
			tcache_t *tcache = tcache_get_from_ind(tsd,
			    TCACHE_IND_AUTOMATIC, /* slow */ true,
			    /* is_alloc */ false);
			uintptr_t args_raw[3] = {(uintptr_t)ptr};
			hook_invoke_dalloc(hook_dalloc_free, ptr, args_raw);
			ifree(tsd, ptr, tcache, /* slow */ true);
		}

		check_entry_exit_locking(tsd_tsdn(tsd));
	}
}

/* Returns whether or not the free attempt was successful. */
JEMALLOC_ALWAYS_INLINE
bool free_fastpath(void *ptr, size_t size, bool size_hint) {
	tsd_t *tsd = tsd_get(false);

	emap_alloc_ctx_t alloc_ctx;
	if (!size_hint) {
		if (unlikely(tsd == NULL || !tsd_fast(tsd))) {
			return false;
		}
		bool err = emap_alloc_ctx_try_lookup_fast(tsd,
		    &arena_emap_global, ptr, &alloc_ctx);

		/* Note: profiled objects will have alloc_ctx.slab set */
		if (unlikely(err || !alloc_ctx.slab)) {
			return false;
		}
		assert(alloc_ctx.szind != SC_NSIZES);
	} else {
		/*
		 * The size hinted fastpath does not involve rtree lookup, thus
		 * can tolerate an uninitialized tsd.  This allows the tsd_fast
		 * check to be folded into the branch testing fast_threshold
		 * (set to 0 when !tsd_fast).
		 */
		if (unlikely(tsd == NULL)) {
			return false;
		}
		/*
		 * Check for both sizes that are too large, and for sampled
		 * objects.  Sampled objects are always page-aligned.  The
		 * sampled object check will also check for null ptr.
		 */
		if (unlikely(size > SC_LOOKUP_MAXCLASS ||
		    (config_prof && prof_sample_aligned(ptr)))) {
			return false;
		}
		alloc_ctx.szind = sz_size2index_lookup(size);
		/* This is a dead store, except when opt size checking is on. */
		alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS);
	}

	uint64_t deallocated, threshold;
	te_free_fastpath_ctx(tsd, &deallocated, &threshold, size_hint);

	size_t usize = sz_index2size(alloc_ctx.szind);
	uint64_t deallocated_after = deallocated + usize;
	/*
	 * Check for events and tsd non-nominal (fast_threshold will be set to
	 * 0) in a single branch.  Note that this handles the uninitialized
	 * case as well (TSD init will be triggered on the non-fastpath).
	 * Therefore anything that depends on a functional TSD (e.g. the
	 * alloc_ctx sanity check below) needs to be after this branch.
	 */
	if (unlikely(deallocated_after >= threshold)) {
		return false;
	}

	bool fail = maybe_check_alloc_ctx(tsd, ptr, &alloc_ctx);
	if (fail) {
		/* See the comment in isfree. */
		return true;
	}

	tcache_t *tcache = tcache_get_from_ind(tsd, TCACHE_IND_AUTOMATIC,
	    /* slow */ false, /* is_alloc */ false);
	cache_bin_t *bin = &tcache->bins[alloc_ctx.szind];

	/*
	 * If junking were enabled, this is where we would do it.  It's not
	 * though, since we ensured above that we're on the fast path.  Assert
	 * that to double-check.
	 */
	assert(!opt_junk_free);

	if (!cache_bin_dalloc_easy(bin, ptr)) {
		return false;
	}

	*tsd_thread_deallocatedp_get(tsd) = deallocated_after;

	return true;
}

JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_free(void *ptr) {
	LOG("core.free.entry", "ptr: %p", ptr);

	if (!free_fastpath(ptr, 0, false)) {
		free_default(ptr);
	}

	LOG("core.free.exit", "");
}
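
/*
 * Note (assumption for illustration): the size_hint variant of free_fastpath
 * above is intended for sized-deallocation entry points, where the caller
 * passes the allocation size back (e.g. the public sdallocx(ptr, size, flags)
 * API); je_free() itself always takes the !size_hint branch, as shown above.
 */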

/*
 * End malloc(3)-compatible functions.
 */
/******************************************************************************/
/*
 * Begin non-standard override functions.
 */

#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc)
je_memalign(size_t alignment, size_t size) {
	void *ret;
	static_opts_t sopts;
	dynamic_opts_t dopts;

	LOG("core.memalign.entry", "alignment: %zu, size: %zu\n", alignment,
	    size);

	static_opts_init(&sopts);
	dynamic_opts_init(&dopts);

	sopts.min_alignment = 1;
	sopts.oom_string =
	    "<jemalloc>: Error allocating aligned memory: out of memory\n";
	sopts.invalid_alignment_string =
	    "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
	sopts.null_out_result_on_error = true;

	dopts.result = &ret;
	dopts.num_items = 1;
	dopts.item_size = size;
	dopts.alignment = alignment;

	imalloc(&sopts, &dopts);
	if (sopts.slow) {
		uintptr_t args[3] = {alignment, size};
		hook_invoke_alloc(hook_alloc_memalign, ret, (uintptr_t)ret,
		    args);
	}

	LOG("core.memalign.exit", "result: %p", ret);
	return ret;
}
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc)
je_valloc(size_t size) {
	void *ret;

	static_opts_t sopts;
	dynamic_opts_t dopts;

	LOG("core.valloc.entry", "size: %zu\n", size);

	static_opts_init(&sopts);
	dynamic_opts_init(&dopts);

	sopts.null_out_result_on_error = true;
	sopts.min_alignment = PAGE;
	sopts.oom_string =
	    "<jemalloc>: Error allocating aligned memory: out of memory\n";
	sopts.invalid_alignment_string =
	    "<jemalloc>: Error allocating aligned memory: invalid alignment\n";

	dopts.result = &ret;
	dopts.num_items = 1;
	dopts.item_size = size;
	dopts.alignment = PAGE;

	imalloc(&sopts, &dopts);
	if (sopts.slow) {
		uintptr_t args[3] = {size};
		hook_invoke_alloc(hook_alloc_valloc, ret, (uintptr_t)ret, args);
	}

	LOG("core.valloc.exit", "result: %p\n", ret);
	return ret;
}
#endif

#if defined(JEMALLOC_IS_MALLOC) && defined(JEMALLOC_GLIBC_MALLOC_HOOK)
/*
 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
 * to inconsistently reference libc's malloc(3)-compatible functions
 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
 *
 * These definitions interpose hooks in glibc.  The functions are actually
 * passed an extra argument for the caller return address, which will be
 * ignored.
 */
JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
#  ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK
JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
    je_memalign;
#  endif

#  ifdef CPU_COUNT
/*
 * To enable static linking with glibc, the libc specific malloc interface must
 * be implemented also, so none of glibc's malloc.o functions are added to the
 * link.
 */
#  define ALIAS(je_fn)	__attribute__((alias (#je_fn), used))
/* To force macro expansion of je_ prefix before stringification. */
#  define PREALIAS(je_fn)	ALIAS(je_fn)
#  ifdef JEMALLOC_OVERRIDE___LIBC_CALLOC
void *__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc);
#  endif
#  ifdef JEMALLOC_OVERRIDE___LIBC_FREE
void __libc_free(void* ptr) PREALIAS(je_free);
#  endif
#  ifdef JEMALLOC_OVERRIDE___LIBC_MALLOC
void *__libc_malloc(size_t size) PREALIAS(je_malloc);
#  endif
#  ifdef JEMALLOC_OVERRIDE___LIBC_MEMALIGN
void *__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign);
#  endif
#  ifdef JEMALLOC_OVERRIDE___LIBC_REALLOC
void *__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc);
#  endif
#  ifdef JEMALLOC_OVERRIDE___LIBC_VALLOC
void *__libc_valloc(size_t size) PREALIAS(je_valloc);
#  endif
#  ifdef JEMALLOC_OVERRIDE___POSIX_MEMALIGN
int __posix_memalign(void** r, size_t a, size_t s) PREALIAS(je_posix_memalign);
#  endif
#  undef PREALIAS
#  undef ALIAS
#  endif
#endif
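
/*
 * Minimal sketch (illustrative, compiled out; not jemalloc code) of the
 * aliasing mechanism used above: with GCC/Clang, __attribute__((alias)) makes
 * a new symbol an alternate name for an existing definition in the same
 * translation unit, so the __libc_* entry points resolve directly to the je_*
 * implementations with no wrapper call.  The names below are hypothetical.
 */
#if 0
#include <stddef.h>

static void *
my_malloc_impl(size_t size) {
	(void)size;
	return NULL;	/* stand-in body */
}
/* "demo_malloc" becomes another name for my_malloc_impl. */
void *demo_malloc(size_t size) __attribute__((alias ("my_malloc_impl"), used));
#endif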

/*
 * End non-standard override functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */

JEMALLOC_ALWAYS_INLINE unsigned
mallocx_tcache_get(int flags) {
	if (likely((flags & MALLOCX_TCACHE_MASK) == 0)) {
		return TCACHE_IND_AUTOMATIC;
	} else if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
		return TCACHE_IND_NONE;
	} else {
		return MALLOCX_TCACHE_GET(flags);
	}
}

JEMALLOC_ALWAYS_INLINE unsigned
mallocx_arena_get(int flags) {
	if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
		return MALLOCX_ARENA_GET(flags);
	} else {
		return ARENA_IND_AUTOMATIC;
	}
}
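
/*
 * Usage sketch (illustrative, not part of jemalloc internals): the two helpers
 * above decode the bit-packed flags argument shared by mallocx()/rallocx().
 * Callers build it from the public macros in jemalloc.h, e.g.:
 *
 *	void *p = mallocx(4096, MALLOCX_ALIGN(64) | MALLOCX_ZERO
 *	    | MALLOCX_TCACHE_NONE);
 *	dallocx(p, MALLOCX_TCACHE_NONE);
 *
 * MALLOCX_TCACHE_NONE makes mallocx_tcache_get() return TCACHE_IND_NONE, and
 * leaving the arena bits clear makes mallocx_arena_get() return
 * ARENA_IND_AUTOMATIC.
 */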

#ifdef JEMALLOC_EXPERIMENTAL_SMALLOCX_API

#define JEMALLOC_SMALLOCX_CONCAT_HELPER(x, y) x ## y
#define JEMALLOC_SMALLOCX_CONCAT_HELPER2(x, y) \
  JEMALLOC_SMALLOCX_CONCAT_HELPER(x, y)

typedef struct {
	void *ptr;
	size_t size;
} smallocx_return_t;

JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
smallocx_return_t JEMALLOC_NOTHROW
/*
 * The attribute JEMALLOC_ATTR(malloc) cannot be used due to:
 *  - https://gcc.gnu.org/bugzilla/show_bug.cgi?id=86488
 */
JEMALLOC_SMALLOCX_CONCAT_HELPER2(je_smallocx_, JEMALLOC_VERSION_GID_IDENT)
  (size_t size, int flags) {
	/*
	 * Note: the attribute JEMALLOC_ALLOC_SIZE(1) cannot be
	 * used here because it makes writing beyond the `size`
	 * of the `ptr` undefined behavior, but the objective
	 * of this function is to allow writing beyond `size`
	 * up to `smallocx_return_t::size`.
	 */
	smallocx_return_t ret;
	static_opts_t sopts;
	dynamic_opts_t dopts;

	LOG("core.smallocx.entry", "size: %zu, flags: %d", size, flags);

	static_opts_init(&sopts);
	dynamic_opts_init(&dopts);

	sopts.assert_nonempty_alloc = true;
	sopts.null_out_result_on_error = true;
	sopts.oom_string = "<jemalloc>: Error in mallocx(): out of memory\n";
	sopts.usize = true;

	dopts.result = &ret.ptr;
	dopts.num_items = 1;
	dopts.item_size = size;
	if (unlikely(flags != 0)) {
		dopts.alignment = MALLOCX_ALIGN_GET(flags);
		dopts.zero = MALLOCX_ZERO_GET(flags);
		dopts.tcache_ind = mallocx_tcache_get(flags);
		dopts.arena_ind = mallocx_arena_get(flags);
	}

	imalloc(&sopts, &dopts);
	assert(dopts.usize == je_nallocx(size, flags));
	ret.size = dopts.usize;

	LOG("core.smallocx.exit", "result: %p, size: %zu", ret.ptr, ret.size);
	return ret;
}
#undef JEMALLOC_SMALLOCX_CONCAT_HELPER
#undef JEMALLOC_SMALLOCX_CONCAT_HELPER2
#endif

JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
je_mallocx(size_t size, int flags) {
	void *ret;
	static_opts_t sopts;
	dynamic_opts_t dopts;

	LOG("core.mallocx.entry", "size: %zu, flags: %d", size, flags);

	static_opts_init(&sopts);
	dynamic_opts_init(&dopts);

	sopts.assert_nonempty_alloc = true;
	sopts.null_out_result_on_error = true;
	sopts.oom_string = "<jemalloc>: Error in mallocx(): out of memory\n";

	dopts.result = &ret;
	dopts.num_items = 1;
	dopts.item_size = size;
	if (unlikely(flags != 0)) {
		dopts.alignment = MALLOCX_ALIGN_GET(flags);
		dopts.zero = MALLOCX_ZERO_GET(flags);
		dopts.tcache_ind = mallocx_tcache_get(flags);
		dopts.arena_ind = mallocx_arena_get(flags);
	}

	imalloc(&sopts, &dopts);
	if (sopts.slow) {
		uintptr_t args[3] = {size, flags};
		hook_invoke_alloc(hook_alloc_mallocx, ret, (uintptr_t)ret,
		    args);
	}

	LOG("core.mallocx.exit", "result: %p", ret);
	return ret;
}

static void *
irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize,
    size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
    prof_tctx_t *tctx, hook_ralloc_args_t *hook_args) {
	void *p;

	if (tctx == NULL) {
		return NULL;
	}

	alignment = prof_sample_align(alignment);
	if (usize <= SC_SMALL_MAXCLASS) {
		p = iralloct(tsdn, old_ptr, old_usize,
		    SC_LARGE_MINCLASS, alignment, zero, tcache,
		    arena, hook_args);
		if (p == NULL) {
			return NULL;
		}
		arena_prof_promote(tsdn, p, usize);
	} else {
		p = iralloct(tsdn, old_ptr, old_usize, usize, alignment, zero,
		    tcache, arena, hook_args);
	}
	assert(prof_sample_aligned(p));

	return p;
}

JEMALLOC_ALWAYS_INLINE void *
irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
    size_t alignment, size_t usize, bool zero, tcache_t *tcache,
    arena_t *arena, emap_alloc_ctx_t *alloc_ctx,
    hook_ralloc_args_t *hook_args) {
	prof_info_t old_prof_info;
	prof_info_get_and_reset_recent(tsd, old_ptr, alloc_ctx, &old_prof_info);
	bool prof_active = prof_active_get_unlocked();
	bool sample_event = te_prof_sample_event_lookahead(tsd, usize);
	prof_tctx_t *tctx = prof_alloc_prep(tsd, prof_active, sample_event);
	void *p;
	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
		p = irallocx_prof_sample(tsd_tsdn(tsd), old_ptr, old_usize,
		    usize, alignment, zero, tcache, arena, tctx, hook_args);
	} else {
		p = iralloct(tsd_tsdn(tsd), old_ptr, old_usize, size, alignment,
		    zero, tcache, arena, hook_args);
	}
	if (unlikely(p == NULL)) {
		prof_alloc_rollback(tsd, tctx);
		return NULL;
	}
	assert(usize == isalloc(tsd_tsdn(tsd), p));
	prof_realloc(tsd, p, size, usize, tctx, prof_active, old_ptr,
	    old_usize, &old_prof_info, sample_event);

	return p;
}
|
|
|
|
|
2020-12-18 04:16:38 +08:00
|
|
|
static void *
|
2019-10-22 09:44:42 +08:00
|
|
|
do_rallocx(void *ptr, size_t size, int flags, bool is_realloc) {
|
Implement the *allocx() API.
Implement the *allocx() API, which is a successor to the *allocm() API.
The *allocx() functions are slightly simpler to use because they have
fewer parameters, they directly return the results of primary interest,
and mallocx()/rallocx() avoid the strict aliasing pitfall that
allocm()/rallocx() share with posix_memalign(). The following code
violates strict aliasing rules:
foo_t *foo;
allocm((void **)&foo, NULL, 42, 0);
whereas the following is safe:
foo_t *foo;
void *p;
allocm(&p, NULL, 42, 0);
foo = (foo_t *)p;
mallocx() does not have this problem:
foo_t *foo = (foo_t *)mallocx(42, 0);
2013-12-13 14:35:52 +08:00
|
|
|
void *p;
|
2014-09-23 12:09:23 +08:00
|
|
|
tsd_t *tsd;
|
2014-09-12 07:20:44 +08:00
|
|
|
size_t usize;
|
2014-10-25 01:18:57 +08:00
|
|
|
size_t old_usize;
|
2014-09-08 05:40:19 +08:00
|
|
|
size_t alignment = MALLOCX_ALIGN_GET(flags);
|
Implement the *allocx() API.
Implement the *allocx() API, which is a successor to the *allocm() API.
The *allocx() functions are slightly simpler to use because they have
fewer parameters, they directly return the results of primary interest,
and mallocx()/rallocx() avoid the strict aliasing pitfall that
allocm()/rallocx() share with posix_memalign(). The following code
violates strict aliasing rules:
foo_t *foo;
allocm((void **)&foo, NULL, 42, 0);
whereas the following is safe:
foo_t *foo;
void *p;
allocm(&p, NULL, 42, 0);
foo = (foo_t *)p;
mallocx() does not have this problem:
foo_t *foo = (foo_t *)mallocx(42, 0);
2013-12-13 14:35:52 +08:00
|
|
|
arena_t *arena;
|
|
|
|
|
|
|
|
assert(ptr != NULL);
|
|
|
|
assert(size != 0);
|
2015-01-21 07:37:51 +08:00
|
|
|
assert(malloc_initialized() || IS_INITIALIZER);
|
2014-10-05 02:12:53 +08:00
|
|
|
tsd = tsd_fetch();
|
2017-06-01 00:43:43 +08:00
|
|
|
check_entry_exit_locking(tsd_tsdn(tsd));
|
2014-09-23 12:09:23 +08:00
|
|
|
|
2020-05-14 02:19:09 +08:00
|
|
|
bool zero = zero_get(MALLOCX_ZERO_GET(flags), /* slow */ true);
|
2020-04-30 00:05:57 +08:00
|
|
|
|
2020-05-14 05:49:41 +08:00
|
|
|
unsigned arena_ind = mallocx_arena_get(flags);
|
|
|
|
if (arena_get_from_ind(tsd, arena_ind, &arena)) {
|
|
|
|
goto label_oom;
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
2015-01-30 07:30:47 +08:00
|
|
|
|
2020-05-14 05:06:43 +08:00
|
|
|
unsigned tcache_ind = mallocx_tcache_get(flags);
|
|
|
|
tcache_t *tcache = tcache_get_from_ind(tsd, tcache_ind,
|
|
|
|
/* slow */ true, /* is_alloc */ true);
|
Implement the *allocx() API.
Implement the *allocx() API, which is a successor to the *allocm() API.
The *allocx() functions are slightly simpler to use because they have
fewer parameters, they directly return the results of primary interest,
and mallocx()/rallocx() avoid the strict aliasing pitfall that
allocm()/rallocx() share with posix_memalign(). The following code
violates strict aliasing rules:
foo_t *foo;
allocm((void **)&foo, NULL, 42, 0);
whereas the following is safe:
foo_t *foo;
void *p;
allocm(&p, NULL, 42, 0);
foo = (foo_t *)p;
mallocx() does not have this problem:
foo_t *foo = (foo_t *)mallocx(42, 0);
2013-12-13 14:35:52 +08:00
|
|
|
|
2020-02-07 05:45:04 +08:00
|
|
|
emap_alloc_ctx_t alloc_ctx;
|
2020-03-15 01:49:34 +08:00
|
|
|
emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr,
|
|
|
|
&alloc_ctx);
|
2017-12-15 04:46:39 +08:00
|
|
|
assert(alloc_ctx.szind != SC_NSIZES);
|
2017-05-31 01:45:37 +08:00
|
|
|
old_usize = sz_index2size(alloc_ctx.szind);
|
2017-04-12 09:13:10 +08:00
|
|
|
assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
|
2020-08-26 02:31:58 +08:00
|
|
|
if (aligned_usize_get(size, alignment, &usize, NULL, false)) {
|
|
|
|
goto label_oom;
|
|
|
|
}
|
2018-04-24 09:07:40 +08:00
|
|
|
|
2019-10-22 09:44:42 +08:00
|
|
|
hook_ralloc_args_t hook_args = {is_realloc, {(uintptr_t)ptr, size,
|
|
|
|
flags, 0}};
|
Implement the *allocx() API.
Implement the *allocx() API, which is a successor to the *allocm() API.
The *allocx() functions are slightly simpler to use because they have
fewer parameters, they directly return the results of primary interest,
and mallocx()/rallocx() avoid the strict aliasing pitfall that
allocm()/rallocx() share with posix_memalign(). The following code
violates strict aliasing rules:
foo_t *foo;
allocm((void **)&foo, NULL, 42, 0);
whereas the following is safe:
foo_t *foo;
void *p;
allocm(&p, NULL, 42, 0);
foo = (foo_t *)p;
mallocx() does not have this problem:
foo_t *foo = (foo_t *)mallocx(42, 0);
2013-12-13 14:35:52 +08:00
|
|
|
if (config_prof && opt_prof) {
|
2020-08-26 02:31:58 +08:00
|
|
|
p = irallocx_prof(tsd, ptr, old_usize, size, alignment, usize,
|
2018-04-24 09:07:40 +08:00
|
|
|
zero, tcache, arena, &alloc_ctx, &hook_args);
|
2017-01-16 08:56:30 +08:00
|
|
|
if (unlikely(p == NULL)) {
|
Implement the *allocx() API.
Implement the *allocx() API, which is a successor to the *allocm() API.
The *allocx() functions are slightly simpler to use because they have
fewer parameters, they directly return the results of primary interest,
and mallocx()/rallocx() avoid the strict aliasing pitfall that
allocm()/rallocx() share with posix_memalign(). The following code
violates strict aliasing rules:
foo_t *foo;
allocm((void **)&foo, NULL, 42, 0);
whereas the following is safe:
foo_t *foo;
void *p;
allocm(&p, NULL, 42, 0);
foo = (foo_t *)p;
mallocx() does not have this problem:
foo_t *foo = (foo_t *)mallocx(42, 0);
2013-12-13 14:35:52 +08:00
|
|
|
goto label_oom;
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
Implement the *allocx() API.
Implement the *allocx() API, which is a successor to the *allocm() API.
The *allocx() functions are slightly simpler to use because they have
fewer parameters, they directly return the results of primary interest,
and mallocx()/rallocx() avoid the strict aliasing pitfall that
allocm()/rallocx() share with posix_memalign(). The following code
violates strict aliasing rules:
foo_t *foo;
allocm((void **)&foo, NULL, 42, 0);
whereas the following is safe:
foo_t *foo;
void *p;
allocm(&p, NULL, 42, 0);
foo = (foo_t *)p;
mallocx() does not have this problem:
foo_t *foo = (foo_t *)mallocx(42, 0);
2013-12-13 14:35:52 +08:00
|
|
|
} else {
|
2017-03-21 02:00:07 +08:00
|
|
|
p = iralloct(tsd_tsdn(tsd), ptr, old_usize, size, alignment,
|
2018-04-24 09:07:40 +08:00
|
|
|
zero, tcache, arena, &hook_args);
|
2017-01-16 08:56:30 +08:00
|
|
|
if (unlikely(p == NULL)) {
|
Implement the *allocx() API.
Implement the *allocx() API, which is a successor to the *allocm() API.
The *allocx() functions are slightly simpler to use because they have
fewer parameters, they directly return the results of primary interest,
and mallocx()/rallocx() avoid the strict aliasing pitfall that
allocm()/rallocx() share with posix_memalign(). The following code
violates strict aliasing rules:
foo_t *foo;
allocm((void **)&foo, NULL, 42, 0);
whereas the following is safe:
foo_t *foo;
void *p;
allocm(&p, NULL, 42, 0);
foo = (foo_t *)p;
mallocx() does not have this problem:
foo_t *foo = (foo_t *)mallocx(42, 0);
2013-12-13 14:35:52 +08:00
|
|
|
goto label_oom;
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
2020-08-26 02:31:58 +08:00
|
|
|
assert(usize == isalloc(tsd_tsdn(tsd), p));
|
Implement the *allocx() API.
Implement the *allocx() API, which is a successor to the *allocm() API.
The *allocx() functions are slightly simpler to use because they have
fewer parameters, they directly return the results of primary interest,
and mallocx()/rallocx() avoid the strict aliasing pitfall that
allocm()/rallocx() share with posix_memalign(). The following code
violates strict aliasing rules:
foo_t *foo;
allocm((void **)&foo, NULL, 42, 0);
whereas the following is safe:
foo_t *foo;
void *p;
allocm(&p, NULL, 42, 0);
foo = (foo_t *)p;
mallocx() does not have this problem:
foo_t *foo = (foo_t *)mallocx(42, 0);
2013-12-13 14:35:52 +08:00
|
|
|
}
|
2015-06-23 09:48:58 +08:00
|
|
|
assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
|
2020-03-10 06:49:15 +08:00
|
|
|
thread_alloc_event(tsd, usize);
|
2020-01-29 13:12:06 +08:00
|
|
|
thread_dalloc_event(tsd, old_usize);
|
2019-08-23 06:56:47 +08:00
|
|
|
|
Implement the *allocx() API.
Implement the *allocx() API, which is a successor to the *allocm() API.
The *allocx() functions are slightly simpler to use because they have
fewer parameters, they directly return the results of primary interest,
and mallocx()/rallocx() avoid the strict aliasing pitfall that
allocm()/rallocx() share with posix_memalign(). The following code
violates strict aliasing rules:
foo_t *foo;
allocm((void **)&foo, NULL, 42, 0);
whereas the following is safe:
foo_t *foo;
void *p;
allocm(&p, NULL, 42, 0);
foo = (foo_t *)p;
mallocx() does not have this problem:
foo_t *foo = (foo_t *)mallocx(42, 0);
2013-12-13 14:35:52 +08:00
|
|
|
UTRACE(ptr, size, p);
|
2017-06-01 00:43:43 +08:00
|
|
|
check_entry_exit_locking(tsd_tsdn(tsd));
|
2017-07-20 09:05:28 +08:00
|
|
|
|
2020-04-30 00:05:57 +08:00
|
|
|
if (config_fill && unlikely(opt_junk_alloc) && usize > old_usize
|
|
|
|
&& !zero) {
|
2020-02-29 03:37:39 +08:00
|
|
|
size_t excess_len = usize - old_usize;
|
|
|
|
void *excess_start = (void *)((uintptr_t)p + old_usize);
|
2020-04-30 00:05:57 +08:00
|
|
|
junk_alloc_callback(excess_start, excess_len);
|
2020-02-29 03:37:39 +08:00
|
|
|
}
|
|
|
|
|
2017-01-20 10:15:45 +08:00
|
|
|
return p;
|
Implement the *allocx() API.
Implement the *allocx() API, which is a successor to the *allocm() API.
The *allocx() functions are slightly simpler to use because they have
fewer parameters, they directly return the results of primary interest,
and mallocx()/rallocx() avoid the strict aliasing pitfall that
allocm()/rallocx() share with posix_memalign(). The following code
violates strict aliasing rules:
foo_t *foo;
allocm((void **)&foo, NULL, 42, 0);
whereas the following is safe:
foo_t *foo;
void *p;
allocm(&p, NULL, 42, 0);
foo = (foo_t *)p;
mallocx() does not have this problem:
foo_t *foo = (foo_t *)mallocx(42, 0);
2013-12-13 14:35:52 +08:00
|
|
|
label_oom:
|
2014-09-12 07:20:44 +08:00
|
|
|
if (config_xmalloc && unlikely(opt_xmalloc)) {
|
Implement the *allocx() API.
Implement the *allocx() API, which is a successor to the *allocm() API.
The *allocx() functions are slightly simpler to use because they have
fewer parameters, they directly return the results of primary interest,
and mallocx()/rallocx() avoid the strict aliasing pitfall that
allocm()/rallocx() share with posix_memalign(). The following code
violates strict aliasing rules:
foo_t *foo;
allocm((void **)&foo, NULL, 42, 0);
whereas the following is safe:
foo_t *foo;
void *p;
allocm(&p, NULL, 42, 0);
foo = (foo_t *)p;
mallocx() does not have this problem:
foo_t *foo = (foo_t *)mallocx(42, 0);
2013-12-13 14:35:52 +08:00
|
|
|
malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
|
|
|
|
abort();
|
|
|
|
}
|
|
|
|
UTRACE(ptr, size, 0);
|
2017-06-01 00:43:43 +08:00
|
|
|
check_entry_exit_locking(tsd_tsdn(tsd));
|
2017-07-20 09:05:28 +08:00
|
|
|
|
2017-01-20 10:15:45 +08:00
|
|
|
return NULL;
|
2013-12-13 14:35:52 +08:00
|
|
|
}
|
|
|
|
|
2019-10-22 09:44:42 +08:00
|
|
|
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
|
|
|
|
void JEMALLOC_NOTHROW *
|
|
|
|
JEMALLOC_ALLOC_SIZE(2)
|
|
|
|
je_rallocx(void *ptr, size_t size, int flags) {
|
|
|
|
LOG("core.rallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr,
|
|
|
|
size, flags);
|
|
|
|
void *ret = do_rallocx(ptr, size, flags, false);
|
|
|
|
LOG("core.rallocx.exit", "result: %p", ret);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2019-09-24 08:56:19 +08:00
|
|
|
static void *
|
|
|
|
do_realloc_nonnull_zero(void *ptr) {
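/*
 * realloc(ptr, 0) with a non-NULL ptr: the behavior is selected by
 * opt_zero_realloc_action -- the "strict" action allocates a minimal
 * replacement object, the "free" action releases ptr and returns NULL, and
 * any other setting is treated as a safety-check failure.
 */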
|
2019-10-27 02:04:46 +08:00
|
|
|
if (config_stats) {
|
|
|
|
atomic_fetch_add_zu(&zero_realloc_count, 1, ATOMIC_RELAXED);
|
|
|
|
}
|
2019-09-24 08:56:19 +08:00
|
|
|
if (opt_zero_realloc_action == zero_realloc_action_strict) {
|
|
|
|
/*
|
|
|
|
* The user might have gotten a strict setting while expecting a
|
|
|
|
* free setting. If that's the case, we at least try to
|
|
|
|
* reduce the harm, and turn off the tcache while allocating, so
|
|
|
|
* that we'll get a true first fit.
|
|
|
|
*/
|
|
|
|
return do_rallocx(ptr, 1, MALLOCX_TCACHE_NONE, true);
|
|
|
|
} else if (opt_zero_realloc_action == zero_realloc_action_free) {
|
2019-10-22 09:44:42 +08:00
|
|
|
UTRACE(ptr, 0, 0);
|
|
|
|
tsd_t *tsd = tsd_fetch();
|
|
|
|
check_entry_exit_locking(tsd_tsdn(tsd));
|
|
|
|
|
2020-05-14 05:06:43 +08:00
|
|
|
tcache_t *tcache = tcache_get_from_ind(tsd,
|
|
|
|
TCACHE_IND_AUTOMATIC, /* slow */ true,
|
|
|
|
/* is_alloc */ false);
|
2019-09-24 08:56:19 +08:00
|
|
|
uintptr_t args[3] = {(uintptr_t)ptr, 0};
|
2019-10-22 09:44:42 +08:00
|
|
|
hook_invoke_dalloc(hook_dalloc_realloc, ptr, args);
|
|
|
|
ifree(tsd, ptr, tcache, true);
|
|
|
|
|
|
|
|
check_entry_exit_locking(tsd_tsdn(tsd));
|
|
|
|
return NULL;
|
2019-09-24 08:56:19 +08:00
|
|
|
} else {
|
|
|
|
safety_check_fail("Called realloc(non-null-ptr, 0) with "
|
|
|
|
"zero_realloc:abort set\n");
|
|
|
|
/*
 * In real code, this will never run; the safety check failure
|
|
|
|
* will call abort. In the unit test, we just want to bail out
|
|
|
|
* without corrupting internal state that the test needs to
|
|
|
|
* finish.
|
|
|
|
*/
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
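/*
 * realloc() dispatch: a non-NULL ptr with a non-zero size goes through
 * do_rallocx(); a non-NULL ptr with size == 0 is handled by
 * do_realloc_nonnull_zero() according to opt_zero_realloc_action; and a NULL
 * ptr degenerates to an ordinary malloc()-style allocation via imalloc().
 */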
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
|
|
|
|
void JEMALLOC_NOTHROW *
|
|
|
|
JEMALLOC_ALLOC_SIZE(2)
|
|
|
|
je_realloc(void *ptr, size_t size) {
|
|
|
|
LOG("core.realloc.entry", "ptr: %p, size: %zu\n", ptr, size);
|
|
|
|
|
|
|
|
if (likely(ptr != NULL && size != 0)) {
|
|
|
|
void *ret = do_rallocx(ptr, size, 0, true);
|
|
|
|
LOG("core.realloc.exit", "result: %p", ret);
|
|
|
|
return ret;
|
|
|
|
} else if (ptr != NULL && size == 0) {
|
|
|
|
void *ret = do_realloc_nonnull_zero(ptr);
|
|
|
|
LOG("core.realloc.exit", "result: %p", ret);
|
|
|
|
return ret;
|
2019-10-22 09:44:42 +08:00
|
|
|
} else {
|
|
|
|
/* realloc(NULL, size) is equivalent to malloc(size). */
|
|
|
|
void *ret;
|
|
|
|
|
|
|
|
static_opts_t sopts;
|
|
|
|
dynamic_opts_t dopts;
|
|
|
|
|
|
|
|
static_opts_init(&sopts);
|
|
|
|
dynamic_opts_init(&dopts);
|
|
|
|
|
|
|
|
sopts.null_out_result_on_error = true;
|
|
|
|
sopts.set_errno_on_error = true;
|
|
|
|
sopts.oom_string =
|
|
|
|
"<jemalloc>: Error in realloc(): out of memory\n";
|
|
|
|
|
|
|
|
dopts.result = &ret;
|
|
|
|
dopts.num_items = 1;
|
|
|
|
dopts.item_size = size;
|
|
|
|
|
|
|
|
imalloc(&sopts, &dopts);
|
|
|
|
if (sopts.slow) {
|
|
|
|
uintptr_t args[3] = {(uintptr_t)ptr, size};
|
|
|
|
hook_invoke_alloc(hook_alloc_realloc, ret,
|
|
|
|
(uintptr_t)ret, args);
|
|
|
|
}
|
|
|
|
LOG("core.realloc.exit", "result: %p", ret);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
JEMALLOC_ALWAYS_INLINE size_t
|
2017-03-21 02:00:07 +08:00
|
|
|
ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
|
|
|
|
size_t extra, size_t alignment, bool zero) {
|
2018-06-05 04:36:06 +08:00
|
|
|
size_t newsize;
|
2014-01-13 07:05:44 +08:00
|
|
|
|
2018-06-05 04:36:06 +08:00
|
|
|
if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero,
|
|
|
|
&newsize)) {
|
2017-01-20 10:15:45 +08:00
|
|
|
return old_usize;
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
2014-01-13 07:05:44 +08:00
|
|
|
|
2018-06-05 04:36:06 +08:00
|
|
|
return newsize;
|
2014-01-13 07:05:44 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static size_t
|
2017-03-21 02:00:07 +08:00
|
|
|
ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
|
|
|
|
size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx) {
|
2020-01-29 09:32:45 +08:00
|
|
|
/* Sampled allocation needs to be page aligned. */
|
|
|
|
if (tctx == NULL || !prof_sample_aligned(ptr)) {
|
2017-01-20 10:15:45 +08:00
|
|
|
return old_usize;
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
2014-01-13 07:05:44 +08:00
|
|
|
|
2020-01-29 09:32:45 +08:00
|
|
|
return ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment,
|
|
|
|
zero);
|
2014-01-13 07:05:44 +08:00
|
|
|
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
JEMALLOC_ALWAYS_INLINE size_t
|
2017-03-21 02:00:07 +08:00
|
|
|
ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
|
2020-02-07 05:45:04 +08:00
|
|
|
size_t extra, size_t alignment, bool zero, emap_alloc_ctx_t *alloc_ctx) {
|
2019-12-19 05:38:14 +08:00
|
|
|
/*
|
|
|
|
* old_prof_info is only used for asserting that the profiling info
|
|
|
|
* isn't changed by the ixalloc() call.
|
|
|
|
*/
|
2019-11-20 08:24:57 +08:00
|
|
|
prof_info_t old_prof_info;
|
2019-11-23 03:42:01 +08:00
|
|
|
prof_info_get(tsd, ptr, alloc_ctx, &old_prof_info);
|
2019-12-19 05:38:14 +08:00
|
|
|
|
2014-09-10 10:37:26 +08:00
|
|
|
/*
|
|
|
|
* usize isn't knowable before ixalloc() returns when extra is non-zero.
|
|
|
|
* Therefore, compute its maximum possible value and use that in
|
|
|
|
* prof_alloc_prep() to decide whether to capture a backtrace.
|
|
|
|
* prof_realloc() will use the actual usize to decide whether to sample.
|
|
|
|
*/
|
2019-11-20 08:24:57 +08:00
|
|
|
size_t usize_max;
|
2020-05-14 02:16:07 +08:00
|
|
|
if (aligned_usize_get(size + extra, alignment, &usize_max, NULL,
|
|
|
|
false)) {
|
|
|
|
/*
|
|
|
|
* usize_max is out of range, and chances are that allocation
|
|
|
|
* will fail, but use the maximum possible value and carry on
|
|
|
|
* with prof_alloc_prep(), just in case allocation succeeds.
|
|
|
|
*/
|
|
|
|
usize_max = SC_LARGE_MAXCLASS;
|
2016-02-26 08:42:15 +08:00
|
|
|
}
|
2019-11-20 08:24:57 +08:00
|
|
|
bool prof_active = prof_active_get_unlocked();
|
2020-03-11 05:21:05 +08:00
|
|
|
bool sample_event = te_prof_sample_event_lookahead(tsd, usize_max);
|
|
|
|
prof_tctx_t *tctx = prof_alloc_prep(tsd, prof_active, sample_event);
|
2016-02-26 08:42:15 +08:00
|
|
|
|
2019-11-20 08:24:57 +08:00
|
|
|
size_t usize;
|
2014-09-12 07:20:44 +08:00
|
|
|
if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
|
2017-03-21 02:00:07 +08:00
|
|
|
usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize,
|
|
|
|
size, extra, alignment, zero, tctx);
|
2014-01-13 07:05:44 +08:00
|
|
|
} else {
|
2017-03-21 02:00:07 +08:00
|
|
|
usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
|
|
|
|
extra, alignment, zero);
|
2014-01-13 07:05:44 +08:00
|
|
|
}
|
2019-12-19 05:38:14 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* At this point we can still safely get the original profiling
|
|
|
|
* information associated with the ptr, because (a) the edata_t object
|
|
|
|
* associated with the ptr still lives and (b) the profiling info
|
|
|
|
* fields are not touched. "(a)" is asserted in the outer je_xallocx()
|
|
|
|
* function, and "(b)" is indirectly verified below by checking that
|
|
|
|
* the alloc_tctx field is unchanged.
|
|
|
|
*/
|
|
|
|
prof_info_t prof_info;
|
2015-09-18 01:05:56 +08:00
|
|
|
if (usize == old_usize) {
|
2019-12-19 05:38:14 +08:00
|
|
|
prof_info_get(tsd, ptr, alloc_ctx, &prof_info);
|
2020-03-10 06:49:15 +08:00
|
|
|
prof_alloc_rollback(tsd, tctx);
|
2019-12-19 05:38:14 +08:00
|
|
|
} else {
|
|
|
|
prof_info_get_and_reset_recent(tsd, ptr, alloc_ctx, &prof_info);
|
2020-03-10 06:49:15 +08:00
|
|
|
assert(usize <= usize_max);
|
2020-03-11 05:21:05 +08:00
|
|
|
sample_event = te_prof_sample_event_lookahead(tsd, usize);
|
2020-01-10 02:20:34 +08:00
|
|
|
prof_realloc(tsd, ptr, size, usize, tctx, prof_active, ptr,
|
2020-03-11 05:21:05 +08:00
|
|
|
old_usize, &prof_info, sample_event);
|
2014-09-10 10:37:26 +08:00
|
|
|
}
|
2014-01-13 07:05:44 +08:00
|
|
|
|
2019-12-19 05:38:14 +08:00
|
|
|
assert(old_prof_info.alloc_tctx == prof_info.alloc_tctx);
|
2017-01-20 10:15:45 +08:00
|
|
|
return usize;
|
2014-01-13 07:05:44 +08:00
|
|
|
}
|
|
|
|
|
2015-07-21 23:10:38 +08:00
|
|
|
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
|
2017-01-16 08:56:30 +08:00
|
|
|
je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
|
2014-09-23 12:09:23 +08:00
|
|
|
tsd_t *tsd;
|
2013-12-16 08:21:30 +08:00
|
|
|
size_t usize, old_usize;
|
2014-09-08 05:40:19 +08:00
|
|
|
size_t alignment = MALLOCX_ALIGN_GET(flags);
|
2020-05-14 02:19:09 +08:00
|
|
|
bool zero = zero_get(MALLOCX_ZERO_GET(flags), /* slow */ true);
|
Add {,r,s,d}allocm().
Add allocm(), rallocm(), sallocm(), and dallocm(), which are a
functional superset of malloc(), calloc(), posix_memalign(),
malloc_usable_size(), and free().
2010-09-18 06:46:18 +08:00
|
|
|
|
2017-10-03 08:48:03 +08:00
|
|
|
LOG("core.xallocx.entry", "ptr: %p, size: %zu, extra: %zu, "
|
2017-07-20 09:05:28 +08:00
|
|
|
"flags: %d", ptr, size, extra, flags);
|
|
|
|
|
2010-09-18 06:46:18 +08:00
|
|
|
assert(ptr != NULL);
|
|
|
|
assert(size != 0);
|
|
|
|
assert(SIZE_T_MAX - size >= extra);
|
2015-01-21 07:37:51 +08:00
|
|
|
assert(malloc_initialized() || IS_INITIALIZER);
|
2014-10-05 02:12:53 +08:00
|
|
|
tsd = tsd_fetch();
|
2017-06-01 00:43:43 +08:00
|
|
|
check_entry_exit_locking(tsd_tsdn(tsd));
|
2010-09-18 06:46:18 +08:00
|
|
|
|
2019-12-19 05:38:14 +08:00
|
|
|
/*
|
|
|
|
* old_edata is only for verifying that xallocx() keeps the edata_t
|
|
|
|
* object associated with the ptr (though the content of the edata_t
|
|
|
|
* object can be changed).
|
|
|
|
*/
|
2020-03-15 01:49:34 +08:00
|
|
|
edata_t *old_edata = emap_edata_lookup(tsd_tsdn(tsd),
|
|
|
|
&arena_emap_global, ptr);
|
2019-12-19 05:38:14 +08:00
|
|
|
|
2020-02-07 05:45:04 +08:00
|
|
|
emap_alloc_ctx_t alloc_ctx;
|
2020-03-15 01:49:34 +08:00
|
|
|
emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr,
|
|
|
|
&alloc_ctx);
|
2017-12-15 04:46:39 +08:00
|
|
|
assert(alloc_ctx.szind != SC_NSIZES);
|
2017-05-31 01:45:37 +08:00
|
|
|
old_usize = sz_index2size(alloc_ctx.szind);
|
2017-04-12 09:13:10 +08:00
|
|
|
assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
|
2016-02-26 08:42:15 +08:00
|
|
|
/*
|
|
|
|
* The API explicitly absolves itself of protecting against (size +
|
|
|
|
* extra) numerical overflow, but we may need to clamp extra to avoid
|
2018-07-12 07:05:58 +08:00
|
|
|
* exceeding SC_LARGE_MAXCLASS.
|
2016-02-26 08:42:15 +08:00
|
|
|
*
|
|
|
|
* Ordinarily, size limit checking is handled deeper down, but here we
|
|
|
|
* have to check as part of (size + extra) clamping, since we need the
|
|
|
|
* clamped value in the above helper functions.
|
|
|
|
*/
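/* For example, with size == SC_LARGE_MAXCLASS - 4096 and extra == 16384,
 * extra is clamped to 4096 so that size + extra never exceeds
 * SC_LARGE_MAXCLASS. */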
|
2018-07-12 07:05:58 +08:00
|
|
|
if (unlikely(size > SC_LARGE_MAXCLASS)) {
|
2016-02-26 08:42:15 +08:00
|
|
|
usize = old_usize;
|
|
|
|
goto label_not_resized;
|
2015-09-16 05:39:58 +08:00
|
|
|
}
|
2018-07-12 07:05:58 +08:00
|
|
|
if (unlikely(SC_LARGE_MAXCLASS - size < extra)) {
|
|
|
|
extra = SC_LARGE_MAXCLASS - size;
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
2015-09-16 05:39:58 +08:00
|
|
|
|
2012-02-11 12:22:09 +08:00
|
|
|
if (config_prof && opt_prof) {
|
2017-03-21 02:00:07 +08:00
|
|
|
usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
|
2017-04-12 09:13:10 +08:00
|
|
|
alignment, zero, &alloc_ctx);
|
2012-02-11 12:22:09 +08:00
|
|
|
} else {
|
2017-03-21 02:00:07 +08:00
|
|
|
usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
|
|
|
|
extra, alignment, zero);
|
2010-09-18 06:46:18 +08:00
|
|
|
}
|
2019-12-19 05:38:14 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* xallocx() should keep using the same edata_t object (though its
|
|
|
|
* content can be changed).
|
|
|
|
*/
|
2020-03-15 01:49:34 +08:00
|
|
|
assert(emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr)
|
2020-02-07 05:45:04 +08:00
|
|
|
== old_edata);
|
2019-12-19 05:38:14 +08:00
|
|
|
|
2017-01-16 08:56:30 +08:00
|
|
|
if (unlikely(usize == old_usize)) {
|
2014-01-13 07:05:44 +08:00
|
|
|
goto label_not_resized;
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
2020-03-10 06:49:15 +08:00
|
|
|
thread_alloc_event(tsd, usize);
|
2020-01-29 13:12:06 +08:00
|
|
|
thread_dalloc_event(tsd, old_usize);
|
2020-02-29 03:37:39 +08:00
|
|
|
|
2020-04-30 00:05:57 +08:00
|
|
|
if (config_fill && unlikely(opt_junk_alloc) && usize > old_usize &&
|
|
|
|
!zero) {
|
|
|
|
size_t excess_len = usize - old_usize;
|
|
|
|
void *excess_start = (void *)((uintptr_t)ptr + old_usize);
|
|
|
|
junk_alloc_callback(excess_start, excess_len);
|
2020-02-29 03:37:39 +08:00
|
|
|
}
|
2014-01-13 07:05:44 +08:00
|
|
|
label_not_resized:
|
2018-04-20 07:19:38 +08:00
|
|
|
if (unlikely(!tsd_fast(tsd))) {
|
|
|
|
uintptr_t args[4] = {(uintptr_t)ptr, size, extra, flags};
|
|
|
|
hook_invoke_expand(hook_expand_xallocx, ptr, old_usize,
|
|
|
|
usize, (uintptr_t)usize, args);
|
|
|
|
}
|
|
|
|
|
2013-12-13 14:35:52 +08:00
|
|
|
UTRACE(ptr, size, ptr);
|
2017-06-01 00:43:43 +08:00
|
|
|
check_entry_exit_locking(tsd_tsdn(tsd));
|
2017-07-20 09:05:28 +08:00
|
|
|
|
2017-10-03 08:48:03 +08:00
|
|
|
LOG("core.xallocx.exit", "result: %zu", usize);
|
2017-01-20 10:15:45 +08:00
|
|
|
return usize;
|
2010-09-18 06:46:18 +08:00
|
|
|
}
|
|
|
|
|
2015-07-21 23:10:38 +08:00
|
|
|
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
|
|
|
|
JEMALLOC_ATTR(pure)
|
2018-05-03 17:40:53 +08:00
|
|
|
je_sallocx(const void *ptr, int flags) {
|
2013-12-13 14:35:52 +08:00
|
|
|
size_t usize;
|
2016-05-11 13:21:10 +08:00
|
|
|
tsdn_t *tsdn;
|
2010-09-18 06:46:18 +08:00
|
|
|
|
2017-10-03 08:48:03 +08:00
|
|
|
LOG("core.sallocx.entry", "ptr: %p, flags: %d", ptr, flags);
|
2017-07-20 09:05:28 +08:00
|
|
|
|
2015-01-21 07:37:51 +08:00
|
|
|
assert(malloc_initialized() || IS_INITIALIZER);
|
2017-04-22 02:00:36 +08:00
|
|
|
assert(ptr != NULL);
|
2010-09-18 06:46:18 +08:00
|
|
|
|
2016-05-11 13:21:10 +08:00
|
|
|
tsdn = tsdn_fetch();
|
2017-06-01 00:43:43 +08:00
|
|
|
check_entry_exit_locking(tsdn);
|
2016-04-14 14:36:15 +08:00
|
|
|
|
2017-04-22 02:00:36 +08:00
|
|
|
if (config_debug || force_ivsalloc) {
|
2016-05-28 15:17:28 +08:00
|
|
|
usize = ivsalloc(tsdn, ptr);
|
2017-04-22 02:00:36 +08:00
|
|
|
assert(force_ivsalloc || usize != 0);
|
2017-01-16 08:56:30 +08:00
|
|
|
} else {
|
2017-03-17 16:25:12 +08:00
|
|
|
usize = isalloc(tsdn, ptr);
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
2010-09-18 06:46:18 +08:00
|
|
|
|
2017-06-01 00:43:43 +08:00
|
|
|
check_entry_exit_locking(tsdn);
|
2017-07-20 09:05:28 +08:00
|
|
|
|
2017-10-03 08:48:03 +08:00
|
|
|
LOG("core.sallocx.exit", "result: %zu", usize);
|
2017-01-20 10:15:45 +08:00
|
|
|
return usize;
|
2010-09-18 06:46:18 +08:00
|
|
|
}
|
|
|
|
|
2015-07-21 23:10:38 +08:00
|
|
|
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
|
2017-01-16 08:56:30 +08:00
|
|
|
je_dallocx(void *ptr, int flags) {
|
2017-10-03 08:48:03 +08:00
|
|
|
LOG("core.dallocx.entry", "ptr: %p, flags: %d", ptr, flags);
|
2017-07-20 09:05:28 +08:00
|
|
|
|
2010-09-18 06:46:18 +08:00
|
|
|
assert(ptr != NULL);
|
2015-01-21 07:37:51 +08:00
|
|
|
assert(malloc_initialized() || IS_INITIALIZER);
|
2010-09-18 06:46:18 +08:00
|
|
|
|
2019-12-18 03:57:08 +08:00
|
|
|
tsd_t *tsd = tsd_fetch_min();
|
2017-04-12 14:13:45 +08:00
|
|
|
bool fast = tsd_fast(tsd);
|
2017-06-01 00:43:43 +08:00
|
|
|
check_entry_exit_locking(tsd_tsdn(tsd));
|
2017-04-13 07:16:27 +08:00
|
|
|
|
2020-05-14 05:06:43 +08:00
|
|
|
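/* Honor the tcache selection encoded in flags (MALLOCX_TCACHE() or
 * MALLOCX_TCACHE_NONE); otherwise use the thread's automatic tcache. */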
unsigned tcache_ind = mallocx_tcache_get(flags);
|
|
|
|
tcache_t *tcache = tcache_get_from_ind(tsd, tcache_ind, !fast,
|
|
|
|
/* is_alloc */ false);
|
2012-10-12 04:53:15 +08:00
|
|
|
|
2012-04-06 04:36:17 +08:00
|
|
|
UTRACE(ptr, 0, 0);
|
2017-04-12 14:13:45 +08:00
|
|
|
if (likely(fast)) {
|
|
|
|
tsd_assert_fast(tsd);
|
2016-05-07 03:16:00 +08:00
|
|
|
ifree(tsd, ptr, tcache, false);
|
2017-01-16 08:56:30 +08:00
|
|
|
} else {
|
2018-04-20 06:02:53 +08:00
|
|
|
uintptr_t args_raw[3] = {(uintptr_t)ptr, flags};
|
|
|
|
hook_invoke_dalloc(hook_dalloc_dallocx, ptr, args_raw);
|
2016-05-07 03:16:00 +08:00
|
|
|
ifree(tsd, ptr, tcache, true);
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
2017-06-01 00:43:43 +08:00
|
|
|
check_entry_exit_locking(tsd_tsdn(tsd));
|
2017-07-20 09:05:28 +08:00
|
|
|
|
2017-10-03 08:48:03 +08:00
|
|
|
LOG("core.dallocx.exit", "");
|
2012-03-01 04:56:37 +08:00
|
|
|
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
JEMALLOC_ALWAYS_INLINE size_t
|
2017-01-16 08:56:30 +08:00
|
|
|
inallocx(tsdn_t *tsdn, size_t size, int flags) {
|
2017-06-01 00:43:43 +08:00
|
|
|
check_entry_exit_locking(tsdn);
|
2017-04-13 07:16:27 +08:00
|
|
|
size_t usize;
|
2020-05-14 02:16:07 +08:00
|
|
|
/* In case of out of range, let the user see it rather than fail. */
|
|
|
|
aligned_usize_get(size, MALLOCX_ALIGN_GET(flags), &usize, NULL, false);
|
2017-06-01 00:43:43 +08:00
|
|
|
check_entry_exit_locking(tsdn);
|
2017-01-20 10:15:45 +08:00
|
|
|
return usize;
|
2014-09-10 01:29:26 +08:00
|
|
|
}
|
|
|
|
|
2018-10-19 04:14:04 +08:00
|
|
|
JEMALLOC_NOINLINE void
|
|
|
|
sdallocx_default(void *ptr, size_t size, int flags) {
|
2014-08-29 03:41:48 +08:00
|
|
|
assert(ptr != NULL);
|
2015-01-21 07:37:51 +08:00
|
|
|
assert(malloc_initialized() || IS_INITIALIZER);
|
2017-04-13 07:16:27 +08:00
|
|
|
|
2019-12-18 03:57:08 +08:00
|
|
|
tsd_t *tsd = tsd_fetch_min();
|
2017-04-12 14:13:45 +08:00
|
|
|
bool fast = tsd_fast(tsd);
|
2017-04-13 07:16:27 +08:00
|
|
|
size_t usize = inallocx(tsd_tsdn(tsd), size, flags);
|
2017-06-01 00:43:43 +08:00
|
|
|
check_entry_exit_locking(tsd_tsdn(tsd));
|
2017-04-13 07:16:27 +08:00
|
|
|
|
2020-05-14 05:06:43 +08:00
|
|
|
unsigned tcache_ind = mallocx_tcache_get(flags);
|
|
|
|
tcache_t *tcache = tcache_get_from_ind(tsd, tcache_ind, !fast,
|
|
|
|
/* is_alloc */ false);
|
2014-08-29 03:41:48 +08:00
|
|
|
|
|
|
|
UTRACE(ptr, 0, 0);
|
2017-04-12 14:13:45 +08:00
|
|
|
if (likely(fast)) {
|
|
|
|
tsd_assert_fast(tsd);
|
2017-03-17 17:45:12 +08:00
|
|
|
isfree(tsd, ptr, usize, tcache, false);
|
2017-01-16 08:56:30 +08:00
|
|
|
} else {
|
2018-04-20 06:02:53 +08:00
|
|
|
uintptr_t args_raw[3] = {(uintptr_t)ptr, size, flags};
|
|
|
|
hook_invoke_dalloc(hook_dalloc_sdallocx, ptr, args_raw);
|
2017-03-17 17:45:12 +08:00
|
|
|
isfree(tsd, ptr, usize, tcache, true);
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
2017-06-01 00:43:43 +08:00
|
|
|
check_entry_exit_locking(tsd_tsdn(tsd));
|
2018-10-19 04:14:04 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
|
|
|
|
je_sdallocx(void *ptr, size_t size, int flags) {
|
|
|
|
LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr,
|
|
|
|
size, flags);
|
|
|
|
|
2019-11-16 14:47:49 +08:00
|
|
|
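/* Take the size-aware fast path when possible; any non-zero flags (or a
 * fast-path miss) fall back to the full sdallocx_default() path. */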
if (flags != 0 || !free_fastpath(ptr, size, true)) {
|
2018-10-19 04:14:04 +08:00
|
|
|
sdallocx_default(ptr, size, flags);
|
|
|
|
}
|
|
|
|
|
2017-10-03 08:48:03 +08:00
|
|
|
LOG("core.sdallocx.exit", "");
|
2014-08-29 03:41:48 +08:00
|
|
|
}
|
|
|
|
|
2019-03-09 03:50:30 +08:00
|
|
|
void JEMALLOC_NOTHROW
|
|
|
|
je_sdallocx_noflags(void *ptr, size_t size) {
|
|
|
|
LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: 0", ptr,
|
|
|
|
size);
|
|
|
|
|
|
|
|
if (!free_fastpath(ptr, size, true)) {
|
|
|
|
sdallocx_default(ptr, size, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
LOG("core.sdallocx.exit", "");
|
|
|
|
}
|
|
|
|
|
2015-07-21 23:10:38 +08:00
|
|
|
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
|
|
|
|
JEMALLOC_ATTR(pure)
|
2017-01-16 08:56:30 +08:00
|
|
|
je_nallocx(size_t size, int flags) {
|
2016-02-26 07:29:49 +08:00
|
|
|
size_t usize;
|
2016-05-11 13:21:10 +08:00
|
|
|
tsdn_t *tsdn;
|
2012-03-01 04:56:37 +08:00
|
|
|
|
|
|
|
assert(size != 0);
|
|
|
|
|
2017-01-16 08:56:30 +08:00
|
|
|
if (unlikely(malloc_init())) {
|
2017-10-03 08:48:03 +08:00
|
|
|
LOG("core.nallocx.exit", "result: %zu", ZU(0));
|
2017-01-20 10:15:45 +08:00
|
|
|
return 0;
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
2012-03-01 04:56:37 +08:00
|
|
|
|
2016-05-11 13:21:10 +08:00
|
|
|
tsdn = tsdn_fetch();
|
2017-06-01 00:43:43 +08:00
|
|
|
check_entry_exit_locking(tsdn);
|
2016-04-14 14:36:15 +08:00
|
|
|
|
2016-05-11 13:21:10 +08:00
|
|
|
usize = inallocx(tsdn, size, flags);
|
2018-07-12 07:05:58 +08:00
|
|
|
if (unlikely(usize > SC_LARGE_MAXCLASS)) {
|
2017-10-03 08:48:03 +08:00
|
|
|
LOG("core.nallocx.exit", "result: %zu", ZU(0));
|
2017-01-20 10:15:45 +08:00
|
|
|
return 0;
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
2016-02-26 07:29:49 +08:00
|
|
|
|
2017-06-01 00:43:43 +08:00
|
|
|
check_entry_exit_locking(tsdn);
|
2017-10-03 08:48:03 +08:00
|
|
|
LOG("core.nallocx.exit", "result: %zu", usize);
|
2017-01-20 10:15:45 +08:00
|
|
|
return usize;
|
2013-12-13 14:35:52 +08:00
|
|
|
}
|
|
|
|
|
2015-07-21 23:10:38 +08:00
|
|
|
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
|
2013-12-13 14:35:52 +08:00
|
|
|
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
|
2017-01-16 08:56:30 +08:00
|
|
|
size_t newlen) {
|
2016-04-14 14:36:15 +08:00
|
|
|
int ret;
|
|
|
|
tsd_t *tsd;
|
2013-12-13 14:35:52 +08:00
|
|
|
|
2017-10-03 08:48:03 +08:00
|
|
|
LOG("core.mallctl.entry", "name: %s", name);
|
2017-07-20 09:05:28 +08:00
|
|
|
|
2017-01-16 08:56:30 +08:00
|
|
|
if (unlikely(malloc_init())) {
|
2017-10-03 08:48:03 +08:00
|
|
|
LOG("core.mallctl.exit", "result: %d", EAGAIN);
|
2017-01-20 10:15:45 +08:00
|
|
|
return EAGAIN;
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
2013-12-13 14:35:52 +08:00
|
|
|
|
2016-04-14 14:36:15 +08:00
|
|
|
tsd = tsd_fetch();
|
2017-06-01 00:43:43 +08:00
|
|
|
check_entry_exit_locking(tsd_tsdn(tsd));
|
2016-04-14 14:36:15 +08:00
|
|
|
ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen);
|
2017-06-01 00:43:43 +08:00
|
|
|
check_entry_exit_locking(tsd_tsdn(tsd));
|
2017-07-22 04:34:45 +08:00
|
|
|
|
2017-10-03 08:48:03 +08:00
|
|
|
LOG("core.mallctl.exit", "result: %d", ret);
|
2017-01-20 10:15:45 +08:00
|
|
|
return ret;
|
2013-12-13 14:35:52 +08:00
|
|
|
}
|
|
|
|
|
2015-07-21 23:10:38 +08:00
|
|
|
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
|
2017-01-16 08:56:30 +08:00
|
|
|
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) {
|
2016-04-14 14:36:15 +08:00
|
|
|
int ret;
|
2013-12-13 14:35:52 +08:00
|
|
|
|
2017-10-03 08:48:03 +08:00
|
|
|
LOG("core.mallctlnametomib.entry", "name: %s", name);
|
2017-07-20 09:05:28 +08:00
|
|
|
|
2017-01-16 08:56:30 +08:00
|
|
|
if (unlikely(malloc_init())) {
|
2017-10-03 08:48:03 +08:00
|
|
|
LOG("core.mallctlnametomib.exit", "result: %d", EAGAIN);
|
2017-01-20 10:15:45 +08:00
|
|
|
return EAGAIN;
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
2013-12-13 14:35:52 +08:00
|
|
|
|
2017-06-23 09:58:40 +08:00
|
|
|
tsd_t *tsd = tsd_fetch();
|
|
|
|
check_entry_exit_locking(tsd_tsdn(tsd));
|
|
|
|
ret = ctl_nametomib(tsd, name, mibp, miblenp);
|
|
|
|
check_entry_exit_locking(tsd_tsdn(tsd));
|
2017-07-20 09:05:28 +08:00
|
|
|
|
2017-10-03 08:48:03 +08:00
|
|
|
LOG("core.mallctlnametomib.exit", "result: %d", ret);
|
2017-01-20 10:15:45 +08:00
|
|
|
return ret;
|
2013-12-13 14:35:52 +08:00
|
|
|
}
|
|
|
|
|
2015-07-21 23:10:38 +08:00
|
|
|
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
|
2013-12-13 14:35:52 +08:00
|
|
|
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
|
2017-01-16 08:56:30 +08:00
|
|
|
void *newp, size_t newlen) {
|
2016-04-14 14:36:15 +08:00
|
|
|
int ret;
|
|
|
|
tsd_t *tsd;
|
2013-12-13 14:35:52 +08:00
|
|
|
|
2017-10-03 08:48:03 +08:00
|
|
|
LOG("core.mallctlbymib.entry", "");
|
2017-07-20 09:05:28 +08:00
|
|
|
|
2017-01-16 08:56:30 +08:00
|
|
|
if (unlikely(malloc_init())) {
|
2017-10-03 08:48:03 +08:00
|
|
|
LOG("core.mallctlbymib.exit", "result: %d", EAGAIN);
|
2017-01-20 10:15:45 +08:00
|
|
|
return EAGAIN;
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
2013-12-13 14:35:52 +08:00
|
|
|
|
2016-04-14 14:36:15 +08:00
|
|
|
tsd = tsd_fetch();
|
2017-06-01 00:43:43 +08:00
|
|
|
check_entry_exit_locking(tsd_tsdn(tsd));
|
2016-04-14 14:36:15 +08:00
|
|
|
ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
|
2017-06-01 00:43:43 +08:00
|
|
|
check_entry_exit_locking(tsd_tsdn(tsd));
|
2017-10-03 08:48:03 +08:00
|
|
|
LOG("core.mallctlbymib.exit", "result: %d", ret);
|
2017-01-20 10:15:45 +08:00
|
|
|
return ret;
|
2013-12-13 14:35:52 +08:00
|
|
|
}
|
|
|
|
|
2019-07-19 01:10:45 +08:00
|
|
|
#define STATS_PRINT_BUFSIZE 65536
|
2015-07-21 23:10:38 +08:00
|
|
|
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
|
2013-12-13 14:35:52 +08:00
|
|
|
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
|
2017-01-16 08:56:30 +08:00
|
|
|
const char *opts) {
|
2016-05-11 13:21:10 +08:00
|
|
|
tsdn_t *tsdn;
|
2013-12-13 14:35:52 +08:00
|
|
|
|
2017-10-03 08:48:03 +08:00
|
|
|
LOG("core.malloc_stats_print.entry", "");
|
2017-07-20 09:05:28 +08:00
|
|
|
|
2016-05-11 13:21:10 +08:00
|
|
|
tsdn = tsdn_fetch();
|
2017-06-01 00:43:43 +08:00
|
|
|
check_entry_exit_locking(tsdn);
|
2019-07-19 01:10:45 +08:00
|
|
|
|
|
|
|
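/* Debug builds print unbuffered; otherwise output is accumulated in a
 * STATS_PRINT_BUFSIZE buffer via buf_writer and flushed on terminate,
 * reducing the number of write_cb invocations. */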
if (config_debug) {
|
|
|
|
stats_print(write_cb, cbopaque, opts);
|
|
|
|
} else {
|
2020-02-04 07:56:13 +08:00
|
|
|
buf_writer_t buf_writer;
|
|
|
|
buf_writer_init(tsdn, &buf_writer, write_cb, cbopaque, NULL,
|
|
|
|
STATS_PRINT_BUFSIZE);
|
2020-03-21 01:48:55 +08:00
|
|
|
stats_print(buf_writer_cb, &buf_writer, opts);
|
2020-02-04 07:56:13 +08:00
|
|
|
buf_writer_terminate(tsdn, &buf_writer);
|
2019-07-19 01:10:45 +08:00
|
|
|
}
|
|
|
|
|
2017-06-01 00:43:43 +08:00
|
|
|
check_entry_exit_locking(tsdn);
|
2017-10-03 08:48:03 +08:00
|
|
|
LOG("core.malloc_stats_print.exit", "");
|
2013-12-13 14:35:52 +08:00
|
|
|
}
|
2019-07-19 01:10:45 +08:00
|
|
|
#undef STATS_PRINT_BUFSIZE
|
2013-12-13 14:35:52 +08:00
|
|
|
|
2015-07-21 23:10:38 +08:00
|
|
|
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
|
2017-01-16 08:56:30 +08:00
|
|
|
je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) {
|
2013-12-13 14:35:52 +08:00
|
|
|
size_t ret;
|
2016-05-11 13:21:10 +08:00
|
|
|
tsdn_t *tsdn;
|
2013-12-13 14:35:52 +08:00
|
|
|
|
2017-10-03 08:48:03 +08:00
|
|
|
LOG("core.malloc_usable_size.entry", "ptr: %p", ptr);
|
2017-07-20 09:05:28 +08:00
|
|
|
|
2015-01-21 07:37:51 +08:00
|
|
|
assert(malloc_initialized() || IS_INITIALIZER);
|
2013-12-13 14:35:52 +08:00
|
|
|
|
2016-05-11 13:21:10 +08:00
|
|
|
tsdn = tsdn_fetch();
|
2017-06-01 00:43:43 +08:00
|
|
|
check_entry_exit_locking(tsdn);
|
2016-04-14 14:36:15 +08:00
|
|
|
|
2017-04-22 02:00:36 +08:00
|
|
|
if (unlikely(ptr == NULL)) {
|
|
|
|
ret = 0;
|
2017-01-16 08:56:30 +08:00
|
|
|
} else {
|
2017-04-22 02:00:36 +08:00
|
|
|
if (config_debug || force_ivsalloc) {
|
|
|
|
ret = ivsalloc(tsdn, ptr);
|
|
|
|
assert(force_ivsalloc || ret != 0);
|
|
|
|
} else {
|
|
|
|
ret = isalloc(tsdn, ptr);
|
|
|
|
}
|
2016-03-24 11:29:33 +08:00
|
|
|
}
|
2013-12-13 14:35:52 +08:00
|
|
|
|
2017-06-01 00:43:43 +08:00
|
|
|
check_entry_exit_locking(tsdn);
|
2017-10-03 08:48:03 +08:00
|
|
|
LOG("core.malloc_usable_size.exit", "result: %zu", ret);
|
2017-01-20 10:15:45 +08:00
|
|
|
return ret;
|
2013-12-13 14:35:52 +08:00
|
|
|
}
|
|
|
|
|
2020-04-24 06:46:45 +08:00
|
|
|
static void
|
|
|
|
batch_alloc_prof_sample_assert(tsd_t *tsd, size_t batch, size_t usize) {
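/*
 * Sanity check: a batch of `batch` allocations of usize bytes must not
 * trigger a prof sample event, while allocating one more usize-sized object
 * on top of that would.
 */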
|
|
|
|
assert(config_prof && opt_prof);
|
|
|
|
bool prof_sample_event = te_prof_sample_event_lookahead(tsd,
|
|
|
|
batch * usize);
|
|
|
|
assert(!prof_sample_event);
|
|
|
|
size_t surplus;
|
|
|
|
prof_sample_event = te_prof_sample_event_lookahead_surplus(tsd,
|
|
|
|
(batch + 1) * usize, &surplus);
|
|
|
|
assert(prof_sample_event);
|
|
|
|
assert(surplus < usize);
|
|
|
|
}
|
|
|
|
|
|
|
|
size_t
|
|
|
|
batch_alloc(void **ptrs, size_t num, size_t size, int flags) {
|
|
|
|
LOG("core.batch_alloc.entry",
|
|
|
|
"ptrs: %p, num: %zu, size: %zu, flags: %d", ptrs, num, size, flags);
|
|
|
|
|
|
|
|
tsd_t *tsd = tsd_fetch();
|
|
|
|
check_entry_exit_locking(tsd_tsdn(tsd));
|
|
|
|
|
|
|
|
size_t filled = 0;
|
|
|
|
|
|
|
|
if (unlikely(tsd == NULL || tsd_reentrancy_level_get(tsd) > 0)) {
|
|
|
|
goto label_done;
|
|
|
|
}
|
|
|
|
|
|
|
|
size_t alignment = MALLOCX_ALIGN_GET(flags);
|
|
|
|
size_t usize;
|
|
|
|
if (aligned_usize_get(size, alignment, &usize, NULL, false)) {
|
|
|
|
goto label_done;
|
|
|
|
}
|
|
|
|
szind_t ind = sz_size2index(usize);
|
|
|
|
bool zero = zero_get(MALLOCX_ZERO_GET(flags), /* slow */ true);
|
|
|
|
|
2020-11-14 09:15:35 +08:00
|
|
|
/*
|
|
|
|
* The cache bin and arena will be lazily initialized; it's hard to
|
|
|
|
* know in advance whether each of them needs to be initialized.
|
|
|
|
*/
|
2020-11-13 06:54:25 +08:00
|
|
|
cache_bin_t *bin = NULL;
|
|
|
|
arena_t *arena = NULL;
|
2020-11-14 09:15:35 +08:00
|
|
|
|
2020-11-13 06:54:25 +08:00
|
|
|
size_t nregs = 0;
|
|
|
|
if (likely(ind < SC_NBINS)) {
|
|
|
|
nregs = bin_infos[ind].nregs;
|
|
|
|
assert(nregs > 0);
|
2020-04-24 06:46:45 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
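/*
 * Fill strategy, per iteration: for small size classes, grab whole fresh
 * slabs directly from the arena while the remaining batch covers at least
 * one slab's worth of regions; then harvest whatever the tcache bin can
 * supply; finally fall back to a single je_mallocx() call, which also
 * refills the tcache for the next iteration.
 */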
while (filled < num) {
|
|
|
|
size_t batch = num - filled;
|
|
|
|
size_t surplus = SIZE_MAX; /* Dead store. */
|
|
|
|
bool prof_sample_event = config_prof && opt_prof
|
|
|
|
&& te_prof_sample_event_lookahead_surplus(tsd,
|
|
|
|
batch * usize, &surplus);
|
|
|
|
|
|
|
|
if (prof_sample_event) {
|
|
|
|
/*
|
|
|
|
* Adjust so that the batch does not trigger prof
|
|
|
|
* sampling.
|
|
|
|
*/
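/* E.g. usize == 4096 and surplus == 10000 shrink the batch by 3
 * (10000 / 4096 + 1), so the reduced batch no longer crosses the
 * sample threshold. */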
|
|
|
|
batch -= surplus / usize + 1;
|
|
|
|
batch_alloc_prof_sample_assert(tsd, batch, usize);
|
|
|
|
}
|
|
|
|
|
2020-11-13 06:54:25 +08:00
|
|
|
size_t progress = 0;
|
|
|
|
|
|
|
|
if (likely(ind < SC_NBINS) && batch >= nregs) {
|
|
|
|
if (arena == NULL) {
|
|
|
|
unsigned arena_ind = mallocx_arena_get(flags);
|
|
|
|
if (arena_get_from_ind(tsd, arena_ind,
|
|
|
|
&arena)) {
|
|
|
|
goto label_done;
|
|
|
|
}
|
|
|
|
if (arena == NULL) {
|
|
|
|
arena = arena_choose(tsd, NULL);
|
|
|
|
}
|
|
|
|
if (unlikely(arena == NULL)) {
|
|
|
|
goto label_done;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
size_t arena_batch = batch - batch % nregs;
|
|
|
|
size_t n = arena_fill_small_fresh(tsd_tsdn(tsd), arena,
|
|
|
|
ind, ptrs + filled, arena_batch, zero);
|
|
|
|
progress += n;
|
|
|
|
filled += n;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (likely(ind < nhbins) && progress < batch) {
|
|
|
|
if (bin == NULL) {
|
|
|
|
unsigned tcache_ind = mallocx_tcache_get(flags);
|
|
|
|
tcache_t *tcache = tcache_get_from_ind(tsd,
|
|
|
|
tcache_ind, /* slow */ true,
|
|
|
|
/* is_alloc */ true);
|
|
|
|
if (tcache != NULL) {
|
|
|
|
bin = &tcache->bins[ind];
|
|
|
|
}
|
|
|
|
}
|
2020-11-14 09:15:35 +08:00
|
|
|
/*
|
|
|
|
* If we don't have a tcache bin, we don't want to
|
|
|
|
* immediately give up, because there's the possibility
|
|
|
|
* that the user explicitly requested to bypass the
|
|
|
|
* tcache, or that the user explicitly turned off the
|
|
|
|
* tcache; in such cases, we go through the slow path,
|
|
|
|
* i.e. the mallocx() call at the end of the while loop.
|
|
|
|
*/
|
2020-11-13 06:54:25 +08:00
|
|
|
if (bin != NULL) {
|
|
|
|
size_t bin_batch = batch - progress;
|
2020-11-14 09:15:35 +08:00
|
|
|
/*
|
|
|
|
* n can be less than bin_batch, meaning that
|
|
|
|
* the cache bin does not have enough memory.
|
|
|
|
* In such cases, we rely on the slow path,
|
|
|
|
* i.e. the mallocx() call at the end of the
|
|
|
|
* while loop, to fill in the cache, and in the
|
|
|
|
* next iteration of the while loop, the tcache
|
|
|
|
* will contain a lot of memory, and we can
|
|
|
|
* harvest them here. Compared to the
|
|
|
|
* alternative approach where we directly go to
|
|
|
|
* the arena bins here, the overhead of our
|
|
|
|
* current approach should usually be minimal,
|
|
|
|
* since we never try to fetch more memory than
|
|
|
|
* what a slab contains via the tcache. An
|
|
|
|
* additional benefit is that the tcache will
|
|
|
|
* not be empty for the next allocation request.
|
|
|
|
*/
|
2020-11-13 06:54:25 +08:00
|
|
|
size_t n = cache_bin_alloc_batch(bin, bin_batch,
|
|
|
|
ptrs + filled);
|
|
|
|
if (config_stats) {
|
|
|
|
bin->tstats.nrequests += n;
|
|
|
|
}
|
|
|
|
if (zero) {
|
|
|
|
for (size_t i = 0; i < n; ++i) {
|
|
|
|
memset(ptrs[filled + i], 0,
|
|
|
|
usize);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (config_prof && opt_prof
|
|
|
|
&& unlikely(ind >= SC_NBINS)) {
|
|
|
|
for (size_t i = 0; i < n; ++i) {
|
|
|
|
prof_tctx_reset_sampled(tsd,
|
|
|
|
ptrs[filled + i]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
progress += n;
|
|
|
|
filled += n;
|
|
|
|
}
|
|
|
|
}
|
2020-04-24 06:46:45 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* For thread events other than prof sampling, trigger them as
|
|
|
|
* if there's a single allocation of size (n * usize). This is
|
|
|
|
* fine because:
|
|
|
|
* (a) these events do not alter the allocation itself, and
|
|
|
|
* (b) it's possible that some event would have been triggered
|
|
|
|
* multiple times, instead of only once, if the allocations
|
|
|
|
* were handled individually, but it would do no harm (or
|
|
|
|
* even be beneficial) to coalesce the triggerings.
|
|
|
|
*/
|
2020-11-13 06:54:25 +08:00
|
|
|
thread_alloc_event(tsd, progress * usize);
|
2020-04-24 06:46:45 +08:00
|
|
|
|
2020-11-13 06:54:25 +08:00
|
|
|
if (progress < batch || prof_sample_event) {
|
2020-04-24 06:46:45 +08:00
|
|
|
void *p = je_mallocx(size, flags);
|
|
|
|
if (p == NULL) { /* OOM */
|
|
|
|
break;
|
|
|
|
}
|
2020-11-13 06:54:25 +08:00
|
|
|
if (progress == batch) {
|
|
|
|
assert(prof_sampled(tsd, p));
|
|
|
|
}
|
2020-04-24 06:46:45 +08:00
|
|
|
ptrs[filled++] = p;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
label_done:
|
|
|
|
check_entry_exit_locking(tsd_tsdn(tsd));
|
|
|
|
LOG("core.batch_alloc.exit", "result: %zu", filled);
|
|
|
|
return filled;
|
|
|
|
}
|
|
|
|
|
2013-12-13 14:35:52 +08:00
|
|
|
/*
|
|
|
|
* End non-standard functions.
|
|
|
|
*/
|
|
|
|
/******************************************************************************/
|
2009-06-23 03:08:42 +08:00
|
|
|
/*
|
|
|
|
* The following functions are used by threading libraries for protection of
|
2010-09-21 02:24:24 +08:00
|
|
|
* malloc during fork().
|
2009-06-23 03:08:42 +08:00
|
|
|
*/
|
|
|
|
|
2012-10-10 05:46:22 +08:00
|
|
|
/*
|
|
|
|
* If an application creates a thread before doing any allocation in the main
|
|
|
|
* thread, then calls fork(2) in the main thread followed by memory allocation
|
|
|
|
* in the child process, a race can occur that results in deadlock within the
|
|
|
|
* child: the main thread may have forked while the created thread had
|
|
|
|
* partially initialized the allocator. Ordinarily jemalloc prevents
|
|
|
|
* fork/malloc races via the following functions it registers during
|
|
|
|
* initialization using pthread_atfork(), but of course that does no good if
|
|
|
|
* the allocator isn't fully initialized at fork time. The following library
|
2014-10-11 09:19:20 +08:00
|
|
|
* constructor is a partial solution to this problem. It may still be possible
|
|
|
|
* to trigger the deadlock described above, but doing so would involve forking
|
|
|
|
* via a library constructor that runs before jemalloc's constructor runs.
|
2012-10-10 05:46:22 +08:00
|
|
|
*/
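/*
 * Illustration only (not part of jemalloc): a minimal sketch of the
 * pthread_atfork() protocol that the prefork/postfork functions below
 * implement for every allocator mutex.  The names here are hypothetical.
 */
#if 0
#include <pthread.h>

static pthread_mutex_t example_lock = PTHREAD_MUTEX_INITIALIZER;

static void example_prefork(void)         { pthread_mutex_lock(&example_lock); }
static void example_postfork_parent(void) { pthread_mutex_unlock(&example_lock); }
static void example_postfork_child(void)  { pthread_mutex_unlock(&example_lock); }

JEMALLOC_ATTR(constructor)
static void
example_register_fork_handlers(void) {
	pthread_atfork(example_prefork, example_postfork_parent,
	    example_postfork_child);
}
#endif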
|
2016-05-08 03:42:31 +08:00
|
|
|
#ifndef JEMALLOC_JET
|
2012-10-10 05:46:22 +08:00
|
|
|
JEMALLOC_ATTR(constructor)
|
|
|
|
static void
|
2017-01-16 08:56:30 +08:00
|
|
|
jemalloc_constructor(void) {
|
2012-10-10 05:46:22 +08:00
|
|
|
malloc_init();
|
|
|
|
}
|
2016-05-08 03:42:31 +08:00
|
|
|
#endif
|
2012-10-10 05:46:22 +08:00
|
|
|
|
2012-02-03 14:04:57 +08:00
|
|
|
#ifndef JEMALLOC_MUTEX_INIT_CB
|
2010-09-06 01:35:13 +08:00
|
|
|
void
|
2009-06-23 08:44:33 +08:00
|
|
|
jemalloc_prefork(void)
|
2012-02-03 14:04:57 +08:00
|
|
|
#else
|
2012-04-30 18:38:29 +08:00
|
|
|
JEMALLOC_EXPORT void
|
2012-02-03 14:04:57 +08:00
|
|
|
_malloc_prefork(void)
|
|
|
|
#endif
|
2009-06-23 03:08:42 +08:00
|
|
|
{
|
2016-04-14 14:36:15 +08:00
|
|
|
tsd_t *tsd;
|
2016-04-26 14:14:40 +08:00
|
|
|
unsigned i, j, narenas;
|
|
|
|
arena_t *arena;
|
2009-06-23 03:08:42 +08:00
|
|
|
|
2012-05-12 08:40:16 +08:00
|
|
|
#ifdef JEMALLOC_MUTEX_INIT_CB
|
2017-01-16 08:56:30 +08:00
|
|
|
if (!malloc_initialized()) {
|
2012-05-12 08:40:16 +08:00
|
|
|
return;
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
2012-05-12 08:40:16 +08:00
|
|
|
#endif
|
2015-01-21 07:37:51 +08:00
|
|
|
assert(malloc_initialized());
|
2012-05-12 08:40:16 +08:00
|
|
|
|
2016-04-14 14:36:15 +08:00
|
|
|
tsd = tsd_fetch();
|
|
|
|
|
2016-04-26 14:14:40 +08:00
|
|
|
narenas = narenas_total_get();
|
|
|
|
|
2017-05-23 10:32:04 +08:00
|
|
|
witness_prefork(tsd_witness_tsdp_get(tsd));
|
2016-04-27 01:47:22 +08:00
|
|
|
/* Acquire all mutexes in a safe order. */
|
2016-05-11 13:21:10 +08:00
|
|
|
ctl_prefork(tsd_tsdn(tsd));
|
2017-01-30 13:32:39 +08:00
|
|
|
tcache_prefork(tsd_tsdn(tsd));
|
2016-05-11 13:21:10 +08:00
|
|
|
malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock);
|
2017-03-18 03:42:33 +08:00
|
|
|
if (have_background_thread) {
|
|
|
|
background_thread_prefork0(tsd_tsdn(tsd));
|
|
|
|
}
|
2016-05-11 13:21:10 +08:00
|
|
|
prof_prefork0(tsd_tsdn(tsd));
|
2017-03-18 03:42:33 +08:00
|
|
|
if (have_background_thread) {
|
|
|
|
background_thread_prefork1(tsd_tsdn(tsd));
|
|
|
|
}
|
2017-03-09 05:00:42 +08:00
|
|
|
/* Break arena prefork into stages to preserve lock order. */
|
2020-10-17 04:14:59 +08:00
|
|
|
for (i = 0; i < 9; i++) {
|
2016-04-26 14:14:40 +08:00
|
|
|
for (j = 0; j < narenas; j++) {
|
2016-05-11 13:21:10 +08:00
|
|
|
if ((arena = arena_get(tsd_tsdn(tsd), j, false)) !=
|
|
|
|
NULL) {
|
2016-04-26 14:14:40 +08:00
|
|
|
switch (i) {
|
2016-05-11 13:21:10 +08:00
|
|
|
case 0:
|
|
|
|
arena_prefork0(tsd_tsdn(tsd), arena);
|
|
|
|
break;
|
|
|
|
case 1:
|
|
|
|
arena_prefork1(tsd_tsdn(tsd), arena);
|
|
|
|
break;
|
|
|
|
case 2:
|
|
|
|
arena_prefork2(tsd_tsdn(tsd), arena);
|
|
|
|
break;
|
2017-03-09 05:00:42 +08:00
|
|
|
case 3:
|
|
|
|
arena_prefork3(tsd_tsdn(tsd), arena);
|
|
|
|
break;
|
|
|
|
case 4:
|
|
|
|
arena_prefork4(tsd_tsdn(tsd), arena);
|
|
|
|
break;
|
|
|
|
case 5:
|
|
|
|
arena_prefork5(tsd_tsdn(tsd), arena);
|
|
|
|
break;
|
|
|
|
case 6:
|
|
|
|
arena_prefork6(tsd_tsdn(tsd), arena);
|
|
|
|
break;
|
2017-06-30 07:01:35 +08:00
|
|
|
case 7:
|
|
|
|
arena_prefork7(tsd_tsdn(tsd), arena);
|
|
|
|
break;
|
2020-10-17 04:14:59 +08:00
|
|
|
case 8:
|
|
|
|
arena_prefork8(tsd_tsdn(tsd), arena);
|
|
|
|
break;
|
2016-04-26 14:14:40 +08:00
|
|
|
default: not_reached();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2020-08-15 04:36:41 +08:00
|
|
|
|
2010-01-25 09:56:48 +08:00
|
|
|
}
|
2016-05-11 13:21:10 +08:00
|
|
|
prof_prefork1(tsd_tsdn(tsd));
|
2020-04-16 06:09:32 +08:00
|
|
|
stats_prefork(tsd_tsdn(tsd));
|
2018-07-27 05:42:37 +08:00
|
|
|
tsd_prefork(tsd);
|
2009-06-23 03:08:42 +08:00
|
|
|
}
|
|
|
|
|
2012-02-03 14:04:57 +08:00
|
|
|
#ifndef JEMALLOC_MUTEX_INIT_CB
|
2010-09-06 01:35:13 +08:00
|
|
|
void
|
2012-03-14 07:31:41 +08:00
|
|
|
jemalloc_postfork_parent(void)
|
2012-02-03 14:04:57 +08:00
|
|
|
#else
|
2012-04-30 18:38:29 +08:00
|
|
|
JEMALLOC_EXPORT void
|
2012-02-03 14:04:57 +08:00
|
|
|
_malloc_postfork(void)
|
|
|
|
#endif
|
2009-06-23 03:08:42 +08:00
|
|
|
{
|
2016-04-14 14:36:15 +08:00
|
|
|
tsd_t *tsd;
|
2016-02-25 15:58:10 +08:00
|
|
|
unsigned i, narenas;
|
2009-06-23 03:08:42 +08:00
|
|
|
|
2012-05-12 08:40:16 +08:00
|
|
|
#ifdef JEMALLOC_MUTEX_INIT_CB
|
2017-01-16 08:56:30 +08:00
|
|
|
if (!malloc_initialized()) {
|
2012-05-12 08:40:16 +08:00
|
|
|
return;
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
2012-05-12 08:40:16 +08:00
|
|
|
#endif
|
2015-01-21 07:37:51 +08:00
|
|
|
assert(malloc_initialized());
|
2012-05-12 08:40:16 +08:00
|
|
|
|
2016-04-14 14:36:15 +08:00
|
|
|
tsd = tsd_fetch();
|
|
|
|
|
2018-07-27 05:42:37 +08:00
|
|
|
tsd_postfork_parent(tsd);
|
|
|
|
|
2017-05-23 10:32:04 +08:00
|
|
|
witness_postfork_parent(tsd_witness_tsdp_get(tsd));
|
2009-06-23 03:08:42 +08:00
|
|
|
/* Release all mutexes, now that fork() has completed. */
|
2020-04-16 06:09:32 +08:00
|
|
|
stats_postfork_parent(tsd_tsdn(tsd));
|
2016-02-25 15:58:10 +08:00
|
|
|
for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
|
|
|
|
arena_t *arena;
|
|
|
|
|
2017-01-16 08:56:30 +08:00
|
|
|
if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
|
2016-05-11 13:21:10 +08:00
|
|
|
arena_postfork_parent(tsd_tsdn(tsd), arena);
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
2012-03-14 07:31:41 +08:00
|
|
|
}
|
2016-05-11 13:21:10 +08:00
|
|
|
prof_postfork_parent(tsd_tsdn(tsd));
|
2017-03-18 03:42:33 +08:00
|
|
|
if (have_background_thread) {
|
|
|
|
background_thread_postfork_parent(tsd_tsdn(tsd));
|
|
|
|
}
|
2016-05-11 13:21:10 +08:00
|
|
|
malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock);
|
2017-01-30 13:32:39 +08:00
|
|
|
tcache_postfork_parent(tsd_tsdn(tsd));
|
2016-05-11 13:21:10 +08:00
|
|
|
ctl_postfork_parent(tsd_tsdn(tsd));
|
2012-03-14 07:31:41 +08:00
|
|
|
}
|
2009-06-23 03:08:42 +08:00
|
|
|
|
2012-03-14 07:31:41 +08:00
|
|
|
void
|
2017-01-16 08:56:30 +08:00
|
|
|
jemalloc_postfork_child(void) {
|
2016-04-14 14:36:15 +08:00
|
|
|
tsd_t *tsd;
|
2016-02-25 15:58:10 +08:00
|
|
|
unsigned i, narenas;
|
2009-06-23 03:08:42 +08:00
|
|
|
|
2015-01-21 07:37:51 +08:00
|
|
|
assert(malloc_initialized());
|
2012-05-12 08:40:16 +08:00
|
|
|
|
2016-04-14 14:36:15 +08:00
|
|
|
tsd = tsd_fetch();
|
|
|
|
|
2018-07-27 05:42:37 +08:00
|
|
|
tsd_postfork_child(tsd);
|
|
|
|
|
2017-05-23 10:32:04 +08:00
|
|
|
witness_postfork_child(tsd_witness_tsdp_get(tsd));
|
2012-03-14 07:31:41 +08:00
|
|
|
/* Release all mutexes, now that fork() has completed. */
|
2020-04-16 06:09:32 +08:00
|
|
|
stats_postfork_child(tsd_tsdn(tsd));
|
2016-02-25 15:58:10 +08:00
|
|
|
for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
|
|
|
|
arena_t *arena;
|
|
|
|
|
2017-01-16 08:56:30 +08:00
|
|
|
if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
|
2016-05-11 13:21:10 +08:00
|
|
|
arena_postfork_child(tsd_tsdn(tsd), arena);
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
2009-06-23 03:08:42 +08:00
|
|
|
}
|
2016-05-11 13:21:10 +08:00
|
|
|
prof_postfork_child(tsd_tsdn(tsd));
|
2017-03-18 03:42:33 +08:00
|
|
|
if (have_background_thread) {
|
|
|
|
background_thread_postfork_child(tsd_tsdn(tsd));
|
|
|
|
}
|
2016-05-11 13:21:10 +08:00
|
|
|
malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock);
|
2017-01-30 13:32:39 +08:00
|
|
|
tcache_postfork_child(tsd_tsdn(tsd));
|
2016-05-11 13:21:10 +08:00
|
|
|
ctl_postfork_child(tsd_tsdn(tsd));
|
2009-06-23 03:08:42 +08:00
|
|
|
}
|
2010-09-06 01:35:13 +08:00
|
|
|
|
|
|
|
/******************************************************************************/
|