#include "jemalloc/internal/jemalloc_preamble.h"
|
|
|
|
#include "jemalloc/internal/jemalloc_internal_includes.h"
|
2010-01-17 01:53:50 +08:00
|
|
|
|
2017-04-12 05:43:12 +08:00
|
|
|
#include "jemalloc/internal/assert.h"
|
2020-03-10 05:44:11 +08:00
|
|
|
#include "jemalloc/internal/decay.h"
|
2017-12-21 09:21:56 +08:00
|
|
|
#include "jemalloc/internal/div.h"
|
2019-12-03 06:19:22 +08:00
|
|
|
#include "jemalloc/internal/ehooks.h"
|
2017-05-24 05:36:09 +08:00
|
|
|
#include "jemalloc/internal/extent_dss.h"
|
2017-05-24 05:42:32 +08:00
|
|
|
#include "jemalloc/internal/extent_mmap.h"
|
2017-05-24 03:28:19 +08:00
|
|
|
#include "jemalloc/internal/mutex.h"
|
2017-05-24 05:26:31 +08:00
|
|
|
#include "jemalloc/internal/rtree.h"
|
2019-03-23 03:53:11 +08:00
|
|
|
#include "jemalloc/internal/safety_check.h"
|
2017-04-12 04:31:16 +08:00
|
|
|
#include "jemalloc/internal/util.h"
|
|
|
|
|
2018-05-03 17:40:53 +08:00
|
|
|
JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
|
|
|
|
|
2010-01-17 01:53:50 +08:00
|
|
|
/******************************************************************************/
|
|
|
|
/* Data. */
|
|
|
|
|
/*
 * Define names for both uninitialized and initialized phases, so that
 * options and mallctl processing are straightforward.
 */
const char *percpu_arena_mode_names[] = {
	"percpu",
	"phycpu",
	"disabled",
	"percpu",
	"phycpu"
};
percpu_arena_mode_t opt_percpu_arena = PERCPU_ARENA_DEFAULT;

ssize_t opt_dirty_decay_ms = DIRTY_DECAY_MS_DEFAULT;
ssize_t opt_muzzy_decay_ms = MUZZY_DECAY_MS_DEFAULT;

static atomic_zd_t dirty_decay_ms_default;
static atomic_zd_t muzzy_decay_ms_default;

emap_t arena_emap_global;
hpa_t arena_hpa_global;

const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = {
#define STEP(step, h, x, y) \
		h,
		SMOOTHSTEP
#undef STEP
};

static div_info_t arena_binind_div_info[SC_NBINS];

size_t opt_oversize_threshold = OVERSIZE_THRESHOLD_DEFAULT;
size_t oversize_threshold = OVERSIZE_THRESHOLD_DEFAULT;

static unsigned huge_arena_ind;

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena,
    bool is_background_thread, bool all);
static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, edata_t *slab,
    bin_t *bin);

/******************************************************************************/

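/*
 * Merge the cheap-to-read arena stats (thread count, dss precedence, decay
 * times, and active/dirty/muzzy page counts) into the caller-provided fields.
 */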
void
arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
    size_t *nactive, size_t *ndirty, size_t *nmuzzy) {
	*nthreads += arena_nthreads_get(arena, false);
	*dss = dss_prec_names[arena_dss_prec_get(arena)];
	*dirty_decay_ms = arena_decay_ms_get(arena, extent_state_dirty);
	*muzzy_decay_ms = arena_decay_ms_get(arena, extent_state_muzzy);
	pa_shard_basic_stats_merge(&arena->pa_shard, nactive, ndirty, nmuzzy);
}

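/*
 * Full stats merge: on top of the basic stats, fold the arena's large
 * allocation counters, PA shard stats, mutex profiling data, cached tcache
 * bytes, and per-shard bin stats into the output structures.
 */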
void
arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
    size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
    bin_stats_data_t *bstats, arena_stats_large_t *lstats,
    pac_estats_t *estats) {
	cassert(config_stats);

	arena_basic_stats_merge(tsdn, arena, nthreads, dss, dirty_decay_ms,
	    muzzy_decay_ms, nactive, ndirty, nmuzzy);

	size_t base_allocated, base_resident, base_mapped, metadata_thp;
	base_stats_get(tsdn, arena->base, &base_allocated, &base_resident,
	    &base_mapped, &metadata_thp);
	size_t pac_mapped_sz = pac_mapped(&arena->pa_shard.pac);
	astats->mapped += base_mapped + pac_mapped_sz;
	astats->resident += base_resident;

	LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);

	astats->base += base_allocated;
	atomic_load_add_store_zu(&astats->internal, arena_internal_get(arena));
	astats->metadata_thp += metadata_thp;

	for (szind_t i = 0; i < SC_NSIZES - SC_NBINS; i++) {
		uint64_t nmalloc = locked_read_u64(tsdn,
		    LOCKEDINT_MTX(arena->stats.mtx),
		    &arena->stats.lstats[i].nmalloc);
		locked_inc_u64_unsynchronized(&lstats[i].nmalloc, nmalloc);
		astats->nmalloc_large += nmalloc;

		uint64_t ndalloc = locked_read_u64(tsdn,
		    LOCKEDINT_MTX(arena->stats.mtx),
		    &arena->stats.lstats[i].ndalloc);
		locked_inc_u64_unsynchronized(&lstats[i].ndalloc, ndalloc);
		astats->ndalloc_large += ndalloc;

		uint64_t nrequests = locked_read_u64(tsdn,
		    LOCKEDINT_MTX(arena->stats.mtx),
		    &arena->stats.lstats[i].nrequests);
		locked_inc_u64_unsynchronized(&lstats[i].nrequests,
		    nmalloc + nrequests);
		astats->nrequests_large += nmalloc + nrequests;

		/* nfill == nmalloc for large currently. */
		locked_inc_u64_unsynchronized(&lstats[i].nfills, nmalloc);
		astats->nfills_large += nmalloc;

		uint64_t nflush = locked_read_u64(tsdn,
		    LOCKEDINT_MTX(arena->stats.mtx),
		    &arena->stats.lstats[i].nflushes);
		locked_inc_u64_unsynchronized(&lstats[i].nflushes, nflush);
		astats->nflushes_large += nflush;

		assert(nmalloc >= ndalloc);
		assert(nmalloc - ndalloc <= SIZE_T_MAX);
		size_t curlextents = (size_t)(nmalloc - ndalloc);
		lstats[i].curlextents += curlextents;
		astats->allocated_large +=
		    curlextents * sz_index2size(SC_NBINS + i);
	}

	pa_shard_stats_merge(tsdn, &arena->pa_shard, &astats->pa_shard_stats,
	    estats, &astats->resident);

	LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);

	/* tcache_bytes counts currently cached bytes. */
	astats->tcache_bytes = 0;
	malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
	cache_bin_array_descriptor_t *descriptor;
	ql_foreach(descriptor, &arena->cache_bin_array_descriptor_ql, link) {
		for (szind_t i = 0; i < nhbins; i++) {
			cache_bin_t *cache_bin = &descriptor->bins[i];
			astats->tcache_bytes +=
			    cache_bin_ncached_get(cache_bin,
			    &tcache_bin_info[i]) * sz_index2size(i);
		}
	}
	malloc_mutex_prof_read(tsdn,
	    &astats->mutex_prof_data[arena_prof_mutex_tcache_list],
	    &arena->tcache_ql_mtx);
	malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);

#define READ_ARENA_MUTEX_PROF_DATA(mtx, ind)				\
    malloc_mutex_lock(tsdn, &arena->mtx);				\
    malloc_mutex_prof_read(tsdn, &astats->mutex_prof_data[ind],		\
        &arena->mtx);							\
    malloc_mutex_unlock(tsdn, &arena->mtx);

	/* Gather per arena mutex profiling data. */
	READ_ARENA_MUTEX_PROF_DATA(large_mtx, arena_prof_mutex_large);
	READ_ARENA_MUTEX_PROF_DATA(base->mtx,
	    arena_prof_mutex_base);
#undef READ_ARENA_MUTEX_PROF_DATA
	pa_shard_mtx_stats_read(tsdn, &arena->pa_shard,
	    astats->mutex_prof_data);

	nstime_copy(&astats->uptime, &arena->create_time);
	nstime_update(&astats->uptime);
	nstime_subtract(&astats->uptime, &arena->create_time);

	for (szind_t i = 0; i < SC_NBINS; i++) {
		for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
			bin_stats_merge(tsdn, &bstats[i],
			    &arena->bins[i].bin_shards[j]);
		}
	}
}

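/*
 * Called when deallocation has just created new dirty pages: either purge
 * immediately (dirty decay time of zero) or defer to the background thread's
 * inactivity check.
 */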
void arena_handle_new_dirty_pages(tsdn_t *tsdn, arena_t *arena) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	if (arena_decay_ms_get(arena, extent_state_dirty) == 0) {
		arena_decay_dirty(tsdn, arena, false, true);
	} else {
		arena_background_thread_inactivity_check(tsdn, arena, false);
	}
}

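/*
 * Grab the lowest-indexed free region in the slab, mark it allocated in the
 * slab bitmap, and return its address; the _batch variant below does the same
 * for cnt regions at once.
 */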
static void *
arena_slab_reg_alloc(edata_t *slab, const bin_info_t *bin_info) {
	void *ret;
	slab_data_t *slab_data = edata_slab_data_get(slab);
	size_t regind;

	assert(edata_nfree_get(slab) > 0);
	assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));

	regind = bitmap_sfu(slab_data->bitmap, &bin_info->bitmap_info);
	ret = (void *)((uintptr_t)edata_addr_get(slab) +
	    (uintptr_t)(bin_info->reg_size * regind));
	edata_nfree_dec(slab);
	return ret;
}

static void
arena_slab_reg_alloc_batch(edata_t *slab, const bin_info_t *bin_info,
    unsigned cnt, void** ptrs) {
	slab_data_t *slab_data = edata_slab_data_get(slab);

	assert(edata_nfree_get(slab) >= cnt);
	assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));

#if (! defined JEMALLOC_INTERNAL_POPCOUNTL) || (defined BITMAP_USE_TREE)
	for (unsigned i = 0; i < cnt; i++) {
		size_t regind = bitmap_sfu(slab_data->bitmap,
		    &bin_info->bitmap_info);
		*(ptrs + i) = (void *)((uintptr_t)edata_addr_get(slab) +
		    (uintptr_t)(bin_info->reg_size * regind));
	}
#else
	unsigned group = 0;
	bitmap_t g = slab_data->bitmap[group];
	unsigned i = 0;
	while (i < cnt) {
		while (g == 0) {
			g = slab_data->bitmap[++group];
		}
		size_t shift = group << LG_BITMAP_GROUP_NBITS;
		size_t pop = popcount_lu(g);
		if (pop > (cnt - i)) {
			pop = cnt - i;
		}

		/*
		 * Load from memory locations only once, outside the
		 * hot loop below.
		 */
		uintptr_t base = (uintptr_t)edata_addr_get(slab);
		uintptr_t regsize = (uintptr_t)bin_info->reg_size;
		while (pop--) {
			size_t bit = cfs_lu(&g);
			size_t regind = shift + bit;
			*(ptrs + i) = (void *)(base + regsize * regind);

			i++;
		}
		slab_data->bitmap[group] = g;
	}
#endif
	edata_nfree_sub(slab, cnt);
}

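/*
 * Compute the region index of ptr within its slab; left non-static under
 * JEMALLOC_JET so tests can exercise it directly.
 */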
#ifndef JEMALLOC_JET
static
#endif
size_t
arena_slab_regind(edata_t *slab, szind_t binind, const void *ptr) {
	size_t diff, regind;

	/* Freeing a pointer outside the slab can cause assertion failure. */
	assert((uintptr_t)ptr >= (uintptr_t)edata_addr_get(slab));
	assert((uintptr_t)ptr < (uintptr_t)edata_past_get(slab));
	/* Freeing an interior pointer can cause assertion failure. */
	assert(((uintptr_t)ptr - (uintptr_t)edata_addr_get(slab)) %
	    (uintptr_t)bin_infos[binind].reg_size == 0);

	diff = (size_t)((uintptr_t)ptr - (uintptr_t)edata_addr_get(slab));

	/* Avoid doing division with a variable divisor. */
	regind = div_compute(&arena_binind_div_info[binind], diff);

	assert(regind < bin_infos[binind].nregs);

	return regind;
}

static void
arena_slab_reg_dalloc(edata_t *slab, slab_data_t *slab_data, void *ptr) {
	szind_t binind = edata_szind_get(slab);
	const bin_info_t *bin_info = &bin_infos[binind];
	size_t regind = arena_slab_regind(slab, binind, ptr);

	assert(edata_nfree_get(slab) < bin_info->nregs);
	/* Freeing an unallocated pointer can cause assertion failure. */
	assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind));

	bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind);
	edata_nfree_inc(slab);
}

static void
arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
	szind_t index, hindex;

	cassert(config_stats);

	if (usize < SC_LARGE_MINCLASS) {
		usize = SC_LARGE_MINCLASS;
	}
	index = sz_size2index(usize);
	hindex = (index >= SC_NBINS) ? index - SC_NBINS : 0;

	locked_inc_u64(tsdn, LOCKEDINT_MTX(arena->stats.mtx),
	    &arena->stats.lstats[hindex].nmalloc, 1);
}

static void
arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
	szind_t index, hindex;

	cassert(config_stats);

	if (usize < SC_LARGE_MINCLASS) {
		usize = SC_LARGE_MINCLASS;
	}
	index = sz_size2index(usize);
	hindex = (index >= SC_NBINS) ? index - SC_NBINS : 0;

	locked_inc_u64(tsdn, LOCKEDINT_MTX(arena->stats.mtx),
	    &arena->stats.lstats[hindex].ndalloc, 1);
}

static void
arena_large_ralloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t oldusize,
    size_t usize) {
	arena_large_dalloc_stats_update(tsdn, arena, oldusize);
	arena_large_malloc_stats_update(tsdn, arena, usize);
}

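/*
 * Allocate a large extent through the PA shard, update the large-malloc
 * stats, and apply cache-oblivious address randomization when sz_large_pad
 * is non-zero.
 */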
edata_t *
arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool zero) {
	szind_t szind = sz_size2index(usize);
	size_t esize = usize + sz_large_pad;

	edata_t *edata = pa_alloc(tsdn, &arena->pa_shard, esize, alignment,
	    /* slab */ false, szind, zero);

	if (edata != NULL) {
		if (config_stats) {
			LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
			arena_large_malloc_stats_update(tsdn, arena, usize);
			LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
		}
	}

	if (edata != NULL && sz_large_pad != 0) {
		arena_cache_oblivious_randomize(tsdn, arena, edata, alignment);
	}

	return edata;
}

void
arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, edata_t *edata) {
	if (config_stats) {
		LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
		arena_large_dalloc_stats_update(tsdn, arena,
		    edata_usize_get(edata));
		LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
	}
}

void
arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
    size_t oldusize) {
	size_t usize = edata_usize_get(edata);

	if (config_stats) {
		LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
		arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
		LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
	}
}

void
arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
    size_t oldusize) {
	size_t usize = edata_usize_get(edata);

	if (config_stats) {
		LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
		arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
		LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
	}
}

/*
 * In situations where we're not forcing a decay (i.e. because the user
 * specifically requested it), should we purge ourselves, or wait for the
 * background thread to get to it.
 */
static pac_purge_eagerness_t
arena_decide_unforced_purge_eagerness(bool is_background_thread) {
	if (is_background_thread) {
		return PAC_PURGE_ALWAYS;
	} else if (!is_background_thread && background_thread_enabled()) {
		return PAC_PURGE_NEVER;
	} else {
		return PAC_PURGE_ON_EPOCH_ADVANCE;
	}
}

bool
arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, extent_state_t state,
    ssize_t decay_ms) {
	pac_purge_eagerness_t eagerness = arena_decide_unforced_purge_eagerness(
	    /* is_background_thread */ false);
	return pa_decay_ms_set(tsdn, &arena->pa_shard, state, decay_ms,
	    eagerness);
}

ssize_t
arena_decay_ms_get(arena_t *arena, extent_state_t state) {
	return pa_decay_ms_get(&arena->pa_shard, state);
}

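/*
 * Common implementation for dirty and muzzy decay: either decay everything
 * (all == true), or advance the decay epoch opportunistically and hand any
 * newly accumulated backlog to the background thread when appropriate.
 */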
static bool
arena_decay_impl(tsdn_t *tsdn, arena_t *arena, decay_t *decay,
    pac_decay_stats_t *decay_stats, ecache_t *ecache,
    bool is_background_thread, bool all) {
	if (all) {
		malloc_mutex_lock(tsdn, &decay->mtx);
		pac_decay_all(tsdn, &arena->pa_shard.pac, decay, decay_stats,
		    ecache, /* fully_decay */ all);
		malloc_mutex_unlock(tsdn, &decay->mtx);
		return false;
	}

	if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
		/* No need to wait if another thread is in progress. */
		return true;
	}
	pac_purge_eagerness_t eagerness =
	    arena_decide_unforced_purge_eagerness(is_background_thread);
	bool epoch_advanced = pac_maybe_decay_purge(tsdn, &arena->pa_shard.pac,
	    decay, decay_stats, ecache, eagerness);
	size_t npages_new;
	if (epoch_advanced) {
		/* Backlog is updated on epoch advance. */
		npages_new = decay_epoch_npages_delta(decay);
	}
	malloc_mutex_unlock(tsdn, &decay->mtx);

	if (have_background_thread && background_thread_enabled() &&
	    epoch_advanced && !is_background_thread) {
		background_thread_interval_check(tsdn, arena, decay,
		    npages_new);
	}

	return false;
}

static bool
arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
    bool all) {
	return arena_decay_impl(tsdn, arena, &arena->pa_shard.pac.decay_dirty,
	    &arena->pa_shard.pac.stats->decay_dirty,
	    &arena->pa_shard.pac.ecache_dirty, is_background_thread, all);
}

static bool
arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
    bool all) {
	if (pa_shard_dont_decay_muzzy(&arena->pa_shard)) {
		return false;
	}
	return arena_decay_impl(tsdn, arena, &arena->pa_shard.pac.decay_muzzy,
	    &arena->pa_shard.pac.stats->decay_muzzy,
	    &arena->pa_shard.pac.ecache_muzzy, is_background_thread, all);
}

void
arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) {
	if (arena_decay_dirty(tsdn, arena, is_background_thread, all)) {
		return;
	}
	arena_decay_muzzy(tsdn, arena, is_background_thread, all);
}

void
arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, edata_t *slab) {
	bool generated_dirty;
	pa_dalloc(tsdn, &arena->pa_shard, slab, &generated_dirty);
	if (generated_dirty) {
		arena_handle_new_dirty_pages(tsdn, arena);
	}
}

static void
arena_bin_slabs_nonfull_insert(bin_t *bin, edata_t *slab) {
	assert(edata_nfree_get(slab) > 0);
	edata_heap_insert(&bin->slabs_nonfull, slab);
	if (config_stats) {
		bin->stats.nonfull_slabs++;
	}
}

static void
arena_bin_slabs_nonfull_remove(bin_t *bin, edata_t *slab) {
	edata_heap_remove(&bin->slabs_nonfull, slab);
	if (config_stats) {
		bin->stats.nonfull_slabs--;
	}
}

static edata_t *
arena_bin_slabs_nonfull_tryget(bin_t *bin) {
	edata_t *slab = edata_heap_remove_first(&bin->slabs_nonfull);
	if (slab == NULL) {
		return NULL;
	}
	if (config_stats) {
		bin->stats.reslabs++;
		bin->stats.nonfull_slabs--;
	}
	return slab;
}

static void
arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, edata_t *slab) {
	assert(edata_nfree_get(slab) == 0);
	/*
	 * Tracking extents is required by arena_reset, which is not allowed
	 * for auto arenas.  Bypass this step to avoid touching the edata
	 * linkage (often results in cache misses) for auto arenas.
	 */
	if (arena_is_auto(arena)) {
		return;
	}
	edata_list_active_append(&bin->slabs_full, slab);
}

static void
arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, edata_t *slab) {
	if (arena_is_auto(arena)) {
		return;
	}
	edata_list_active_remove(&bin->slabs_full, slab);
}

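/*
 * Release every slab owned by the bin (slabcur, nonfull, and full slabs),
 * dropping and re-acquiring the bin lock around each deallocation.
 */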
static void
arena_bin_reset(tsd_t *tsd, arena_t *arena, bin_t *bin) {
	edata_t *slab;

	malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
	if (bin->slabcur != NULL) {
		slab = bin->slabcur;
		bin->slabcur = NULL;
		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
		arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
	}
	while ((slab = edata_heap_remove_first(&bin->slabs_nonfull)) != NULL) {
		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
		arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
	}
	for (slab = edata_list_active_first(&bin->slabs_full); slab != NULL;
	    slab = edata_list_active_first(&bin->slabs_full)) {
		arena_bin_slabs_full_remove(arena, bin, slab);
		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
		arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
	}
	if (config_stats) {
		bin->stats.curregs = 0;
		bin->stats.curslabs = 0;
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
}

void
arena_reset(tsd_t *tsd, arena_t *arena) {
	/*
	 * Locking in this function is unintuitive.  The caller guarantees that
	 * no concurrent operations are happening in this arena, but there are
	 * still reasons that some locking is necessary:
	 *
	 * - Some of the functions in the transitive closure of calls assume
	 *   appropriate locks are held, and in some cases these locks are
	 *   temporarily dropped to avoid lock order reversal or deadlock due
	 *   to reentry.
	 * - mallctl("epoch", ...) may concurrently refresh stats.  While
	 *   strictly speaking this is a "concurrent operation", disallowing
	 *   stats refreshes would impose an inconvenient burden.
	 */

	/* Large allocations. */
	malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);

	for (edata_t *edata = edata_list_active_first(&arena->large);
	    edata != NULL; edata = edata_list_active_first(&arena->large)) {
		void *ptr = edata_base_get(edata);
		size_t usize;

		malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
		emap_alloc_ctx_t alloc_ctx;
		emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr,
		    &alloc_ctx);
		assert(alloc_ctx.szind != SC_NSIZES);

		if (config_stats || (config_prof && opt_prof)) {
			usize = sz_index2size(alloc_ctx.szind);
			assert(usize == isalloc(tsd_tsdn(tsd), ptr));
		}
		/* Remove large allocation from prof sample set. */
		if (config_prof && opt_prof) {
			prof_free(tsd, ptr, usize, &alloc_ctx);
		}
		large_dalloc(tsd_tsdn(tsd), edata);
		malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);

	/* Bins. */
	for (unsigned i = 0; i < SC_NBINS; i++) {
		for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
			arena_bin_reset(tsd, arena,
			    &arena->bins[i].bin_shards[j]);
		}
	}
	pa_shard_reset(&arena->pa_shard);
}

void
arena_destroy(tsd_t *tsd, arena_t *arena) {
	assert(base_ind_get(arena->base) >= narenas_auto);
	assert(arena_nthreads_get(arena, false) == 0);
	assert(arena_nthreads_get(arena, true) == 0);

	/*
	 * No allocations have occurred since arena_reset() was called.
	 * Furthermore, the caller (arena_i_destroy_ctl()) purged all cached
	 * extents, so only retained extents may remain and it's safe to call
	 * pa_shard_destroy_retained.
	 */
	pa_shard_destroy(tsd_tsdn(tsd), &arena->pa_shard);

	/*
	 * Remove the arena pointer from the arenas array.  We rely on the fact
	 * that there is no way for the application to get a dirty read from
	 * the arenas array unless there is an inherent race in the application
	 * involving access of an arena being concurrently destroyed.  The
	 * application must synchronize knowledge of the arena's validity, so
	 * as long as we use an atomic write to update the arenas array, the
	 * application will get a clean read any time after it synchronizes
	 * knowledge that the arena is no longer valid.
	 */
	arena_set(base_ind_get(arena->base), NULL);

	/*
	 * Destroy the base allocator, which manages all metadata ever mapped
	 * by this arena.
	 */
	base_delete(tsd_tsdn(tsd), arena->base);
}

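/*
 * Allocate a fresh slab for the given bin from the PA shard and initialize
 * its region bitmap and free-count/binshard metadata.
 */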
static edata_t *
arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned binshard,
    const bin_info_t *bin_info) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	edata_t *slab = pa_alloc(tsdn, &arena->pa_shard, bin_info->slab_size,
	    PAGE, /* slab */ true, /* szind */ binind, /* zero */ false);

	if (slab == NULL) {
		return NULL;
	}
	assert(edata_slab_get(slab));

	/* Initialize slab internals. */
	slab_data_t *slab_data = edata_slab_data_get(slab);
	edata_nfree_binshard_set(slab, bin_info->nregs, binshard);
	bitmap_init(slab_data->bitmap, &bin_info->bitmap_info, false);

	return slab;
}

/*
 * Before attempting the _with_fresh_slab approaches below, the _no_fresh_slab
 * variants (i.e. through slabcur and nonfull) must be tried first.
 */
static void
arena_bin_refill_slabcur_with_fresh_slab(tsdn_t *tsdn, arena_t *arena,
    bin_t *bin, szind_t binind, edata_t *fresh_slab) {
	malloc_mutex_assert_owner(tsdn, &bin->lock);
	/* Only called after slabcur and nonfull both failed. */
	assert(bin->slabcur == NULL);
	assert(edata_heap_first(&bin->slabs_nonfull) == NULL);
	assert(fresh_slab != NULL);

	/* A new slab from arena_slab_alloc() */
	assert(edata_nfree_get(fresh_slab) == bin_infos[binind].nregs);
	if (config_stats) {
		bin->stats.nslabs++;
		bin->stats.curslabs++;
	}
	bin->slabcur = fresh_slab;
}

/* Refill slabcur and then alloc using the fresh slab */
static void *
arena_bin_malloc_with_fresh_slab(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
    szind_t binind, edata_t *fresh_slab) {
	malloc_mutex_assert_owner(tsdn, &bin->lock);
	arena_bin_refill_slabcur_with_fresh_slab(tsdn, arena, bin, binind,
	    fresh_slab);

	return arena_slab_reg_alloc(bin->slabcur, &bin_infos[binind]);
}

static bool
arena_bin_refill_slabcur_no_fresh_slab(tsdn_t *tsdn, arena_t *arena,
    bin_t *bin) {
	malloc_mutex_assert_owner(tsdn, &bin->lock);
	/* Only called after arena_slab_reg_alloc[_batch] failed. */
	assert(bin->slabcur == NULL || edata_nfree_get(bin->slabcur) == 0);

	if (bin->slabcur != NULL) {
		arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
	}

	/* Look for a usable slab. */
	bin->slabcur = arena_bin_slabs_nonfull_tryget(bin);
	assert(bin->slabcur == NULL || edata_nfree_get(bin->slabcur) > 0);

	return (bin->slabcur == NULL);
}

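/*
 * Map the calling thread to one of the bin's shards (shard 0 when thread
 * state is unavailable) and return the corresponding bin_t.
 */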
bin_t *
arena_bin_choose(tsdn_t *tsdn, arena_t *arena, szind_t binind,
    unsigned *binshard_p) {
	unsigned binshard;
	if (tsdn_null(tsdn) || tsd_arena_get(tsdn_tsd(tsdn)) == NULL) {
		binshard = 0;
	} else {
		binshard = tsd_binshardsp_get(tsdn_tsd(tsdn))->binshard[binind];
	}
	assert(binshard < bin_infos[binind].n_shards);
	if (binshard_p != NULL) {
		*binshard_p = binshard;
	}
	return &arena->bins[binind].bin_shards[binshard];
}

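/*
 * Fill an empty tcache bin with up to nfill small regions of size class
 * binind, drawing from bin-local slabs first and allocating fresh slabs only
 * when necessary (see the refill-loop comment below).
 */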
void
|
2020-04-08 08:49:50 +08:00
|
|
|
arena_cache_bin_fill_small(tsdn_t *tsdn, arena_t *arena,
|
|
|
|
cache_bin_t *cache_bin, cache_bin_info_t *cache_bin_info, szind_t binind,
|
|
|
|
const unsigned nfill) {
|
|
|
|
assert(cache_bin_ncached_get(cache_bin, cache_bin_info) == 0);
|
2018-11-13 07:56:04 +08:00
|
|
|
|
2020-02-08 06:53:36 +08:00
|
|
|
const bin_info_t *bin_info = &bin_infos[binind];
|
2020-02-28 02:22:46 +08:00
|
|
|
|
|
|
|
CACHE_BIN_PTR_ARRAY_DECLARE(ptrs, nfill);
|
2020-04-08 08:49:50 +08:00
|
|
|
cache_bin_init_ptr_array_for_fill(cache_bin, cache_bin_info, &ptrs,
|
2020-02-29 11:12:07 +08:00
|
|
|
nfill);
|
2020-02-08 06:53:36 +08:00
|
|
|
/*
|
|
|
|
* Bin-local resources are used first: 1) bin->slabcur, and 2) nonfull
|
|
|
|
* slabs. After both are exhausted, new slabs will be allocated through
|
|
|
|
* arena_slab_alloc().
|
|
|
|
*
|
|
|
|
* Bin lock is only taken / released right before / after the while(...)
|
|
|
|
* refill loop, with new slab allocation (which has its own locking)
|
|
|
|
* kept outside of the loop. This setup facilitates flat combining, at
|
|
|
|
* the cost of the nested loop (through goto label_refill).
|
|
|
|
*
|
|
|
|
* To optimize for cases with contention and limited resources
|
|
|
|
* (e.g. hugepage-backed or non-overcommit arenas), each fill-iteration
|
|
|
|
* gets one chance of slab_alloc, and a retry of bin local resources
|
|
|
|
* after the slab allocation (regardless if slab_alloc failed, because
|
|
|
|
* the bin lock is dropped during the slab allocation).
|
|
|
|
*
|
|
|
|
* In other words, new slab allocation is allowed, as long as there was
|
|
|
|
* progress since the previous slab_alloc. This is tracked with
|
|
|
|
* made_progress below, initialized to true to jump start the first
|
|
|
|
* iteration.
|
|
|
|
*
|
|
|
|
* In other words (again), the loop will only terminate early (i.e. stop
|
|
|
|
* with filled < nfill) after going through the three steps: a) bin
|
|
|
|
* local exhausted, b) unlock and slab_alloc returns null, c) re-lock
|
|
|
|
* and bin local fails again.
|
|
|
|
*/
|
|
|
|
bool made_progress = true;
|
|
|
|
edata_t *fresh_slab = NULL;
|
|
|
|
bool alloc_and_retry = false;
|
|
|
|
unsigned filled = 0;
|
2018-11-13 07:56:04 +08:00
|
|
|
unsigned binshard;
|
2020-04-23 08:22:43 +08:00
|
|
|
bin_t *bin = arena_bin_choose(tsdn, arena, binind, &binshard);
|
|
|
|
|
2020-02-08 06:53:36 +08:00
|
|
|
label_refill:
|
2020-04-23 08:22:43 +08:00
|
|
|
malloc_mutex_lock(tsdn, &bin->lock);
|
|
|
|
|
2020-02-08 06:53:36 +08:00
|
|
|
while (filled < nfill) {
|
|
|
|
/* Try batch-fill from slabcur first. */
|
|
|
|
edata_t *slabcur = bin->slabcur;
|
|
|
|
if (slabcur != NULL && edata_nfree_get(slabcur) > 0) {
|
|
|
|
unsigned tofill = nfill - filled;
|
|
|
|
unsigned nfree = edata_nfree_get(slabcur);
|
|
|
|
unsigned cnt = tofill < nfree ? tofill : nfree;
|
|
|
|
|
|
|
|
arena_slab_reg_alloc_batch(slabcur, bin_info, cnt,
|
2020-02-28 02:22:46 +08:00
|
|
|
&ptrs.ptr[filled]);
|
2020-02-08 06:53:36 +08:00
|
|
|
made_progress = true;
|
|
|
|
filled += cnt;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
/* Next try refilling slabcur from nonfull slabs. */
|
|
|
|
if (!arena_bin_refill_slabcur_no_fresh_slab(tsdn, arena, bin)) {
|
|
|
|
assert(bin->slabcur != NULL);
|
|
|
|
continue;
|
|
|
|
}
|
2018-11-13 07:56:04 +08:00
|
|
|
|
2020-02-08 06:53:36 +08:00
|
|
|
/* Then see if a new slab was reserved already. */
|
|
|
|
if (fresh_slab != NULL) {
|
|
|
|
arena_bin_refill_slabcur_with_fresh_slab(tsdn, arena,
|
|
|
|
bin, binind, fresh_slab);
|
|
|
|
assert(bin->slabcur != NULL);
|
|
|
|
fresh_slab = NULL;
|
|
|
|
continue;
|
2014-10-06 04:05:10 +08:00
|
|
|
}
|
2020-02-08 06:53:36 +08:00
|
|
|
|
|
|
|
/* Try slab_alloc if made progress (or never did slab_alloc). */
|
|
|
|
if (made_progress) {
|
|
|
|
assert(bin->slabcur == NULL);
|
|
|
|
assert(fresh_slab == NULL);
|
|
|
|
alloc_and_retry = true;
|
|
|
|
/* Alloc a new slab then come back. */
|
|
|
|
break;
|
2012-04-06 15:35:09 +08:00
|
|
|
}
|
2020-02-08 06:53:36 +08:00
|
|
|
|
2020-02-28 02:22:46 +08:00
|
|
|
/* OOM. */
|
|
|
|
|
2020-02-08 06:53:36 +08:00
|
|
|
assert(fresh_slab == NULL);
|
|
|
|
assert(!alloc_and_retry);
|
|
|
|
break;
|
|
|
|
} /* while (filled < nfill) loop. */
|
|
|
|
|
|
|
|
	if (config_stats && !alloc_and_retry) {
		bin->stats.nmalloc += filled;
		bin->stats.nrequests += cache_bin->tstats.nrequests;
		bin->stats.curregs += filled;
		bin->stats.nfills++;
		cache_bin->tstats.nrequests = 0;
	}

	malloc_mutex_unlock(tsdn, &bin->lock);

	if (alloc_and_retry) {
		assert(fresh_slab == NULL);
		assert(filled < nfill);
		assert(made_progress);

		fresh_slab = arena_slab_alloc(tsdn, arena, binind, binshard,
		    bin_info);
		/* fresh_slab NULL case handled in the while loop. */

		alloc_and_retry = false;
		made_progress = false;
		goto label_refill;
	}
	assert(filled == nfill || (fresh_slab == NULL && !made_progress));

	/* Release if allocated but not used. */
	if (fresh_slab != NULL) {
		assert(edata_nfree_get(fresh_slab) == bin_info->nregs);
		arena_slab_dalloc(tsdn, arena, fresh_slab);
		fresh_slab = NULL;
	}

	cache_bin_finish_fill(cache_bin, cache_bin_info, &ptrs, filled);
	arena_decay_tick(tsdn, arena);
}

size_t
arena_fill_small_fresh(tsdn_t *tsdn, arena_t *arena, szind_t binind,
    void **ptrs, size_t nfill, bool zero) {
	assert(binind < SC_NBINS);
	const bin_info_t *bin_info = &bin_infos[binind];
	const size_t nregs = bin_info->nregs;
	assert(nregs > 0);
	const size_t usize = bin_info->reg_size;

	const bool manual_arena = !arena_is_auto(arena);
	unsigned binshard;
	bin_t *bin = arena_bin_choose(tsdn, arena, binind, &binshard);

	size_t nslab = 0;
	size_t filled = 0;
	edata_t *slab = NULL;
	edata_list_active_t fulls;
	edata_list_active_init(&fulls);

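	/*
	 * Each iteration allocates a brand-new slab and carves up to nregs
	 * regions straight out of it, bypassing bin->slabcur entirely.  Slabs
	 * that end up completely used are parked on the local fulls list
	 * (only needed for manual arenas, which track full slabs), so the bin
	 * lock is taken just once, after all slab allocations are done.
	 */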
	while (filled < nfill && (slab = arena_slab_alloc(tsdn, arena, binind,
	    binshard, bin_info)) != NULL) {
		assert((size_t)edata_nfree_get(slab) == nregs);
		++nslab;
		size_t batch = nfill - filled;
		if (batch > nregs) {
			batch = nregs;
		}
		assert(batch > 0);
		arena_slab_reg_alloc_batch(slab, bin_info, (unsigned)batch,
		    &ptrs[filled]);
		assert(edata_addr_get(slab) == ptrs[filled]);
		if (zero) {
			memset(ptrs[filled], 0, batch * usize);
		}
		filled += batch;
		if (batch == nregs) {
			if (manual_arena) {
				edata_list_active_append(&fulls, slab);
			}
			slab = NULL;
		}
	}

	malloc_mutex_lock(tsdn, &bin->lock);
	/*
	 * Only the last slab can still have free regions (i.e. be non-full),
	 * and that is the case iff slab != NULL.
	 */
	if (slab != NULL) {
		arena_bin_lower_slab(tsdn, arena, slab, bin);
	}
	if (manual_arena) {
		edata_list_active_concat(&bin->slabs_full, &fulls);
	}
	assert(edata_list_active_empty(&fulls));
	if (config_stats) {
		bin->stats.nslabs += nslab;
		bin->stats.curslabs += nslab;
		bin->stats.nmalloc += filled;
		bin->stats.nrequests += filled;
		bin->stats.curregs += filled;
	}
	malloc_mutex_unlock(tsdn, &bin->lock);

	arena_decay_tick(tsdn, arena);
	return filled;
}

/*
 * Without allocating a new slab, try arena_slab_reg_alloc() and re-fill
 * bin->slabcur if necessary.
 */
static void *
arena_bin_malloc_no_fresh_slab(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
    szind_t binind) {
	malloc_mutex_assert_owner(tsdn, &bin->lock);
	if (bin->slabcur == NULL || edata_nfree_get(bin->slabcur) == 0) {
		if (arena_bin_refill_slabcur_no_fresh_slab(tsdn, arena, bin)) {
			return NULL;
		}
	}

	assert(bin->slabcur != NULL && edata_nfree_get(bin->slabcur) > 0);
	return arena_slab_reg_alloc(bin->slabcur, &bin_infos[binind]);
}

static void *
arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {
	assert(binind < SC_NBINS);
	const bin_info_t *bin_info = &bin_infos[binind];
	size_t usize = sz_index2size(binind);
	unsigned binshard;
	bin_t *bin = arena_bin_choose(tsdn, arena, binind, &binshard);

	malloc_mutex_lock(tsdn, &bin->lock);
	edata_t *fresh_slab = NULL;
	void *ret = arena_bin_malloc_no_fresh_slab(tsdn, arena, bin, binind);
	if (ret == NULL) {
		malloc_mutex_unlock(tsdn, &bin->lock);
		/******************************/
		fresh_slab = arena_slab_alloc(tsdn, arena, binind, binshard,
		    bin_info);
		/********************************/
		malloc_mutex_lock(tsdn, &bin->lock);
		/* Retry since the lock was dropped. */
		ret = arena_bin_malloc_no_fresh_slab(tsdn, arena, bin, binind);
		if (ret == NULL) {
			if (fresh_slab == NULL) {
				/* OOM */
				malloc_mutex_unlock(tsdn, &bin->lock);
				return NULL;
			}
			ret = arena_bin_malloc_with_fresh_slab(tsdn, arena, bin,
			    binind, fresh_slab);
			fresh_slab = NULL;
		}
	}
	if (config_stats) {
		bin->stats.nmalloc++;
		bin->stats.nrequests++;
		bin->stats.curregs++;
	}
	malloc_mutex_unlock(tsdn, &bin->lock);
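
	/*
	 * fresh_slab can still be set here if, while the bin lock was dropped
	 * for the slab allocation, another thread refilled bin->slabcur and
	 * the retry above succeeded without consuming the new slab; give the
	 * unused slab back rather than stashing it.
	 */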
	if (fresh_slab != NULL) {
		arena_slab_dalloc(tsdn, arena, fresh_slab);
	}
	if (zero) {
		memset(ret, 0, usize);
	}
	arena_decay_tick(tsdn, arena);

	return ret;
}

void *
arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
    bool zero) {
	assert(!tsdn_null(tsdn) || arena != NULL);

	if (likely(!tsdn_null(tsdn))) {
		arena = arena_choose_maybe_huge(tsdn_tsd(tsdn), arena, size);
	}
	if (unlikely(arena == NULL)) {
		return NULL;
	}

	if (likely(size <= SC_SMALL_MAXCLASS)) {
		return arena_malloc_small(tsdn, arena, ind, zero);
	}
	return large_malloc(tsdn, arena, sz_index2size(ind), zero);
}

void *
arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
    bool zero, tcache_t *tcache) {
	void *ret;

	if (usize <= SC_SMALL_MAXCLASS
	    && (alignment < PAGE
	    || (alignment == PAGE && (usize & PAGE_MASK) == 0))) {
		/* Small; alignment doesn't require special slab placement. */
		ret = arena_malloc(tsdn, arena, usize, sz_size2index(usize),
		    zero, tcache, true);
	} else {
		if (likely(alignment <= CACHELINE)) {
			ret = large_malloc(tsdn, arena, usize, zero);
		} else {
			ret = large_palloc(tsdn, arena, usize, alignment, zero);
		}
	}
	return ret;
}

void
arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize) {
	cassert(config_prof);
	assert(ptr != NULL);
	assert(isalloc(tsdn, ptr) == SC_LARGE_MINCLASS);
	assert(usize <= SC_SMALL_MAXCLASS);

	if (config_opt_safety_checks) {
		safety_check_set_redzone(ptr, usize, SC_LARGE_MINCLASS);
	}

	edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);

	szind_t szind = sz_size2index(usize);
	edata_szind_set(edata, szind);
	emap_remap(tsdn, &arena_emap_global, edata, szind, /* slab */ false);

	assert(isalloc(tsdn, ptr) == usize);
}
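
/*
 * A sampled small allocation is backed by a LARGE_MINCLASS extent, but
 * promotion records the requested (small) usize in the emap so that isalloc()
 * reports it.  Demotion below undoes this, restoring the large size index
 * (SC_NBINS maps to SC_LARGE_MINCLASS) so the extent can be released through
 * the large-deallocation path.
 */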
static size_t
arena_prof_demote(tsdn_t *tsdn, edata_t *edata, const void *ptr) {
	cassert(config_prof);
	assert(ptr != NULL);

	edata_szind_set(edata, SC_NBINS);
	emap_remap(tsdn, &arena_emap_global, edata, SC_NBINS, /* slab */ false);

	assert(isalloc(tsdn, ptr) == SC_LARGE_MINCLASS);

	return SC_LARGE_MINCLASS;
}

void
arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
    bool slow_path) {
	cassert(config_prof);
	assert(opt_prof);

	edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
	size_t usize = edata_usize_get(edata);
	size_t bumped_usize = arena_prof_demote(tsdn, edata, ptr);
	if (config_opt_safety_checks && usize < SC_LARGE_MINCLASS) {
		/*
		 * Currently, we only do redzoning for small sampled
		 * allocations.
		 */
		assert(bumped_usize == SC_LARGE_MINCLASS);
		safety_check_verify_redzone(ptr, usize, bumped_usize);
	}
	if (bumped_usize <= tcache_maxclass && tcache != NULL) {
		tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
		    sz_size2index(bumped_usize), slow_path);
	} else {
		large_dalloc(tsdn, edata);
	}
}

static void
arena_dissociate_bin_slab(arena_t *arena, edata_t *slab, bin_t *bin) {
	/* Dissociate slab from bin. */
	if (slab == bin->slabcur) {
		bin->slabcur = NULL;
	} else {
		szind_t binind = edata_szind_get(slab);
		const bin_info_t *bin_info = &bin_infos[binind];

		/*
		 * The following block's conditional is necessary because if the
		 * slab only contains one region, then it never gets inserted
		 * into the non-full slabs heap.
		 */
		if (bin_info->nregs == 1) {
			arena_bin_slabs_full_remove(arena, bin, slab);
		} else {
			arena_bin_slabs_nonfull_remove(bin, slab);
		}
	}
}

static void
arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, edata_t *slab,
    bin_t *bin) {
	assert(edata_nfree_get(slab) > 0);

	/*
	 * Make sure that if bin->slabcur is non-NULL, it refers to the
	 * oldest/lowest non-full slab.  It is okay to NULL slabcur out rather
	 * than proactively keeping it pointing at the oldest/lowest non-full
	 * slab.
	 */
	if (bin->slabcur != NULL && edata_snad_comp(bin->slabcur, slab) > 0) {
		/* Switch slabcur. */
		if (edata_nfree_get(bin->slabcur) > 0) {
			arena_bin_slabs_nonfull_insert(bin, bin->slabcur);
		} else {
			arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
		}
		bin->slabcur = slab;
		if (config_stats) {
			bin->stats.reslabs++;
		}
	} else {
		arena_bin_slabs_nonfull_insert(bin, slab);
	}
}

static void
arena_dalloc_bin_slab_prepare(tsdn_t *tsdn, edata_t *slab, bin_t *bin) {
	malloc_mutex_assert_owner(tsdn, &bin->lock);

	assert(slab != bin->slabcur);
	if (config_stats) {
		bin->stats.curslabs--;
	}
}

/* Returns true if arena_slab_dalloc must be called on slab */
static bool
arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
    szind_t binind, edata_t *slab, void *ptr) {
	const bin_info_t *bin_info = &bin_infos[binind];
	arena_slab_reg_dalloc(slab, edata_slab_data_get(slab), ptr);

	bool ret = false;
	unsigned nfree = edata_nfree_get(slab);
	if (nfree == bin_info->nregs) {
		arena_dissociate_bin_slab(arena, slab, bin);
		arena_dalloc_bin_slab_prepare(tsdn, slab, bin);
		ret = true;
	} else if (nfree == 1 && slab != bin->slabcur) {
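		/*
		 * This deallocation freed the slab's first region, i.e. the
		 * slab was full until now.  Pull it off the full-slab tracking
		 * (a no-op for auto arenas) before lowering it into the
		 * nonfull structures.
		 */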
		arena_bin_slabs_full_remove(arena, bin, slab);
		arena_bin_lower_slab(tsdn, arena, slab, bin);
	}

	if (config_stats) {
		bin->stats.ndalloc++;
		bin->stats.curregs--;
	}

	return ret;
}

bool
arena_dalloc_bin_locked(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
    szind_t binind, edata_t *edata, void *ptr) {
	return arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, edata,
	    ptr);
}

static void
arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, edata_t *edata, void *ptr) {
	szind_t binind = edata_szind_get(edata);
	unsigned binshard = edata_binshard_get(edata);
	bin_t *bin = &arena->bins[binind].bin_shards[binshard];

	malloc_mutex_lock(tsdn, &bin->lock);
	bool ret = arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, edata,
	    ptr);
	malloc_mutex_unlock(tsdn, &bin->lock);

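	/*
	 * The empty slab (if any) is deallocated outside the bin lock:
	 * arena_slab_dalloc may hand pages back to the page allocator, which
	 * is deliberately kept out of the bin critical section.
	 */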
	if (ret) {
		arena_slab_dalloc(tsdn, arena, edata);
	}
}

void
arena_dalloc_small(tsdn_t *tsdn, void *ptr) {
	edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
	arena_t *arena = arena_get_from_edata(edata);

	arena_dalloc_bin(tsdn, arena, edata, ptr);
	arena_decay_tick(tsdn, arena);
}

bool
arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
    size_t extra, bool zero, size_t *newsize) {
	bool ret;
	/* Calls with non-zero extra had to clamp extra. */
	assert(extra == 0 || size + extra <= SC_LARGE_MAXCLASS);

	edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
	if (unlikely(size > SC_LARGE_MAXCLASS)) {
		ret = true;
		goto done;
	}

	size_t usize_min = sz_s2u(size);
	size_t usize_max = sz_s2u(size + extra);
	if (likely(oldsize <= SC_SMALL_MAXCLASS && usize_min
	    <= SC_SMALL_MAXCLASS)) {
		/*
		 * Avoid moving the allocation if the size class can be left the
		 * same.
		 */
		assert(bin_infos[sz_size2index(oldsize)].reg_size ==
		    oldsize);
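		/*
		 * A move is required only when even the largest acceptable
		 * usize would land in a different size class, and oldsize
		 * itself does not fall within the acceptable [size, usize_max]
		 * range (so the current class cannot simply be kept).
		 */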
		if ((usize_max > SC_SMALL_MAXCLASS
		    || sz_size2index(usize_max) != sz_size2index(oldsize))
		    && (size > oldsize || usize_max < oldsize)) {
			ret = true;
			goto done;
		}

		arena_t *arena = arena_get_from_edata(edata);
		arena_decay_tick(tsdn, arena);
		ret = false;
	} else if (oldsize >= SC_LARGE_MINCLASS
	    && usize_max >= SC_LARGE_MINCLASS) {
		ret = large_ralloc_no_move(tsdn, edata, usize_min, usize_max,
		    zero);
	} else {
		ret = true;
	}
done:
	assert(edata == emap_edata_lookup(tsdn, &arena_emap_global, ptr));
	*newsize = edata_usize_get(edata);

	return ret;
}

static void *
arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache) {
	if (alignment == 0) {
		return arena_malloc(tsdn, arena, usize, sz_size2index(usize),
		    zero, tcache, true);
	}
	usize = sz_sa2u(usize, alignment);
	if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) {
		return NULL;
	}
	return ipalloct(tsdn, usize, alignment, zero, tcache, arena);
}

void *
arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
    size_t size, size_t alignment, bool zero, tcache_t *tcache,
    hook_ralloc_args_t *hook_args) {
	size_t usize = alignment == 0 ? sz_s2u(size) : sz_sa2u(size, alignment);
	if (unlikely(usize == 0 || size > SC_LARGE_MAXCLASS)) {
		return NULL;
	}

	if (likely(usize <= SC_SMALL_MAXCLASS)) {
		/* Try to avoid moving the allocation. */
		UNUSED size_t newsize;
		if (!arena_ralloc_no_move(tsdn, ptr, oldsize, usize, 0, zero,
		    &newsize)) {
			hook_invoke_expand(hook_args->is_realloc
			    ? hook_expand_realloc : hook_expand_rallocx,
			    ptr, oldsize, usize, (uintptr_t)ptr,
			    hook_args->args);
			return ptr;
		}
	}

	if (oldsize >= SC_LARGE_MINCLASS
	    && usize >= SC_LARGE_MINCLASS) {
		return large_ralloc(tsdn, arena, ptr, usize,
		    alignment, zero, tcache, hook_args);
	}

	/*
	 * size and oldsize are different enough that we need to move the
	 * object.  In that case, fall back to allocating new space and copying.
	 */
	void *ret = arena_ralloc_move_helper(tsdn, arena, usize, alignment,
	    zero, tcache);
	if (ret == NULL) {
		return NULL;
	}

	hook_invoke_alloc(hook_args->is_realloc
	    ? hook_alloc_realloc : hook_alloc_rallocx, ret, (uintptr_t)ret,
	    hook_args->args);
	hook_invoke_dalloc(hook_args->is_realloc
	    ? hook_dalloc_realloc : hook_dalloc_rallocx, ptr, hook_args->args);

	/*
	 * Junk/zero-filling were already done by
	 * ipalloc()/arena_malloc().
	 */
	size_t copysize = (usize < oldsize) ? usize : oldsize;
	memcpy(ret, ptr, copysize);
	isdalloct(tsdn, ptr, oldsize, tcache, NULL, true);
	return ret;
}

ehooks_t *
arena_get_ehooks(arena_t *arena) {
	return base_ehooks_get(arena->base);
}

extent_hooks_t *
arena_set_extent_hooks(tsd_t *tsd, arena_t *arena,
    extent_hooks_t *extent_hooks) {
	background_thread_info_t *info;
	if (have_background_thread) {
		info = arena_background_thread_info_get(arena);
		malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
	}
	/*
	 * Stop using the HPA now that custom extent hooks are installed;
	 * memory should only come from those hooks from here on.
	 */
	pa_shard_disable_hpa(&arena->pa_shard);
	extent_hooks_t *ret = base_extent_hooks_set(arena->base, extent_hooks);
	if (have_background_thread) {
		malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
	}

	return ret;
}

dss_prec_t
arena_dss_prec_get(arena_t *arena) {
	return (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_ACQUIRE);
}

bool
arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) {
	if (!have_dss) {
		return (dss_prec != dss_prec_disabled);
	}
	atomic_store_u(&arena->dss_prec, (unsigned)dss_prec, ATOMIC_RELEASE);
	return false;
}

ssize_t
arena_dirty_decay_ms_default_get(void) {
	return atomic_load_zd(&dirty_decay_ms_default, ATOMIC_RELAXED);
}

bool
arena_dirty_decay_ms_default_set(ssize_t decay_ms) {
	if (!decay_ms_valid(decay_ms)) {
		return true;
	}
	atomic_store_zd(&dirty_decay_ms_default, decay_ms, ATOMIC_RELAXED);
	return false;
}

ssize_t
arena_muzzy_decay_ms_default_get(void) {
	return atomic_load_zd(&muzzy_decay_ms_default, ATOMIC_RELAXED);
}

bool
arena_muzzy_decay_ms_default_set(ssize_t decay_ms) {
	if (!decay_ms_valid(decay_ms)) {
		return true;
	}
	atomic_store_zd(&muzzy_decay_ms_default, decay_ms, ATOMIC_RELAXED);
	return false;
}

bool
arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena, size_t *old_limit,
    size_t *new_limit) {
	assert(opt_retain);
	return pac_retain_grow_limit_get_set(tsd_tsdn(tsd),
	    &arena->pa_shard.pac, old_limit, new_limit);
}

unsigned
arena_nthreads_get(arena_t *arena, bool internal) {
	return atomic_load_u(&arena->nthreads[internal], ATOMIC_RELAXED);
}

void
arena_nthreads_inc(arena_t *arena, bool internal) {
	atomic_fetch_add_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
}

void
arena_nthreads_dec(arena_t *arena, bool internal) {
	atomic_fetch_sub_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
}

arena_t *
arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
	arena_t *arena;
	base_t *base;
	unsigned i;

	if (ind == 0) {
		base = b0get();
	} else {
		base = base_new(tsdn, ind, extent_hooks);
		if (base == NULL) {
			return NULL;
		}
	}

	unsigned nbins_total = 0;
	for (i = 0; i < SC_NBINS; i++) {
		nbins_total += bin_infos[i].n_shards;
	}
	size_t arena_size = sizeof(arena_t) + sizeof(bin_t) * nbins_total;
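	/*
	 * All bin shards live in the same base allocation as the arena_t
	 * itself, laid out immediately after it; the per-size-class bin_shards
	 * pointers are wired into that region in the initialization loop
	 * below.
	 */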
	arena = (arena_t *)base_alloc(tsdn, base, arena_size, CACHELINE);
	if (arena == NULL) {
		goto label_error;
	}

	atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
	atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
	arena->last_thd = NULL;

	if (config_stats) {
		if (arena_stats_init(tsdn, &arena->stats)) {
			goto label_error;
		}

		ql_new(&arena->tcache_ql);
		ql_new(&arena->cache_bin_array_descriptor_ql);
		if (malloc_mutex_init(&arena->tcache_ql_mtx, "tcache_ql",
		    WITNESS_RANK_TCACHE_QL, malloc_mutex_rank_exclusive)) {
			goto label_error;
		}
	}

	atomic_store_u(&arena->dss_prec, (unsigned)extent_dss_prec_get(),
	    ATOMIC_RELAXED);

	edata_list_active_init(&arena->large);
	if (malloc_mutex_init(&arena->large_mtx, "arena_large",
	    WITNESS_RANK_ARENA_LARGE, malloc_mutex_rank_exclusive)) {
		goto label_error;
	}

	nstime_t cur_time;
	nstime_init_update(&cur_time);
	if (pa_shard_init(tsdn, &arena->pa_shard, &arena_emap_global, base, ind,
	    &arena->stats.pa_shard_stats, LOCKEDINT_MTX(arena->stats.mtx),
	    &cur_time, arena_dirty_decay_ms_default_get(),
	    arena_muzzy_decay_ms_default_get())) {
		goto label_error;
	}

	/* Initialize bins. */
	uintptr_t bin_addr = (uintptr_t)arena + sizeof(arena_t);
	atomic_store_u(&arena->binshard_next, 0, ATOMIC_RELEASE);
	for (i = 0; i < SC_NBINS; i++) {
		unsigned nshards = bin_infos[i].n_shards;
		arena->bins[i].bin_shards = (bin_t *)bin_addr;
		bin_addr += nshards * sizeof(bin_t);
		for (unsigned j = 0; j < nshards; j++) {
			bool err = bin_init(&arena->bins[i].bin_shards[j]);
			if (err) {
				goto label_error;
			}
		}
	}
	assert(bin_addr == (uintptr_t)arena + arena_size);

	arena->base = base;
	/* Set arena before creating background threads. */
	arena_set(ind, arena);

	nstime_init_update(&arena->create_time);

	/*
	 * We turn on the HPA if requested.  There are two exceptions:
	 * - Custom extent hooks (we should only return memory allocated from
	 *   them in that case).
	 * - Arena 0 initialization.  In this case, we're mid-bootstrapping,
	 *   and so arena_hpa_global is not yet initialized.
	 */
	if (opt_hpa && ehooks_are_default(base_ehooks_get(base)) && ind != 0) {
		if (pa_shard_enable_hpa(&arena->pa_shard, &arena_hpa_global,
		    opt_hpa_slab_goal, opt_hpa_slab_max_alloc)) {
			goto label_error;
		}
	}

	/* We don't support reentrancy for arena 0 bootstrapping. */
	if (ind != 0) {
		/*
		 * If we're here, then arena 0 already exists, so bootstrapping
		 * is done enough that we should have tsd.
		 */
		assert(!tsdn_null(tsdn));
		pre_reentrancy(tsdn_tsd(tsdn), arena);
		if (test_hooks_arena_new_hook) {
			test_hooks_arena_new_hook();
		}
		post_reentrancy(tsdn_tsd(tsdn));
	}

	return arena;
label_error:
	if (ind != 0) {
		base_delete(tsdn, base);
	}
	return NULL;
}

arena_t *
arena_choose_huge(tsd_t *tsd) {
	/* huge_arena_ind can be 0 during init (will use a0). */
	if (huge_arena_ind == 0) {
		assert(!malloc_initialized());
	}

	arena_t *huge_arena = arena_get(tsd_tsdn(tsd), huge_arena_ind, false);
	if (huge_arena == NULL) {
		/* Create the huge arena on demand. */
		assert(huge_arena_ind != 0);
		huge_arena = arena_get(tsd_tsdn(tsd), huge_arena_ind, true);
		if (huge_arena == NULL) {
			return NULL;
		}
		/*
		 * Purge eagerly for huge allocations, because: 1) number of
		 * huge allocations is usually small, which means ticker based
		 * decay is not reliable; and 2) less immediate reuse is
		 * expected for huge allocations.
		 */
		if (arena_dirty_decay_ms_default_get() > 0) {
			arena_decay_ms_set(tsd_tsdn(tsd), huge_arena,
			    extent_state_dirty, 0);
		}
		if (arena_muzzy_decay_ms_default_get() > 0) {
			arena_decay_ms_set(tsd_tsdn(tsd), huge_arena,
			    extent_state_muzzy, 0);
		}
	}

	return huge_arena;
}

bool
arena_init_huge(void) {
	bool huge_enabled;

	/* The threshold must be a large size class. */
	if (opt_oversize_threshold > SC_LARGE_MAXCLASS ||
	    opt_oversize_threshold < SC_LARGE_MINCLASS) {
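		/*
		 * Out-of-range threshold: zero the option and park
		 * oversize_threshold above the largest size class so that no
		 * allocation ever qualifies as huge.
		 */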
		opt_oversize_threshold = 0;
		oversize_threshold = SC_LARGE_MAXCLASS + PAGE;
		huge_enabled = false;
	} else {
		/* Reserve the index for the huge arena. */
		huge_arena_ind = narenas_total_get();
		oversize_threshold = opt_oversize_threshold;
		huge_enabled = true;
	}

	return huge_enabled;
}

bool
arena_is_huge(unsigned arena_ind) {
	if (huge_arena_ind == 0) {
		return false;
	}
	return (arena_ind == huge_arena_ind);
}

void
arena_boot(sc_data_t *sc_data) {
	arena_dirty_decay_ms_default_set(opt_dirty_decay_ms);
	arena_muzzy_decay_ms_default_set(opt_muzzy_decay_ms);
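	/*
	 * (1U << lg_base) + (ndelta << lg_delta) is the region size of size
	 * class i; precompute its division magic so that deallocation can map
	 * a pointer's offset within a slab back to a region index without a
	 * runtime divide.
	 */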
	for (unsigned i = 0; i < SC_NBINS; i++) {
		sc_t *sc = &sc_data->sc[i];
		div_init(&arena_binind_div_info[i],
		    (1U << sc->lg_base) + (sc->ndelta << sc->lg_delta));
	}
}
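
/*
 * The numbered prefork phases below acquire the arena's mutexes in a fixed
 * order ahead of fork(); the matching postfork functions either release them
 * in the parent or reinitialize them in the child.  The phase ordering broadly
 * mirrors the lock rank hierarchy.
 */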
void
arena_prefork0(tsdn_t *tsdn, arena_t *arena) {
	pa_shard_prefork0(tsdn, &arena->pa_shard);
}

void
arena_prefork1(tsdn_t *tsdn, arena_t *arena) {
	if (config_stats) {
		malloc_mutex_prefork(tsdn, &arena->tcache_ql_mtx);
	}
}

void
arena_prefork2(tsdn_t *tsdn, arena_t *arena) {
	pa_shard_prefork2(tsdn, &arena->pa_shard);
}

void
arena_prefork3(tsdn_t *tsdn, arena_t *arena) {
	pa_shard_prefork3(tsdn, &arena->pa_shard);
}

void
arena_prefork4(tsdn_t *tsdn, arena_t *arena) {
	pa_shard_prefork4(tsdn, &arena->pa_shard);
}

void
arena_prefork5(tsdn_t *tsdn, arena_t *arena) {
	base_prefork(tsdn, arena->base);
}

void
arena_prefork6(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_prefork(tsdn, &arena->large_mtx);
}

void
arena_prefork7(tsdn_t *tsdn, arena_t *arena) {
	for (unsigned i = 0; i < SC_NBINS; i++) {
		for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
			bin_prefork(tsdn, &arena->bins[i].bin_shards[j]);
		}
	}
}

void
arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
	unsigned i;

	for (i = 0; i < SC_NBINS; i++) {
		for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
			bin_postfork_parent(tsdn,
			    &arena->bins[i].bin_shards[j]);
		}
	}
	malloc_mutex_postfork_parent(tsdn, &arena->large_mtx);
	base_postfork_parent(tsdn, arena->base);
	pa_shard_postfork_parent(tsdn, &arena->pa_shard);
	if (config_stats) {
		malloc_mutex_postfork_parent(tsdn, &arena->tcache_ql_mtx);
	}
}

void
arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
	unsigned i;

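	/*
	 * Only the forking thread survives in the child, so reset the thread
	 * counts and re-register just that thread (and, under config_stats,
	 * its tcache) with this arena.
	 */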
	atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
	atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
	if (tsd_arena_get(tsdn_tsd(tsdn)) == arena) {
		arena_nthreads_inc(arena, false);
	}
	if (tsd_iarena_get(tsdn_tsd(tsdn)) == arena) {
		arena_nthreads_inc(arena, true);
	}
	if (config_stats) {
		ql_new(&arena->tcache_ql);
		ql_new(&arena->cache_bin_array_descriptor_ql);
		tcache_slow_t *tcache_slow = tcache_slow_get(tsdn_tsd(tsdn));
		if (tcache_slow != NULL && tcache_slow->arena == arena) {
			tcache_t *tcache = tcache_slow->tcache;
			ql_elm_new(tcache_slow, link);
			ql_tail_insert(&arena->tcache_ql, tcache_slow, link);
			cache_bin_array_descriptor_init(
			    &tcache_slow->cache_bin_array_descriptor,
			    tcache->bins);
			ql_tail_insert(&arena->cache_bin_array_descriptor_ql,
			    &tcache_slow->cache_bin_array_descriptor, link);
		}
	}

	for (i = 0; i < SC_NBINS; i++) {
		for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
			bin_postfork_child(tsdn, &arena->bins[i].bin_shards[j]);
		}
	}
	malloc_mutex_postfork_child(tsdn, &arena->large_mtx);
	base_postfork_child(tsdn, arena->base);
	pa_shard_postfork_child(tsdn, &arena->pa_shard);
	if (config_stats) {
		malloc_mutex_postfork_child(tsdn, &arena->tcache_ql_mtx);
	}
}