#define JEMALLOC_EXTENT_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/ph.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_pool.h"

/******************************************************************************/
/* Data. */

rtree_t extents_rtree;
/* Keyed by the address of the extent_t being protected. */
mutex_pool_t extent_mutex_pool;

size_t opt_lg_extent_max_active_fit = LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT;

static void *extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit,
    unsigned arena_ind);
static bool extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, bool committed, unsigned arena_ind);
static void extent_destroy_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, bool committed, unsigned arena_ind);
static bool extent_commit_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t offset, size_t length, unsigned arena_ind);
static bool extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t *extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained);
static bool extent_decommit_default(extent_hooks_t *extent_hooks,
    void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
#ifdef PAGES_CAN_PURGE_LAZY
static bool extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t offset, size_t length, unsigned arena_ind);
#endif
static bool extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t *extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained);
#ifdef PAGES_CAN_PURGE_FORCED
static bool extent_purge_forced_default(extent_hooks_t *extent_hooks,
    void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
#endif
static bool extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t *extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained);
static bool extent_split_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t size_a, size_t size_b, bool committed,
    unsigned arena_ind);
static extent_t *extent_split_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t *extent_hooks, extent_t *extent, size_t size_a,
    szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
    bool growing_retained);
static bool extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a,
    size_t size_a, void *addr_b, size_t size_b, bool committed,
    unsigned arena_ind);
static bool extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t *extent_hooks, extent_t *a, extent_t *b,
    bool growing_retained);

const extent_hooks_t extent_hooks_default = {
	extent_alloc_default,
	extent_dalloc_default,
	extent_destroy_default,
	extent_commit_default,
	extent_decommit_default
#ifdef PAGES_CAN_PURGE_LAZY
	,
	extent_purge_lazy_default
#else
	,
	NULL
#endif
#ifdef PAGES_CAN_PURGE_FORCED
	,
	extent_purge_forced_default
#else
	,
	NULL
#endif
	,
	extent_split_default,
	extent_merge_default
};

/* Used exclusively for gdump triggering. */
static atomic_zu_t curpages;
static atomic_zu_t highpages;

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void extent_deregister(tsdn_t *tsdn, extent_t *extent);
static extent_t *extent_recycle(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t *extent_hooks, eset_t *eset, void *new_addr,
    size_t usize, size_t pad, size_t alignment, bool slab, szind_t szind,
    bool *zero, bool *commit, bool growing_retained);
static extent_t *extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t *extent_hooks, rtree_ctx_t *rtree_ctx, eset_t *eset,
    extent_t *extent, bool *coalesced, bool growing_retained);
static void extent_record(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t *extent_hooks, eset_t *eset, extent_t *extent,
    bool growing_retained);

/******************************************************************************/

#define ATTR_NONE /* does nothing */

ph_gen(ATTR_NONE, extent_avail_, extent_tree_t, extent_t, ph_link,
    extent_esnead_comp)

#undef ATTR_NONE

typedef enum {
	lock_result_success,
	lock_result_failure,
	lock_result_no_extent
} lock_result_t;

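/*
 * Try to look up (and pool-lock) the extent that the given rtree leaf element
 * currently maps to.  The mapping can change between the initial unlocked read
 * and acquiring the lock, hence the three-way result.
 */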
static lock_result_t
extent_rtree_leaf_elm_try_lock(tsdn_t *tsdn, rtree_leaf_elm_t *elm,
    extent_t **result, bool inactive_only) {
	extent_t *extent1 = rtree_leaf_elm_extent_read(tsdn, &extents_rtree,
	    elm, true);

	/* Slab implies active extents and should be skipped. */
	if (extent1 == NULL || (inactive_only && rtree_leaf_elm_slab_read(tsdn,
	    &extents_rtree, elm, true))) {
		return lock_result_no_extent;
	}

	/*
	 * It's possible that the extent changed out from under us, and with it
	 * the leaf->extent mapping.  We have to recheck while holding the lock.
	 */
	extent_lock(tsdn, extent1);
	extent_t *extent2 = rtree_leaf_elm_extent_read(tsdn,
	    &extents_rtree, elm, true);

	if (extent1 == extent2) {
		*result = extent1;
		return lock_result_success;
	} else {
		extent_unlock(tsdn, extent1);
		return lock_result_failure;
	}
}

/*
 * Returns a pool-locked extent_t * if there's one associated with the given
 * address, and NULL otherwise.
 */
static extent_t *
extent_lock_from_addr(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, void *addr,
    bool inactive_only) {
	extent_t *ret = NULL;
	rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &extents_rtree,
	    rtree_ctx, (uintptr_t)addr, false, false);
	if (elm == NULL) {
		return NULL;
	}
	lock_result_t lock_result;
	do {
		lock_result = extent_rtree_leaf_elm_try_lock(tsdn, elm, &ret,
		    inactive_only);
	} while (lock_result == lock_result_failure);
	return ret;
}

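/*
 * Randomize the usable address at alignment granularity within the extent's
 * first page (only when alignment < PAGE), using the per-thread PRNG state
 * when TSD is available and a stack-address-derived seed otherwise (e.g.
 * during bootstrapping).
 */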
static void
extent_addr_randomize(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
    size_t alignment) {
	assert(extent_base_get(extent) == extent_addr_get(extent));

	if (alignment < PAGE) {
		unsigned lg_range = LG_PAGE -
		    lg_floor(CACHELINE_CEILING(alignment));
		size_t r;
		if (!tsdn_null(tsdn)) {
			tsd_t *tsd = tsdn_tsd(tsdn);
			r = (size_t)prng_lg_range_u64(
			    tsd_prng_statep_get(tsd), lg_range);
		} else {
			uint64_t stack_value = (uint64_t)(uintptr_t)&r;
			r = (size_t)prng_lg_range_u64(&stack_value, lg_range);
		}
		uintptr_t random_offset = ((uintptr_t)r) << (LG_PAGE -
		    lg_range);
		extent->e_addr = (void *)((uintptr_t)extent->e_addr +
		    random_offset);
		assert(ALIGNMENT_ADDR2BASE(extent->e_addr, alignment) ==
		    extent->e_addr);
	}
}

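/*
 * extent_alloc()/extent_dalloc() recycle extent_t structures (the metadata
 * describing extents, not the underlying pages) via a per-arena list.
 */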
extent_t *
extent_alloc(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
	extent_t *extent = extent_avail_first(&arena->extent_avail);
	if (extent == NULL) {
		malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
		return base_alloc_extent(tsdn, arena->base);
	}
	extent_avail_remove(&arena->extent_avail, extent);
	atomic_fetch_sub_zu(&arena->extent_avail_cnt, 1, ATOMIC_RELAXED);
	malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
	return extent;
}

void
extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
	malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
	extent_avail_insert(&arena->extent_avail, extent);
	atomic_fetch_add_zu(&arena->extent_avail_cnt, 1, ATOMIC_RELAXED);
	malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
}

ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp)

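/*
 * Attempt the coalescing that was deferred when the extent entered the eset
 * (delay_coalesce).  Returns true if the extent did not coalesce (the caller
 * may evict it); returns false if it merged with a neighbor and was reinserted
 * into the eset.
 */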
static bool
extent_try_delayed_coalesce(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t *extent_hooks, rtree_ctx_t *rtree_ctx, eset_t *eset,
    extent_t *extent) {
	extent_state_set(extent, extent_state_active);
	bool coalesced;
	extent = extent_try_coalesce(tsdn, arena, extent_hooks, rtree_ctx,
	    eset, extent, &coalesced, false);
	extent_state_set(extent, eset_state_get(eset));

	if (!coalesced) {
		return true;
	}
	eset_insert_locked(tsdn, eset, extent);
	return false;
}

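/*
 * Allocate from the given eset by recycling a cached extent; returns NULL
 * (rather than mapping new memory) if nothing suitable is cached.
 */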
extent_t *
extents_alloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t *extent_hooks,
    eset_t *eset, void *new_addr, size_t size, size_t pad,
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
	assert(size + pad != 0);
	assert(alignment != 0);
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extent_t *extent = extent_recycle(tsdn, arena, extent_hooks, eset,
	    new_addr, size, pad, alignment, slab, szind, zero, commit, false);
	assert(extent == NULL || extent_dumpable_get(extent));
	return extent;
}

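/*
 * Return an extent to the given eset, resetting its address to the base
 * (undoing any sub-page randomization) and clearing the zeroed flag.
 */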
void
extents_dalloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t *extent_hooks,
    eset_t *eset, extent_t *extent) {
	assert(extent_base_get(extent) != NULL);
	assert(extent_size_get(extent) != 0);
	assert(extent_dumpable_get(extent));
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extent_addr_set(extent, extent_base_get(extent));
	extent_zeroed_set(extent, false);

	extent_record(tsdn, arena, extent_hooks, eset, extent, false);
}

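/*
 * Remove and return the (fully coalesced) LRU extent, or NULL if the eset is
 * empty or already holds no more than npages_min pages.
 */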
extent_t *
extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t *extent_hooks,
    eset_t *eset, size_t npages_min) {
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	malloc_mutex_lock(tsdn, &eset->mtx);

	/*
	 * Get the LRU coalesced extent, if any.  If coalescing was delayed,
	 * the loop will iterate until the LRU extent is fully coalesced.
	 */
	extent_t *extent;
	while (true) {
		/* Get the LRU extent, if any. */
		extent = extent_list_first(&eset->lru);
		if (extent == NULL) {
			goto label_return;
		}
		/* Check the eviction limit. */
		size_t extents_npages = atomic_load_zu(&eset->npages,
		    ATOMIC_RELAXED);
		if (extents_npages <= npages_min) {
			extent = NULL;
			goto label_return;
		}
		eset_remove_locked(tsdn, eset, extent);
		if (!eset->delay_coalesce) {
			break;
		}
		/* Try to coalesce. */
		if (extent_try_delayed_coalesce(tsdn, arena, extent_hooks,
		    rtree_ctx, eset, extent)) {
			break;
		}
		/*
		 * The LRU extent was just coalesced and the result placed in
		 * the LRU at its neighbor's position.  Start over.
		 */
	}

	/*
	 * Either mark the extent active or deregister it to protect against
	 * concurrent operations.
	 */
	switch (eset_state_get(eset)) {
	case extent_state_active:
		not_reached();
	case extent_state_dirty:
	case extent_state_muzzy:
		extent_state_set(extent, extent_state_active);
		break;
	case extent_state_retained:
		extent_deregister(tsdn, extent);
		break;
	default:
		not_reached();
	}

label_return:
	malloc_mutex_unlock(tsdn, &eset->mtx);
	return extent;
}

/*
 * This can only happen when we fail to allocate a new extent struct (which
 * indicates OOM), e.g. when trying to split an existing extent.
 */
static void
extents_abandon_vm(tsdn_t *tsdn, arena_t *arena, extent_hooks_t *extent_hooks,
    eset_t *eset, extent_t *extent, bool growing_retained) {
	size_t sz = extent_size_get(extent);
	if (config_stats) {
		arena_stats_accum_zu(&arena->stats.abandoned_vm, sz);
	}
	/*
	 * Leak extent after making sure its pages have already been purged, so
	 * that this is only a virtual memory leak.
	 */
	if (eset_state_get(eset) == extent_state_dirty) {
		if (extent_purge_lazy_impl(tsdn, arena, extent_hooks,
		    extent, 0, sz, growing_retained)) {
			extent_purge_forced_impl(tsdn, arena, extent_hooks,
			    extent, 0, extent_size_get(extent),
			    growing_retained);
		}
	}
	extent_dalloc(tsdn, arena, extent);
}

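/*
 * Deactivation transitions an extent from the active state into the state
 * associated with the given eset (and inserts it); activation is the inverse.
 */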
static void
extent_deactivate_locked(tsdn_t *tsdn, arena_t *arena, eset_t *eset,
    extent_t *extent) {
	assert(extent_arena_ind_get(extent) == arena_ind_get(arena));
	assert(extent_state_get(extent) == extent_state_active);

	extent_state_set(extent, eset_state_get(eset));
	eset_insert_locked(tsdn, eset, extent);
}

static void
extent_deactivate(tsdn_t *tsdn, arena_t *arena, eset_t *eset,
    extent_t *extent) {
	malloc_mutex_lock(tsdn, &eset->mtx);
	extent_deactivate_locked(tsdn, arena, eset, extent);
	malloc_mutex_unlock(tsdn, &eset->mtx);
}

static void
extent_activate_locked(tsdn_t *tsdn, arena_t *arena, eset_t *eset,
    extent_t *extent) {
	assert(extent_arena_ind_get(extent) == arena_ind_get(arena));
	assert(extent_state_get(extent) == eset_state_get(eset));

	eset_remove_locked(tsdn, eset, extent);
	extent_state_set(extent, extent_state_active);
}

static bool
extent_rtree_leaf_elms_lookup(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
    const extent_t *extent, bool dependent, bool init_missing,
    rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b) {
	*r_elm_a = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)extent_base_get(extent), dependent, init_missing);
	if (!dependent && *r_elm_a == NULL) {
		return true;
	}
	assert(*r_elm_a != NULL);

	*r_elm_b = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)extent_last_get(extent), dependent, init_missing);
	if (!dependent && *r_elm_b == NULL) {
		return true;
	}
	assert(*r_elm_b != NULL);

	return false;
}

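/*
 * Write the extent (along with its size class and slab flag) into the rtree
 * leaf elements covering its first and last pages.
 */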
static void
extent_rtree_write_acquired(tsdn_t *tsdn, rtree_leaf_elm_t *elm_a,
    rtree_leaf_elm_t *elm_b, extent_t *extent, szind_t szind, bool slab) {
	rtree_leaf_elm_write(tsdn, &extents_rtree, elm_a, extent, szind, slab);
	if (elm_b != NULL) {
		rtree_leaf_elm_write(tsdn, &extents_rtree, elm_b, extent, szind,
		    slab);
	}
}

static void
extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, extent_t *extent,
    szind_t szind) {
	assert(extent_slab_get(extent));

	/* Register interior. */
	for (size_t i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
		rtree_write(tsdn, &extents_rtree, rtree_ctx,
		    (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
		    LG_PAGE), extent, szind, true);
	}
}

static void
extent_gdump_add(tsdn_t *tsdn, const extent_t *extent) {
	cassert(config_prof);
	/* prof_gdump() requirement. */
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	if (opt_prof && extent_state_get(extent) == extent_state_active) {
		size_t nadd = extent_size_get(extent) >> LG_PAGE;
		size_t cur = atomic_fetch_add_zu(&curpages, nadd,
		    ATOMIC_RELAXED) + nadd;
		size_t high = atomic_load_zu(&highpages, ATOMIC_RELAXED);
		while (cur > high && !atomic_compare_exchange_weak_zu(
		    &highpages, &high, cur, ATOMIC_RELAXED, ATOMIC_RELAXED)) {
			/*
			 * Don't refresh cur, because it may have decreased
			 * since this thread lost the highpages update race.
			 * Note that high is updated in case of CAS failure.
			 */
		}
		if (cur > high && prof_gdump_get_unlocked()) {
			prof_gdump(tsdn);
		}
	}
}

static void
extent_gdump_sub(tsdn_t *tsdn, const extent_t *extent) {
	cassert(config_prof);

	if (opt_prof && extent_state_get(extent) == extent_state_active) {
		size_t nsub = extent_size_get(extent) >> LG_PAGE;
		assert(atomic_load_zu(&curpages, ATOMIC_RELAXED) >= nsub);
		atomic_fetch_sub_zu(&curpages, nsub, ATOMIC_RELAXED);
	}
}

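/*
 * Publish the extent in the global rtree (base and last pages, plus the
 * interior for slabs) so that address-based lookups can find it.
 */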
static bool
extent_register_impl(tsdn_t *tsdn, extent_t *extent, bool gdump_add) {
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	rtree_leaf_elm_t *elm_a, *elm_b;

	/*
	 * We need to hold the lock to protect against a concurrent coalesce
	 * operation that sees us in a partial state.
	 */
	extent_lock(tsdn, extent);

	if (extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, false, true,
	    &elm_a, &elm_b)) {
		extent_unlock(tsdn, extent);
		return true;
	}

	szind_t szind = extent_szind_get_maybe_invalid(extent);
	bool slab = extent_slab_get(extent);
	extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent, szind, slab);
	if (slab) {
		extent_interior_register(tsdn, rtree_ctx, extent, szind);
	}

	extent_unlock(tsdn, extent);

	if (config_prof && gdump_add) {
		extent_gdump_add(tsdn, extent);
	}

	return false;
}

static bool
extent_register(tsdn_t *tsdn, extent_t *extent) {
	return extent_register_impl(tsdn, extent, true);
}

static bool
extent_register_no_gdump_add(tsdn_t *tsdn, extent_t *extent) {
	return extent_register_impl(tsdn, extent, false);
}

static void
extent_reregister(tsdn_t *tsdn, extent_t *extent) {
	bool err = extent_register(tsdn, extent);
	assert(!err);
}

/*
 * Removes all pointers to the given extent from the global rtree indices for
 * its interior.  This is relevant for slab extents, for which we need to do
 * metadata lookups at places other than the head of the extent.  We deregister
 * on the interior, then, when an extent moves from being an active slab to an
 * inactive state.
 */
static void
extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
    extent_t *extent) {
	size_t i;

	assert(extent_slab_get(extent));

	for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
		rtree_clear(tsdn, &extents_rtree, rtree_ctx,
		    (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
		    LG_PAGE));
	}
}

/*
 * Removes all pointers to the given extent from the global rtree.
 */
static void
extent_deregister_impl(tsdn_t *tsdn, extent_t *extent, bool gdump) {
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	rtree_leaf_elm_t *elm_a, *elm_b;
	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, true, false,
	    &elm_a, &elm_b);

	extent_lock(tsdn, extent);

	extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL, SC_NSIZES, false);
	if (extent_slab_get(extent)) {
		extent_interior_deregister(tsdn, rtree_ctx, extent);
		extent_slab_set(extent, false);
	}

	extent_unlock(tsdn, extent);

	if (config_prof && gdump) {
		extent_gdump_sub(tsdn, extent);
	}
}

static void
extent_deregister(tsdn_t *tsdn, extent_t *extent) {
	extent_deregister_impl(tsdn, extent, true);
}

static void
extent_deregister_no_gdump_sub(tsdn_t *tsdn, extent_t *extent) {
	extent_deregister_impl(tsdn, extent, false);
}

/*
 * Tries to find and remove an extent from eset that can be used for the
 * given allocation request.
 */
static extent_t *
extent_recycle_extract(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t *extent_hooks, rtree_ctx_t *rtree_ctx, eset_t *eset,
    void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
    bool growing_retained) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
	assert(alignment > 0);
	if (config_debug && new_addr != NULL) {
		/*
		 * Non-NULL new_addr has two use cases:
		 *
		 *   1) Recycle a known-extant extent, e.g. during purging.
		 *   2) Perform in-place expanding reallocation.
		 *
		 * Regardless of use case, new_addr must either refer to a
		 * non-existing extent, or to the base of an extant extent,
		 * since only active slabs support interior lookups (which of
		 * course cannot be recycled).
		 */
		assert(PAGE_ADDR2BASE(new_addr) == new_addr);
		assert(pad == 0);
		assert(alignment <= PAGE);
	}

	size_t esize = size + pad;
	malloc_mutex_lock(tsdn, &eset->mtx);
	extent_t *extent;
	if (new_addr != NULL) {
		extent = extent_lock_from_addr(tsdn, rtree_ctx, new_addr,
		    false);
		if (extent != NULL) {
			/*
			 * We might null-out extent to report an error, but we
			 * still need to unlock the associated mutex after.
			 */
			extent_t *unlock_extent = extent;
			assert(extent_base_get(extent) == new_addr);
			if (extent_arena_ind_get(extent)
			    != arena_ind_get(arena) ||
			    extent_size_get(extent) < esize ||
			    extent_state_get(extent) !=
			    eset_state_get(eset)) {
				extent = NULL;
			}
			extent_unlock(tsdn, unlock_extent);
		}
	} else {
		extent = eset_fit_locked(tsdn, eset, esize, alignment);
	}
	if (extent == NULL) {
		malloc_mutex_unlock(tsdn, &eset->mtx);
		return NULL;
	}

	extent_activate_locked(tsdn, arena, eset, extent);
	malloc_mutex_unlock(tsdn, &eset->mtx);

	return extent;
}

/*
 * Given an allocation request and an extent guaranteed to be able to satisfy
 * it, this splits off lead and trail extents, leaving extent pointing to an
 * extent satisfying the allocation.
 * This function doesn't put lead or trail into any eset_t; it's the caller's
 * job to ensure that they can be reused.
 */
typedef enum {
	/*
	 * Split successfully.  lead, extent, and trail are modified to extents
	 * describing the ranges before, in, and after the given allocation.
	 */
	extent_split_interior_ok,
	/*
	 * The extent can't satisfy the given allocation request.  None of the
	 * input extent_t *s are touched.
	 */
	extent_split_interior_cant_alloc,
	/*
	 * In a potentially invalid state.  Must leak (if *to_leak is non-NULL),
	 * and salvage what's still salvageable (if *to_salvage is non-NULL).
	 * None of lead, extent, or trail are valid.
	 */
	extent_split_interior_error
} extent_split_interior_result_t;

static extent_split_interior_result_t
extent_split_interior(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t *extent_hooks, rtree_ctx_t *rtree_ctx,
    /* The result of splitting, in case of success. */
    extent_t **extent, extent_t **lead, extent_t **trail,
    /* The mess to clean up, in case of error. */
    extent_t **to_leak, extent_t **to_salvage,
    void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
    szind_t szind, bool growing_retained) {
	size_t esize = size + pad;
	size_t leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(*extent),
	    PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(*extent);
	assert(new_addr == NULL || leadsize == 0);
	if (extent_size_get(*extent) < leadsize + esize) {
		return extent_split_interior_cant_alloc;
	}
	size_t trailsize = extent_size_get(*extent) - leadsize - esize;

	*lead = NULL;
	*trail = NULL;
	*to_leak = NULL;
	*to_salvage = NULL;

	/* Split the lead. */
	if (leadsize != 0) {
		*lead = *extent;
		*extent = extent_split_impl(tsdn, arena, extent_hooks,
		    *lead, leadsize, SC_NSIZES, false, esize + trailsize, szind,
		    slab, growing_retained);
		if (*extent == NULL) {
			*to_leak = *lead;
			*lead = NULL;
			return extent_split_interior_error;
		}
	}

	/* Split the trail. */
	if (trailsize != 0) {
		*trail = extent_split_impl(tsdn, arena, extent_hooks, *extent,
		    esize, szind, slab, trailsize, SC_NSIZES, false,
		    growing_retained);
		if (*trail == NULL) {
			*to_leak = *extent;
			*to_salvage = *lead;
			*lead = NULL;
			*extent = NULL;
			return extent_split_interior_error;
		}
	}

	if (leadsize == 0 && trailsize == 0) {
		/*
		 * Splitting causes szind to be set as a side effect, but no
		 * splitting occurred.
		 */
		extent_szind_set(*extent, szind);
		if (szind != SC_NSIZES) {
			rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
			    (uintptr_t)extent_addr_get(*extent), szind, slab);
			if (slab && extent_size_get(*extent) > PAGE) {
				rtree_szind_slab_update(tsdn, &extents_rtree,
				    rtree_ctx,
				    (uintptr_t)extent_past_get(*extent) -
				    (uintptr_t)PAGE, szind, slab);
			}
		}
	}

	return extent_split_interior_ok;
}

/*
 * This fulfills the indicated allocation request out of the given extent (which
 * the caller should have ensured was big enough).  If there's any unused space
 * before or after the resulting allocation, that space is given its own extent
 * and put back into eset.
 */
static extent_t *
extent_recycle_split(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t *extent_hooks, rtree_ctx_t *rtree_ctx, eset_t *eset,
    void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
    szind_t szind, extent_t *extent, bool growing_retained) {
	extent_t *lead;
	extent_t *trail;
	extent_t *to_leak;
	extent_t *to_salvage;

	extent_split_interior_result_t result = extent_split_interior(
	    tsdn, arena, extent_hooks, rtree_ctx, &extent, &lead, &trail,
	    &to_leak, &to_salvage, new_addr, size, pad, alignment, slab, szind,
	    growing_retained);

	if (!maps_coalesce && result != extent_split_interior_ok
	    && !opt_retain) {
		/*
		 * Split isn't supported (implies Windows w/o retain).  Avoid
		 * leaking the extent.
		 */
		assert(to_leak != NULL && lead == NULL && trail == NULL);
		extent_deactivate(tsdn, arena, eset, to_leak);
		return NULL;
	}

	if (result == extent_split_interior_ok) {
		if (lead != NULL) {
			extent_deactivate(tsdn, arena, eset, lead);
		}
		if (trail != NULL) {
			extent_deactivate(tsdn, arena, eset, trail);
		}
		return extent;
	} else {
		/*
		 * We should have picked an extent that was large enough to
		 * fulfill our allocation request.
		 */
		assert(result == extent_split_interior_error);
		if (to_salvage != NULL) {
			extent_deregister(tsdn, to_salvage);
		}
		if (to_leak != NULL) {
			void *leak = extent_base_get(to_leak);
			extent_deregister_no_gdump_sub(tsdn, to_leak);
			extents_abandon_vm(tsdn, arena, extent_hooks, eset,
			    to_leak, growing_retained);
			assert(extent_lock_from_addr(tsdn, rtree_ctx, leak,
			    false) == NULL);
		}
		return NULL;
	}
	unreachable();
}

static bool
extent_need_manual_zero(arena_t *arena) {
	/*
	 * Need to manually zero the extent on repopulating if either: 1) non-
	 * default extent hooks installed (in which case the purge semantics may
	 * change); or 2) transparent huge pages enabled.
	 */
	return (!arena_has_default_hooks(arena) ||
	    (opt_thp == thp_mode_always));
}

/*
 * Tries to satisfy the given allocation request by reusing one of the extents
 * in the given eset_t.
 */
static extent_t *
extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t *extent_hooks,
    eset_t *eset, void *new_addr, size_t size, size_t pad,
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit,
    bool growing_retained) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
	assert(new_addr == NULL || !slab);
	assert(pad == 0 || !slab);
	assert(!*zero || !slab);

	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	extent_t *extent = extent_recycle_extract(tsdn, arena, extent_hooks,
	    rtree_ctx, eset, new_addr, size, pad, alignment, slab,
	    growing_retained);
	if (extent == NULL) {
		return NULL;
	}

	extent = extent_recycle_split(tsdn, arena, extent_hooks, rtree_ctx,
	    eset, new_addr, size, pad, alignment, slab, szind, extent,
	    growing_retained);
	if (extent == NULL) {
		return NULL;
	}

	if (*commit && !extent_committed_get(extent)) {
		if (extent_commit_impl(tsdn, arena, extent_hooks, extent,
		    0, extent_size_get(extent), growing_retained)) {
			extent_record(tsdn, arena, extent_hooks, eset,
			    extent, growing_retained);
			return NULL;
		}
		if (!extent_need_manual_zero(arena)) {
			extent_zeroed_set(extent, true);
		}
	}

	if (extent_committed_get(extent)) {
		*commit = true;
	}
	if (extent_zeroed_get(extent)) {
		*zero = true;
	}

	if (pad != 0) {
		extent_addr_randomize(tsdn, arena, extent, alignment);
	}
	assert(extent_state_get(extent) == extent_state_active);
	if (slab) {
		extent_slab_set(extent, slab);
		extent_interior_register(tsdn, rtree_ctx, extent, szind);
	}

	if (*zero) {
		void *addr = extent_base_get(extent);
		if (!extent_zeroed_get(extent)) {
			size_t size = extent_size_get(extent);
			if (extent_need_manual_zero(arena) ||
			    pages_purge_forced(addr, size)) {
				memset(addr, 0, size);
			}
		} else if (config_debug) {
			size_t *p = (size_t *)(uintptr_t)addr;
			/* Check the first page only. */
			for (size_t i = 0; i < PAGE / sizeof(size_t); i++) {
				assert(p[i] == 0);
			}
		}
	}
	return extent;
}

/*
 * If the caller specifies (!*zero), it is still possible to receive zeroed
 * memory, in which case *zero is toggled to true.  arena_extent_alloc() takes
 * advantage of this to avoid demanding zeroed extents, but taking advantage of
 * them if they are returned.
 */
static void *
extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) {
	void *ret;

	assert(size != 0);
	assert(alignment != 0);

	/* "primary" dss. */
	if (have_dss && dss_prec == dss_prec_primary && (ret =
	    extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
	    commit)) != NULL) {
		return ret;
	}
	/* mmap. */
	if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit))
	    != NULL) {
		return ret;
	}
	/* "secondary" dss. */
	if (have_dss && dss_prec == dss_prec_secondary && (ret =
	    extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
	    commit)) != NULL) {
		return ret;
	}

	/* All strategies for allocation failed. */
	return NULL;
}

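/*
 * Default allocation path: delegate to extent_alloc_core() with the arena's
 * dss precedence, then record THP state when madvise-based huge pages are in
 * use.
 */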
static void *
extent_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit) {
	void *ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment,
	    zero, commit, (dss_prec_t)atomic_load_u(&arena->dss_prec,
	    ATOMIC_RELAXED));
	if (have_madvise_huge && ret) {
		pages_set_thp_state(ret, size);
	}
	return ret;
}

2016-06-02 03:59:02 +08:00
|
|
|
static void *
|
2016-06-04 03:05:53 +08:00
|
|
|
extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
|
2017-01-16 08:56:30 +08:00
|
|
|
size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
|
2016-06-02 03:59:02 +08:00
|
|
|
tsdn_t *tsdn;
|
|
|
|
arena_t *arena;
|
|
|
|
|
|
|
|
tsdn = tsdn_fetch();
|
|
|
|
arena = arena_get(tsdn, arena_ind, false);
|
|
|
|
/*
|
|
|
|
* The arena we're allocating on behalf of must have been initialized
|
|
|
|
* already.
|
|
|
|
*/
|
|
|
|
assert(arena != NULL);
|
|
|
|
|
2017-01-20 10:15:45 +08:00
|
|
|
return extent_alloc_default_impl(tsdn, arena, new_addr, size,
|
2019-03-29 11:42:40 +08:00
|
|
|
ALIGNMENT_CEILING(alignment, PAGE), zero, commit);
|
2016-06-02 03:59:02 +08:00
|
|
|
}
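
/*
 * User-installed extent hooks may themselves allocate.  These helpers wrap
 * hook invocations with pre/post reentrancy guards so that nested allocation
 * from within a hook is handled safely rather than recursing into the calling
 * arena.
 */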
static void
extent_hook_pre_reentrancy(tsdn_t *tsdn, arena_t *arena) {
    tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
    if (arena == arena_get(tsd_tsdn(tsd), 0, false)) {
        /*
         * The only legitimate case of customized extent hooks for a0 is
         * hooks with no allocation activities.  One such example is to
         * place metadata on pre-allocated resources such as huge pages.
         * In that case, rely on reentrancy_level checks to catch
         * infinite recursions.
         */
        pre_reentrancy(tsd, NULL);
    } else {
        pre_reentrancy(tsd, arena);
    }
}

static void
extent_hook_post_reentrancy(tsdn_t *tsdn) {
    tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
    post_reentrancy(tsd);
}

/*
 * If virtual memory is retained, create increasingly larger extents from which
 * to split requested extents in order to limit the total number of disjoint
 * virtual memory ranges retained by each arena.
 */
static extent_t *
extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t *extent_hooks, size_t size, size_t pad, size_t alignment,
    bool slab, szind_t szind, bool *zero, bool *commit) {
    malloc_mutex_assert_owner(tsdn, &arena->extent_grow_mtx);
    assert(pad == 0 || !slab);
    assert(!*zero || !slab);

    size_t esize = size + pad;
    size_t alloc_size_min = esize + PAGE_CEILING(alignment) - PAGE;
    /* Beware size_t wrap-around. */
    if (alloc_size_min < esize) {
        goto label_err;
    }
    /*
     * Find the next extent size in the series that would be large enough to
     * satisfy this request.
     */
    pszind_t egn_skip = 0;
    size_t alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
    while (alloc_size < alloc_size_min) {
        egn_skip++;
        if (arena->extent_grow_next + egn_skip >=
            sz_psz2ind(SC_LARGE_MAXCLASS)) {
            /* Outside legal range. */
            goto label_err;
        }
        alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
    }

    extent_t *extent = extent_alloc(tsdn, arena);
    if (extent == NULL) {
        goto label_err;
    }
    bool zeroed = false;
    bool committed = false;

    void *ptr;
    if (extent_hooks == &extent_hooks_default) {
        ptr = extent_alloc_default_impl(tsdn, arena, NULL,
            alloc_size, PAGE, &zeroed, &committed);
    } else {
        extent_hook_pre_reentrancy(tsdn, arena);
        ptr = extent_hooks->alloc(extent_hooks, NULL,
            alloc_size, PAGE, &zeroed, &committed,
            arena_ind_get(arena));
        extent_hook_post_reentrancy(tsdn);
    }

    extent_init(extent, arena_ind_get(arena), ptr, alloc_size, false,
        SC_NSIZES, arena_extent_sn_next(arena), extent_state_active, zeroed,
        committed, true, EXTENT_IS_HEAD);
    if (ptr == NULL) {
        extent_dalloc(tsdn, arena, extent);
        goto label_err;
    }

    if (extent_register_no_gdump_add(tsdn, extent)) {
        extent_dalloc(tsdn, arena, extent);
        goto label_err;
    }

    if (extent_zeroed_get(extent) && extent_committed_get(extent)) {
        *zero = true;
    }
    if (extent_committed_get(extent)) {
        *commit = true;
    }

    rtree_ctx_t rtree_ctx_fallback;
    rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

    extent_t *lead;
    extent_t *trail;
    extent_t *to_leak;
    extent_t *to_salvage;
    extent_split_interior_result_t result = extent_split_interior(
        tsdn, arena, extent_hooks, rtree_ctx, &extent, &lead, &trail,
        &to_leak, &to_salvage, NULL, size, pad, alignment, slab, szind,
        true);

    if (result == extent_split_interior_ok) {
        if (lead != NULL) {
            extent_record(tsdn, arena, extent_hooks,
                &arena->eset_retained, lead, true);
        }
        if (trail != NULL) {
            extent_record(tsdn, arena, extent_hooks,
                &arena->eset_retained, trail, true);
        }
    } else {
        /*
         * We should have allocated a sufficiently large extent; the
         * cant_alloc case should not occur.
         */
        assert(result == extent_split_interior_error);
        if (to_salvage != NULL) {
            if (config_prof) {
                extent_gdump_add(tsdn, to_salvage);
            }
            extent_record(tsdn, arena, extent_hooks,
                &arena->eset_retained, to_salvage, true);
        }
        if (to_leak != NULL) {
            extent_deregister_no_gdump_sub(tsdn, to_leak);
            extents_abandon_vm(tsdn, arena, extent_hooks,
                &arena->eset_retained, to_leak, true);
        }
        goto label_err;
    }

    if (*commit && !extent_committed_get(extent)) {
        if (extent_commit_impl(tsdn, arena, extent_hooks, extent, 0,
            extent_size_get(extent), true)) {
            extent_record(tsdn, arena, extent_hooks,
                &arena->eset_retained, extent, true);
            goto label_err;
        }
        if (!extent_need_manual_zero(arena)) {
            extent_zeroed_set(extent, true);
        }
    }

    /*
     * Increment extent_grow_next if doing so wouldn't exceed the allowed
     * range.
     */
    if (arena->extent_grow_next + egn_skip + 1 <=
        arena->retain_grow_limit) {
        arena->extent_grow_next += egn_skip + 1;
    } else {
        arena->extent_grow_next = arena->retain_grow_limit;
    }
    /* All opportunities for failure are past. */
    malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);

    if (config_prof) {
        /* Adjust gdump stats now that extent is final size. */
        extent_gdump_add(tsdn, extent);
    }
    if (pad != 0) {
        extent_addr_randomize(tsdn, arena, extent, alignment);
    }
    if (slab) {
        rtree_ctx_t rtree_ctx_fallback;
        rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
            &rtree_ctx_fallback);

        extent_slab_set(extent, true);
        extent_interior_register(tsdn, rtree_ctx, extent, szind);
    }
    if (*zero && !extent_zeroed_get(extent)) {
        void *addr = extent_base_get(extent);
        size_t size = extent_size_get(extent);
        if (extent_need_manual_zero(arena) ||
            pages_purge_forced(addr, size)) {
            memset(addr, 0, size);
        }
    }

    return extent;
label_err:
    malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
    return NULL;
}
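
/*
 * Allocate from retained virtual memory: first try to recycle an extent from
 * eset_retained, and if that fails (and opt_retain is enabled with no fixed
 * address requested), grow the retained region.  extent_grow_mtx is held
 * across the attempt; extent_grow_retained() releases it on all paths.
 */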
static extent_t *
extent_alloc_retained(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t *extent_hooks, void *new_addr, size_t size, size_t pad,
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
    assert(size != 0);
    assert(alignment != 0);

    malloc_mutex_lock(tsdn, &arena->extent_grow_mtx);

    extent_t *extent = extent_recycle(tsdn, arena, extent_hooks,
        &arena->eset_retained, new_addr, size, pad, alignment, slab,
        szind, zero, commit, true);
    if (extent != NULL) {
        malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
        if (config_prof) {
            extent_gdump_add(tsdn, extent);
        }
    } else if (opt_retain && new_addr == NULL) {
        extent = extent_grow_retained(tsdn, arena, extent_hooks, size,
            pad, alignment, slab, szind, zero, commit);
        /* extent_grow_retained() always releases extent_grow_mtx. */
    } else {
        malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
    }
    malloc_mutex_assert_not_owner(tsdn, &arena->extent_grow_mtx);

    return extent;
}
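
/*
 * Slow path: obtain brand new virtual memory from the extent hooks (or the
 * default allocator), then initialize and register the resulting extent.
 */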
static extent_t *
extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t *extent_hooks, void *new_addr, size_t size, size_t pad,
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
    size_t esize = size + pad;
    extent_t *extent = extent_alloc(tsdn, arena);
    if (extent == NULL) {
        return NULL;
    }
    void *addr;
    size_t palignment = ALIGNMENT_CEILING(alignment, PAGE);
    if (extent_hooks == &extent_hooks_default) {
        /* Call directly to propagate tsdn. */
        addr = extent_alloc_default_impl(tsdn, arena, new_addr, esize,
            palignment, zero, commit);
    } else {
        extent_hook_pre_reentrancy(tsdn, arena);
        addr = extent_hooks->alloc(extent_hooks, new_addr,
            esize, palignment, zero, commit, arena_ind_get(arena));
        extent_hook_post_reentrancy(tsdn);
    }
    if (addr == NULL) {
        extent_dalloc(tsdn, arena, extent);
        return NULL;
    }
    extent_init(extent, arena_ind_get(arena), addr, esize, slab, szind,
        arena_extent_sn_next(arena), extent_state_active, *zero, *commit,
        true, EXTENT_NOT_HEAD);
    if (pad != 0) {
        extent_addr_randomize(tsdn, arena, extent, alignment);
    }
    if (extent_register(tsdn, extent)) {
        extent_dalloc(tsdn, arena, extent);
        return NULL;
    }

    return extent;
}
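
/*
 * Top-level extent allocation: prefer retained memory, falling back to fresh
 * mappings via extent_alloc_wrapper_hard().
 */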
extent_t *
extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t *extent_hooks, void *new_addr, size_t size, size_t pad,
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, 0);

    extent_t *extent = extent_alloc_retained(tsdn, arena, extent_hooks,
        new_addr, size, pad, alignment, slab, szind, zero, commit);
    if (extent == NULL) {
        if (opt_retain && new_addr != NULL) {
            /*
             * When retain is enabled and new_addr is set, we do not
             * attempt extent_alloc_wrapper_hard; its mmap is very
             * unlikely to succeed at the requested address (unless
             * new_addr happens to be at the end of an existing
             * mapping).
             */
            return NULL;
        }
        extent = extent_alloc_wrapper_hard(tsdn, arena, extent_hooks,
            new_addr, size, pad, alignment, slab, szind, zero, commit);
    }

    assert(extent == NULL || extent_dumpable_get(extent));
    return extent;
}
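
/*
 * Two neighboring extents are coalesceable only if they belong to the same
 * arena, the outer one is in the eset's state, and both share the same commit
 * state.
 */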
static bool
extent_can_coalesce(arena_t *arena, eset_t *eset, const extent_t *inner,
    const extent_t *outer) {
    assert(extent_arena_ind_get(inner) == arena_ind_get(arena));
    if (extent_arena_ind_get(outer) != arena_ind_get(arena)) {
        return false;
    }

    assert(extent_state_get(inner) == extent_state_active);
    if (extent_state_get(outer) != eset->state) {
        return false;
    }

    if (extent_committed_get(inner) != extent_committed_get(outer)) {
        return false;
    }

    return true;
}
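
/*
 * Merge inner with its neighbor outer.  outer is first removed from the eset,
 * and the eset mutex is dropped around the merge itself (which may invoke
 * user hooks); on failure, outer is returned to the eset.
 */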
static bool
extent_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t *extent_hooks,
    eset_t *eset, extent_t *inner, extent_t *outer, bool forward,
    bool growing_retained) {
    assert(extent_can_coalesce(arena, eset, inner, outer));

    extent_activate_locked(tsdn, arena, eset, outer);

    malloc_mutex_unlock(tsdn, &eset->mtx);
    bool err = extent_merge_impl(tsdn, arena, extent_hooks,
        forward ? inner : outer, forward ? outer : inner, growing_retained);
    malloc_mutex_lock(tsdn, &eset->mtx);

    if (err) {
        extent_deactivate_locked(tsdn, arena, eset, outer);
    }

    return err;
}

static extent_t *
extent_try_coalesce_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t *extent_hooks, rtree_ctx_t *rtree_ctx, eset_t *eset,
    extent_t *extent, bool *coalesced, bool growing_retained,
    bool inactive_only) {
    /*
     * We avoid checking / locking inactive neighbors for large size
     * classes, since they are eagerly coalesced on deallocation, which can
     * cause lock contention.
     */
    /*
     * Continue attempting to coalesce until failure, to protect against
     * races with other threads that are thwarted by this one.
     */
    bool again;
    do {
        again = false;

        /* Try to coalesce forward. */
        extent_t *next = extent_lock_from_addr(tsdn, rtree_ctx,
            extent_past_get(extent), inactive_only);
        if (next != NULL) {
            /*
             * eset->mtx only protects against races for extents in a
             * like-state eset, so call extent_can_coalesce() before
             * releasing next's pool lock.
             */
            bool can_coalesce = extent_can_coalesce(arena, eset,
                extent, next);

            extent_unlock(tsdn, next);

            if (can_coalesce && !extent_coalesce(tsdn, arena,
                extent_hooks, eset, extent, next, true,
                growing_retained)) {
                if (eset->delay_coalesce) {
                    /* Do minimal coalescing. */
                    *coalesced = true;
                    return extent;
                }
                again = true;
            }
        }

        /* Try to coalesce backward. */
        extent_t *prev = extent_lock_from_addr(tsdn, rtree_ctx,
            extent_before_get(extent), inactive_only);
        if (prev != NULL) {
            bool can_coalesce = extent_can_coalesce(arena, eset,
                extent, prev);
            extent_unlock(tsdn, prev);

            if (can_coalesce && !extent_coalesce(tsdn, arena,
                extent_hooks, eset, extent, prev, false,
                growing_retained)) {
                extent = prev;
                if (eset->delay_coalesce) {
                    /* Do minimal coalescing. */
                    *coalesced = true;
                    return extent;
                }
                again = true;
            }
        }
    } while (again);

    if (eset->delay_coalesce) {
        *coalesced = false;
    }
    return extent;
}

static extent_t *
extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t *extent_hooks, rtree_ctx_t *rtree_ctx, eset_t *eset,
    extent_t *extent, bool *coalesced, bool growing_retained) {
    return extent_try_coalesce_impl(tsdn, arena, extent_hooks, rtree_ctx,
        eset, extent, coalesced, growing_retained, false);
}

static extent_t *
extent_try_coalesce_large(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t *extent_hooks, rtree_ctx_t *rtree_ctx, eset_t *eset,
    extent_t *extent, bool *coalesced, bool growing_retained) {
    return extent_try_coalesce_impl(tsdn, arena, extent_hooks, rtree_ctx,
        eset, extent, coalesced, growing_retained, true);
}

/*
 * Handles the metadata management portion of putting an unused extent into
 * the given eset_t: coalescing, deregistering slab interiors, and the heap
 * operations.
 */
static void
extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t *extent_hooks,
    eset_t *eset, extent_t *extent, bool growing_retained) {
    rtree_ctx_t rtree_ctx_fallback;
    rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

    assert((eset_state_get(eset) != extent_state_dirty &&
        eset_state_get(eset) != extent_state_muzzy) ||
        !extent_zeroed_get(extent));

    malloc_mutex_lock(tsdn, &eset->mtx);

    extent_szind_set(extent, SC_NSIZES);
    if (extent_slab_get(extent)) {
        extent_interior_deregister(tsdn, rtree_ctx, extent);
        extent_slab_set(extent, false);
    }

    assert(rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
        (uintptr_t)extent_base_get(extent), true) == extent);

    if (!eset->delay_coalesce) {
        extent = extent_try_coalesce(tsdn, arena, extent_hooks,
            rtree_ctx, eset, extent, NULL, growing_retained);
    } else if (extent_size_get(extent) >= SC_LARGE_MINCLASS) {
        assert(eset == &arena->eset_dirty);
        /* Always coalesce large extents eagerly. */
        bool coalesced;
        do {
            assert(extent_state_get(extent) == extent_state_active);
            extent = extent_try_coalesce_large(tsdn, arena,
                extent_hooks, rtree_ctx, eset, extent,
                &coalesced, growing_retained);
        } while (coalesced);
        if (extent_size_get(extent) >= oversize_threshold) {
            /* Shortcut to purge the oversize extent eagerly. */
            malloc_mutex_unlock(tsdn, &eset->mtx);
            arena_decay_extent(tsdn, arena, extent_hooks, extent);
            return;
        }
    }
    extent_deactivate_locked(tsdn, arena, eset, extent);

    malloc_mutex_unlock(tsdn, &eset->mtx);
}

void
extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
    extent_hooks_t *extent_hooks = arena_get_extent_hooks(arena);

    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, 0);

    if (extent_register(tsdn, extent)) {
        extent_dalloc(tsdn, arena, extent);
        return;
    }
    extent_dalloc_wrapper(tsdn, arena, extent_hooks, extent);
}

static bool
extent_may_dalloc(void) {
    /* With retain enabled, the default dalloc always fails. */
    return !opt_retain;
}

static bool
extent_dalloc_default_impl(void *addr, size_t size) {
    if (!have_dss || !extent_in_dss(addr)) {
        return extent_dalloc_mmap(addr, size);
    }
    return true;
}

static bool
extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    bool committed, unsigned arena_ind) {
    return extent_dalloc_default_impl(addr, size);
}

static bool
extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t *extent_hooks, extent_t *extent) {
    bool err;

    assert(extent_base_get(extent) != NULL);
    assert(extent_size_get(extent) != 0);
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, 0);

    extent_addr_set(extent, extent_base_get(extent));

    /* Try to deallocate. */
    if (extent_hooks == &extent_hooks_default) {
        /* Call directly to propagate tsdn. */
        err = extent_dalloc_default_impl(extent_base_get(extent),
            extent_size_get(extent));
    } else {
        extent_hook_pre_reentrancy(tsdn, arena);
        err = (extent_hooks->dalloc == NULL ||
            extent_hooks->dalloc(extent_hooks,
            extent_base_get(extent), extent_size_get(extent),
            extent_committed_get(extent), arena_ind_get(arena)));
        extent_hook_post_reentrancy(tsdn);
    }

    if (!err) {
        extent_dalloc(tsdn, arena, extent);
    }

    return err;
}
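
/*
 * Deallocate an extent, retaining its virtual memory if the hooks refuse to
 * unmap it.  Retained memory is walked down a ladder -- decommit, then forced
 * purge, then lazy purge -- and the resulting zeroed state is recorded before
 * the extent is placed in eset_retained.
 */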
void
extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t *extent_hooks, extent_t *extent) {
    assert(extent_dumpable_get(extent));
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, 0);

    /* Avoid calling the default extent_dalloc unless we have to. */
    if (extent_hooks != &extent_hooks_default || extent_may_dalloc()) {
        /*
         * Deregister first to avoid a race with other allocating
         * threads, and reregister if deallocation fails.
         */
        extent_deregister(tsdn, extent);
        if (!extent_dalloc_wrapper_try(tsdn, arena, extent_hooks,
            extent)) {
            return;
        }
        extent_reregister(tsdn, extent);
    }

    if (extent_hooks != &extent_hooks_default) {
        extent_hook_pre_reentrancy(tsdn, arena);
    }
    /* Try to decommit; purge if that fails. */
    bool zeroed;
    if (!extent_committed_get(extent)) {
        zeroed = true;
    } else if (!extent_decommit_wrapper(tsdn, arena, extent_hooks, extent,
        0, extent_size_get(extent))) {
        zeroed = true;
    } else if (extent_hooks->purge_forced != NULL &&
        !extent_hooks->purge_forced(extent_hooks,
        extent_base_get(extent), extent_size_get(extent), 0,
        extent_size_get(extent), arena_ind_get(arena))) {
        zeroed = true;
    } else if (extent_state_get(extent) == extent_state_muzzy ||
        (extent_hooks->purge_lazy != NULL &&
        !extent_hooks->purge_lazy(extent_hooks,
        extent_base_get(extent), extent_size_get(extent), 0,
        extent_size_get(extent), arena_ind_get(arena)))) {
        zeroed = false;
    } else {
        zeroed = false;
    }
    if (extent_hooks != &extent_hooks_default) {
        extent_hook_post_reentrancy(tsdn);
    }
    extent_zeroed_set(extent, zeroed);

    if (config_prof) {
        extent_gdump_sub(tsdn, extent);
    }

    extent_record(tsdn, arena, extent_hooks, &arena->eset_retained,
        extent, false);
}

static void
extent_destroy_default_impl(void *addr, size_t size) {
    if (!have_dss || !extent_in_dss(addr)) {
        pages_unmap(addr, size);
    }
}

static void
extent_destroy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    bool committed, unsigned arena_ind) {
    extent_destroy_default_impl(addr, size);
}

void
extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t *extent_hooks, extent_t *extent) {
    assert(extent_base_get(extent) != NULL);
    assert(extent_size_get(extent) != 0);
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, 0);

    /* Deregister first to avoid a race with other allocating threads. */
    extent_deregister(tsdn, extent);

    extent_addr_set(extent, extent_base_get(extent));

    /* Try to destroy; silently fail otherwise. */
    if (extent_hooks == &extent_hooks_default) {
        /* Call directly to propagate tsdn. */
        extent_destroy_default_impl(extent_base_get(extent),
            extent_size_get(extent));
    } else if (extent_hooks->destroy != NULL) {
        extent_hook_pre_reentrancy(tsdn, arena);
        extent_hooks->destroy(extent_hooks,
            extent_base_get(extent), extent_size_get(extent),
            extent_committed_get(extent), arena_ind_get(arena));
        extent_hook_post_reentrancy(tsdn);
    }

    extent_dalloc(tsdn, arena, extent);
}
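
/*
 * Commit/decommit operations follow the extent hook convention of returning
 * false on success; a missing hook counts as failure.  The extent's committed
 * flag is updated to reflect the outcome.
 */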
static bool
extent_commit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t offset, size_t length, unsigned arena_ind) {
    return pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset),
        length);
}

static bool
extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t *extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained) {
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, growing_retained ? 1 : 0);

    if (extent_hooks != &extent_hooks_default) {
        extent_hook_pre_reentrancy(tsdn, arena);
    }
    bool err = (extent_hooks->commit == NULL ||
        extent_hooks->commit(extent_hooks, extent_base_get(extent),
        extent_size_get(extent), offset, length, arena_ind_get(arena)));
    if (extent_hooks != &extent_hooks_default) {
        extent_hook_post_reentrancy(tsdn);
    }
    extent_committed_set(extent, extent_committed_get(extent) || !err);
    return err;
}

bool
extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t *extent_hooks, extent_t *extent, size_t offset,
    size_t length) {
    return extent_commit_impl(tsdn, arena, extent_hooks, extent, offset,
        length, false);
}

static bool
extent_decommit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t offset, size_t length, unsigned arena_ind) {
    return pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset),
        length);
}

bool
extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t *extent_hooks, extent_t *extent, size_t offset,
    size_t length) {
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, 0);

    if (extent_hooks != &extent_hooks_default) {
        extent_hook_pre_reentrancy(tsdn, arena);
    }
    bool err = (extent_hooks->decommit == NULL ||
        extent_hooks->decommit(extent_hooks,
        extent_base_get(extent), extent_size_get(extent), offset, length,
        arena_ind_get(arena)));
    if (extent_hooks != &extent_hooks_default) {
        extent_hook_post_reentrancy(tsdn);
    }
    extent_committed_set(extent, extent_committed_get(extent) && err);
    return err;
}

#ifdef PAGES_CAN_PURGE_LAZY
static bool
extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t offset, size_t length, unsigned arena_ind) {
    assert(addr != NULL);
    assert((offset & PAGE_MASK) == 0);
    assert(length != 0);
    assert((length & PAGE_MASK) == 0);

    return pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset),
        length);
}
#endif

static bool
extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t *extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained) {
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, growing_retained ? 1 : 0);

    if (extent_hooks->purge_lazy == NULL) {
        return true;
    }
    if (extent_hooks != &extent_hooks_default) {
        extent_hook_pre_reentrancy(tsdn, arena);
    }
    bool err = extent_hooks->purge_lazy(extent_hooks,
        extent_base_get(extent), extent_size_get(extent), offset, length,
        arena_ind_get(arena));
    if (extent_hooks != &extent_hooks_default) {
        extent_hook_post_reentrancy(tsdn);
    }

    return err;
}

bool
extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t *extent_hooks, extent_t *extent, size_t offset,
    size_t length) {
    return extent_purge_lazy_impl(tsdn, arena, extent_hooks, extent,
        offset, length, false);
}

#ifdef PAGES_CAN_PURGE_FORCED
static bool
extent_purge_forced_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t offset, size_t length, unsigned arena_ind) {
    assert(addr != NULL);
    assert((offset & PAGE_MASK) == 0);
    assert(length != 0);
    assert((length & PAGE_MASK) == 0);

    return pages_purge_forced((void *)((uintptr_t)addr +
        (uintptr_t)offset), length);
}
#endif

static bool
extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t *extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained) {
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, growing_retained ? 1 : 0);

    if (extent_hooks->purge_forced == NULL) {
        return true;
    }
    if (extent_hooks != &extent_hooks_default) {
        extent_hook_pre_reentrancy(tsdn, arena);
    }
    bool err = extent_hooks->purge_forced(extent_hooks,
        extent_base_get(extent), extent_size_get(extent), offset, length,
        arena_ind_get(arena));
    if (extent_hooks != &extent_hooks_default) {
        extent_hook_post_reentrancy(tsdn);
    }
    return err;
}

bool
extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t *extent_hooks, extent_t *extent, size_t offset,
    size_t length) {
    return extent_purge_forced_impl(tsdn, arena, extent_hooks, extent,
        offset, length, false);
}

static bool
extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
    if (!maps_coalesce) {
        /*
         * Without retain, only whole regions can be purged (required by
         * MEM_RELEASE on Windows) -- therefore disallow splitting.  See
         * comments in extent_head_no_merge().
         */
        return !opt_retain;
    }

    return false;
}

/*
 * Accepts the extent to split, and the characteristics of each side of the
 * split.  The 'a' parameters go with the 'lead' of the resulting pair of
 * extents (the lower addressed portion of the split), and the 'b' parameters
 * go with the trail (the higher addressed portion).  This makes 'extent' the
 * lead, and returns the trail (except in case of error).
 */
static extent_t *
extent_split_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t *extent_hooks, extent_t *extent, size_t size_a,
    szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
    bool growing_retained) {
    assert(extent_size_get(extent) == size_a + size_b);
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, growing_retained ? 1 : 0);

    if (extent_hooks->split == NULL) {
        return NULL;
    }

    extent_t *trail = extent_alloc(tsdn, arena);
    if (trail == NULL) {
        goto label_error_a;
    }

    extent_init(trail, arena_ind_get(arena),
        (void *)((uintptr_t)extent_base_get(extent) + size_a), size_b,
        slab_b, szind_b, extent_sn_get(extent), extent_state_get(extent),
        extent_zeroed_get(extent), extent_committed_get(extent),
        extent_dumpable_get(extent), EXTENT_NOT_HEAD);

    rtree_ctx_t rtree_ctx_fallback;
    rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
    rtree_leaf_elm_t *lead_elm_a, *lead_elm_b;
    {
        extent_t lead;

        extent_init(&lead, arena_ind_get(arena),
            extent_addr_get(extent), size_a,
            slab_a, szind_a, extent_sn_get(extent),
            extent_state_get(extent), extent_zeroed_get(extent),
            extent_committed_get(extent), extent_dumpable_get(extent),
            EXTENT_NOT_HEAD);

        extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, &lead, false,
            true, &lead_elm_a, &lead_elm_b);
    }
    rtree_leaf_elm_t *trail_elm_a, *trail_elm_b;
    extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, trail, false, true,
        &trail_elm_a, &trail_elm_b);

    if (lead_elm_a == NULL || lead_elm_b == NULL || trail_elm_a == NULL
        || trail_elm_b == NULL) {
        goto label_error_b;
    }

    extent_lock2(tsdn, extent, trail);

    if (extent_hooks != &extent_hooks_default) {
        extent_hook_pre_reentrancy(tsdn, arena);
    }
    bool err = extent_hooks->split(extent_hooks, extent_base_get(extent),
        size_a + size_b, size_a, size_b, extent_committed_get(extent),
        arena_ind_get(arena));
    if (extent_hooks != &extent_hooks_default) {
        extent_hook_post_reentrancy(tsdn);
    }
    if (err) {
        goto label_error_c;
    }

    extent_size_set(extent, size_a);
    extent_szind_set(extent, szind_a);

    extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent,
        szind_a, slab_a);
    extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail,
        szind_b, slab_b);

    extent_unlock2(tsdn, extent, trail);

    return trail;
label_error_c:
    extent_unlock2(tsdn, extent, trail);
label_error_b:
    extent_dalloc(tsdn, arena, trail);
label_error_a:
    return NULL;
}

extent_t *
extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t *extent_hooks, extent_t *extent, size_t size_a,
    szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b) {
    return extent_split_impl(tsdn, arena, extent_hooks, extent, size_a,
        szind_a, slab_a, size_b, szind_b, slab_b, false);
}

static bool
extent_merge_default_impl(void *addr_a, void *addr_b) {
    if (!maps_coalesce && !opt_retain) {
        return true;
    }
    if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) {
        return true;
    }

    return false;
}

/*
 * Returns true if the given extents can't be merged because of their head bit
 * settings.  Assumes the second extent has the higher address.
 */
static bool
extent_head_no_merge(extent_t *a, extent_t *b) {
    assert(extent_base_get(a) < extent_base_get(b));
    /*
     * When coalesce is not always allowed (Windows), only merge extents
     * from the same VirtualAlloc region under opt.retain (in which case
     * MEM_DECOMMIT is utilized for purging).
     */
    if (maps_coalesce) {
        return false;
    }
    if (!opt_retain) {
        return true;
    }
    /* If b is a head extent, disallow the cross-region merge. */
    if (extent_is_head_get(b)) {
        /*
         * Additionally, sn should not overflow with retain; sanity
         * check that different regions have unique sn.
         */
        assert(extent_sn_comp(a, b) != 0);
        return true;
    }
    assert(extent_sn_comp(a, b) == 0);

    return false;
}

static bool
extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
    void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
    if (!maps_coalesce) {
        tsdn_t *tsdn = tsdn_fetch();
        extent_t *a = iealloc(tsdn, addr_a);
        extent_t *b = iealloc(tsdn, addr_b);
        if (extent_head_no_merge(a, b)) {
            return true;
        }
    }
    return extent_merge_default_impl(addr_a, addr_b);
}
|
|
|
|
|
2017-05-27 02:06:01 +08:00
|
|
|
static bool
|
|
|
|
extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
|
2019-11-19 06:03:22 +08:00
|
|
|
extent_hooks_t *extent_hooks, extent_t *a, extent_t *b,
|
2017-05-27 02:06:01 +08:00
|
|
|
bool growing_retained) {
|
2017-05-23 10:32:04 +08:00
|
|
|
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
|
2017-05-27 02:06:01 +08:00
|
|
|
WITNESS_RANK_CORE, growing_retained ? 1 : 0);
|
2019-07-13 07:20:23 +08:00
|
|
|
assert(extent_base_get(a) < extent_base_get(b));
|
2016-06-02 03:59:02 +08:00
|
|
|
|
2019-11-19 06:03:22 +08:00
|
|
|
if (extent_hooks->merge == NULL || extent_head_no_merge(a, b)) {
|
2017-01-20 10:15:45 +08:00
|
|
|
return true;
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
2016-12-04 07:38:25 +08:00
|
|
|
|
2017-01-30 13:57:14 +08:00
|
|
|
bool err;
|
2019-11-19 06:03:22 +08:00
|
|
|
if (extent_hooks == &extent_hooks_default) {
|
2016-06-08 04:37:22 +08:00
|
|
|
/* Call directly to propagate tsdn. */
|
2016-10-14 03:18:38 +08:00
|
|
|
err = extent_merge_default_impl(extent_base_get(a),
|
2016-06-08 04:37:22 +08:00
|
|
|
extent_base_get(b));
|
|
|
|
} else {
|
2017-06-23 07:18:30 +08:00
|
|
|
extent_hook_pre_reentrancy(tsdn, arena);
|
2019-11-19 06:03:22 +08:00
|
|
|
err = extent_hooks->merge(extent_hooks,
|
2016-06-08 04:37:22 +08:00
|
|
|
extent_base_get(a), extent_size_get(a), extent_base_get(b),
|
2016-12-23 06:39:10 +08:00
|
|
|
extent_size_get(b), extent_committed_get(a),
|
|
|
|
arena_ind_get(arena));
|
2017-06-23 07:18:30 +08:00
|
|
|
extent_hook_post_reentrancy(tsdn);
|
2016-06-08 04:37:22 +08:00
|
|
|
}
|
|
|
|
|
2017-01-16 08:56:30 +08:00
|
|
|
if (err) {
|
2017-01-20 10:15:45 +08:00
|
|
|
return true;
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
2016-06-02 03:59:02 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* The rtree writes must happen while all the relevant elements are
|
|
|
|
* owned, so the following code uses decomposed helper functions rather
|
|
|
|
* than extent_{,de}register() to do things in the right order.
|
|
|
|
*/
|
2017-01-30 13:57:14 +08:00
|
|
|
rtree_ctx_t rtree_ctx_fallback;
|
|
|
|
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
|
2017-03-17 00:46:42 +08:00
|
|
|
rtree_leaf_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b;
|
2017-05-16 05:23:51 +08:00
|
|
|
extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, a, true, false, &a_elm_a,
|
2016-06-03 09:43:10 +08:00
|
|
|
&a_elm_b);
|
2017-05-16 05:23:51 +08:00
|
|
|
extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, b, true, false, &b_elm_a,
|
2016-06-03 09:43:10 +08:00
|
|
|
&b_elm_b);
|
2016-06-02 03:59:02 +08:00
|
|
|
|
2017-05-16 05:23:51 +08:00
|
|
|
extent_lock2(tsdn, a, b);
|
|
|
|
|
2016-06-02 03:59:02 +08:00
|
|
|
if (a_elm_b != NULL) {
|
2017-05-16 05:23:51 +08:00
|
|
|
rtree_leaf_elm_write(tsdn, &extents_rtree, a_elm_b, NULL,
|
2017-12-15 04:46:39 +08:00
|
|
|
SC_NSIZES, false);
|
2016-06-02 03:59:02 +08:00
|
|
|
}
|
|
|
|
if (b_elm_b != NULL) {
|
2017-05-16 05:23:51 +08:00
|
|
|
rtree_leaf_elm_write(tsdn, &extents_rtree, b_elm_a, NULL,
|
2017-12-15 04:46:39 +08:00
|
|
|
SC_NSIZES, false);
|
2017-01-16 08:56:30 +08:00
|
|
|
} else {
|
2016-06-02 03:59:02 +08:00
|
|
|
b_elm_b = b_elm_a;
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
2016-06-02 03:59:02 +08:00
|
|
|
|
|
|
|
extent_size_set(a, extent_size_get(a) + extent_size_get(b));
|
2017-12-15 04:46:39 +08:00
|
|
|
extent_szind_set(a, SC_NSIZES);
|
2016-11-16 05:07:53 +08:00
|
|
|
extent_sn_set(a, (extent_sn_get(a) < extent_sn_get(b)) ?
|
|
|
|
extent_sn_get(a) : extent_sn_get(b));
|
2016-06-02 03:59:02 +08:00
|
|
|
extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b));
|
|
|
|
|
	extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a, SC_NSIZES,
	    false);

	extent_unlock2(tsdn, a, b);

	/*
	 * If we got here, we merged the extents; so they must be from the same
	 * arena (i.e. this one).
	 */
	assert(extent_arena_ind_get(b) == arena_ind_get(arena));
	extent_dalloc(tsdn, arena, b);

	return false;
}

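/*
 * Public entry point; growing_retained is always false here, since only the
 * retained-extent growth path tolerates the deeper witness depth.
 */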
bool
extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t *extent_hooks, extent_t *a, extent_t *b) {
	return extent_merge_impl(tsdn, arena, extent_hooks, a, b, false);
}

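/*
 * One-time bootstrap of the extent subsystem: the extents rtree, the extent
 * mutex pool, and (if the platform supports dss) the dss allocator.
 */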
bool
extent_boot(void) {
	if (rtree_new(&extents_rtree, true)) {
		return true;
	}

	if (mutex_pool_init(&extent_mutex_pool, "extent_mutex_pool",
	    WITNESS_RANK_EXTENT_POOL)) {
		return true;
	}

	if (have_dss) {
		extent_dss_boot();
	}

	return false;
}

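/*
 * Utilization queries: given a pointer, report how full its extent is.  The
 * verbose variant below additionally reports bin-wide stats and the current
 * slab's address.
 */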
void
extent_util_stats_get(tsdn_t *tsdn, const void *ptr,
    size_t *nfree, size_t *nregs, size_t *size) {
	assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL);

	const extent_t *extent = iealloc(tsdn, ptr);
	if (unlikely(extent == NULL)) {
		*nfree = *nregs = *size = 0;
		return;
	}

	*size = extent_size_get(extent);
	if (!extent_slab_get(extent)) {
		*nfree = 0;
		*nregs = 1;
	} else {
		*nfree = extent_nfree_get(extent);
		*nregs = bin_infos[extent_szind_get(extent)].nregs;
		assert(*nfree <= *nregs);
		assert(*nfree * extent_usize_get(extent) <= *size);
	}
}
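
/*
 * A minimal usage sketch (hypothetical caller; tsdn_fetch() is from the tsd
 * internals):
 *
 *	size_t nfree, nregs, sz;
 *	extent_util_stats_get(tsdn_fetch(), ptr, &nfree, &nregs, &sz);
 *	// For a slab extent, nregs - nfree regions are currently live.
 */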

void
extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr,
    size_t *nfree, size_t *nregs, size_t *size,
    size_t *bin_nfree, size_t *bin_nregs, void **slabcur_addr) {
	assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL
	    && bin_nfree != NULL && bin_nregs != NULL && slabcur_addr != NULL);

	const extent_t *extent = iealloc(tsdn, ptr);
	if (unlikely(extent == NULL)) {
		*nfree = *nregs = *size = *bin_nfree = *bin_nregs = 0;
		*slabcur_addr = NULL;
		return;
	}

	*size = extent_size_get(extent);
	if (!extent_slab_get(extent)) {
		*nfree = *bin_nfree = *bin_nregs = 0;
		*nregs = 1;
		*slabcur_addr = NULL;
		return;
	}

	*nfree = extent_nfree_get(extent);
	const szind_t szind = extent_szind_get(extent);
	*nregs = bin_infos[szind].nregs;
	assert(*nfree <= *nregs);
	assert(*nfree * extent_usize_get(extent) <= *size);

	const arena_t *arena = (arena_t *)atomic_load_p(
	    &arenas[extent_arena_ind_get(extent)], ATOMIC_RELAXED);
	assert(arena != NULL);
	const unsigned binshard = extent_binshard_get(extent);
	bin_t *bin = &arena->bins[szind].bin_shards[binshard];

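	/*
	 * With stats enabled, report the shard-wide picture: capacity is
	 * regions-per-slab times the shard's current slab count, and the
	 * free count is whatever of that capacity is not in live regions.
	 */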
	malloc_mutex_lock(tsdn, &bin->lock);
	if (config_stats) {
		*bin_nregs = *nregs * bin->stats.curslabs;
		assert(*bin_nregs >= bin->stats.curregs);
		*bin_nfree = *bin_nregs - bin->stats.curregs;
	} else {
		*bin_nfree = *bin_nregs = 0;
	}
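	/* Prefer the current slab; else fall back to the nonfull heap. */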
	extent_t *slab;
	if (bin->slabcur != NULL) {
		slab = bin->slabcur;
	} else {
		slab = extent_heap_first(&bin->slabs_nonfull);
	}
	*slabcur_addr = slab != NULL ? extent_addr_get(slab) : NULL;
	malloc_mutex_unlock(tsdn, &bin->lock);
}