#define JEMALLOC_EXTENT_C_

#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/ph.h"

/******************************************************************************/
/* Data. */

rtree_t extents_rtree;

static const bitmap_info_t extents_bitmap_info =
    BITMAP_INFO_INITIALIZER(NPSIZES+1);

static void *extent_alloc_default(extent_hooks_t *extent_hooks,
    void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
    unsigned arena_ind);
static bool extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, bool committed, unsigned arena_ind);
static void extent_destroy_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, bool committed, unsigned arena_ind);
static bool extent_commit_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t offset, size_t length, unsigned arena_ind);
static bool extent_decommit_default(extent_hooks_t *extent_hooks,
    void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
#ifdef PAGES_CAN_PURGE_LAZY
static bool extent_purge_lazy_default(extent_hooks_t *extent_hooks,
    void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
#endif
#ifdef PAGES_CAN_PURGE_FORCED
static bool extent_purge_forced_default(extent_hooks_t *extent_hooks,
    void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
#endif
#ifdef JEMALLOC_MAPS_COALESCE
static bool extent_split_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t size_a, size_t size_b, bool committed,
    unsigned arena_ind);
static bool extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a,
    size_t size_a, void *addr_b, size_t size_b, bool committed,
    unsigned arena_ind);
#endif

const extent_hooks_t extent_hooks_default = {
	extent_alloc_default,
	extent_dalloc_default,
	extent_destroy_default,
	extent_commit_default,
	extent_decommit_default
#ifdef PAGES_CAN_PURGE_LAZY
	,
	extent_purge_lazy_default
#else
	,
	NULL
#endif
#ifdef PAGES_CAN_PURGE_FORCED
	,
	extent_purge_forced_default
#else
	,
	NULL
#endif
#ifdef JEMALLOC_MAPS_COALESCE
	,
	extent_split_default,
	extent_merge_default
#endif
};

/* Used exclusively for gdump triggering. */
static atomic_zu_t curpages;
static atomic_zu_t highpages;

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void extent_deregister(tsdn_t *tsdn, extent_t *extent);
static extent_t *extent_recycle(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr,
    size_t usize, size_t pad, size_t alignment, bool slab, szind_t szind,
    bool *zero, bool *commit);
static extent_t *extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
    extent_t *extent, bool *coalesced);
static void extent_record(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent);

/******************************************************************************/

rb_gen(UNUSED, extent_avail_, extent_tree_t, extent_t, rb_link,
    extent_esnead_comp)

extent_t *
extent_alloc(tsdn_t *tsdn, arena_t *arena) {
	witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);

	malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
	extent_t *extent = extent_avail_first(&arena->extent_avail);
	if (extent == NULL) {
		malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
		return base_alloc_extent(tsdn, arena->base);
	}
	extent_avail_remove(&arena->extent_avail, extent);
	malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
	return extent;
}

void
extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
	witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);

	malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
	extent_avail_insert(&arena->extent_avail, extent);
	malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
}

extent_hooks_t *
extent_hooks_get(arena_t *arena) {
	return base_extent_hooks_get(arena->base);
}

extent_hooks_t *
extent_hooks_set(arena_t *arena, extent_hooks_t *extent_hooks) {
	return base_extent_hooks_set(arena->base, extent_hooks);
}

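/*
 * Usage sketch (illustrative only, not part of this file's build): an
 * application would typically swap in custom hooks via the
 * "arena.<i>.extent_hooks" mallctl, which routes to extent_hooks_set().
 * Assuming a hypothetical my_hooks table laid out like
 * extent_hooks_default:
 *
 *	extent_hooks_t *new_hooks = &my_hooks;
 *	size_t sz = sizeof(extent_hooks_t *);
 *	mallctl("arena.0.extent_hooks", NULL, NULL, (void *)&new_hooks, sz);
 */
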
static void
extent_hooks_assure_initialized(arena_t *arena,
    extent_hooks_t **r_extent_hooks) {
	if (*r_extent_hooks == EXTENT_HOOKS_INITIALIZER) {
		*r_extent_hooks = extent_hooks_get(arena);
	}
}

#ifndef JEMALLOC_JET
static
#endif
size_t
extent_size_quantize_floor(size_t size) {
	size_t ret;
	pszind_t pind;

	assert(size > 0);
	assert((size & PAGE_MASK) == 0);

	pind = psz2ind(size - large_pad + 1);
	if (pind == 0) {
		/*
		 * Avoid underflow.  This short-circuit would also do the right
		 * thing for all sizes in the range for which there are
		 * PAGE-spaced size classes, but it's simplest to just handle
		 * the one case that would cause erroneous results.
		 */
		return size;
	}
	ret = pind2sz(pind - 1) + large_pad;
	assert(ret <= size);
	return ret;
}

#ifndef JEMALLOC_JET
static
#endif
size_t
extent_size_quantize_ceil(size_t size) {
	size_t ret;

	assert(size > 0);
	assert(size - large_pad <= LARGE_MAXCLASS);
	assert((size & PAGE_MASK) == 0);

	ret = extent_size_quantize_floor(size);
	if (ret < size) {
		/*
		 * Skip a quantization that may have an adequately large extent,
		 * because under-sized extents may be mixed in.  This only
		 * happens when an unusual size is requested, i.e. for aligned
		 * allocation, and is just one of several places where linear
		 * search would potentially find sufficiently aligned available
		 * memory somewhere lower.
		 */
		ret = pind2sz(psz2ind(ret - large_pad + 1)) + large_pad;
	}
	return ret;
}

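/*
 * Worked example (a sketch, assuming 4 KiB pages, large_pad == 0, and
 * adjacent page size classes 28 KiB and 32 KiB): for size == 30 KiB,
 * extent_size_quantize_floor() returns 28 KiB (the largest class that
 * does not exceed size), while extent_size_quantize_ceil() returns
 * 32 KiB (the smallest class guaranteed to satisfy size).  Both
 * functions are the identity when size is itself a size class.
 */
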
/* Generate pairing heap functions. */
ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp)

bool
extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,
    bool delay_coalesce) {
	if (malloc_mutex_init(&extents->mtx, "extents", WITNESS_RANK_EXTENTS)) {
		return true;
	}
	for (unsigned i = 0; i < NPSIZES+1; i++) {
		extent_heap_new(&extents->heaps[i]);
	}
	bitmap_init(extents->bitmap, &extents_bitmap_info, true);
	extent_list_init(&extents->lru);
	atomic_store_zu(&extents->npages, 0, ATOMIC_RELAXED);
	extents->state = state;
	extents->delay_coalesce = delay_coalesce;
	return false;
}

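/*
 * Note: extents_insert_locked()/extents_remove_locked() below keep bit
 * pind of extents->bitmap clear exactly when heaps[pind] is non-empty,
 * so the fit functions can locate the first non-empty heap with a single
 * bitmap_ffu() scan rather than probing every heap.
 */
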
extent_state_t
extents_state_get(const extents_t *extents) {
	return extents->state;
}

size_t
extents_npages_get(extents_t *extents) {
	return atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
}

static void
extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent,
    bool preserve_lru) {
	malloc_mutex_assert_owner(tsdn, &extents->mtx);
	assert(extent_state_get(extent) == extents->state);

	size_t size = extent_size_get(extent);
	size_t psz = extent_size_quantize_floor(size);
	pszind_t pind = psz2ind(psz);
	if (extent_heap_empty(&extents->heaps[pind])) {
		bitmap_unset(extents->bitmap, &extents_bitmap_info,
		    (size_t)pind);
	}
	extent_heap_insert(&extents->heaps[pind], extent);
	if (!preserve_lru) {
		extent_list_append(&extents->lru, extent);
	}
	size_t npages = size >> LG_PAGE;
	/*
	 * All modifications to npages hold the mutex (as asserted above), so we
	 * don't need an atomic fetch-add; we can get by with a load followed by
	 * a store.
	 */
	size_t cur_extents_npages =
	    atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
	atomic_store_zu(&extents->npages, cur_extents_npages + npages,
	    ATOMIC_RELAXED);
}

static void
extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent,
    bool preserve_lru) {
	malloc_mutex_assert_owner(tsdn, &extents->mtx);
	assert(extent_state_get(extent) == extents->state);

	size_t size = extent_size_get(extent);
	size_t psz = extent_size_quantize_floor(size);
	pszind_t pind = psz2ind(psz);
	extent_heap_remove(&extents->heaps[pind], extent);
	if (extent_heap_empty(&extents->heaps[pind])) {
		bitmap_set(extents->bitmap, &extents_bitmap_info,
		    (size_t)pind);
	}
	if (!preserve_lru) {
		extent_list_remove(&extents->lru, extent);
	}
	size_t npages = size >> LG_PAGE;
	/*
	 * As in extents_insert_locked, we hold extents->mtx and so don't need
	 * atomic operations for updating extents->npages.
	 */
	size_t cur_extents_npages =
	    atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
	assert(cur_extents_npages >= npages);
	atomic_store_zu(&extents->npages,
	    cur_extents_npages - (size >> LG_PAGE), ATOMIC_RELAXED);
}

/* Do any-best-fit extent selection, i.e. select any extent that best fits. */
static extent_t *
extents_best_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    size_t size) {
	pszind_t pind = psz2ind(extent_size_quantize_ceil(size));
	pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
	    (size_t)pind);
	if (i < NPSIZES+1) {
		assert(!extent_heap_empty(&extents->heaps[i]));
		extent_t *extent = extent_heap_any(&extents->heaps[i]);
		assert(extent_size_get(extent) >= size);
		return extent;
	}

	return NULL;
}

/*
 * Do first-fit extent selection, i.e. select the oldest/lowest extent that is
 * large enough.
 */
static extent_t *
extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    size_t size) {
	extent_t *ret = NULL;

	pszind_t pind = psz2ind(extent_size_quantize_ceil(size));
	for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
	    &extents_bitmap_info, (size_t)pind); i < NPSIZES+1; i =
	    (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
	    (size_t)i+1)) {
		assert(!extent_heap_empty(&extents->heaps[i]));
		extent_t *extent = extent_heap_first(&extents->heaps[i]);
		assert(extent_size_get(extent) >= size);
		if (ret == NULL || extent_snad_comp(extent, ret) < 0) {
			ret = extent;
		}
		if (i == NPSIZES) {
			break;
		}
		assert(i < NPSIZES);
	}

	return ret;
}

/*
 * Do {best,first}-fit extent selection, where the selection policy choice is
 * based on extents->delay_coalesce.  Best-fit selection requires less
 * searching, but its layout policy is less stable and may cause higher virtual
 * memory fragmentation as a side effect.
 */
static extent_t *
extents_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    size_t size) {
	malloc_mutex_assert_owner(tsdn, &extents->mtx);

	return extents->delay_coalesce ? extents_best_fit_locked(tsdn, arena,
	    extents, size) : extents_first_fit_locked(tsdn, arena, extents,
	    size);
}

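/*
 * Policy illustration (a sketch): given free extents of 16 KiB (newer)
 * and 32 KiB (older), a 16 KiB request under best-fit takes any extent
 * from the 16 KiB heap, whereas first-fit compares the heads of all
 * sufficiently large heaps by (serial number, address) and may instead
 * select and split the older 32 KiB extent, trading extra search for a
 * more stable layout.
 */
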
static bool
extent_try_delayed_coalesce(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
    extent_t *extent) {
	extent_state_set(extent, extent_state_active);
	bool coalesced;
	extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, rtree_ctx,
	    extents, extent, &coalesced);
	extent_state_set(extent, extents_state_get(extents));

	if (!coalesced) {
		return true;
	}
	extents_insert_locked(tsdn, extents, extent, true);
	return false;
}

extent_t *
extents_alloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, void *new_addr, size_t size, size_t pad,
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
	assert(size + pad != 0);
	assert(alignment != 0);
	witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);

	return extent_recycle(tsdn, arena, r_extent_hooks, extents, new_addr,
	    size, pad, alignment, slab, szind, zero, commit);
}

void
extents_dalloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, extent_t *extent) {
	assert(extent_base_get(extent) != NULL);
	assert(extent_size_get(extent) != 0);
	witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);

	extent_addr_set(extent, extent_base_get(extent));
	extent_zeroed_set(extent, false);

	extent_record(tsdn, arena, r_extent_hooks, extents, extent);
}

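/*
 * Remove and return an extent from the eviction (LRU) end of extents, or
 * NULL if doing so would drop the container below npages_min pages.  The
 * caller takes ownership of the returned extent.
 */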
extent_t *
extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, size_t npages_min) {
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	malloc_mutex_lock(tsdn, &extents->mtx);

	/*
	 * Get the LRU coalesced extent, if any.  If coalescing was delayed,
	 * the loop will iterate until the LRU extent is fully coalesced.
	 */
	extent_t *extent;
	while (true) {
		/* Get the LRU extent, if any. */
		extent = extent_list_first(&extents->lru);
		if (extent == NULL) {
			goto label_return;
		}
		/* Check the eviction limit. */
		size_t npages = extent_size_get(extent) >> LG_PAGE;
		size_t extents_npages = atomic_load_zu(&extents->npages,
		    ATOMIC_RELAXED);
		if (extents_npages - npages < npages_min) {
			extent = NULL;
			goto label_return;
		}
		extents_remove_locked(tsdn, extents, extent, false);
		if (!extents->delay_coalesce) {
			break;
		}
		/* Try to coalesce. */
		if (extent_try_delayed_coalesce(tsdn, arena, r_extent_hooks,
		    rtree_ctx, extents, extent)) {
			break;
		}
		/*
		 * The LRU extent was just coalesced and the result placed in
		 * the LRU at its neighbor's position.  Start over.
		 */
	}

	/*
	 * Either mark the extent active or deregister it to protect against
	 * concurrent operations.
	 */
	switch (extents_state_get(extents)) {
	case extent_state_active:
		not_reached();
	case extent_state_dirty:
	case extent_state_muzzy:
		extent_state_set(extent, extent_state_active);
		break;
	case extent_state_retained:
		extent_deregister(tsdn, extent);
		break;
	default:
		not_reached();
	}

label_return:
	malloc_mutex_unlock(tsdn, &extents->mtx);
	return extent;
}

static void
extents_leak(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, extent_t *extent) {
	/*
	 * Leak extent after making sure its pages have already been purged, so
	 * that this is only a virtual memory leak.
	 */
	if (extents_state_get(extents) == extent_state_dirty) {
		if (extent_purge_lazy_wrapper(tsdn, arena, r_extent_hooks,
		    extent, 0, extent_size_get(extent))) {
			extent_purge_forced_wrapper(tsdn, arena, r_extent_hooks,
			    extent, 0, extent_size_get(extent));
		}
	}
	extent_dalloc(tsdn, arena, extent);
}

void
extents_prefork(tsdn_t *tsdn, extents_t *extents) {
	malloc_mutex_prefork(tsdn, &extents->mtx);
}

void
extents_postfork_parent(tsdn_t *tsdn, extents_t *extents) {
	malloc_mutex_postfork_parent(tsdn, &extents->mtx);
}

void
extents_postfork_child(tsdn_t *tsdn, extents_t *extents) {
	malloc_mutex_postfork_child(tsdn, &extents->mtx);
}

static void
extent_deactivate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    extent_t *extent, bool preserve_lru) {
	assert(extent_arena_get(extent) == arena);
	assert(extent_state_get(extent) == extent_state_active);

	extent_state_set(extent, extents_state_get(extents));
	extents_insert_locked(tsdn, extents, extent, preserve_lru);
}

static void
extent_deactivate(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    extent_t *extent, bool preserve_lru) {
	malloc_mutex_lock(tsdn, &extents->mtx);
	extent_deactivate_locked(tsdn, arena, extents, extent, preserve_lru);
	malloc_mutex_unlock(tsdn, &extents->mtx);
}

static void
extent_activate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    extent_t *extent, bool preserve_lru) {
	assert(extent_arena_get(extent) == arena);
	assert(extent_state_get(extent) == extents_state_get(extents));

	extents_remove_locked(tsdn, extents, extent, preserve_lru);
	extent_state_set(extent, extent_state_active);
}

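/*
 * Acquire the rtree leaf elements for the first and last pages of extent
 * (elm_b is left NULL for single-page extents); holding both leaves
 * locked allows the extent's boundary mappings to be updated atomically
 * with respect to concurrent lookups.
 */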
static bool
extent_rtree_acquire(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
    const extent_t *extent, bool dependent, bool init_missing,
    rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b) {
	*r_elm_a = rtree_leaf_elm_acquire(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)extent_base_get(extent), dependent, init_missing);
	if (!dependent && *r_elm_a == NULL) {
		return true;
	}
	assert(*r_elm_a != NULL);

	if (extent_size_get(extent) > PAGE) {
		*r_elm_b = rtree_leaf_elm_acquire(tsdn, &extents_rtree,
		    rtree_ctx, (uintptr_t)extent_last_get(extent), dependent,
		    init_missing);
		if (!dependent && *r_elm_b == NULL) {
			rtree_leaf_elm_release(tsdn, &extents_rtree, *r_elm_a);
			return true;
		}
		assert(*r_elm_b != NULL);
	} else {
		*r_elm_b = NULL;
	}

	return false;
}

static void
extent_rtree_write_acquired(tsdn_t *tsdn, rtree_leaf_elm_t *elm_a,
    rtree_leaf_elm_t *elm_b, extent_t *extent, szind_t szind, bool slab) {
	rtree_leaf_elm_write(tsdn, &extents_rtree, elm_a, true, extent, szind,
	    slab);
	if (elm_b != NULL) {
		rtree_leaf_elm_write(tsdn, &extents_rtree, elm_b, true, extent,
		    szind, slab);
	}
}

static void
extent_rtree_release(tsdn_t *tsdn, rtree_leaf_elm_t *elm_a,
    rtree_leaf_elm_t *elm_b) {
	rtree_leaf_elm_release(tsdn, &extents_rtree, elm_a);
	if (elm_b != NULL) {
		rtree_leaf_elm_release(tsdn, &extents_rtree, elm_b);
	}
}

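/*
 * Map every interior page of a slab extent to the extent in the rtree
 * (the two boundary pages are written separately via
 * extent_rtree_write_acquired()), so that pointers into the middle of
 * the slab can be resolved during deallocation.
 */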
static void
extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, extent_t *extent,
    szind_t szind) {
	assert(extent_slab_get(extent));

	/* Register interior. */
	for (size_t i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
		rtree_write(tsdn, &extents_rtree, rtree_ctx,
		    (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
		    LG_PAGE), extent, szind, true);
	}
}

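/*
 * Sketch of the lock-free high-watermark update used below:
 * atomic_fetch_add_zu() bumps curpages, then a weak-CAS loop raises
 * highpages only while cur exceeds the previously observed maximum.  A
 * gdump profile fires only when this thread established a new maximum.
 */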
static void
extent_gdump_add(tsdn_t *tsdn, const extent_t *extent) {
	cassert(config_prof);
	/* prof_gdump() requirement. */
	witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);

	if (opt_prof && extent_state_get(extent) == extent_state_active) {
		size_t nadd = extent_size_get(extent) >> LG_PAGE;
		size_t cur = atomic_fetch_add_zu(&curpages, nadd,
		    ATOMIC_RELAXED) + nadd;
		size_t high = atomic_load_zu(&highpages, ATOMIC_RELAXED);
		while (cur > high && !atomic_compare_exchange_weak_zu(
		    &highpages, &high, cur, ATOMIC_RELAXED, ATOMIC_RELAXED)) {
			/*
			 * Don't refresh cur, because it may have decreased
			 * since this thread lost the highpages update race.
			 * Note that high is updated in case of CAS failure.
			 */
		}
		if (cur > high && prof_gdump_get_unlocked()) {
			prof_gdump(tsdn);
		}
	}
}

static void
extent_gdump_sub(tsdn_t *tsdn, const extent_t *extent) {
	cassert(config_prof);

	if (opt_prof && extent_state_get(extent) == extent_state_active) {
		size_t nsub = extent_size_get(extent) >> LG_PAGE;
		assert(atomic_load_zu(&curpages, ATOMIC_RELAXED) >= nsub);
		atomic_fetch_sub_zu(&curpages, nsub, ATOMIC_RELAXED);
	}
}

static bool
extent_register_impl(tsdn_t *tsdn, extent_t *extent, bool gdump_add) {
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	rtree_leaf_elm_t *elm_a, *elm_b;

	if (extent_rtree_acquire(tsdn, rtree_ctx, extent, false, true, &elm_a,
	    &elm_b)) {
		return true;
	}
	szind_t szind = extent_szind_get_maybe_invalid(extent);
	bool slab = extent_slab_get(extent);
	extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent, szind, slab);
	if (slab) {
		extent_interior_register(tsdn, rtree_ctx, extent, szind);
	}
	extent_rtree_release(tsdn, elm_a, elm_b);

	if (config_prof && gdump_add) {
		extent_gdump_add(tsdn, extent);
	}

	return false;
}

static bool
extent_register(tsdn_t *tsdn, extent_t *extent) {
	return extent_register_impl(tsdn, extent, true);
}

static bool
extent_register_no_gdump_add(tsdn_t *tsdn, extent_t *extent) {
	return extent_register_impl(tsdn, extent, false);
}

static void
extent_reregister(tsdn_t *tsdn, extent_t *extent) {
	bool err = extent_register(tsdn, extent);
	assert(!err);
}

static void
extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
    extent_t *extent) {
	size_t i;

	assert(extent_slab_get(extent));

	for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
		rtree_clear(tsdn, &extents_rtree, rtree_ctx,
		    (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
		    LG_PAGE));
	}
}

static void
extent_deregister(tsdn_t *tsdn, extent_t *extent) {
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	rtree_leaf_elm_t *elm_a, *elm_b;

	extent_rtree_acquire(tsdn, rtree_ctx, extent, true, false, &elm_a,
	    &elm_b);
	extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL, NSIZES, false);
	if (extent_slab_get(extent)) {
		extent_interior_deregister(tsdn, rtree_ctx, extent);
		extent_slab_set(extent, false);
	}
	extent_rtree_release(tsdn, elm_a, elm_b);

	if (config_prof) {
		extent_gdump_sub(tsdn, extent);
	}
}

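/*
 * extent_recycle() (below) proceeds in phases: extent_recycle_extract()
 * pulls a sufficiently large extent out of extents,
 * extent_recycle_split() trims any leading/trailing space back into the
 * container, and the remainder is committed and zeroed as requested.
 */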
static extent_t *
extent_recycle_extract(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
    bool locked, void *new_addr, size_t size, size_t pad, size_t alignment,
    bool slab, bool *zero, bool *commit) {
	witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, locked ? 1 : 0);
	if (locked) {
		malloc_mutex_assert_owner(tsdn, &extents->mtx);
	}
	assert(alignment > 0);
	if (config_debug && new_addr != NULL) {
		/*
		 * Non-NULL new_addr has two use cases:
		 *
		 *   1) Recycle a known-extant extent, e.g. during purging.
		 *   2) Perform in-place expanding reallocation.
		 *
		 * Regardless of use case, new_addr must either refer to a
		 * non-existing extent, or to the base of an extant extent,
		 * since only active slabs support interior lookups (which of
		 * course cannot be recycled).
		 */
		assert(PAGE_ADDR2BASE(new_addr) == new_addr);
		assert(pad == 0);
		assert(alignment <= PAGE);
	}

	size_t esize = size + pad;
	size_t alloc_size = esize + PAGE_CEILING(alignment) - PAGE;
	/* Beware size_t wrap-around. */
	if (alloc_size < esize) {
		return NULL;
	}
	if (!locked) {
		malloc_mutex_lock(tsdn, &extents->mtx);
	}
	extent_hooks_assure_initialized(arena, r_extent_hooks);
	extent_t *extent;
	if (new_addr != NULL) {
		rtree_leaf_elm_t *elm = rtree_leaf_elm_acquire(tsdn,
		    &extents_rtree, rtree_ctx, (uintptr_t)new_addr, false,
		    false);
		if (elm != NULL) {
			extent = rtree_leaf_elm_extent_read(tsdn,
			    &extents_rtree, elm, true, true);
			if (extent != NULL) {
				assert(extent_base_get(extent) == new_addr);
				if (extent_arena_get(extent) != arena ||
				    extent_size_get(extent) < esize ||
				    extent_state_get(extent) !=
				    extents_state_get(extents)) {
					extent = NULL;
				}
			}
			rtree_leaf_elm_release(tsdn, &extents_rtree, elm);
		} else {
			extent = NULL;
		}
	} else {
		extent = extents_fit_locked(tsdn, arena, extents, alloc_size);
	}
	if (extent == NULL) {
		if (!locked) {
			malloc_mutex_unlock(tsdn, &extents->mtx);
		}
		return NULL;
	}

	extent_activate_locked(tsdn, arena, extents, extent, false);
	if (!locked) {
		malloc_mutex_unlock(tsdn, &extents->mtx);
	}

	if (extent_zeroed_get(extent)) {
		*zero = true;
	}
	if (extent_committed_get(extent)) {
		*commit = true;
	}

	return extent;
}

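/*
 * Size arithmetic sketch: the extract phase searches for alloc_size ==
 * esize + PAGE_CEILING(alignment) - PAGE, which guarantees that some
 * esize-byte subrange of whatever extent is found is suitably aligned.
 * E.g. with PAGE == 4 KiB, esize == 8 KiB, and alignment == 16 KiB,
 * alloc_size == 20 KiB; any 20 KiB range contains a 16 KiB-aligned
 * 8 KiB subrange, and the split below returns the lead and trail
 * slivers to the container.
 */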
static extent_t *
extent_recycle_split(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
    void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
    szind_t szind, extent_t *extent) {
	size_t esize = size + pad;
	size_t leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(extent),
	    PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(extent);
	assert(new_addr == NULL || leadsize == 0);
	assert(extent_size_get(extent) >= leadsize + esize);
	size_t trailsize = extent_size_get(extent) - leadsize - esize;

	/* Split the lead. */
	if (leadsize != 0) {
		extent_t *lead = extent;
		extent = extent_split_wrapper(tsdn, arena, r_extent_hooks,
		    lead, leadsize, NSIZES, false, esize + trailsize, szind,
		    slab);
		if (extent == NULL) {
			extent_deregister(tsdn, lead);
			extents_leak(tsdn, arena, r_extent_hooks, extents,
			    lead);
			return NULL;
		}
		extent_deactivate(tsdn, arena, extents, lead, false);
	}

	/* Split the trail. */
	if (trailsize != 0) {
		extent_t *trail = extent_split_wrapper(tsdn, arena,
		    r_extent_hooks, extent, esize, szind, slab, trailsize,
		    NSIZES, false);
		if (trail == NULL) {
			extent_deregister(tsdn, extent);
			extents_leak(tsdn, arena, r_extent_hooks, extents,
			    extent);
			return NULL;
		}
		extent_deactivate(tsdn, arena, extents, trail, false);
	} else if (leadsize == 0) {
		/*
		 * Splitting causes szind to be set as a side effect, but no
		 * splitting occurred.
		 */
		extent_szind_set(extent, szind);
		if (szind != NSIZES) {
			rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
			    (uintptr_t)extent_addr_get(extent), szind, slab);
			if (slab && extent_size_get(extent) > PAGE) {
				rtree_szind_slab_update(tsdn, &extents_rtree,
				    rtree_ctx,
				    (uintptr_t)extent_past_get(extent) -
				    (uintptr_t)PAGE, szind, slab);
			}
		}
	}

	return extent;
}

static extent_t *
extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, void *new_addr, size_t size, size_t pad,
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
	witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
	assert(new_addr == NULL || !slab);
	assert(pad == 0 || !slab);
	assert(!*zero || !slab);

	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	bool committed = false;
	extent_t *extent = extent_recycle_extract(tsdn, arena, r_extent_hooks,
	    rtree_ctx, extents, false, new_addr, size, pad, alignment, slab,
	    zero, &committed);
	if (extent == NULL) {
		return NULL;
	}
	if (committed) {
		*commit = true;
	}

	extent = extent_recycle_split(tsdn, arena, r_extent_hooks, rtree_ctx,
	    extents, new_addr, size, pad, alignment, slab, szind, extent);
	if (extent == NULL) {
		return NULL;
	}

	if (*commit && !extent_committed_get(extent)) {
		if (extent_commit_wrapper(tsdn, arena, r_extent_hooks, extent,
		    0, extent_size_get(extent))) {
			extent_record(tsdn, arena, r_extent_hooks, extents,
			    extent);
			return NULL;
		}
		extent_zeroed_set(extent, true);
	}

	if (pad != 0) {
		extent_addr_randomize(tsdn, extent, alignment);
	}
	assert(extent_state_get(extent) == extent_state_active);
	if (slab) {
		extent_slab_set(extent, slab);
		extent_interior_register(tsdn, rtree_ctx, extent, szind);
	}

	if (*zero) {
		void *addr = extent_base_get(extent);
		size_t size = extent_size_get(extent);
		if (!extent_zeroed_get(extent)) {
			if (pages_purge_forced(addr, size)) {
				memset(addr, 0, size);
			}
		} else if (config_debug) {
			size_t *p = (size_t *)(uintptr_t)addr;
			for (size_t i = 0; i < size / sizeof(size_t); i++) {
				assert(p[i] == 0);
			}
		}
	}
	return extent;
}

/*
 * If the caller specifies (!*zero), it is still possible to receive zeroed
 * memory, in which case *zero is toggled to true.  arena_extent_alloc() takes
 * advantage of this to avoid demanding zeroed extents, but taking advantage of
 * them if they are returned.
 */
static void *
extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) {
	void *ret;

	assert(size != 0);
	assert(alignment != 0);

	/* "primary" dss. */
	if (have_dss && dss_prec == dss_prec_primary && (ret =
	    extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
	    commit)) != NULL) {
		return ret;
	}
	/* mmap. */
	if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit))
	    != NULL) {
		return ret;
	}
	/* "secondary" dss. */
	if (have_dss && dss_prec == dss_prec_secondary && (ret =
	    extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
	    commit)) != NULL) {
		return ret;
	}

	/* All strategies for allocation failed. */
	return NULL;
}

static void *
extent_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit) {
	void *ret;

	ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
	    commit, (dss_prec_t)atomic_load_u(&arena->dss_prec,
	    ATOMIC_RELAXED));
	return ret;
}

static void *
extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
	tsdn_t *tsdn;
	arena_t *arena;

	assert(extent_hooks == &extent_hooks_default);

	tsdn = tsdn_fetch();
	arena = arena_get(tsdn, arena_ind, false);
	/*
	 * The arena we're allocating on behalf of must have been initialized
	 * already.
	 */
	assert(arena != NULL);

	return extent_alloc_default_impl(tsdn, arena, new_addr, size,
	    alignment, zero, commit);
}

/*
 * If virtual memory is retained, create increasingly larger extents from which
 * to split requested extents in order to limit the total number of disjoint
 * virtual memory ranges retained by each arena.
 */
static extent_t *
extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
	assert(pad == 0 || !slab);
	assert(!*zero || !slab);

	/*
	 * Check whether the next extent size in the series would be large
	 * enough to satisfy this request.  If no, just bail, so that e.g. a
	 * series of unsatisfiable allocation requests doesn't cause unused
	 * extent creation as a side effect.
	 */
	size_t esize = size + pad;
	size_t alloc_size = pind2sz(atomic_load_u(&arena->extent_grow_next,
	    ATOMIC_RELAXED));
	size_t alloc_size_min = esize + PAGE_CEILING(alignment) - PAGE;
	/* Beware size_t wrap-around. */
	if (alloc_size_min < esize) {
		return NULL;
	}
	if (alloc_size < alloc_size_min) {
		return NULL;
	}
	extent_t *extent = extent_alloc(tsdn, arena);
	if (extent == NULL) {
		return NULL;
	}
	bool zeroed = false;
	bool committed = false;
	void *ptr = extent_alloc_core(tsdn, arena, new_addr, alloc_size, PAGE,
	    &zeroed, &committed, (dss_prec_t)atomic_load_u(&arena->dss_prec,
	    ATOMIC_RELAXED));
	extent_init(extent, arena, ptr, alloc_size, false, NSIZES,
	    arena_extent_sn_next(arena), extent_state_active, zeroed,
	    committed);
	if (ptr == NULL || extent_register_no_gdump_add(tsdn, extent)) {
		extent_dalloc(tsdn, arena, extent);
		return NULL;
	}

	size_t leadsize = ALIGNMENT_CEILING((uintptr_t)ptr,
	    PAGE_CEILING(alignment)) - (uintptr_t)ptr;
	assert(new_addr == NULL || leadsize == 0);
	assert(alloc_size >= leadsize + esize);
	size_t trailsize = alloc_size - leadsize - esize;
	if (extent_zeroed_get(extent) && extent_committed_get(extent)) {
		*zero = true;
	}
	if (extent_committed_get(extent)) {
		*commit = true;
	}

	/* Split the lead. */
	if (leadsize != 0) {
		extent_t *lead = extent;
		extent = extent_split_wrapper(tsdn, arena, r_extent_hooks, lead,
		    leadsize, NSIZES, false, esize + trailsize, szind, slab);
		if (extent == NULL) {
			extent_deregister(tsdn, lead);
			extents_leak(tsdn, arena, r_extent_hooks,
			    &arena->extents_retained, lead);
			return NULL;
		}
		extent_record(tsdn, arena, r_extent_hooks,
		    &arena->extents_retained, lead);
	}

/* Split the trail. */
|
|
|
|
if (trailsize != 0) {
|
|
|
|
extent_t *trail = extent_split_wrapper(tsdn, arena,
|
2017-03-17 08:57:52 +08:00
|
|
|
r_extent_hooks, extent, esize, szind, slab, trailsize,
|
|
|
|
NSIZES, false);
|
2016-11-22 15:23:03 +08:00
|
|
|
if (trail == NULL) {
|
|
|
|
extent_deregister(tsdn, extent);
|
2017-01-30 13:57:14 +08:00
|
|
|
extents_leak(tsdn, arena, r_extent_hooks,
|
|
|
|
&arena->extents_retained, extent);
|
2017-01-20 10:15:45 +08:00
|
|
|
return NULL;
|
2016-11-22 15:23:03 +08:00
|
|
|
}
|
2017-01-30 13:57:14 +08:00
|
|
|
extent_record(tsdn, arena, r_extent_hooks,
|
|
|
|
&arena->extents_retained, trail);
|
2016-11-22 15:23:03 +08:00
|
|
|
} else if (leadsize == 0) {
|
|
|
|
/*
|
2017-03-14 08:36:57 +08:00
|
|
|
* Splitting causes szind to be set as a side effect, but no
|
2016-11-22 15:23:03 +08:00
|
|
|
* splitting occurred.
|
|
|
|
*/
|
2017-03-17 08:57:52 +08:00
|
|
|
rtree_ctx_t rtree_ctx_fallback;
|
|
|
|
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
|
|
|
|
&rtree_ctx_fallback);
|
|
|
|
|
2017-03-14 08:36:57 +08:00
|
|
|
extent_szind_set(extent, szind);
|
2017-03-17 08:57:52 +08:00
|
|
|
if (szind != NSIZES) {
|
|
|
|
rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
|
|
|
|
(uintptr_t)extent_addr_get(extent), szind, slab);
|
|
|
|
if (slab && extent_size_get(extent) > PAGE) {
|
|
|
|
rtree_szind_slab_update(tsdn, &extents_rtree,
|
|
|
|
rtree_ctx,
|
|
|
|
(uintptr_t)extent_past_get(extent) -
|
|
|
|
(uintptr_t)PAGE, szind, slab);
|
|
|
|
}
|
|
|
|
}
|
2016-11-22 15:23:03 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (*commit && !extent_committed_get(extent)) {
|
|
|
|
if (extent_commit_wrapper(tsdn, arena, r_extent_hooks, extent,
|
|
|
|
0, extent_size_get(extent))) {
|
2017-01-30 13:57:14 +08:00
|
|
|
extent_record(tsdn, arena, r_extent_hooks,
|
|
|
|
&arena->extents_retained, extent);
|
2017-01-20 10:15:45 +08:00
|
|
|
return NULL;
|
2016-11-22 15:23:03 +08:00
|
|
|
}
|
|
|
|
extent_zeroed_set(extent, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (config_prof) {
|
2017-01-30 13:57:14 +08:00
|
|
|
/* Adjust gdump stats now that extent is final size. */
|
|
|
|
extent_gdump_add(tsdn, extent);
|
2016-11-22 15:23:03 +08:00
|
|
|
}
|
2017-01-16 08:56:30 +08:00
|
|
|
if (pad != 0) {
|
2016-11-22 15:23:03 +08:00
|
|
|
extent_addr_randomize(tsdn, extent, alignment);
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
2016-11-22 15:23:03 +08:00
|
|
|
if (slab) {
|
|
|
|
rtree_ctx_t rtree_ctx_fallback;
|
|
|
|
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
|
|
|
|
&rtree_ctx_fallback);
|
|
|
|
|
|
|
|
extent_slab_set(extent, true);
|
2017-03-17 08:57:52 +08:00
|
|
|
extent_interior_register(tsdn, rtree_ctx, extent, szind);
|
2016-11-22 15:23:03 +08:00
|
|
|
}
|
2017-01-16 08:56:30 +08:00
|
|
|
if (*zero && !extent_zeroed_get(extent)) {
|
2017-03-14 08:36:57 +08:00
|
|
|
void *addr = extent_base_get(extent);
|
|
|
|
size_t size = extent_size_get(extent);
|
|
|
|
if (pages_purge_forced(addr, size)) {
|
|
|
|
memset(addr, 0, size);
|
2017-03-10 09:20:00 +08:00
|
|
|
}
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
2017-01-28 07:03:11 +08:00
|
|
|
/*
|
|
|
|
* Increment extent_grow_next, but take care to do so atomically and
|
|
|
|
* bail out if the increment would exceed the legal range.
|
|
|
|
*/
|
2017-04-05 08:22:24 +08:00
|
|
|
pszind_t egn = atomic_load_u(&arena->extent_grow_next, ATOMIC_RELAXED);
|
2017-01-28 07:03:11 +08:00
|
|
|
while (true) {
|
|
|
|
if (egn + 1 == NPSIZES) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
assert(egn + 1 < NPSIZES);
|
2017-04-05 08:22:24 +08:00
|
|
|
if (atomic_compare_exchange_weak_u(&arena->extent_grow_next,
|
|
|
|
&egn, egn + 1, ATOMIC_RELAXED, ATOMIC_RELAXED)) {
|
2017-01-28 07:03:11 +08:00
|
|
|
break;
|
|
|
|
}
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
2017-01-20 10:15:45 +08:00
|
|
|
return extent;
|
2016-11-22 15:23:03 +08:00
|
|
|
}
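/*
 * Editorial sketch (not part of the library): the bounded increment above is
 * an instance of a generic CAS loop over the atomics used in this file.  A
 * failed compare-exchange refreshes the expected value, so each iteration
 * either bumps the counter by one or observes that the bound was reached:
 *
 *	unsigned cur = atomic_load_u(&counter, ATOMIC_RELAXED);
 *	while (cur + 1 != limit && !atomic_compare_exchange_weak_u(&counter,
 *	    &cur, cur + 1, ATOMIC_RELAXED, ATOMIC_RELAXED)) {
 *		// cur now holds the freshly observed value; retry.
 *	}
 *
 * "counter" and "limit" are placeholder names for illustration only.
 */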

static extent_t *
extent_alloc_retained(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
	extent_t *extent;

	assert(size != 0);
	assert(alignment != 0);

	extent = extent_recycle(tsdn, arena, r_extent_hooks,
	    &arena->extents_retained, new_addr, size, pad, alignment, slab,
	    szind, zero, commit);
	if (extent != NULL) {
		if (config_prof) {
			extent_gdump_add(tsdn, extent);
		}
	}
	if (opt_retain && extent == NULL) {
		extent = extent_grow_retained(tsdn, arena, r_extent_hooks,
		    new_addr, size, pad, alignment, slab, szind, zero, commit);
	}

	return extent;
}
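/*
 * Note (editorial): when opt.retain is enabled, failure to recycle a retained
 * extent falls through to extent_grow_retained() above, which grows the
 * retained virtual memory reserve rather than immediately mapping fresh pages,
 * so mappings are reused instead of being returned to the OS piecemeal.
 */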

static extent_t *
extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
	size_t esize = size + pad;
	extent_t *extent = extent_alloc(tsdn, arena);
	if (extent == NULL) {
		return NULL;
	}
	void *addr;
	if (*r_extent_hooks == &extent_hooks_default) {
		/* Call directly to propagate tsdn. */
		addr = extent_alloc_default_impl(tsdn, arena, new_addr, esize,
		    alignment, zero, commit);
	} else {
		addr = (*r_extent_hooks)->alloc(*r_extent_hooks, new_addr,
		    esize, alignment, zero, commit, arena_ind_get(arena));
	}
	if (addr == NULL) {
		extent_dalloc(tsdn, arena, extent);
		return NULL;
	}
	extent_init(extent, arena, addr, esize, slab, szind,
	    arena_extent_sn_next(arena), extent_state_active, *zero, *commit);
	if (pad != 0) {
		extent_addr_randomize(tsdn, extent, alignment);
	}
	if (extent_register(tsdn, extent)) {
		extents_leak(tsdn, arena, r_extent_hooks,
		    &arena->extents_retained, extent);
		return NULL;
	}

	return extent;
}

extent_t *
extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
	witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);

	extent_t *extent = extent_alloc_retained(tsdn, arena, r_extent_hooks,
	    new_addr, size, pad, alignment, slab, szind, zero, commit);
	if (extent == NULL) {
		extent = extent_alloc_wrapper_hard(tsdn, arena, r_extent_hooks,
		    new_addr, size, pad, alignment, slab, szind, zero, commit);
	}

	return extent;
}
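/*
 * Illustrative sketch (not part of the library): a hypothetical internal
 * caller allocating one zeroed, committed, page-aligned extent of PAGE bytes
 * through the wrapper.  Argument values are examples only.
 *
 *	extent_hooks_t *hooks = EXTENT_HOOKS_INITIALIZER;
 *	bool zero = true, commit = true;
 *	extent_t *ext = extent_alloc_wrapper(tsdn, arena, &hooks,
 *	    NULL, PAGE, 0, PAGE, false, NSIZES, &zero, &commit);
 *	if (ext == NULL) {
 *		// Allocation failed; there is no extent state to clean up.
 *	}
 */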

static bool
extent_can_coalesce(arena_t *arena, extents_t *extents, const extent_t *inner,
    const extent_t *outer) {
	assert(extent_arena_get(inner) == arena);
	if (extent_arena_get(outer) != arena) {
		return false;
	}

	assert(extent_state_get(inner) == extent_state_active);
	if (extent_state_get(outer) != extents->state) {
		return false;
	}

	if (extent_committed_get(inner) != extent_committed_get(outer)) {
		return false;
	}

	return true;
}

static bool
extent_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, extent_t *inner, extent_t *outer, bool forward) {
	assert(extent_can_coalesce(arena, extents, inner, outer));

	if (forward && extents->delay_coalesce) {
		/*
		 * The extent that remains after coalescing must occupy the
		 * outer extent's position in the LRU.  For forward coalescing,
		 * swap the inner extent into the LRU.
		 */
		extent_list_replace(&extents->lru, outer, inner);
	}
	extent_activate_locked(tsdn, arena, extents, outer,
	    extents->delay_coalesce);

	malloc_mutex_unlock(tsdn, &extents->mtx);
	bool err = extent_merge_wrapper(tsdn, arena, r_extent_hooks,
	    forward ? inner : outer, forward ? outer : inner);
	malloc_mutex_lock(tsdn, &extents->mtx);

	if (err) {
		if (forward && extents->delay_coalesce) {
			extent_list_replace(&extents->lru, inner, outer);
		}
		extent_deactivate_locked(tsdn, arena, extents, outer,
		    extents->delay_coalesce);
	}

	return err;
}
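/*
 * Note (editorial): extent_merge_wrapper() keeps its first argument as the
 * surviving extent, so the operands above are ordered by address.  Forward
 * coalescing merges inner (lower address) with outer (its successor);
 * backward coalescing is the mirror image.  For example, merging [a, b) with
 * its successor [b, c) leaves the first operand describing [a, c).
 */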

static extent_t *
extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
    extent_t *extent, bool *coalesced) {
	/*
	 * Continue attempting to coalesce until failure, to protect against
	 * races with other threads that are thwarted by this one.
	 */
	bool again;
	do {
		again = false;

		/* Try to coalesce forward. */
		rtree_leaf_elm_t *next_elm = rtree_leaf_elm_acquire(tsdn,
		    &extents_rtree, rtree_ctx,
		    (uintptr_t)extent_past_get(extent), false, false);
		if (next_elm != NULL) {
			extent_t *next = rtree_leaf_elm_extent_read(tsdn,
			    &extents_rtree, next_elm, true, true);
			/*
			 * extents->mtx only protects against races for
			 * like-state extents, so call extent_can_coalesce()
			 * before releasing the next_elm lock.
			 */
			bool can_coalesce = (next != NULL &&
			    extent_can_coalesce(arena, extents, extent, next));
			rtree_leaf_elm_release(tsdn, &extents_rtree, next_elm);
			if (can_coalesce && !extent_coalesce(tsdn, arena,
			    r_extent_hooks, extents, extent, next, true)) {
				if (extents->delay_coalesce) {
					/* Do minimal coalescing. */
					*coalesced = true;
					return extent;
				}
				again = true;
			}
		}

		/* Try to coalesce backward. */
		rtree_leaf_elm_t *prev_elm = rtree_leaf_elm_acquire(tsdn,
		    &extents_rtree, rtree_ctx,
		    (uintptr_t)extent_before_get(extent), false, false);
		if (prev_elm != NULL) {
			extent_t *prev = rtree_leaf_elm_extent_read(tsdn,
			    &extents_rtree, prev_elm, true, true);
			bool can_coalesce = (prev != NULL &&
			    extent_can_coalesce(arena, extents, extent, prev));
			rtree_leaf_elm_release(tsdn, &extents_rtree, prev_elm);
			if (can_coalesce && !extent_coalesce(tsdn, arena,
			    r_extent_hooks, extents, extent, prev, false)) {
				extent = prev;
				if (extents->delay_coalesce) {
					/* Do minimal coalescing. */
					*coalesced = true;
					return extent;
				}
				again = true;
			}
		}
	} while (again);

	if (extents->delay_coalesce) {
		*coalesced = false;
	}
	return extent;
}
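/*
 * Note (editorial): the neighbor lookups above are pure address arithmetic
 * over the rtree.  Assuming an extent at base b of size s, extent_past_get()
 * yields b + s (the first byte after the extent) and extent_before_get()
 * yields b - PAGE (the last page of any immediately preceding extent), e.g.:
 *
 *	b = 0x7f0000000000, s = 0x4000, PAGE = 0x1000 (example values)
 *	extent_past_get   -> 0x7f0000004000  (candidate forward neighbor)
 *	extent_before_get -> 0x7efffffff000  (candidate backward neighbor)
 */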

static void
extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, extent_t *extent) {
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	assert((extents_state_get(extents) != extent_state_dirty &&
	    extents_state_get(extents) != extent_state_muzzy) ||
	    !extent_zeroed_get(extent));

	malloc_mutex_lock(tsdn, &extents->mtx);
	extent_hooks_assure_initialized(arena, r_extent_hooks);

	extent_szind_set(extent, NSIZES);
	if (extent_slab_get(extent)) {
		extent_interior_deregister(tsdn, rtree_ctx, extent);
		extent_slab_set(extent, false);
	}

	assert(rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)extent_base_get(extent), true) == extent);

	if (!extents->delay_coalesce) {
		extent = extent_try_coalesce(tsdn, arena, r_extent_hooks,
		    rtree_ctx, extents, extent, NULL);
	}

	extent_deactivate_locked(tsdn, arena, extents, extent, false);

	malloc_mutex_unlock(tsdn, &extents->mtx);
}

void
extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;

	witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);

	if (extent_register(tsdn, extent)) {
		extents_leak(tsdn, arena, &extent_hooks,
		    &arena->extents_retained, extent);
		return;
	}
	extent_dalloc_wrapper(tsdn, arena, &extent_hooks, extent);
}

static bool
extent_dalloc_default_impl(void *addr, size_t size) {
	if (!have_dss || !extent_in_dss(addr)) {
		return extent_dalloc_mmap(addr, size);
	}
	return true;
}

static bool
extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    bool committed, unsigned arena_ind) {
	assert(extent_hooks == &extent_hooks_default);

	return extent_dalloc_default_impl(addr, size);
}

static bool
extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent) {
	bool err;

	assert(extent_base_get(extent) != NULL);
	assert(extent_size_get(extent) != 0);
	witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);

	extent_addr_set(extent, extent_base_get(extent));

	extent_hooks_assure_initialized(arena, r_extent_hooks);
	/* Try to deallocate. */
	if (*r_extent_hooks == &extent_hooks_default) {
		/* Call directly to propagate tsdn. */
		err = extent_dalloc_default_impl(extent_base_get(extent),
		    extent_size_get(extent));
	} else {
		err = ((*r_extent_hooks)->dalloc == NULL ||
		    (*r_extent_hooks)->dalloc(*r_extent_hooks,
		    extent_base_get(extent), extent_size_get(extent),
		    extent_committed_get(extent), arena_ind_get(arena)));
	}

	if (!err) {
		extent_dalloc(tsdn, arena, extent);
	}

	return err;
}

void
extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent) {
	witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);

	/*
	 * Deregister first to avoid a race with other allocating threads, and
	 * reregister if deallocation fails.
	 */
	extent_deregister(tsdn, extent);
	if (!extent_dalloc_wrapper_try(tsdn, arena, r_extent_hooks, extent)) {
		return;
	}

	extent_reregister(tsdn, extent);
	/* Try to decommit; purge if that fails. */
	bool zeroed;
	if (!extent_committed_get(extent)) {
		zeroed = true;
	} else if (!extent_decommit_wrapper(tsdn, arena, r_extent_hooks, extent,
	    0, extent_size_get(extent))) {
		zeroed = true;
	} else if ((*r_extent_hooks)->purge_forced != NULL &&
	    !(*r_extent_hooks)->purge_forced(*r_extent_hooks,
	    extent_base_get(extent), extent_size_get(extent), 0,
	    extent_size_get(extent), arena_ind_get(arena))) {
		zeroed = true;
	} else if (extent_state_get(extent) == extent_state_muzzy ||
	    ((*r_extent_hooks)->purge_lazy != NULL &&
	    !(*r_extent_hooks)->purge_lazy(*r_extent_hooks,
	    extent_base_get(extent), extent_size_get(extent), 0,
	    extent_size_get(extent), arena_ind_get(arena)))) {
		zeroed = false;
	} else {
		zeroed = false;
	}
	extent_zeroed_set(extent, zeroed);

	if (config_prof) {
		extent_gdump_sub(tsdn, extent);
	}

	extent_record(tsdn, arena, r_extent_hooks, &arena->extents_retained,
	    extent);
}
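/*
 * Note (editorial): the ladder above tracks whether page contents are known
 * to be zero once the extent is retained.  Decommit and forced purging both
 * guarantee zeroed pages on next use, so those branches set zeroed = true;
 * lazy purging (and the no-op fallback) leaves contents intact, so zeroed
 * stays false and a later allocation must zero explicitly if requested.
 */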

static void
extent_destroy_default_impl(void *addr, size_t size) {
	if (!have_dss || !extent_in_dss(addr)) {
		pages_unmap(addr, size);
	}
}

static void
extent_destroy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    bool committed, unsigned arena_ind) {
	assert(extent_hooks == &extent_hooks_default);

	extent_destroy_default_impl(addr, size);
}

void
extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent) {
	assert(extent_base_get(extent) != NULL);
	assert(extent_size_get(extent) != 0);
	witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);

	/* Deregister first to avoid a race with other allocating threads. */
	extent_deregister(tsdn, extent);

	extent_addr_set(extent, extent_base_get(extent));

	extent_hooks_assure_initialized(arena, r_extent_hooks);
	/* Try to destroy; silently fail otherwise. */
	if (*r_extent_hooks == &extent_hooks_default) {
		/* Call directly to propagate tsdn. */
		extent_destroy_default_impl(extent_base_get(extent),
		    extent_size_get(extent));
	} else if ((*r_extent_hooks)->destroy != NULL) {
		(*r_extent_hooks)->destroy(*r_extent_hooks,
		    extent_base_get(extent), extent_size_get(extent),
		    extent_committed_get(extent), arena_ind_get(arena));
	}

	extent_dalloc(tsdn, arena, extent);
}

static bool
extent_commit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t offset, size_t length, unsigned arena_ind) {
	assert(extent_hooks == &extent_hooks_default);

	return pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset),
	    length);
}

bool
extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length) {
	witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);
	bool err = ((*r_extent_hooks)->commit == NULL ||
	    (*r_extent_hooks)->commit(*r_extent_hooks, extent_base_get(extent),
	    extent_size_get(extent), offset, length, arena_ind_get(arena)));
	extent_committed_set(extent, extent_committed_get(extent) || !err);
	return err;
}
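/*
 * Illustrative sketch (not part of the library): extent hooks follow the
 * convention that false means success and true means failure, which is why a
 * missing hook reads as failure above.  Committing the second page of an
 * extent (example offsets; offset and length are byte quantities):
 *
 *	bool err = extent_commit_wrapper(tsdn, arena, r_extent_hooks, extent,
 *	    PAGE, PAGE);
 *	if (!err) {
 *		// Pages [base + PAGE, base + 2 * PAGE) are now committed.
 *	}
 */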

static bool
extent_decommit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t offset, size_t length, unsigned arena_ind) {
	assert(extent_hooks == &extent_hooks_default);

	return pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset),
	    length);
}

bool
extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length) {
	witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);

	bool err = ((*r_extent_hooks)->decommit == NULL ||
	    (*r_extent_hooks)->decommit(*r_extent_hooks,
	    extent_base_get(extent), extent_size_get(extent), offset, length,
	    arena_ind_get(arena)));
	extent_committed_set(extent, extent_committed_get(extent) && err);
	return err;
}

#ifdef PAGES_CAN_PURGE_LAZY
static bool
extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t offset, size_t length, unsigned arena_ind) {
	assert(extent_hooks == &extent_hooks_default);
	assert(addr != NULL);
	assert((offset & PAGE_MASK) == 0);
	assert(length != 0);
	assert((length & PAGE_MASK) == 0);

	return pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset),
	    length);
}
#endif

bool
extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length) {
	witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);
	return ((*r_extent_hooks)->purge_lazy == NULL ||
	    (*r_extent_hooks)->purge_lazy(*r_extent_hooks,
	    extent_base_get(extent), extent_size_get(extent), offset, length,
	    arena_ind_get(arena)));
}

#ifdef PAGES_CAN_PURGE_FORCED
static bool
extent_purge_forced_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t offset, size_t length, unsigned arena_ind) {
	assert(extent_hooks == &extent_hooks_default);
	assert(addr != NULL);
	assert((offset & PAGE_MASK) == 0);
	assert(length != 0);
	assert((length & PAGE_MASK) == 0);

	return pages_purge_forced((void *)((uintptr_t)addr +
	    (uintptr_t)offset), length);
}
#endif

bool
extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length) {
	witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);
	return ((*r_extent_hooks)->purge_forced == NULL ||
	    (*r_extent_hooks)->purge_forced(*r_extent_hooks,
	    extent_base_get(extent), extent_size_get(extent), offset, length,
	    arena_ind_get(arena)));
}

#ifdef JEMALLOC_MAPS_COALESCE
static bool
extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
	assert(extent_hooks == &extent_hooks_default);

	return !maps_coalesce;
}
#endif

extent_t *
extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
    szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b) {
	assert(extent_size_get(extent) == size_a + size_b);
	witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);

	extent_t *trail;
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	rtree_leaf_elm_t *lead_elm_a, *lead_elm_b, *trail_elm_a, *trail_elm_b;

	extent_hooks_assure_initialized(arena, r_extent_hooks);

	if ((*r_extent_hooks)->split == NULL) {
		return NULL;
	}

	trail = extent_alloc(tsdn, arena);
	if (trail == NULL) {
		goto label_error_a;
	}

	{
		extent_t lead;

		extent_init(&lead, arena, extent_addr_get(extent), size_a,
		    slab_a, szind_a, extent_sn_get(extent),
		    extent_state_get(extent), extent_zeroed_get(extent),
		    extent_committed_get(extent));

		if (extent_rtree_acquire(tsdn, rtree_ctx, &lead, false, true,
		    &lead_elm_a, &lead_elm_b)) {
			goto label_error_b;
		}
	}

	extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) +
	    size_a), size_b, slab_b, szind_b, extent_sn_get(extent),
	    extent_state_get(extent), extent_zeroed_get(extent),
	    extent_committed_get(extent));
	if (extent_rtree_acquire(tsdn, rtree_ctx, trail, false, true,
	    &trail_elm_a, &trail_elm_b)) {
		goto label_error_c;
	}

	if ((*r_extent_hooks)->split(*r_extent_hooks, extent_base_get(extent),
	    size_a + size_b, size_a, size_b, extent_committed_get(extent),
	    arena_ind_get(arena))) {
		goto label_error_d;
	}

	extent_size_set(extent, size_a);
	extent_szind_set(extent, szind_a);

	extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent,
	    szind_a, slab_a);
	extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail,
	    szind_b, slab_b);

	extent_rtree_release(tsdn, lead_elm_a, lead_elm_b);
	extent_rtree_release(tsdn, trail_elm_a, trail_elm_b);

	return trail;
label_error_d:
	extent_rtree_release(tsdn, trail_elm_a, trail_elm_b);
label_error_c:
	extent_rtree_release(tsdn, lead_elm_a, lead_elm_b);
label_error_b:
	extent_dalloc(tsdn, arena, trail);
label_error_a:
	return NULL;
}
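/*
 * Illustrative sketch (not part of the library): splitting a two-page,
 * non-slab extent into two one-page extents.  The lead keeps the original
 * extent_t; the trail is returned on success.  Values are examples only.
 *
 *	extent_t *trail = extent_split_wrapper(tsdn, arena, r_extent_hooks,
 *	    extent, PAGE, NSIZES, false, PAGE, NSIZES, false);
 *	if (trail == NULL) {
 *		// Split failed; extent is unmodified.
 *	}
 */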

static bool
extent_merge_default_impl(void *addr_a, void *addr_b) {
	if (!maps_coalesce) {
		return true;
	}
	if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) {
		return true;
	}

	return false;
}

#ifdef JEMALLOC_MAPS_COALESCE
static bool
extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
    void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
	assert(extent_hooks == &extent_hooks_default);

	return extent_merge_default_impl(addr_a, addr_b);
}
#endif

bool
extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b) {
	witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);

	if ((*r_extent_hooks)->merge == NULL) {
		return true;
	}

	bool err;
	if (*r_extent_hooks == &extent_hooks_default) {
		/* Call directly to propagate tsdn. */
		err = extent_merge_default_impl(extent_base_get(a),
		    extent_base_get(b));
	} else {
		err = (*r_extent_hooks)->merge(*r_extent_hooks,
		    extent_base_get(a), extent_size_get(a), extent_base_get(b),
		    extent_size_get(b), extent_committed_get(a),
		    arena_ind_get(arena));
	}

	if (err) {
		return true;
	}

	/*
	 * The rtree writes must happen while all the relevant elements are
	 * owned, so the following code uses decomposed helper functions rather
	 * than extent_{,de}register() to do things in the right order.
	 */
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	rtree_leaf_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b;
	extent_rtree_acquire(tsdn, rtree_ctx, a, true, false, &a_elm_a,
	    &a_elm_b);
	extent_rtree_acquire(tsdn, rtree_ctx, b, true, false, &b_elm_a,
	    &b_elm_b);

	if (a_elm_b != NULL) {
		rtree_leaf_elm_write(tsdn, &extents_rtree, a_elm_b, true, NULL,
		    NSIZES, false);
		rtree_leaf_elm_release(tsdn, &extents_rtree, a_elm_b);
	}
	if (b_elm_b != NULL) {
		rtree_leaf_elm_write(tsdn, &extents_rtree, b_elm_a, true, NULL,
		    NSIZES, false);
		rtree_leaf_elm_release(tsdn, &extents_rtree, b_elm_a);
	} else {
		b_elm_b = b_elm_a;
	}

	extent_size_set(a, extent_size_get(a) + extent_size_get(b));
	extent_szind_set(a, NSIZES);
	extent_sn_set(a, (extent_sn_get(a) < extent_sn_get(b)) ?
	    extent_sn_get(a) : extent_sn_get(b));
	extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b));

	extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a, NSIZES, false);
	extent_rtree_release(tsdn, a_elm_a, b_elm_b);

	extent_dalloc(tsdn, extent_arena_get(b), b);

	return false;
}
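/*
 * Note (editorial): on success the merged extent keeps the smaller of the two
 * serial numbers (the older mapping) and is zeroed only if both inputs were
 * zeroed; its szind is reset to NSIZES because the combined size no longer
 * corresponds to the size class of either input.
 */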

bool
extent_boot(void) {
	if (rtree_new(&extents_rtree, true)) {
		return true;
	}

	if (have_dss) {
		extent_dss_boot();
	}

	return false;
}
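/*
 * Note (editorial): extent_boot() is expected to run once during allocator
 * bootstrap, before any extent can be registered, since every later rtree
 * operation in this file assumes extents_rtree has been initialized.
 */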