#define JEMALLOC_BASE_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/sz.h"

/******************************************************************************/
/* Data. */

static base_t *b0;

metadata_thp_mode_t opt_metadata_thp = METADATA_THP_DEFAULT;

const char *metadata_thp_mode_names[] = {
	"disabled",
	"auto",
	"always"
};

/******************************************************************************/
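
/*
 * Whether to explicitly madvise() the THP state of metadata mappings: requires
 * the metadata_thp option to be enabled and the system THP mode observed at
 * init to still be the default.
 */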
static inline bool
metadata_thp_madvise(void) {
	return (metadata_thp_enabled() &&
	    (init_system_thp_mode == thp_mode_default));
}
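
/*
 * Map a metadata block of the given size (a multiple of HUGEPAGE), either
 * directly via mmap when the default extent hooks are in use, or through the
 * user-supplied hooks otherwise.
 */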
static void *
base_map(tsdn_t *tsdn, ehooks_t *ehooks, unsigned ind, size_t size) {
	void *addr;
	bool zero = true;
	bool commit = true;

	/* Use huge page sizes and alignment regardless of opt_metadata_thp. */
	assert(size == HUGEPAGE_CEILING(size));
	size_t alignment = HUGEPAGE;
	if (ehooks_are_default(ehooks)) {
		addr = extent_alloc_mmap(NULL, size, alignment, &zero, &commit);
		if (have_madvise_huge && addr) {
			pages_set_thp_state(addr, size);
		}
	} else {
		addr = ehooks_alloc(tsdn, ehooks, NULL, size, alignment, &zero,
		    &commit, ind);
	}

	return addr;
}
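
/* Release a metadata block previously obtained from base_map(). */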
static void
base_unmap(tsdn_t *tsdn, ehooks_t *ehooks, unsigned ind, void *addr,
    size_t size) {
	/*
	 * Cascade through dalloc, decommit, purge_forced, and purge_lazy,
	 * stopping at first success. This cascade is performed for consistency
	 * with the cascade in extent_dalloc_wrapper() because an application's
	 * custom hooks may not support e.g. dalloc. This function is only ever
	 * called as a side effect of arena destruction, so although it might
	 * seem pointless to do anything besides dalloc here, the application
	 * may in fact want the end state of all associated virtual memory to be
	 * in some consistent-but-allocated state.
	 */
	if (ehooks_are_default(ehooks)) {
		if (!extent_dalloc_mmap(addr, size)) {
			goto label_done;
		}
		if (!pages_decommit(addr, size)) {
			goto label_done;
		}
		if (!pages_purge_forced(addr, size)) {
			goto label_done;
		}
		if (!pages_purge_lazy(addr, size)) {
			goto label_done;
		}
		/* Nothing worked. This should never happen. */
		not_reached();
	} else {
		tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
		pre_reentrancy(tsd, NULL);
		if (!ehooks_dalloc(ehooks, addr, size, true, ind)) {
			goto label_post_reentrancy;
		}
		if (!ehooks_decommit(ehooks, addr, size, 0, size, ind)) {
			goto label_post_reentrancy;
		}
		if (!ehooks_purge_forced(ehooks, addr, size, 0, size, ind)) {
			goto label_post_reentrancy;
		}
		if (!ehooks_purge_lazy(ehooks, addr, size, 0, size, ind)) {
			goto label_post_reentrancy;
		}
		/* Nothing worked. That's the application's problem. */
	label_post_reentrancy:
		post_reentrancy(tsd);
	}
label_done:
	if (metadata_thp_madvise()) {
		/* Set NOHUGEPAGE after unmap to avoid kernel defrag. */
		assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0 &&
		    (size & HUGEPAGE_MASK) == 0);
		pages_nohuge(addr, size);
	}
}
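
/*
 * Initialize an extent to track [addr, addr + size), stamping it with the next
 * extent serial number.
 */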
static void
base_extent_init(size_t *extent_sn_next, extent_t *extent, void *addr,
    size_t size) {
	size_t sn;

	sn = *extent_sn_next;
	(*extent_sn_next)++;

	extent_binit(extent, addr, size, sn);
}
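
/*
 * Count the blocks currently linked into the base, optionally also counting a
 * block that is about to be added.
 */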
static size_t
base_get_num_blocks(base_t *base, bool with_new_block) {
	base_block_t *b = base->blocks;
	assert(b != NULL);

	size_t n_blocks = with_new_block ? 2 : 1;
	while (b->next != NULL) {
		n_blocks++;
		b = b->next;
	}

	return n_blocks;
}
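
/*
 * In "auto" metadata_thp mode, switch the base over to huge pages once its
 * block count (including the block being added) reaches the threshold, and
 * back-convert the blocks that were already allocated.
 */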
static void
base_auto_thp_switch(tsdn_t *tsdn, base_t *base) {
	assert(opt_metadata_thp == metadata_thp_auto);
	malloc_mutex_assert_owner(tsdn, &base->mtx);
	if (base->auto_thp_switched) {
		return;
	}
	/* Called when adding a new block. */
	bool should_switch;
	if (base_ind_get(base) != 0) {
		should_switch = (base_get_num_blocks(base, true) ==
		    BASE_AUTO_THP_THRESHOLD);
	} else {
		should_switch = (base_get_num_blocks(base, true) ==
		    BASE_AUTO_THP_THRESHOLD_A0);
	}
	if (!should_switch) {
		return;
	}

	base->auto_thp_switched = true;
	assert(!config_stats || base->n_thp == 0);
	/* Make the initial blocks THP lazily. */
	base_block_t *block = base->blocks;
	while (block != NULL) {
		assert((block->size & HUGEPAGE_MASK) == 0);
		pages_huge(block, block->size);
		if (config_stats) {
			base->n_thp += HUGEPAGE_CEILING(block->size -
			    extent_bsize_get(&block->extent)) >> LG_HUGEPAGE;
		}
		block = block->next;
		assert(block == NULL || (base_ind_get(base) == 0));
	}
}
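
/*
 * Bump-allocate from the front of an extent: return a size-byte region at the
 * requested alignment, report the alignment gap through *gap_size, and shrink
 * the extent in place to whatever space remains past the returned region.
 */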
static void *
base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size,
    size_t alignment) {
	void *ret;

	assert(alignment == ALIGNMENT_CEILING(alignment, QUANTUM));
	assert(size == ALIGNMENT_CEILING(size, alignment));

	*gap_size = ALIGNMENT_CEILING((uintptr_t)extent_addr_get(extent),
	    alignment) - (uintptr_t)extent_addr_get(extent);
	ret = (void *)((uintptr_t)extent_addr_get(extent) + *gap_size);
	assert(extent_bsize_get(extent) >= *gap_size + size);
	extent_binit(extent, (void *)((uintptr_t)extent_addr_get(extent) +
	    *gap_size + size), extent_bsize_get(extent) - *gap_size - size,
	    extent_sn_get(extent));
	return ret;
}
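
/*
 * After a bump allocation, return any leftover space in the extent to the
 * avail heaps and update the base's stats.
 */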
static void
base_extent_bump_alloc_post(base_t *base, extent_t *extent, size_t gap_size,
    void *addr, size_t size) {
	if (extent_bsize_get(extent) > 0) {
		/*
		 * Compute the index for the largest size class that does not
		 * exceed extent's size.
		 */
		szind_t index_floor =
		    sz_size2index(extent_bsize_get(extent) + 1) - 1;
		extent_heap_insert(&base->avail[index_floor], extent);
	}

	if (config_stats) {
		base->allocated += size;
		/*
		 * Add one PAGE to base_resident for every page boundary that is
		 * crossed by the new allocation. Adjust n_thp similarly when
		 * metadata_thp is enabled.
		 */
		base->resident += PAGE_CEILING((uintptr_t)addr + size) -
		    PAGE_CEILING((uintptr_t)addr - gap_size);
		assert(base->allocated <= base->resident);
		assert(base->resident <= base->mapped);
		if (metadata_thp_madvise() && (opt_metadata_thp ==
		    metadata_thp_always || base->auto_thp_switched)) {
			base->n_thp += (HUGEPAGE_CEILING((uintptr_t)addr + size)
			    - HUGEPAGE_CEILING((uintptr_t)addr - gap_size)) >>
			    LG_HUGEPAGE;
			assert(base->mapped >= base->n_thp << LG_HUGEPAGE);
		}
	}
}
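
/* Convenience wrapper combining the bump-alloc helper and post steps. */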
static void *
base_extent_bump_alloc(base_t *base, extent_t *extent, size_t size,
    size_t alignment) {
	void *ret;
	size_t gap_size;

	ret = base_extent_bump_alloc_helper(extent, &gap_size, size, alignment);
	base_extent_bump_alloc_post(base, extent, gap_size, ret, size);
	return ret;
}

/*
 * Allocate a block of virtual memory that is large enough to start with a
 * base_block_t header, followed by an object of specified size and alignment.
 * On success a pointer to the initialized base_block_t header is returned.
 */
static base_block_t *
base_block_alloc(tsdn_t *tsdn, base_t *base, ehooks_t *ehooks, unsigned ind,
    pszind_t *pind_last, size_t *extent_sn_next, size_t size,
    size_t alignment) {
	alignment = ALIGNMENT_CEILING(alignment, QUANTUM);
	size_t usize = ALIGNMENT_CEILING(size, alignment);
	size_t header_size = sizeof(base_block_t);
	size_t gap_size = ALIGNMENT_CEILING(header_size, alignment) -
	    header_size;
	/*
	 * Create increasingly larger blocks in order to limit the total number
	 * of disjoint virtual memory ranges. Choose the next size in the page
	 * size class series (skipping size classes that are not a multiple of
	 * HUGEPAGE), or a size large enough to satisfy the requested size and
	 * alignment, whichever is larger.
	 */
	size_t min_block_size = HUGEPAGE_CEILING(sz_psz2u(header_size + gap_size
	    + usize));
	pszind_t pind_next = (*pind_last + 1 < sz_psz2ind(SC_LARGE_MAXCLASS)) ?
	    *pind_last + 1 : *pind_last;
	size_t next_block_size = HUGEPAGE_CEILING(sz_pind2sz(pind_next));
	size_t block_size = (min_block_size > next_block_size) ? min_block_size
	    : next_block_size;
	base_block_t *block = (base_block_t *)base_map(tsdn, ehooks, ind,
	    block_size);
	if (block == NULL) {
		return NULL;
	}

	if (metadata_thp_madvise()) {
		void *addr = (void *)block;
		assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0 &&
		    (block_size & HUGEPAGE_MASK) == 0);
		if (opt_metadata_thp == metadata_thp_always) {
			pages_huge(addr, block_size);
		} else if (opt_metadata_thp == metadata_thp_auto &&
		    base != NULL) {
			/* base != NULL indicates this is not a new base. */
			malloc_mutex_lock(tsdn, &base->mtx);
			base_auto_thp_switch(tsdn, base);
			if (base->auto_thp_switched) {
				pages_huge(addr, block_size);
			}
			malloc_mutex_unlock(tsdn, &base->mtx);
		}
	}

	*pind_last = sz_psz2ind(block_size);
	block->size = block_size;
	block->next = NULL;
	assert(block_size >= header_size);
	base_extent_init(extent_sn_next, &block->extent,
	    (void *)((uintptr_t)block + header_size), block_size - header_size);
	return block;
}

/*
 * Allocate an extent that is at least as large as specified size, with
 * specified alignment.
 */
static extent_t *
base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
	malloc_mutex_assert_owner(tsdn, &base->mtx);

	ehooks_t *ehooks = base_ehooks_get(base);
	/*
	 * Drop mutex during base_block_alloc(), because an extent hook will be
	 * called.
	 */
	malloc_mutex_unlock(tsdn, &base->mtx);
	base_block_t *block = base_block_alloc(tsdn, base, ehooks,
	    base_ind_get(base), &base->pind_last, &base->extent_sn_next, size,
	    alignment);
	malloc_mutex_lock(tsdn, &base->mtx);
	if (block == NULL) {
		return NULL;
	}
	block->next = base->blocks;
	base->blocks = block;
	if (config_stats) {
		base->allocated += sizeof(base_block_t);
		base->resident += PAGE_CEILING(sizeof(base_block_t));
		base->mapped += block->size;
		if (metadata_thp_madvise() &&
		    !(opt_metadata_thp == metadata_thp_auto
		    && !base->auto_thp_switched)) {
			assert(base->n_thp > 0);
			base->n_thp += HUGEPAGE_CEILING(sizeof(base_block_t)) >>
			    LG_HUGEPAGE;
		}
		assert(base->allocated <= base->resident);
		assert(base->resident <= base->mapped);
		assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
	}
	return &block->extent;
}
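
/* Return the bootstrap base (b0), created by base_boot(). */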
base_t *
b0get(void) {
	return b0;
}
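
/*
 * Create a new base; the base_t itself is carved out of the base's first
 * block.
 */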
base_t *
base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
	pszind_t pind_last = 0;
	size_t extent_sn_next = 0;

	/*
	 * The base will contain the ehooks eventually, but it itself is
	 * allocated using them. So we use some stack ehooks to bootstrap its
	 * memory, and then initialize the ehooks within the base_t.
	 */
	ehooks_t fake_ehooks;
	ehooks_init(&fake_ehooks, extent_hooks);

	base_block_t *block = base_block_alloc(tsdn, NULL, &fake_ehooks, ind,
	    &pind_last, &extent_sn_next, sizeof(base_t), QUANTUM);
	if (block == NULL) {
		return NULL;
	}

	size_t gap_size;
	size_t base_alignment = CACHELINE;
	size_t base_size = ALIGNMENT_CEILING(sizeof(base_t), base_alignment);
	base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->extent,
	    &gap_size, base_size, base_alignment);
	base->ind = ind;
	ehooks_init(&base->ehooks, extent_hooks);
	if (malloc_mutex_init(&base->mtx, "base", WITNESS_RANK_BASE,
	    malloc_mutex_rank_exclusive)) {
		base_unmap(tsdn, &fake_ehooks, ind, block, block->size);
		return NULL;
	}
	base->pind_last = pind_last;
	base->extent_sn_next = extent_sn_next;
	base->blocks = block;
	base->auto_thp_switched = false;
	for (szind_t i = 0; i < SC_NSIZES; i++) {
		extent_heap_new(&base->avail[i]);
	}
	if (config_stats) {
		base->allocated = sizeof(base_block_t);
		base->resident = PAGE_CEILING(sizeof(base_block_t));
		base->mapped = block->size;
		base->n_thp = (opt_metadata_thp == metadata_thp_always) &&
		    metadata_thp_madvise() ? HUGEPAGE_CEILING(sizeof(base_block_t))
		    >> LG_HUGEPAGE : 0;
		assert(base->allocated <= base->resident);
		assert(base->resident <= base->mapped);
		assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
	}
	base_extent_bump_alloc_post(base, &block->extent, gap_size, base,
	    base_size);

	return base;
}
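
/*
 * Destroy a base by unmapping all of its blocks. The base_t itself lives in
 * one of those blocks, so the base must not be used once this returns.
 */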
void
base_delete(tsdn_t *tsdn, base_t *base) {
	ehooks_t *ehooks = base_ehooks_get(base);
	base_block_t *next = base->blocks;
	do {
		base_block_t *block = next;
		next = block->next;
		base_unmap(tsdn, ehooks, base_ind_get(base), block,
		    block->size);
	} while (next != NULL);
}
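
/* Return the extent hook wrapper owned by the base. */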
ehooks_t *
base_ehooks_get(base_t *base) {
	return &base->ehooks;
}

extent_hooks_t *
base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks) {
	extent_hooks_t *old_extent_hooks =
	    ehooks_get_extent_hooks_ptr(&base->ehooks);
	ehooks_init(&base->ehooks, extent_hooks);
	return old_extent_hooks;
}
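
/*
 * Common implementation of base_alloc() and base_alloc_extent(): round the
 * request up, reuse the smallest fitting extent from the avail heaps, and fall
 * back to allocating a new block when nothing fits.
 */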
static void *
base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment,
    size_t *esn) {
	alignment = QUANTUM_CEILING(alignment);
	size_t usize = ALIGNMENT_CEILING(size, alignment);
	size_t asize = usize + alignment - QUANTUM;

	extent_t *extent = NULL;
	malloc_mutex_lock(tsdn, &base->mtx);
	for (szind_t i = sz_size2index(asize); i < SC_NSIZES; i++) {
		extent = extent_heap_remove_first(&base->avail[i]);
		if (extent != NULL) {
			/* Use existing space. */
			break;
		}
	}
	if (extent == NULL) {
		/* Try to allocate more space. */
		extent = base_extent_alloc(tsdn, base, usize, alignment);
	}
	void *ret;
	if (extent == NULL) {
		ret = NULL;
		goto label_return;
	}

	ret = base_extent_bump_alloc(base, extent, usize, alignment);
	if (esn != NULL) {
		*esn = extent_sn_get(extent);
	}
label_return:
	malloc_mutex_unlock(tsdn, &base->mtx);
	return ret;
}

/*
 * base_alloc() returns zeroed memory, which is always demand-zeroed for the
 * auto arenas, in order to make multi-page sparse data structures such as radix
 * tree nodes efficient with respect to physical memory usage. Upon success a
 * pointer to at least size bytes with specified alignment is returned. Note
 * that size is rounded up to the nearest multiple of alignment to avoid false
 * sharing.
 */
void *
base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
	return base_alloc_impl(tsdn, base, size, alignment, NULL);
}
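
/*
 * Illustrative sketch only (not code from this file): an internal caller can
 * obtain zeroed, cache-line-aligned metadata roughly as follows, where
 * my_meta_t stands in for whatever structure is needed and tsdn is assumed to
 * be in scope:
 *
 *	my_meta_t *m = (my_meta_t *)base_alloc(tsdn, b0get(),
 *	    sizeof(my_meta_t), CACHELINE);
 *	if (m == NULL) {
 *		(failure path)
 *	}
 */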

extent_t *
base_alloc_extent(tsdn_t *tsdn, base_t *base) {
	size_t esn;
	extent_t *extent = base_alloc_impl(tsdn, base, sizeof(extent_t),
	    CACHELINE, &esn);
	if (extent == NULL) {
		return NULL;
	}
	extent_esn_set(extent, esn);
	return extent;
}
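
/* Snapshot the base's stats counters under its mutex. */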
void
base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, size_t *resident,
    size_t *mapped, size_t *n_thp) {
	cassert(config_stats);

	malloc_mutex_lock(tsdn, &base->mtx);
	assert(base->allocated <= base->resident);
	assert(base->resident <= base->mapped);
	*allocated = base->allocated;
	*resident = base->resident;
	*mapped = base->mapped;
	*n_thp = base->n_thp;
	malloc_mutex_unlock(tsdn, &base->mtx);
}
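
/*
 * The base mutex participates in jemalloc's prefork/postfork sequence so that
 * base state stays consistent across fork().
 */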
void
base_prefork(tsdn_t *tsdn, base_t *base) {
	malloc_mutex_prefork(tsdn, &base->mtx);
}

void
base_postfork_parent(tsdn_t *tsdn, base_t *base) {
	malloc_mutex_postfork_parent(tsdn, &base->mtx);
}

void
base_postfork_child(tsdn_t *tsdn, base_t *base) {
	malloc_mutex_postfork_child(tsdn, &base->mtx);
}
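
/* Create the bootstrap base (b0) using the default extent hooks. */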
bool
base_boot(tsdn_t *tsdn) {
	b0 = base_new(tsdn, 0, (extent_hooks_t *)&extent_hooks_default);
	return (b0 == NULL);
}