#define JEMALLOC_BASE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

static base_t *b0;

/******************************************************************************/

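/*
 * Map a block of at least size bytes (size must be a multiple of HUGEPAGE),
 * either via the default mmap-based extent allocation or via the supplied
 * custom extent hooks.
 */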
static void *
base_map(extent_hooks_t *extent_hooks, unsigned ind, size_t size) {
	void *addr;
	bool zero = true;
	bool commit = true;

	assert(size == HUGEPAGE_CEILING(size));

	if (extent_hooks == &extent_hooks_default) {
		addr = extent_alloc_mmap(NULL, size, PAGE, &zero, &commit);
	} else {
		addr = extent_hooks->alloc(extent_hooks, NULL, size, PAGE,
		    &zero, &commit, ind);
	}

	return addr;
}

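/*
 * Return a block obtained via base_map() to the system, falling back to
 * decommit/purge when outright deallocation is unsupported (see the cascade
 * comment below).
 */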
static void
base_unmap(extent_hooks_t *extent_hooks, unsigned ind, void *addr,
    size_t size) {
	/*
	 * Cascade through dalloc, decommit, purge_lazy, and purge_forced,
	 * stopping at first success. This cascade is performed for consistency
	 * with the cascade in extent_dalloc_wrapper() because an application's
	 * custom hooks may not support e.g. dalloc. This function is only ever
	 * called as a side effect of arena destruction, so although it might
	 * seem pointless to do anything besides dalloc here, the application
	 * may in fact want the end state of all associated virtual memory to be
	 * in some consistent-but-allocated state.
	 */
	if (extent_hooks == &extent_hooks_default) {
		if (!extent_dalloc_mmap(addr, size)) {
			return;
		}
		if (!pages_decommit(addr, size)) {
			return;
		}
		if (!pages_purge_lazy(addr, size)) {
			return;
		}
		if (!pages_purge_forced(addr, size)) {
			return;
		}
		/* Nothing worked. This should never happen. */
		not_reached();
	} else {
		if (extent_hooks->dalloc != NULL &&
		    !extent_hooks->dalloc(extent_hooks, addr, size, true,
		    ind)) {
			return;
		}
		if (extent_hooks->decommit != NULL &&
		    !extent_hooks->decommit(extent_hooks, addr, size, 0, size,
		    ind)) {
			return;
		}
		if (extent_hooks->purge_lazy != NULL &&
		    !extent_hooks->purge_lazy(extent_hooks, addr, size, 0, size,
		    ind)) {
			return;
		}
		if (extent_hooks->purge_forced != NULL &&
		    !extent_hooks->purge_forced(extent_hooks, addr, size, 0,
		    size, ind)) {
			return;
		}
		/* Nothing worked. That's the application's problem. */
	}
}

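/*
 * Initialize extent to track the given address range, assigning it the next
 * extent serial number.
 */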
static void
base_extent_init(size_t *extent_sn_next, extent_t *extent, void *addr,
    size_t size) {
	size_t sn;

	sn = *extent_sn_next;
	(*extent_sn_next)++;

	extent_init(extent, NULL, addr, size, 0, sn, extent_state_active, true,
	    true, false);
}

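/*
 * Carve an aligned region of size bytes from the front of extent. *gap_size
 * receives the alignment padding that was skipped, and extent is reinitialized
 * to describe the remaining trailing space.
 */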
static void *
base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size,
    size_t alignment) {
	void *ret;

	assert(alignment == ALIGNMENT_CEILING(alignment, QUANTUM));
	assert(size == ALIGNMENT_CEILING(size, alignment));

	*gap_size = ALIGNMENT_CEILING((uintptr_t)extent_addr_get(extent),
	    alignment) - (uintptr_t)extent_addr_get(extent);
	ret = (void *)((uintptr_t)extent_addr_get(extent) + *gap_size);
	assert(extent_size_get(extent) >= *gap_size + size);
	extent_init(extent, NULL, (void *)((uintptr_t)extent_addr_get(extent) +
	    *gap_size + size), extent_size_get(extent) - *gap_size - size, 0,
	    extent_sn_get(extent), extent_state_active, true, true, false);
	return ret;
}

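/*
 * Bookkeeping after a bump allocation: reinsert any remaining space into the
 * appropriate avail heap and update the allocated/resident statistics.
 */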
static void
base_extent_bump_alloc_post(tsdn_t *tsdn, base_t *base, extent_t *extent,
    size_t gap_size, void *addr, size_t size) {
	if (extent_size_get(extent) > 0) {
		/*
		 * Compute the index for the largest size class that does not
		 * exceed extent's size.
		 */
		szind_t index_floor = size2index(extent_size_get(extent) + 1) -
		    1;
		extent_heap_insert(&base->avail[index_floor], extent);
	}

	if (config_stats) {
		base->allocated += size;
		/*
		 * Add one PAGE to base->resident for every page boundary that
		 * is crossed by the new allocation.
		 */
		base->resident += PAGE_CEILING((uintptr_t)addr + size) -
		    PAGE_CEILING((uintptr_t)addr - gap_size);
		assert(base->allocated <= base->resident);
		assert(base->resident <= base->mapped);
	}
}

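/* Allocate size bytes with the given alignment from the front of extent. */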
static void *
base_extent_bump_alloc(tsdn_t *tsdn, base_t *base, extent_t *extent,
    size_t size, size_t alignment) {
	void *ret;
	size_t gap_size;

	ret = base_extent_bump_alloc_helper(extent, &gap_size, size, alignment);
	base_extent_bump_alloc_post(tsdn, base, extent, gap_size, ret, size);
	return ret;
}

/*
 * Allocate a block of virtual memory that is large enough to begin with a
 * base_block_t header, followed by an object of specified size and alignment.
 * On success a pointer to the initialized base_block_t header is returned.
 */
static base_block_t *
base_block_alloc(extent_hooks_t *extent_hooks, unsigned ind,
    size_t *extent_sn_next, size_t size, size_t alignment) {
	base_block_t *block;
	size_t usize, header_size, gap_size, block_size;

	alignment = ALIGNMENT_CEILING(alignment, QUANTUM);
	usize = ALIGNMENT_CEILING(size, alignment);
	header_size = sizeof(base_block_t);
	gap_size = ALIGNMENT_CEILING(header_size, alignment) - header_size;
	block_size = HUGEPAGE_CEILING(header_size + gap_size + usize);
	block = (base_block_t *)base_map(extent_hooks, ind, block_size);
	if (block == NULL) {
		return NULL;
	}
	block->size = block_size;
	block->next = NULL;
	assert(block_size >= header_size);
	base_extent_init(extent_sn_next, &block->extent,
	    (void *)((uintptr_t)block + header_size), block_size - header_size);
	return block;
}

/*
 * Allocate an extent that is at least as large as specified size, with
 * specified alignment.
 */
static extent_t *
base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
	extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
	base_block_t *block;

	malloc_mutex_assert_owner(tsdn, &base->mtx);

	block = base_block_alloc(extent_hooks, base_ind_get(base),
	    &base->extent_sn_next, size, alignment);
	if (block == NULL) {
		return NULL;
	}
	block->next = base->blocks;
	base->blocks = block;
	if (config_stats) {
		base->allocated += sizeof(base_block_t);
		base->resident += PAGE_CEILING(sizeof(base_block_t));
		base->mapped += block->size;
		assert(base->allocated <= base->resident);
		assert(base->resident <= base->mapped);
	}
	return &block->extent;
}

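/* Return the global base allocator (b0), created during bootstrapping. */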
base_t *
b0get(void) {
	return b0;
}

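/*
 * Create a new base allocator. The base_t itself is carved out of the first
 * block mapped on its behalf, so no pre-existing allocator is required. As an
 * illustrative sketch (not normative; see e.g. arena_new() for actual call
 * sites), a caller might bootstrap per-arena metadata roughly as:
 *
 *   base_t *base = base_new(tsdn, ind, extent_hooks);
 *   arena_t *arena = (arena_t *)base_alloc(tsdn, base, sizeof(arena_t),
 *       CACHELINE);
 */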
base_t *
base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
	base_t *base;
	size_t extent_sn_next, base_alignment, base_size, gap_size;
	base_block_t *block;
	szind_t i;

	extent_sn_next = 0;
	block = base_block_alloc(extent_hooks, ind, &extent_sn_next,
	    sizeof(base_t), QUANTUM);
	if (block == NULL) {
		return NULL;
	}

	base_alignment = CACHELINE;
	base_size = ALIGNMENT_CEILING(sizeof(base_t), base_alignment);
	base = (base_t *)base_extent_bump_alloc_helper(&block->extent,
	    &gap_size, base_size, base_alignment);
	base->ind = ind;
	base->extent_hooks = extent_hooks;
	if (malloc_mutex_init(&base->mtx, "base", WITNESS_RANK_BASE)) {
		base_unmap(extent_hooks, ind, block, block->size);
		return NULL;
	}
	base->extent_sn_next = extent_sn_next;
	base->blocks = block;
	for (i = 0; i < NSIZES; i++) {
		extent_heap_new(&base->avail[i]);
	}
	if (config_stats) {
		base->allocated = sizeof(base_block_t);
		base->resident = PAGE_CEILING(sizeof(base_block_t));
		base->mapped = block->size;
		assert(base->allocated <= base->resident);
		assert(base->resident <= base->mapped);
	}
	base_extent_bump_alloc_post(tsdn, base, &block->extent, gap_size, base,
	    base_size);

	return base;
}

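/*
 * Unmap every block owned by base. Note that base itself is embedded in one
 * of those blocks, so it must not be accessed after this call.
 */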
void
base_delete(base_t *base) {
	extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
	base_block_t *next = base->blocks;
	do {
		base_block_t *block = next;
		next = block->next;
		base_unmap(extent_hooks, base_ind_get(base), block,
		    block->size);
	} while (next != NULL);
}

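/* Atomically read the extent hooks currently installed for base. */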
extent_hooks_t *
base_extent_hooks_get(base_t *base) {
	return (extent_hooks_t *)atomic_read_p(&base->extent_hooks_pun);
}

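/*
 * Atomically install new extent hooks for base and return the previously
 * installed hooks.
 */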
extent_hooks_t *
base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks) {
	extent_hooks_t *old_extent_hooks = base_extent_hooks_get(base);
	union {
		extent_hooks_t **h;
		void **v;
	} u;

	u.h = &base->extent_hooks;
	atomic_write_p(u.v, extent_hooks);

	return old_extent_hooks;
}

/*
 * base_alloc() returns zeroed memory, which is always demand-zeroed for the
 * auto arenas, in order to make multi-page sparse data structures such as radix
 * tree nodes efficient with respect to physical memory usage. Upon success a
 * pointer to at least size bytes with specified alignment is returned. Note
 * that size is rounded up to the nearest multiple of alignment to avoid false
 * sharing.
 */
void *
base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
	void *ret;
	size_t usize, asize;
	szind_t i;
	extent_t *extent;

	alignment = QUANTUM_CEILING(alignment);
	usize = ALIGNMENT_CEILING(size, alignment);
	asize = usize + alignment - QUANTUM;

	extent = NULL;
	malloc_mutex_lock(tsdn, &base->mtx);
	for (i = size2index(asize); i < NSIZES; i++) {
		extent = extent_heap_remove_first(&base->avail[i]);
		if (extent != NULL) {
			/* Use existing space. */
			break;
		}
	}
	if (extent == NULL) {
		/* Try to allocate more space. */
		extent = base_extent_alloc(tsdn, base, usize, alignment);
	}
	if (extent == NULL) {
		ret = NULL;
		goto label_return;
	}

	ret = base_extent_bump_alloc(tsdn, base, extent, usize, alignment);
label_return:
	malloc_mutex_unlock(tsdn, &base->mtx);
	return ret;
}

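/* Copy out base's allocated/resident/mapped statistics under base->mtx. */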
void
base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, size_t *resident,
    size_t *mapped) {
	cassert(config_stats);

	malloc_mutex_lock(tsdn, &base->mtx);
	assert(base->allocated <= base->resident);
	assert(base->resident <= base->mapped);
	*allocated = base->allocated;
	*resident = base->resident;
	*mapped = base->mapped;
	malloc_mutex_unlock(tsdn, &base->mtx);
}

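/* Fork handlers: forward to the corresponding operations on base->mtx. */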
void
base_prefork(tsdn_t *tsdn, base_t *base) {
	malloc_mutex_prefork(tsdn, &base->mtx);
}

void
base_postfork_parent(tsdn_t *tsdn, base_t *base) {
	malloc_mutex_postfork_parent(tsdn, &base->mtx);
}

void
base_postfork_child(tsdn_t *tsdn, base_t *base) {
	malloc_mutex_postfork_child(tsdn, &base->mtx);
}

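/*
 * Bootstrap the global base allocator (b0) using the default extent hooks.
 * Returns true on error.
 */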
bool
base_boot(tsdn_t *tsdn) {
	b0 = base_new(tsdn, 0, (extent_hooks_t *)&extent_hooks_default);
	return (b0 == NULL);
}