/* server-skynet-source-3rd-je.../src/base.c */
#define JEMALLOC_BASE_C_
#include "jemalloc/internal/jemalloc_internal.h"
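
/*
 * Overview (added commentary, not in the upstream header): the base
 * allocator is jemalloc's internal metadata allocator.  It hands out
 * demand-zeroed, cacheline-aligned, never-freed memory obtained directly
 * via mmap, independently of the arenas.
 */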

/******************************************************************************/
/* Data. */

static malloc_mutex_t   base_mtx;
static extent_heap_t    base_avail[NSIZES];
static extent_t         *base_extents;
static size_t           base_allocated;
static size_t           base_resident;
static size_t           base_mapped;

/******************************************************************************/
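
/*
 * Pop a cached extent_t header off the free list, or return NULL if the
 * list is empty.  The list link is stored in the first word of each cached
 * extent_t.
 */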
static extent_t *
base_extent_try_alloc(tsdn_t *tsdn)
{
        extent_t *extent;

        malloc_mutex_assert_owner(tsdn, &base_mtx);

        if (base_extents == NULL)
                return (NULL);
        extent = base_extents;
        base_extents = *(extent_t **)extent;
        return (extent);
}
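
/*
 * Push an extent_t header back onto the free list so that a later call to
 * base_extent_try_alloc() can reuse it instead of mapping new memory.
 */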
static void
base_extent_dalloc(tsdn_t *tsdn, extent_t *extent)
{

        malloc_mutex_assert_owner(tsdn, &base_mtx);

        *(extent_t **)extent = base_extents;
        base_extents = extent;
}
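
/*
 * Map a new chunk of demand-zeroed memory large enough to satisfy minsize.
 * If no cached extent_t header is available, the header is carved out of
 * the front of the new mapping itself.
 */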
static extent_t *
base_extent_alloc(tsdn_t *tsdn, size_t minsize)
{
        extent_t *extent;
        size_t csize, nsize;
        void *addr;

        malloc_mutex_assert_owner(tsdn, &base_mtx);
        assert(minsize != 0);
        extent = base_extent_try_alloc(tsdn);
        /* Allocate enough space to also carve an extent out if necessary. */
        nsize = (extent == NULL) ? CACHELINE_CEILING(sizeof(extent_t)) : 0;
        csize = CHUNK_CEILING(minsize + nsize);
        /*
         * Directly call extent_alloc_mmap() because it's critical to
         * allocate untouched demand-zeroed virtual memory.
         */
        {
                bool zero = true;
                bool commit = true;
                addr = extent_alloc_mmap(NULL, csize, PAGE, &zero, &commit);
        }
        if (addr == NULL) {
                if (extent != NULL)
                        base_extent_dalloc(tsdn, extent);
                return (NULL);
        }
        base_mapped += csize;
        if (extent == NULL) {
                extent = (extent_t *)addr;
                addr = (void *)((uintptr_t)addr + nsize);
                csize -= nsize;
                if (config_stats) {
                        base_allocated += nsize;
                        base_resident += PAGE_CEILING(nsize);
                }
        }
        extent_init(extent, NULL, addr, csize, 0, true, true, true, false);
        return (extent);
}

/*
* base_alloc() guarantees demand-zeroed memory, in order to make multi-page
* sparse data structures such as radix tree nodes efficient with respect to
* physical memory usage.
*/
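/*
 * Usage sketch (illustrative only; hypothetical caller and sizes): internal
 * metadata such as radix tree nodes can rely on the zero guarantee and skip
 * any explicit memset:
 *
 *      rtree_node_elm_t *node = (rtree_node_elm_t *)base_alloc(tsdn,
 *          nelms * sizeof(rtree_node_elm_t));
 *
 * There is no matching base_dalloc(); base memory is reclaimed only at
 * process exit.
 */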
void *
base_alloc(tsdn_t *tsdn, size_t size)
{
        void *ret;
        size_t csize;
        szind_t i;
        extent_t *extent;

        /*
         * Round size up to nearest multiple of the cacheline size, so that
         * there is no chance of false cache line sharing.
         */
        csize = CACHELINE_CEILING(size);

        extent = NULL;
        malloc_mutex_lock(tsdn, &base_mtx);
        for (i = size2index(csize); i < NSIZES; i++) {
                extent = extent_heap_remove_first(&base_avail[i]);
                if (extent != NULL) {
                        /* Use existing space. */
                        break;
                }
        }
        if (extent == NULL) {
                /* Try to allocate more space. */
                extent = base_extent_alloc(tsdn, csize);
        }
        if (extent == NULL) {
                ret = NULL;
                goto label_return;
        }

        ret = extent_addr_get(extent);
        if (extent_size_get(extent) > csize) {
                szind_t index_floor;

                extent_addr_set(extent, (void *)((uintptr_t)ret + csize));
                extent_size_set(extent, extent_size_get(extent) - csize);
                /*
                 * Compute the index for the largest size class that does
                 * not exceed extent's size.
                 */
                index_floor = size2index(extent_size_get(extent) + 1) - 1;
                extent_heap_insert(&base_avail[index_floor], extent);
        } else
                base_extent_dalloc(tsdn, extent);
        if (config_stats) {
                base_allocated += csize;
                /*
                 * Add one PAGE to base_resident for every page boundary
                 * that is crossed by the new allocation.
                 */
                base_resident += PAGE_CEILING((uintptr_t)ret + csize) -
                    PAGE_CEILING((uintptr_t)ret);
        }
label_return:
        malloc_mutex_unlock(tsdn, &base_mtx);
        return (ret);
}
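
/*
 * Copy the current base counters out under base_mtx so that the three
 * values are mutually consistent, as the asserted invariants
 * (allocated <= resident <= mapped) require.
 */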
void
base_stats_get(tsdn_t *tsdn, size_t *allocated, size_t *resident,
    size_t *mapped)
{

        malloc_mutex_lock(tsdn, &base_mtx);
        assert(base_allocated <= base_resident);
        assert(base_resident <= base_mapped);
        *allocated = base_allocated;
        *resident = base_resident;
        *mapped = base_mapped;
        malloc_mutex_unlock(tsdn, &base_mtx);
}
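
/*
 * Initialize the base mutex and the per-size-class heaps of available
 * extents.  Returns true on error, false on success.
 */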
bool
base_boot(void)
{
        szind_t i;

        if (malloc_mutex_init(&base_mtx, "base", WITNESS_RANK_BASE))
                return (true);
        for (i = 0; i < NSIZES; i++)
                extent_heap_new(&base_avail[i]);
        base_extents = NULL;

        return (false);
}
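
/*
 * Fork hooks: base_mtx must be acquired before fork() and released in both
 * the parent and the child so that the base allocator remains usable across
 * fork in a multi-threaded process.
 */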
void
base_prefork(tsdn_t *tsdn)
{

        malloc_mutex_prefork(tsdn, &base_mtx);
}

void
base_postfork_parent(tsdn_t *tsdn)
{

        malloc_mutex_postfork_parent(tsdn, &base_mtx);
}

void
base_postfork_child(tsdn_t *tsdn)
{

        malloc_mutex_postfork_child(tsdn, &base_mtx);
}