server-skynet-source-3rd-je.../src/chunk_dss.c
Daniel Micay 879e76a9e5 teach the dss chunk allocator to handle new_addr
This provides in-place expansion of huge allocations when the end of the
allocation is at the end of the sbrk heap. There's already the ability
to extend in-place via recycled chunks but this handles the initial
growth of the heap via repeated vector / string reallocations.
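
As a rough illustration (this is a standalone sketch, not jemalloc code, and grow_at_break is an invented name), the whole trick reduces to one check: only attempt an in-place extension when the requested address is exactly the current program break, i.e. the allocation already ends where the sbrk heap ends:

    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Try to grow a region ending at new_addr by size bytes, in place. */
    static void *grow_at_break(void *new_addr, size_t size) {
        if (new_addr != sbrk(0))
            return NULL;                /* not at the end of the sbrk heap */
        if (sbrk((intptr_t)size) == (void *)-1)
            return NULL;                /* the kernel refused to extend */
        return new_addr;                /* the region now extends in place */
    }

    int main(void) {
        size_t size = 1 << 20;          /* 1 MiB region */
        void *base = sbrk(0);
        if (sbrk((intptr_t)size) == (void *)-1)
            return 1;
        /* The region ends at the current break, so this should succeed. */
        void *ext = grow_at_break((char *)base + size, size);
        printf("in-place extension %s\n", ext ? "succeeded" : "failed");
        return 0;
    }

chunk_alloc_dss performs the equivalent comparison against dss_max before issuing any system call, which is what makes the repeated realloc growth in the benchmark below cheap when the huge allocation trails the heap.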

A possible future extension could allow realloc to go from the following:

    | huge allocation | recycled chunks |
                                        ^ dss_end

To a larger allocation built from recycled *and* new chunks:

    |                      huge allocation                      |
                                                                ^ dss_end

Doing that would involve teaching the chunk recycling code to request
new chunks to satisfy the request. The chunk_dss code wouldn't require
any further changes.

    #include <stdlib.h>

    int main(void) {
        size_t chunk = 4 * 1024 * 1024;
        void *ptr = NULL;
        for (size_t size = chunk; size < chunk * 128; size *= 2) {
            ptr = realloc(ptr, size);
            if (!ptr) return 1;
        }
    }

Before:

dss:secondary: 0.083s
dss:primary: 0.083s

After:

dss:secondary: 0.083s
dss:primary: 0.003s

The dss heap grows in the upwards direction, so the oldest chunks are at
the low addresses and they are used first. Linux prefers to grow the
mmap heap downwards, so the trick will not work in the *current* mmap
chunk allocator as a huge allocation will only be at the top of the heap
in a contrived case.
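
The growth directions are easy to observe. The following standalone probe (not jemalloc code; the downward mmap placement is typical Linux behaviour rather than a guarantee) extends the break and maps two anonymous regions, printing the resulting addresses:

    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void) {
        /* sbrk heap: the break moves to higher addresses as it grows. */
        void *brk_before = sbrk(0);
        sbrk(1 << 20);
        void *brk_after = sbrk(0);

        /* mmap heap: Linux's default top-down layout usually places
         * successive anonymous mappings at progressively lower addresses. */
        size_t len = 1 << 20;
        void *m1 = mmap(NULL, len, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        void *m2 = mmap(NULL, len, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        printf("sbrk: %p -> %p\n", brk_before, brk_after);
        printf("mmap: %p, then %p\n", m1, m2);
        return 0;
    }
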
2014-11-28 16:11:19 -08:00


#define JEMALLOC_CHUNK_DSS_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */
const char *dss_prec_names[] = {
	"disabled",
	"primary",
	"secondary",
	"N/A"
};

/* Current dss precedence default, used when creating new arenas. */
static dss_prec_t dss_prec_default = DSS_PREC_DEFAULT;

/*
 * Protects sbrk() calls. This avoids malloc races among threads, though it
 * does not protect against races with threads that call sbrk() directly.
 */
static malloc_mutex_t dss_mtx;

/* Base address of the DSS. */
static void *dss_base;
/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */
static void *dss_prev;
/* Current upper limit on DSS addresses. */
static void *dss_max;

/******************************************************************************/

static void *
chunk_dss_sbrk(intptr_t increment)
{
#ifdef JEMALLOC_DSS
	return (sbrk(increment));
#else
	not_implemented();
	return (NULL);
#endif
}

dss_prec_t
chunk_dss_prec_get(void)
{
	dss_prec_t ret;

	if (!have_dss)
		return (dss_prec_disabled);
	malloc_mutex_lock(&dss_mtx);
	ret = dss_prec_default;
	malloc_mutex_unlock(&dss_mtx);
	return (ret);
}

bool
chunk_dss_prec_set(dss_prec_t dss_prec)
{
	if (!have_dss)
		return (dss_prec != dss_prec_disabled);
	malloc_mutex_lock(&dss_mtx);
	dss_prec_default = dss_prec;
	malloc_mutex_unlock(&dss_mtx);
	return (false);
}

void *
chunk_alloc_dss(void *new_addr, size_t size, size_t alignment, bool *zero)
{
	void *ret;

	cassert(have_dss);
	assert(size > 0 && (size & chunksize_mask) == 0);
	assert(alignment > 0 && (alignment & chunksize_mask) == 0);

	/*
	 * sbrk() uses a signed increment argument, so take care not to
	 * interpret a huge allocation request as a negative increment.
	 */
	if ((intptr_t)size < 0)
		return (NULL);

	malloc_mutex_lock(&dss_mtx);
	if (dss_prev != (void *)-1) {
		size_t gap_size, cpad_size;
		void *cpad, *dss_next;
		intptr_t incr;

		/*
		 * The loop is necessary to recover from races with other
		 * threads that are using the DSS for something other than
		 * malloc.
		 */
		do {
			/* Avoid an unnecessary system call. */
			if (new_addr != NULL && dss_max != new_addr)
				break;

			/* Get the current end of the DSS. */
			dss_max = chunk_dss_sbrk(0);

			/* Make sure the earlier condition still holds. */
			if (new_addr != NULL && dss_max != new_addr)
				break;

			/*
			 * Calculate how much padding is necessary to
			 * chunk-align the end of the DSS.
			 */
			gap_size = (chunksize - CHUNK_ADDR2OFFSET(dss_max)) &
			    chunksize_mask;
			/*
			 * Compute how much chunk-aligned pad space (if any) is
			 * necessary to satisfy alignment. This space can be
			 * recycled for later use.
			 */
			cpad = (void *)((uintptr_t)dss_max + gap_size);
			ret = (void *)ALIGNMENT_CEILING((uintptr_t)dss_max,
			    alignment);
			cpad_size = (uintptr_t)ret - (uintptr_t)cpad;
			dss_next = (void *)((uintptr_t)ret + size);
			if ((uintptr_t)ret < (uintptr_t)dss_max ||
			    (uintptr_t)dss_next < (uintptr_t)dss_max) {
				/* Wrap-around. */
				malloc_mutex_unlock(&dss_mtx);
				return (NULL);
			}
			incr = gap_size + cpad_size + size;
			dss_prev = chunk_dss_sbrk(incr);
			if (dss_prev == dss_max) {
				/* Success. */
				dss_max = dss_next;
				malloc_mutex_unlock(&dss_mtx);
				if (cpad_size != 0)
					chunk_unmap(cpad, cpad_size);
				if (*zero) {
					JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
					    ret, size);
					memset(ret, 0, size);
				}
				return (ret);
			}
		} while (dss_prev != (void *)-1);
	}
	malloc_mutex_unlock(&dss_mtx);
	return (NULL);
}

bool
chunk_in_dss(void *chunk)
{
	bool ret;

	cassert(have_dss);

	malloc_mutex_lock(&dss_mtx);
	if ((uintptr_t)chunk >= (uintptr_t)dss_base
	    && (uintptr_t)chunk < (uintptr_t)dss_max)
		ret = true;
	else
		ret = false;
	malloc_mutex_unlock(&dss_mtx);

	return (ret);
}

bool
chunk_dss_boot(void)
{
	cassert(have_dss);

	if (malloc_mutex_init(&dss_mtx))
		return (true);
	dss_base = chunk_dss_sbrk(0);
	dss_prev = dss_base;
	dss_max = dss_base;

	return (false);
}

void
chunk_dss_prefork(void)
{
	if (have_dss)
		malloc_mutex_prefork(&dss_mtx);
}

void
chunk_dss_postfork_parent(void)
{
	if (have_dss)
		malloc_mutex_postfork_parent(&dss_mtx);
}

void
chunk_dss_postfork_child(void)
{
	if (have_dss)
		malloc_mutex_postfork_child(&dss_mtx);
}

/******************************************************************************/