Attempt mmap-based in-place huge reallocation.

Attempt mmap-based in-place huge reallocation by plumbing new_addr into
chunk_alloc_mmap().  This can dramatically speed up incremental huge
reallocation.

This resolves #335.
commit c7a9a6c86b
parent 5ec703dd33
Author: Jason Evans
Date:   2016-02-24 17:18:44 -08:00

3 changed files with 12 additions and 13 deletions
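
The mechanism behind the speedup is the familiar mmap hint-and-verify probe: ask the kernel for a mapping starting immediately after the existing allocation, and keep it only if the hint was honored. A minimal standalone sketch of that idea (illustration only, not jemalloc code; Linux-style MAP_ANONYMOUS assumed):

#include <stddef.h>
#include <sys/mman.h>

/* Try to grow [old_base, old_base + old_size) in place by grow_size bytes. */
static void *
try_extend_in_place(void *old_base, size_t old_size, size_t grow_size)
{
	void *hint = (void *)((char *)old_base + old_size);
	void *ret = mmap(hint, grow_size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (ret == MAP_FAILED)
		return (NULL);
	if (ret != hint) {
		/* The kernel placed the mapping elsewhere; give up. */
		munmap(ret, grow_size);
		return (NULL);
	}
	/* [old_base, old_base + old_size + grow_size) is now contiguous. */
	return (ret);
}

The diff below teaches chunk_alloc_mmap() to perform this kind of probe when a specific address is requested.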

@@ -9,8 +9,8 @@
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
-void	*chunk_alloc_mmap(size_t size, size_t alignment, bool *zero,
-    bool *commit);
+void	*chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment,
+    bool *zero, bool *commit);
 bool	chunk_dalloc_mmap(void *chunk, size_t size);
 
 #endif /* JEMALLOC_H_EXTERNS */

@@ -350,12 +350,9 @@ chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment,
 	    chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
 	    NULL)
 		return (ret);
-	/*
-	 * mmap.  Requesting an address is not implemented for
-	 * chunk_alloc_mmap(), so only call it if (new_addr == NULL).
-	 */
-	if (new_addr == NULL && (ret = chunk_alloc_mmap(size, alignment, zero,
-	    commit)) != NULL)
+	/* mmap. */
+	if ((ret = chunk_alloc_mmap(new_addr, size, alignment, zero, commit)) !=
+	    NULL)
 		return (ret);
 	/* "secondary" dss. */
 	if (have_dss && dss_prec == dss_prec_secondary && (ret =
@@ -380,7 +377,7 @@ chunk_alloc_base(size_t size)
 	 */
 	zero = true;
 	commit = true;
-	ret = chunk_alloc_mmap(size, chunksize, &zero, &commit);
+	ret = chunk_alloc_mmap(NULL, size, chunksize, &zero, &commit);
 	if (ret == NULL)
 		return (NULL);
 	if (config_valgrind)
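
With new_addr plumbed all the way down, chunk_alloc_core() no longer has to skip the mmap path when a specific address is requested, so in-place expansion can succeed even when the dss is not in use. A hypothetical caller-side sketch of how incremental huge reallocation can exploit this (huge_try_expand() and its bookkeeping are illustrative, not jemalloc's actual huge reallocation path; it reuses chunk_alloc_mmap() and chunksize from this patch):

static bool
huge_try_expand(void *ptr, size_t oldsize, size_t usize)
{
	void *new_addr = (void *)((char *)ptr + oldsize);
	bool zero = false, commit = true;

	/*
	 * With a non-NULL new_addr, chunk_alloc_mmap() returns either NULL
	 * or exactly new_addr (see the pages_map() note after the next
	 * hunk), so success means the old and new mappings are contiguous.
	 */
	if (chunk_alloc_mmap(new_addr, usize - oldsize, chunksize, &zero,
	    &commit) == NULL)
		return (true);	/* Could not expand in place; must relocate. */
	return (false);		/* [ptr, ptr + usize) is now one region. */
}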

@@ -32,7 +32,8 @@ chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit)
 }
 
 void *
-chunk_alloc_mmap(size_t size, size_t alignment, bool *zero, bool *commit)
+chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
+    bool *commit)
 {
 	void *ret;
 	size_t offset;
@@ -53,9 +54,10 @@ chunk_alloc_mmap(size_t size, size_t alignment, bool *zero, bool *commit)
 	assert(alignment != 0);
 	assert((alignment & chunksize_mask) == 0);
 
-	ret = pages_map(NULL, size);
-	if (ret == NULL)
-		return (NULL);
+	ret = pages_map(new_addr, size);
+	if (ret == NULL || ret == new_addr)
+		return (ret);
+	assert(new_addr == NULL);
 	offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
 	if (offset != 0) {
 		pages_unmap(ret, size);
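
One detail that keeps the new assert(new_addr == NULL) on the alignment-fixup path safe: pages_map() already discards a mapping that did not land at the requested address. A rough paraphrase of that contract (a sketch, not a verbatim copy of jemalloc's pages_map()):

void *
pages_map(void *addr, size_t size)
{
	void *ret = mmap(addr, size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (ret == MAP_FAILED)
		return (NULL);
	if (addr != NULL && ret != addr) {
		/* Mapped successfully, but not where the caller asked. */
		munmap(ret, size);
		return (NULL);
	}
	return (ret);
}

So a non-NULL return from pages_map(new_addr, size) implies either ret == new_addr, which the early return handles, or new_addr == NULL, which is exactly what the assertion encodes; only hint-free mappings ever reach the alignment fixup and the chunk_alloc_mmap_slow() fallback.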