Add alignment support to chunk_alloc().
parent c5851eaf6e
commit eae269036c
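In short, the change threads an alignment argument through the whole chunk-allocation path. Collected from the hunks below, the updated prototypes are (alignment is asserted to be a multiple of chunksize):

void *chunk_alloc(size_t size, size_t alignment, bool base, bool *zero);
void *chunk_alloc_dss(size_t size, size_t alignment, bool *zero);
void *chunk_alloc_mmap(size_t size, size_t alignment);
void *huge_palloc(size_t size, size_t alignment, bool zero);

Existing callers that only need chunk alignment (arena_chunk_alloc(), base_pages_alloc(), huge_malloc()) simply pass chunksize.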
@@ -42,7 +42,7 @@ extern size_t chunk_npages;
 extern size_t map_bias; /* Number of arena chunk header pages. */
 extern size_t arena_maxclass; /* Max size class for arenas. */
 
-void *chunk_alloc(size_t size, bool base, bool *zero);
+void *chunk_alloc(size_t size, size_t alignment, bool base, bool *zero);
 void chunk_dealloc(void *chunk, size_t size, bool unmap);
 bool chunk_boot0(void);
 bool chunk_boot1(void);
@@ -9,7 +9,7 @@
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
-void *chunk_alloc_dss(size_t size, bool *zero);
+void *chunk_alloc_dss(size_t size, size_t alignment, bool *zero);
 bool chunk_in_dss(void *chunk);
 bool chunk_dealloc_dss(void *chunk, size_t size);
 bool chunk_dss_boot(void);
@@ -9,7 +9,7 @@
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
-void *chunk_alloc_mmap(size_t size);
+void *chunk_alloc_mmap(size_t size, size_t alignment);
 void chunk_dealloc_mmap(void *chunk, size_t size);
 
 bool chunk_mmap_boot(void);
@@ -357,7 +357,8 @@ arena_chunk_alloc(arena_t *arena)
 
         zero = false;
         malloc_mutex_unlock(&arena->lock);
-        chunk = (arena_chunk_t *)chunk_alloc(chunksize, false, &zero);
+        chunk = (arena_chunk_t *)chunk_alloc(chunksize, chunksize,
+            false, &zero);
         malloc_mutex_lock(&arena->lock);
         if (chunk == NULL)
             return (NULL);
@@ -32,7 +32,7 @@ base_pages_alloc(size_t minsize)
     assert(minsize != 0);
     csize = CHUNK_CEILING(minsize);
     zero = false;
-    base_pages = chunk_alloc(csize, true, &zero);
+    base_pages = chunk_alloc(csize, chunksize, true, &zero);
     if (base_pages == NULL)
         return (true);
     base_next_addr = base_pages;
@@ -27,19 +27,20 @@ size_t arena_maxclass; /* Max size class for arenas. */
  * advantage of them if they are returned.
  */
 void *
-chunk_alloc(size_t size, bool base, bool *zero)
+chunk_alloc(size_t size, size_t alignment, bool base, bool *zero)
 {
     void *ret;
 
     assert(size != 0);
     assert((size & chunksize_mask) == 0);
+    assert((alignment & chunksize_mask) == 0);
 
     if (config_dss) {
-        ret = chunk_alloc_dss(size, zero);
+        ret = chunk_alloc_dss(size, alignment, zero);
         if (ret != NULL)
             goto RETURN;
     }
-    ret = chunk_alloc_mmap(size);
+    ret = chunk_alloc_mmap(size, alignment);
     if (ret != NULL) {
         *zero = true;
         goto RETURN;
@@ -28,41 +28,50 @@ static extent_tree_t dss_chunks_ad;
 /******************************************************************************/
 /* Function prototypes for non-inline static functions. */
 
-static void *chunk_recycle_dss(size_t size, bool *zero);
+static void *chunk_recycle_dss(size_t size, size_t alignment, bool *zero);
 static extent_node_t *chunk_dealloc_dss_record(void *chunk, size_t size);
 
 /******************************************************************************/
 
 static void *
-chunk_recycle_dss(size_t size, bool *zero)
+chunk_recycle_dss(size_t size, size_t alignment, bool *zero)
 {
     extent_node_t *node, key;
 
     cassert(config_dss);
 
     key.addr = NULL;
-    key.size = size;
+    key.size = size + alignment - chunksize;
     malloc_mutex_lock(&dss_mtx);
     node = extent_tree_szad_nsearch(&dss_chunks_szad, &key);
     if (node != NULL) {
-        void *ret = node->addr;
+        size_t offset = (size_t)((uintptr_t)(node->addr) & (alignment -
+            1));
+        void *ret;
+        if (offset > 0)
+            offset = alignment - offset;
+        ret = (void *)((uintptr_t)(node->addr) + offset);
 
         /* Remove node from the tree. */
         extent_tree_szad_remove(&dss_chunks_szad, node);
-        if (node->size == size) {
-            extent_tree_ad_remove(&dss_chunks_ad, node);
-            base_node_dealloc(node);
-        } else {
-            /*
-             * Insert the remainder of node's address range as a
-             * smaller chunk. Its position within dss_chunks_ad
-             * does not change.
-             */
-            assert(node->size > size);
-            node->addr = (void *)((uintptr_t)node->addr + size);
-            node->size -= size;
+        extent_tree_ad_remove(&dss_chunks_ad, node);
+        if (offset > 0) {
+            /* Insert the leading space as a smaller chunk. */
+            node->size = offset;
             extent_tree_szad_insert(&dss_chunks_szad, node);
+            extent_tree_ad_insert(&dss_chunks_ad, node);
         }
+        if (alignment - chunksize > offset) {
+            if (offset > 0)
+                node = base_node_alloc();
+            /* Insert the trailing space as a smaller chunk. */
+            node->addr = (void *)((uintptr_t)(ret) + size);
+            node->size = alignment - chunksize - offset;
+            extent_tree_szad_insert(&dss_chunks_szad, node);
+            extent_tree_ad_insert(&dss_chunks_ad, node);
+        } else if (offset == 0)
+            base_node_dealloc(node);
+
         malloc_mutex_unlock(&dss_mtx);
 
         if (*zero)
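The recycling arithmetic above searches for an extent of at least size + alignment - chunksize bytes and then carves it into a leading pad, the aligned allocation, and a trailing pad. A standalone sketch of that split (hypothetical helper, not part of the commit; alignment is assumed to be a power of two and a multiple of chunksize):

#include <inttypes.h>
#include <stdio.h>

/*
 * Hypothetical helper mirroring the split in chunk_recycle_dss() above: a
 * recycled extent of size + alignment - chunksize bytes starting at the
 * chunk-aligned address addr is divided into a leading pad, the aligned
 * allocation, and a trailing pad.
 */
static void
split_recycled(uintptr_t addr, size_t size, size_t alignment, size_t chunksize)
{
    size_t offset = (size_t)(addr & (alignment - 1));

    if (offset > 0)
        offset = alignment - offset;    /* Size of the leading pad. */
    printf("lead %#zx, result %#" PRIxPTR "..%#" PRIxPTR ", trail %#zx\n",
        offset, addr + offset, addr + offset + size,
        alignment - chunksize - offset);
}

int
main(void)
{
    /* 4 MiB chunks; 16 MiB size and alignment; extent at 7 * 4 MiB. */
    split_recycled((uintptr_t)0x1c00000, (size_t)1 << 24, (size_t)1 << 24,
        (size_t)1 << 22);
    return (0);
}

For these example values the 28 MiB extent at 0x1c00000 yields a 4 MiB leading pad, a 16 MiB allocation at 0x2000000, and an 8 MiB trailing pad; the real code only reinserts a trailing pad when alignment - chunksize > offset.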
@@ -75,13 +84,15 @@ chunk_recycle_dss(size_t size, bool *zero)
 }
 
 void *
-chunk_alloc_dss(size_t size, bool *zero)
+chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
 {
     void *ret;
 
     cassert(config_dss);
+    assert(size > 0 && (size & chunksize_mask) == 0);
+    assert(alignment > 0 && (alignment & chunksize_mask) == 0);
 
-    ret = chunk_recycle_dss(size, zero);
+    ret = chunk_recycle_dss(size, alignment, zero);
     if (ret != NULL)
         return (ret);
 
@@ -94,6 +105,8 @@ chunk_alloc_dss(size_t size, bool *zero)
 
     malloc_mutex_lock(&dss_mtx);
     if (dss_prev != (void *)-1) {
+        size_t gap_size, cpad_size;
+        void *cpad, *dss_next;
         intptr_t incr;
 
         /*
@@ -104,25 +117,36 @@ chunk_alloc_dss(size_t size, bool *zero)
         do {
             /* Get the current end of the DSS. */
             dss_max = sbrk(0);
 
-            /*
-             * Calculate how much padding is necessary to
-             * chunk-align the end of the DSS.
-             */
-            incr = (intptr_t)size
-                - (intptr_t)CHUNK_ADDR2OFFSET(dss_max);
-            if (incr == (intptr_t)size)
-                ret = dss_max;
-            else {
-                ret = (void *)((intptr_t)dss_max + incr);
-                incr += size;
+            gap_size = (chunksize - CHUNK_ADDR2OFFSET(dss_max)) &
+                chunksize_mask;
+            /*
+             * Compute how much chunk-aligned pad space (if any) is
+             * necessary to satisfy alignment. This space can be
+             * recycled for later use.
+             */
+            cpad = (void *)((uintptr_t)dss_max + gap_size);
+            ret = (void *)(((uintptr_t)dss_max + (alignment - 1)) &
+                ~(alignment - 1));
+            cpad_size = (uintptr_t)ret - (uintptr_t)cpad;
+            dss_next = (void *)((uintptr_t)ret + size);
+            if ((uintptr_t)ret < (uintptr_t)dss_max ||
+                (uintptr_t)dss_next < (uintptr_t)dss_max) {
+                /* Wrap-around. */
+                malloc_mutex_unlock(&dss_mtx);
+                return (NULL);
             }
 
+            incr = gap_size + cpad_size + size;
             dss_prev = sbrk(incr);
             if (dss_prev == dss_max) {
                 /* Success. */
-                dss_max = (void *)((intptr_t)dss_prev + incr);
+                dss_max = dss_next;
                 malloc_mutex_unlock(&dss_mtx);
+                if (cpad_size != 0)
+                    chunk_dealloc_dss(cpad, cpad_size);
                 *zero = true;
                 return (ret);
             }
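On the sbrk() path above, the break is grown by gap_size + cpad_size + size bytes in one call: gap_size chunk-aligns the current break, cpad_size is whole-chunk padding up to the requested alignment (returned to the recycler via chunk_dealloc_dss()), and size is the allocation itself. A standalone sketch of that computation (hypothetical, same power-of-two assumptions):

#include <inttypes.h>
#include <stdio.h>

/*
 * Hypothetical sketch of the increment computation in chunk_alloc_dss()
 * above, for a current break dss_max.
 */
static void
dss_increment(uintptr_t dss_max, size_t size, size_t alignment,
    size_t chunksize)
{
    size_t chunksize_mask = chunksize - 1;
    size_t gap_size = (chunksize - (size_t)(dss_max & chunksize_mask)) &
        chunksize_mask;
    uintptr_t cpad = dss_max + gap_size;
    uintptr_t ret = (dss_max + (alignment - 1)) &
        ~(uintptr_t)(alignment - 1);
    size_t cpad_size = (size_t)(ret - cpad);

    printf("gap %#zx, cpad %#zx (recycled), incr %#zx, result %#" PRIxPTR
        "\n", gap_size, cpad_size, gap_size + cpad_size + size, ret);
}

int
main(void)
{
    /* 4 MiB chunks, 16 MiB size/alignment, unaligned current break. */
    dss_increment((uintptr_t)0x1403000, (size_t)1 << 24, (size_t)1 << 24,
        (size_t)1 << 22);
    return (0);
}

For the example break at 0x1403000 this gives a gap of 0x3fd000 bytes, an 8 MiB recyclable pad, and the aligned result at 0x2000000.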
|
@ -17,8 +17,9 @@ malloc_tsd_funcs(JEMALLOC_INLINE, mmap_unaligned, bool, false,
|
||||
|
||||
static void *pages_map(void *addr, size_t size);
|
||||
static void pages_unmap(void *addr, size_t size);
|
||||
static void *chunk_alloc_mmap_slow(size_t size, bool unaligned);
|
||||
static void *chunk_alloc_mmap_internal(size_t size);
|
||||
static void *chunk_alloc_mmap_slow(size_t size, size_t alignment,
|
||||
bool unaligned);
|
||||
static void *chunk_alloc_mmap_internal(size_t size, size_t alignment);
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
@@ -73,7 +74,7 @@ pages_unmap(void *addr, size_t size)
 }
 
 static void *
-chunk_alloc_mmap_slow(size_t size, bool unaligned)
+chunk_alloc_mmap_slow(size_t size, size_t alignment, bool unaligned)
 {
     void *ret;
     size_t offset;
@@ -82,29 +83,26 @@ chunk_alloc_mmap_slow(size_t size, bool unaligned)
     if (size + chunksize <= size)
         return (NULL);
 
-    ret = pages_map(NULL, size + chunksize);
+    ret = pages_map(NULL, size + alignment);
     if (ret == NULL)
         return (NULL);
 
     /* Clean up unneeded leading/trailing space. */
-    offset = CHUNK_ADDR2OFFSET(ret);
+    offset = (size_t)((uintptr_t)(ret) & (alignment - 1));
     if (offset != 0) {
         /* Note that mmap() returned an unaligned mapping. */
         unaligned = true;
 
         /* Leading space. */
-        pages_unmap(ret, chunksize - offset);
+        pages_unmap(ret, alignment - offset);
 
-        ret = (void *)((uintptr_t)ret +
-            (chunksize - offset));
+        ret = (void *)((uintptr_t)ret + (alignment - offset));
 
         /* Trailing space. */
-        pages_unmap((void *)((uintptr_t)ret + size),
-            offset);
+        pages_unmap((void *)((uintptr_t)ret + size), offset);
     } else {
         /* Trailing space only. */
-        pages_unmap((void *)((uintptr_t)ret + size),
-            chunksize);
+        pages_unmap((void *)((uintptr_t)ret + size), alignment);
     }
 
     /*
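The slow path above now over-maps by alignment bytes instead of chunksize and trims both ends. A standalone sketch of the trim sizes (hypothetical helper; alignment is assumed to be a power of two):

#include <inttypes.h>
#include <stdio.h>

/*
 * Hypothetical sketch of the trimming in chunk_alloc_mmap_slow() above:
 * map size + alignment bytes, then unmap (alignment - offset) leading
 * bytes and offset trailing bytes so that exactly size bytes remain,
 * aligned to the requested alignment.
 */
static void
mmap_trim(uintptr_t map_addr, size_t size, size_t alignment)
{
    size_t offset = (size_t)(map_addr & (alignment - 1));

    if (offset != 0) {
        printf("unmap %#zx leading, keep %#" PRIxPTR "..%#" PRIxPTR
            ", unmap %#zx trailing\n", alignment - offset,
            map_addr + (alignment - offset),
            map_addr + (alignment - offset) + size, offset);
    } else {
        /* Already aligned; only the extra alignment bytes are unmapped. */
        printf("keep %#" PRIxPTR "..%#" PRIxPTR ", unmap %#zx trailing\n",
            map_addr, map_addr + size, alignment);
    }
}

int
main(void)
{
    /* 4 MiB alignment; mmap() returned a page-aligned but unaligned base. */
    mmap_trim((uintptr_t)0x7f1234561000, (size_t)1 << 22, (size_t)1 << 22);
    return (0);
}

Whatever base mmap() returns, exactly size bytes remain and the result is alignment-aligned.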
@@ -121,7 +119,7 @@ chunk_alloc_mmap_slow(size_t size, bool unaligned)
 }
 
 static void *
-chunk_alloc_mmap_internal(size_t size)
+chunk_alloc_mmap_internal(size_t size, size_t alignment)
 {
     void *ret;
 
@@ -160,7 +158,7 @@ chunk_alloc_mmap_internal(size_t size)
         if (ret == NULL)
             return (NULL);
 
-        offset = CHUNK_ADDR2OFFSET(ret);
+        offset = (size_t)((uintptr_t)(ret) & (alignment - 1));
         if (offset != 0) {
             bool mu = true;
             mmap_unaligned_tsd_set(&mu);
@@ -172,7 +170,8 @@ chunk_alloc_mmap_internal(size_t size)
                 * the reliable-but-expensive method.
                 */
                pages_unmap(ret, size);
-               ret = chunk_alloc_mmap_slow(size, true);
+               ret = chunk_alloc_mmap_slow(size, alignment,
+                   true);
            } else {
                /* Clean up unneeded leading space. */
                pages_unmap(ret, chunksize - offset);
@@ -181,16 +180,16 @@ chunk_alloc_mmap_internal(size_t size)
            }
        }
    } else
-       ret = chunk_alloc_mmap_slow(size, false);
+       ret = chunk_alloc_mmap_slow(size, alignment, false);
 
    return (ret);
 }
 
 void *
-chunk_alloc_mmap(size_t size)
+chunk_alloc_mmap(size_t size, size_t alignment)
 {
 
-   return (chunk_alloc_mmap_internal(size));
+   return (chunk_alloc_mmap_internal(size, alignment));
 }
 
 void
src/huge.c
@@ -17,6 +17,13 @@ static extent_tree_t huge;
 
 void *
 huge_malloc(size_t size, bool zero)
 {
+
+    return (huge_palloc(size, chunksize, zero));
+}
+
+void *
+huge_palloc(size_t size, size_t alignment, bool zero)
+{
     void *ret;
     size_t csize;
@@ -35,7 +42,7 @@ huge_malloc(size_t size, bool zero)
     if (node == NULL)
         return (NULL);
 
-    ret = chunk_alloc(csize, false, &zero);
+    ret = chunk_alloc(csize, alignment, false, &zero);
     if (ret == NULL) {
         base_node_dealloc(node);
         return (NULL);
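With huge_malloc() reduced to huge_palloc(size, chunksize, zero), every huge allocation now takes the same path: the request is rounded up to whole chunks and the caller's alignment is handed straight to chunk_alloc() above. A small sketch of the sizing step (hypothetical; CHUNK_CEILING() itself is not shown in these hunks, but it rounds up to a chunksize multiple):

#include <stdio.h>

/* Hypothetical stand-in for jemalloc's CHUNK_CEILING() rounding. */
static size_t
chunk_ceiling(size_t size, size_t chunksize)
{
    return ((size + chunksize - 1) & ~(chunksize - 1));
}

int
main(void)
{
    size_t chunksize = (size_t)1 << 22;     /* 4 MiB chunks */
    size_t size = 5 * ((size_t)1 << 20);    /* 5 MiB request */

    /* huge_malloc(size, zero) == huge_palloc(size, chunksize, zero). */
    printf("csize = %#zx (%zu chunks)\n",
        chunk_ceiling(size, chunksize),
        chunk_ceiling(size, chunksize) / chunksize);
    return (0);
}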
@@ -64,89 +71,6 @@ huge_malloc(size_t size, bool zero)
     return (ret);
 }
 
-/* Only handles large allocations that require more than chunk alignment. */
-void *
-huge_palloc(size_t size, size_t alignment, bool zero)
-{
-    void *ret;
-    size_t alloc_size, chunk_size, offset;
-    extent_node_t *node;
-
-    /*
-     * This allocation requires alignment that is even larger than chunk
-     * alignment. This means that huge_malloc() isn't good enough.
-     *
-     * Allocate almost twice as many chunks as are demanded by the size or
-     * alignment, in order to assure the alignment can be achieved, then
-     * unmap leading and trailing chunks.
-     */
-    assert(alignment > chunksize);
-
-    chunk_size = CHUNK_CEILING(size);
-
-    if (size >= alignment)
-        alloc_size = chunk_size + alignment - chunksize;
-    else
-        alloc_size = (alignment << 1) - chunksize;
-
-    /* Allocate an extent node with which to track the chunk. */
-    node = base_node_alloc();
-    if (node == NULL)
-        return (NULL);
-
-    ret = chunk_alloc(alloc_size, false, &zero);
-    if (ret == NULL) {
-        base_node_dealloc(node);
-        return (NULL);
-    }
-
-    offset = (uintptr_t)ret & (alignment - 1);
-    assert((offset & chunksize_mask) == 0);
-    assert(offset < alloc_size);
-    if (offset == 0) {
-        /* Trim trailing space. */
-        chunk_dealloc((void *)((uintptr_t)ret + chunk_size), alloc_size
-            - chunk_size, true);
-    } else {
-        size_t trailsize;
-
-        /* Trim leading space. */
-        chunk_dealloc(ret, alignment - offset, true);
-
-        ret = (void *)((uintptr_t)ret + (alignment - offset));
-
-        trailsize = alloc_size - (alignment - offset) - chunk_size;
-        if (trailsize != 0) {
-            /* Trim trailing space. */
-            assert(trailsize < alloc_size);
-            chunk_dealloc((void *)((uintptr_t)ret + chunk_size),
-                trailsize, true);
-        }
-    }
-
-    /* Insert node into huge. */
-    node->addr = ret;
-    node->size = chunk_size;
-
-    malloc_mutex_lock(&huge_mtx);
-    extent_tree_ad_insert(&huge, node);
-    if (config_stats) {
-        stats_cactive_add(chunk_size);
-        huge_nmalloc++;
-        huge_allocated += chunk_size;
-    }
-    malloc_mutex_unlock(&huge_mtx);
-
-    if (config_fill && zero == false) {
-        if (opt_junk)
-            memset(ret, 0xa5, chunk_size);
-        else if (opt_zero)
-            memset(ret, 0, chunk_size);
-    }
-
-    return (ret);
-}
-
 void *
 huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
 {