Modify chunk_alloc() to support optional zeroing.
Use optional zeroing in arena_chunk_alloc() to avoid needless zeroing of chunks. This is particularly important in the context of swapfile and DSS allocation, since a long-lived application may commonly recycle chunks.
parent bc25a47ee0
commit 41631d0061
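For orientation before the diff: the change turns the `zero` argument of chunk_alloc() into an in/out parameter. Below is a minimal, compilable sketch of that convention only; chunk_alloc_stub() and the value used for CHUNK_MAP_ZEROED are illustrative stand-ins, not jemalloc's implementation.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define CHUNK_MAP_ZEROED ((size_t)0x1) /* stand-in for jemalloc's flag */

/*
 * Stand-in for chunk_alloc() after this commit: *zero is both an input
 * (does the caller demand zeroed memory?) and an output (did the chunk
 * end up zeroed anyway?).
 */
static void *
chunk_alloc_stub(size_t size, bool *zero)
{
    void *ret = calloc(1, size); /* calloc() already zeroes the memory... */

    if (ret != NULL)
        *zero = true;            /* ...so report that back to the caller. */
    return (ret);
}

int
main(void)
{
    bool zero = false; /* Caller does not require zeroed memory. */
    void *chunk = chunk_alloc_stub(4096, &zero);

    if (chunk != NULL) {
        /*
         * If the chunk happened to come back zeroed, the caller can mark
         * its page map and skip later zeroing, which is what
         * arena_chunk_alloc() does with CHUNK_MAP_ZEROED in this commit.
         */
        size_t map_bits = zero ? CHUNK_MAP_ZEROED : 0;
        printf("zeroed: %d, map bits: %zx\n", (int)zero, map_bits);
        free(chunk);
    }
    return (0);
}

In the diff, arena_chunk_alloc() follows exactly this pattern: it passes zero = false because it does not require zeroed memory, but records CHUNK_MAP_ZEROED in the page map when a zeroed chunk comes back for free.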
@@ -43,7 +43,7 @@ extern size_t chunk_npages;
 extern size_t arena_chunk_header_npages;
 extern size_t arena_maxclass; /* Max size class for arenas. */
 
-void *chunk_alloc(size_t size, bool zero);
+void *chunk_alloc(size_t size, bool *zero);
 void chunk_dealloc(void *chunk, size_t size);
 bool chunk_boot(void);
 
@@ -16,7 +16,7 @@
 */
 extern malloc_mutex_t dss_mtx;
 
-void *chunk_alloc_dss(size_t size, bool zero);
+void *chunk_alloc_dss(size_t size, bool *zero);
 bool chunk_dealloc_dss(void *chunk, size_t size);
 bool chunk_dss_boot(void);
 
@@ -16,7 +16,7 @@ extern bool swap_enabled;
 extern size_t swap_avail;
 #endif
 
-void *chunk_alloc_swap(size_t size, bool zero);
+void *chunk_alloc_swap(size_t size, bool *zero);
 bool chunk_dealloc_swap(void *chunk, size_t size);
 bool chunk_swap_enable(const int *fds, unsigned nfds, bool prezeroed);
 bool chunk_swap_boot(void);
@@ -586,7 +586,11 @@ arena_chunk_alloc(arena_t *arena)
         chunk = arena->spare;
         arena->spare = NULL;
     } else {
-        chunk = (arena_chunk_t *)chunk_alloc(chunksize, true);
+        bool zero;
+        size_t zeroed;
+
+        zero = false;
+        chunk = (arena_chunk_t *)chunk_alloc(chunksize, &zero);
         if (chunk == NULL)
             return (NULL);
 #ifdef JEMALLOC_STATS
@@ -604,15 +608,16 @@ arena_chunk_alloc(arena_t *arena)
 
     /*
      * Initialize the map to contain one maximal free untouched run.
+     * Mark the pages as zeroed iff chunk_alloc() returned a zeroed
+     * chunk.
      */
+    zeroed = zero ? CHUNK_MAP_ZEROED : 0;
     for (i = 0; i < arena_chunk_header_npages; i++)
         chunk->map[i].bits = 0;
-    chunk->map[i].bits = arena_maxclass | CHUNK_MAP_ZEROED;
-    for (i++; i < chunk_npages-1; i++) {
-        chunk->map[i].bits = CHUNK_MAP_ZEROED;
-    }
-    chunk->map[chunk_npages-1].bits = arena_maxclass |
-        CHUNK_MAP_ZEROED;
+    chunk->map[i].bits = arena_maxclass | zeroed;
+    for (i++; i < chunk_npages-1; i++)
+        chunk->map[i].bits = zeroed;
+    chunk->map[chunk_npages-1].bits = arena_maxclass | zeroed;
 
     /* Insert the run into the runs_avail tree. */
@@ -27,10 +27,12 @@ static bool
 base_pages_alloc(size_t minsize)
 {
     size_t csize;
+    bool zero;
 
     assert(minsize != 0);
     csize = CHUNK_CEILING(minsize);
-    base_pages = chunk_alloc(csize, false);
+    zero = false;
+    base_pages = chunk_alloc(csize, &zero);
     if (base_pages == NULL)
         return (true);
     base_next_addr = base_pages;
@@ -22,8 +22,14 @@ size_t arena_maxclass; /* Max size class for arenas. */
 
 /******************************************************************************/
 
+/*
+ * If the caller specifies (*zero == false), it is still possible to receive
+ * zeroed memory, in which case *zero is toggled to true. arena_chunk_alloc()
+ * takes advantage of this to avoid demanding zeroed chunks, but taking
+ * advantage of them if they are returned.
+ */
 void *
-chunk_alloc(size_t size, bool zero)
+chunk_alloc(size_t size, bool *zero)
 {
     void *ret;
 
@@ -45,8 +51,10 @@ chunk_alloc(size_t size, bool zero)
             goto RETURN;
 #endif
         ret = chunk_alloc_mmap(size);
-        if (ret != NULL)
+        if (ret != NULL) {
+            *zero = true;
             goto RETURN;
+        }
 #ifdef JEMALLOC_SWAP
     }
 #endif
@@ -25,13 +25,13 @@ static extent_tree_t dss_chunks_ad;
 /******************************************************************************/
 /* Function prototypes for non-inline static functions. */
 
-static void *chunk_recycle_dss(size_t size, bool zero);
+static void *chunk_recycle_dss(size_t size, bool *zero);
 static extent_node_t *chunk_dealloc_dss_record(void *chunk, size_t size);
 
 /******************************************************************************/
 
 static void *
-chunk_recycle_dss(size_t size, bool zero)
+chunk_recycle_dss(size_t size, bool *zero)
 {
     extent_node_t *node, key;
 
@@ -60,7 +60,7 @@ chunk_recycle_dss(size_t size, bool zero)
         }
         malloc_mutex_unlock(&dss_mtx);
 
-        if (zero)
+        if (*zero)
             memset(ret, 0, size);
         return (ret);
     }
@@ -70,7 +70,7 @@ chunk_recycle_dss(size_t size, bool zero)
 }
 
 void *
-chunk_alloc_dss(size_t size, bool zero)
+chunk_alloc_dss(size_t size, bool *zero)
 {
     void *ret;
 
@@ -116,6 +116,7 @@ chunk_alloc_dss(size_t size, bool zero)
             /* Success. */
             dss_max = (void *)((intptr_t)dss_prev + incr);
             malloc_mutex_unlock(&dss_mtx);
+            *zero = true;
             return (ret);
         }
     } while (dss_prev != (void *)-1);
@@ -31,13 +31,13 @@ static extent_tree_t swap_chunks_ad;
 /******************************************************************************/
 /* Function prototypes for non-inline static functions. */
 
-static void *chunk_recycle_swap(size_t size, bool zero);
+static void *chunk_recycle_swap(size_t size, bool *zero);
 static extent_node_t *chunk_dealloc_swap_record(void *chunk, size_t size);
 
 /******************************************************************************/
 
 static void *
-chunk_recycle_swap(size_t size, bool zero)
+chunk_recycle_swap(size_t size, bool *zero)
 {
     extent_node_t *node, key;
 
@@ -69,7 +69,7 @@ chunk_recycle_swap(size_t size, bool zero)
 #endif
         malloc_mutex_unlock(&swap_mtx);
 
-        if (zero)
+        if (*zero)
             memset(ret, 0, size);
         return (ret);
     }
@@ -79,7 +79,7 @@ chunk_recycle_swap(size_t size, bool zero)
 }
 
 void *
-chunk_alloc_swap(size_t size, bool zero)
+chunk_alloc_swap(size_t size, bool *zero)
 {
     void *ret;
 
@@ -98,7 +98,9 @@ chunk_alloc_swap(size_t size, bool zero)
 #endif
         malloc_mutex_unlock(&swap_mtx);
 
-        if (zero && swap_prezeroed == false)
+        if (swap_prezeroed)
+            *zero = true;
+        else if (*zero)
             memset(ret, 0, size);
     } else {
         malloc_mutex_unlock(&swap_mtx);
@@ -37,7 +37,7 @@ huge_malloc(size_t size, bool zero)
     if (node == NULL)
         return (NULL);
 
-    ret = chunk_alloc(csize, zero);
+    ret = chunk_alloc(csize, &zero);
     if (ret == NULL) {
         base_node_dealloc(node);
         return (NULL);
@@ -74,6 +74,7 @@ huge_palloc(size_t alignment, size_t size)
     void *ret;
     size_t alloc_size, chunk_size, offset;
     extent_node_t *node;
+    bool zero;
 
     /*
      * This allocation requires alignment that is even larger than chunk
@@ -97,7 +98,8 @@ huge_palloc(size_t alignment, size_t size)
     if (node == NULL)
         return (NULL);
 
-    ret = chunk_alloc(alloc_size, false);
+    zero = false;
+    ret = chunk_alloc(alloc_size, &zero);
     if (ret == NULL) {
         base_node_dealloc(node);
         return (NULL);
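One behavioral detail of the chunk_alloc_swap() hunk above is worth spelling out: previously, a zero-demanding caller triggered memset() whenever the swap file was not pre-zeroed; now pre-zeroed pages are simply reported through *zero, and memset() runs only for non-prezeroed pages when the caller asked for zeroed memory. A small stand-alone sketch of that logic (finish_swap_alloc() is a hypothetical name, not jemalloc code):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/*
 * Stand-in for the tail of chunk_alloc_swap() after this commit: pre-zeroed
 * swap pages are reported through *zero rather than being memset() again;
 * non-prezeroed pages are zeroed only if the caller requested zeroed memory.
 */
static void
finish_swap_alloc(void *ret, size_t size, bool swap_prezeroed, bool *zero)
{
    if (swap_prezeroed)
        *zero = true;
    else if (*zero)
        memset(ret, 0, size);
}

int
main(void)
{
    char buf[8] = "garbage";
    bool zero = true; /* Caller wants zeroed memory. */

    finish_swap_alloc(buf, sizeof(buf), false, &zero);
    printf("first byte: %d, zero flag: %d\n", buf[0], (int)zero);
    return (0);
}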