Modify chunk_alloc() to support optional zeroing.

Use optional zeroing in arena_chunk_alloc() to avoid needless zeroing of
chunks.  This is particularly important in the context of swapfile and
DSS allocation, since a long-lived application may commonly recycle
chunks.
Jason Evans, 2010-01-24 17:13:07 -08:00
commit 41631d0061 (parent bc25a47ee0)
9 changed files with 44 additions and 24 deletions
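For context, a rough caller-side sketch of the new contract (not part of the commit; the wrapper name example_alloc_zeroed_chunk and its error handling are hypothetical): the caller initializes a bool to false and passes its address, chunk_alloc() flips it to true whenever the returned chunk is already known to be zeroed, and the caller only zeroes when it actually has to.

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

void	*chunk_alloc(size_t size, bool *zero);	/* jemalloc-internal, as changed below */

/* Hypothetical wrapper: obtain a zero-filled chunk without redundant zeroing. */
static void *
example_alloc_zeroed_chunk(size_t size)
{
	bool zero = false;	/* Zeroing is not demanded up front. */
	void *chunk = chunk_alloc(size, &zero);

	if (chunk == NULL)
		return (NULL);
	if (zero == false) {
		/* Recycled chunk with possibly stale contents: zero it here. */
		memset(chunk, 0, size);
	}
	return (chunk);
}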


@@ -43,7 +43,7 @@ extern size_t chunk_npages;
 extern size_t arena_chunk_header_npages;
 extern size_t arena_maxclass; /* Max size class for arenas. */
 
-void *chunk_alloc(size_t size, bool zero);
+void *chunk_alloc(size_t size, bool *zero);
 void chunk_dealloc(void *chunk, size_t size);
 bool chunk_boot(void);


@@ -16,7 +16,7 @@
  */
 extern malloc_mutex_t dss_mtx;
 
-void *chunk_alloc_dss(size_t size, bool zero);
+void *chunk_alloc_dss(size_t size, bool *zero);
 bool chunk_dealloc_dss(void *chunk, size_t size);
 bool chunk_dss_boot(void);


@@ -16,7 +16,7 @@ extern bool swap_enabled;
 extern size_t swap_avail;
 #endif
 
-void *chunk_alloc_swap(size_t size, bool zero);
+void *chunk_alloc_swap(size_t size, bool *zero);
 bool chunk_dealloc_swap(void *chunk, size_t size);
 bool chunk_swap_enable(const int *fds, unsigned nfds, bool prezeroed);
 bool chunk_swap_boot(void);


@@ -586,7 +586,11 @@ arena_chunk_alloc(arena_t *arena)
 		chunk = arena->spare;
 		arena->spare = NULL;
 	} else {
-		chunk = (arena_chunk_t *)chunk_alloc(chunksize, true);
+		bool zero;
+		size_t zeroed;
+
+		zero = false;
+		chunk = (arena_chunk_t *)chunk_alloc(chunksize, &zero);
 		if (chunk == NULL)
 			return (NULL);
 #ifdef JEMALLOC_STATS
@@ -604,15 +608,16 @@ arena_chunk_alloc(arena_t *arena)
 		/*
 		 * Initialize the map to contain one maximal free untouched run.
+		 * Mark the pages as zeroed iff chunk_alloc() returned a zeroed
+		 * chunk.
 		 */
+		zeroed = zero ? CHUNK_MAP_ZEROED : 0;
 		for (i = 0; i < arena_chunk_header_npages; i++)
 			chunk->map[i].bits = 0;
-		chunk->map[i].bits = arena_maxclass | CHUNK_MAP_ZEROED;
-		for (i++; i < chunk_npages-1; i++) {
-			chunk->map[i].bits = CHUNK_MAP_ZEROED;
-		}
-		chunk->map[chunk_npages-1].bits = arena_maxclass |
-		    CHUNK_MAP_ZEROED;
+		chunk->map[i].bits = arena_maxclass | zeroed;
+		for (i++; i < chunk_npages-1; i++)
+			chunk->map[i].bits = zeroed;
+		chunk->map[chunk_npages-1].bits = arena_maxclass | zeroed;
 	}
 
 	/* Insert the run into the runs_avail tree. */


@@ -27,10 +27,12 @@ static bool
 base_pages_alloc(size_t minsize)
 {
 	size_t csize;
+	bool zero;
 
 	assert(minsize != 0);
 	csize = CHUNK_CEILING(minsize);
-	base_pages = chunk_alloc(csize, false);
+	zero = false;
+	base_pages = chunk_alloc(csize, &zero);
 	if (base_pages == NULL)
 		return (true);
 	base_next_addr = base_pages;


@@ -22,8 +22,14 @@ size_t arena_maxclass; /* Max size class for arenas. */
 
 /******************************************************************************/
 
+/*
+ * If the caller specifies (*zero == false), it is still possible to receive
+ * zeroed memory, in which case *zero is toggled to true.  arena_chunk_alloc()
+ * takes advantage of this to avoid demanding zeroed chunks, but taking
+ * advantage of them if they are returned.
+ */
 void *
-chunk_alloc(size_t size, bool zero)
+chunk_alloc(size_t size, bool *zero)
 {
 	void *ret;
@@ -45,8 +51,10 @@ chunk_alloc(size_t size, bool zero)
 			goto RETURN;
 #endif
 		ret = chunk_alloc_mmap(size);
-		if (ret != NULL)
+		if (ret != NULL) {
+			*zero = true;
 			goto RETURN;
+		}
 #ifdef JEMALLOC_SWAP
 	}
 #endif


@@ -25,13 +25,13 @@ static extent_tree_t dss_chunks_ad;
 
 /******************************************************************************/
 /* Function prototypes for non-inline static functions. */
 
-static void *chunk_recycle_dss(size_t size, bool zero);
+static void *chunk_recycle_dss(size_t size, bool *zero);
 static extent_node_t *chunk_dealloc_dss_record(void *chunk, size_t size);
 
 /******************************************************************************/
 
 static void *
-chunk_recycle_dss(size_t size, bool zero)
+chunk_recycle_dss(size_t size, bool *zero)
 {
 	extent_node_t *node, key;
@@ -60,7 +60,7 @@ chunk_recycle_dss(size_t size, bool zero)
 		}
 		malloc_mutex_unlock(&dss_mtx);
 
-		if (zero)
+		if (*zero)
 			memset(ret, 0, size);
 		return (ret);
 	}
@@ -70,7 +70,7 @@ chunk_recycle_dss(size_t size, bool zero)
 }
 
 void *
-chunk_alloc_dss(size_t size, bool zero)
+chunk_alloc_dss(size_t size, bool *zero)
 {
 	void *ret;
@@ -116,6 +116,7 @@ chunk_alloc_dss(size_t size, bool zero)
 			/* Success. */
 			dss_max = (void *)((intptr_t)dss_prev + incr);
 			malloc_mutex_unlock(&dss_mtx);
+			*zero = true;
 			return (ret);
 		}
 	} while (dss_prev != (void *)-1);


@@ -31,13 +31,13 @@ static extent_tree_t swap_chunks_ad;
 
 /******************************************************************************/
 /* Function prototypes for non-inline static functions. */
 
-static void *chunk_recycle_swap(size_t size, bool zero);
+static void *chunk_recycle_swap(size_t size, bool *zero);
 static extent_node_t *chunk_dealloc_swap_record(void *chunk, size_t size);
 
 /******************************************************************************/
 
 static void *
-chunk_recycle_swap(size_t size, bool zero)
+chunk_recycle_swap(size_t size, bool *zero)
 {
 	extent_node_t *node, key;
@@ -69,7 +69,7 @@ chunk_recycle_swap(size_t size, bool zero)
 #endif
 		malloc_mutex_unlock(&swap_mtx);
 
-		if (zero)
+		if (*zero)
 			memset(ret, 0, size);
 		return (ret);
 	}
@@ -79,7 +79,7 @@ chunk_recycle_swap(size_t size, bool zero)
 }
 
 void *
-chunk_alloc_swap(size_t size, bool zero)
+chunk_alloc_swap(size_t size, bool *zero)
 {
 	void *ret;
@@ -98,7 +98,9 @@ chunk_alloc_swap(size_t size, bool zero)
 #endif
 		malloc_mutex_unlock(&swap_mtx);
 
-		if (zero && swap_prezeroed == false)
+		if (swap_prezeroed)
+			*zero = true;
+		else if (*zero)
 			memset(ret, 0, size);
 	} else {
 		malloc_mutex_unlock(&swap_mtx);


@@ -37,7 +37,7 @@ huge_malloc(size_t size, bool zero)
 	if (node == NULL)
 		return (NULL);
 
-	ret = chunk_alloc(csize, zero);
+	ret = chunk_alloc(csize, &zero);
 	if (ret == NULL) {
 		base_node_dealloc(node);
 		return (NULL);
@@ -74,6 +74,7 @@ huge_palloc(size_t alignment, size_t size)
 	void *ret;
 	size_t alloc_size, chunk_size, offset;
 	extent_node_t *node;
+	bool zero;
 
 	/*
 	 * This allocation requires alignment that is even larger than chunk
@@ -97,7 +98,8 @@ huge_palloc(size_t alignment, size_t size)
 	if (node == NULL)
 		return (NULL);
 
-	ret = chunk_alloc(alloc_size, false);
+	zero = false;
+	ret = chunk_alloc(alloc_size, &zero);
 	if (ret == NULL) {
 		base_node_dealloc(node);
 		return (NULL);