teach the dss chunk allocator to handle new_addr
This provides in-place expansion of huge allocations when the end of the
allocation is at the end of the sbrk heap. There's already the ability to
extend in-place via recycled chunks but this handles the initial growth of
the heap via repeated vector / string reallocations.

A possible future extension could allow realloc to go from the following:

    | huge allocation | recycled chunks |
                                        ^ dss_end

To a larger allocation built from recycled *and* new chunks:

    | huge allocation                   |
                                        ^ dss_end

Doing that would involve teaching the chunk recycling code to request new
chunks to satisfy the request. The chunk_dss code wouldn't require any
further changes.

    #include <stdlib.h>

    int main(void) {
        size_t chunk = 4 * 1024 * 1024;
        void *ptr = NULL;
        for (size_t size = chunk; size < chunk * 128; size *= 2) {
            ptr = realloc(ptr, size);
            if (!ptr) return 1;
        }
    }

Before:

    dss:secondary: 0.083s
    dss:primary: 0.083s

After:

    dss:secondary: 0.083s
    dss:primary: 0.003s

The dss heap grows in the upwards direction, so the oldest chunks are at
the low addresses and they are used first. Linux prefers to grow the mmap
heap downwards, so the trick will not work in the *current* mmap chunk
allocator as a huge allocation will only be at the top of the heap in a
contrived case.
This commit is contained in:
parent
a2136025c4
commit
879e76a9e5
@ -23,7 +23,8 @@ extern const char *dss_prec_names[];
|
|||||||
|
|
||||||
dss_prec_t chunk_dss_prec_get(void);
|
dss_prec_t chunk_dss_prec_get(void);
|
||||||
bool chunk_dss_prec_set(dss_prec_t dss_prec);
|
bool chunk_dss_prec_set(dss_prec_t dss_prec);
|
||||||
void *chunk_alloc_dss(size_t size, size_t alignment, bool *zero);
|
void *chunk_alloc_dss(void *new_addr, size_t size, size_t alignment,
|
||||||
|
bool *zero);
|
||||||
bool chunk_in_dss(void *chunk);
|
bool chunk_in_dss(void *chunk);
|
||||||
bool chunk_dss_boot(void);
|
bool chunk_dss_boot(void);
|
||||||
void chunk_dss_prefork(void);
|
void chunk_dss_prefork(void);
|
||||||
|
12
src/chunk.c
12
src/chunk.c
@ -154,16 +154,15 @@ chunk_alloc_core(void *new_addr, size_t size, size_t alignment, bool base,
|
|||||||
if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss,
|
if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss,
|
||||||
new_addr, size, alignment, base, zero)) != NULL)
|
new_addr, size, alignment, base, zero)) != NULL)
|
||||||
return (ret);
|
return (ret);
|
||||||
/* requesting an address only implemented for recycle */
|
if ((ret = chunk_alloc_dss(new_addr, size, alignment, zero))
|
||||||
if (new_addr == NULL
|
!= NULL)
|
||||||
&& (ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
|
|
||||||
return (ret);
|
return (ret);
|
||||||
}
|
}
|
||||||
/* mmap. */
|
/* mmap. */
|
||||||
if ((ret = chunk_recycle(&chunks_szad_mmap, &chunks_ad_mmap, new_addr,
|
if ((ret = chunk_recycle(&chunks_szad_mmap, &chunks_ad_mmap, new_addr,
|
||||||
size, alignment, base, zero)) != NULL)
|
size, alignment, base, zero)) != NULL)
|
||||||
return (ret);
|
return (ret);
|
||||||
/* requesting an address only implemented for recycle */
|
/* requesting an address not implemented for chunk_alloc_mmap */
|
||||||
if (new_addr == NULL &&
|
if (new_addr == NULL &&
|
||||||
(ret = chunk_alloc_mmap(size, alignment, zero)) != NULL)
|
(ret = chunk_alloc_mmap(size, alignment, zero)) != NULL)
|
||||||
return (ret);
|
return (ret);
|
||||||
@ -172,9 +171,8 @@ chunk_alloc_core(void *new_addr, size_t size, size_t alignment, bool base,
|
|||||||
if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss,
|
if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss,
|
||||||
new_addr, size, alignment, base, zero)) != NULL)
|
new_addr, size, alignment, base, zero)) != NULL)
|
||||||
return (ret);
|
return (ret);
|
||||||
/* requesting an address only implemented for recycle */
|
if ((ret = chunk_alloc_dss(new_addr, size, alignment, zero))
|
||||||
if (new_addr == NULL &&
|
!= NULL)
|
||||||
(ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
|
|
||||||
return (ret);
|
return (ret);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -66,7 +66,7 @@ chunk_dss_prec_set(dss_prec_t dss_prec)
|
|||||||
}
|
}
|
||||||
|
|
||||||
void *
|
void *
|
||||||
chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
|
chunk_alloc_dss(void *new_addr, size_t size, size_t alignment, bool *zero)
|
||||||
{
|
{
|
||||||
void *ret;
|
void *ret;
|
||||||
|
|
||||||
@ -93,8 +93,17 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
|
|||||||
* malloc.
|
* malloc.
|
||||||
*/
|
*/
|
||||||
do {
|
do {
|
||||||
|
/* Avoid an unnecessary system call. */
|
||||||
|
if (new_addr != NULL && dss_max != new_addr)
|
||||||
|
break;
|
||||||
|
|
||||||
/* Get the current end of the DSS. */
|
/* Get the current end of the DSS. */
|
||||||
dss_max = chunk_dss_sbrk(0);
|
dss_max = chunk_dss_sbrk(0);
|
||||||
|
|
||||||
|
/* Make sure the earlier condition still holds. */
|
||||||
|
if (new_addr != NULL && dss_max != new_addr)
|
||||||
|
break;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Calculate how much padding is necessary to
|
* Calculate how much padding is necessary to
|
||||||
* chunk-align the end of the DSS.
|
* chunk-align the end of the DSS.
|
||||||
|
Loading…
Reference in New Issue
Block a user