Optimize malloc() and free() fast paths.

Embed the bin index for small page runs into the chunk page map, in order to omit [...] in the following dependent load sequence: ptr-->mapelm-->[run-->bin-->]bin_info

Move various non-critical code out of the inlined function chain into helper functions (tcache_event_hard(), arena_dalloc_small(), and locking).
commit 203484e2ea
parent fd97b1dfc7
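The core of the change is easiest to see as two load chains. The sketch below is a standalone, simplified mirror of the structures involved — the struct layouts and the stored run pointer are toy stand-ins, not the real jemalloc types — but the constants and the mask/shift decode match the diff. The old small-object path had to chase run and bin pointers just to learn the size class; the new path decodes the bin index directly from the page-map word.

/*
 * Illustrative only: toy mirrors of the jemalloc structures, showing why
 * embedding binind in the map word shortens the dependent load chain.
 */
#include <assert.h>
#include <stddef.h>

#define LG_PAGE                 12
#define CHUNK_MAP_BININD_SHIFT  4
#define CHUNK_MAP_BININD_MASK   ((size_t)0xff0U)

typedef struct { size_t reg_size; } bin_info_t;       /* stand-in for arena_bin_info_t */
typedef struct { bin_info_t *info; } bin_t;           /* stand-in for arena_bin_t */
typedef struct { bin_t *bin; } run_t;                 /* stand-in for arena_run_t */
typedef struct { size_t bits; run_t *run; } mapelm_t; /* stand-in for arena_chunk_map_t */

/* Old fast path: ptr --> mapelm --> run --> bin --> bin_info. */
static size_t
size_class_old(const mapelm_t *mapelm)
{
	run_t *run = mapelm->run;          /* dependent load */
	bin_t *bin = run->bin;             /* dependent load */
	bin_info_t *bin_info = bin->info;  /* dependent load */
	return (bin_info->reg_size);
}

/* New fast path: ptr --> mapelm --> bin_info; the [run-->bin-->] loads are gone. */
static size_t
size_class_new(const mapelm_t *mapelm, const bin_info_t *bin_info_table)
{
	size_t binind = (mapelm->bits & CHUNK_MAP_BININD_MASK) >>
	    CHUNK_MAP_BININD_SHIFT;
	return (bin_info_table[binind].reg_size);
}

int
main(void)
{
	bin_info_t table[4] = {{8}, {16}, {32}, {48}};
	bin_t bin = {&table[2]};
	run_t run = {&bin};
	/* Map word for page 3 of a small run in bin 2, allocated flag in bit 0. */
	mapelm_t mapelm = {(3 << LG_PAGE) | (2 << CHUNK_MAP_BININD_SHIFT) | 0x1, &run};

	assert(size_class_old(&mapelm) == size_class_new(&mapelm, table));
	return (0);
}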
@@ -109,7 +109,8 @@ struct arena_chunk_map_s {
*
* p : run page offset
* s : run size
* c : (binind+1) for size class (used only if prof_promote is true)
* n : binind for size class; large objects set these to BININD_INVALID
* except for promoted allocations (see prof_promote)
* x : don't care
* - : 0
* + : 1
@@ -117,35 +118,38 @@ struct arena_chunk_map_s {
* [dula] : bit unset
*
* Unallocated (clean):
* ssssssss ssssssss ssss---- ----du-a
* xxxxxxxx xxxxxxxx xxxx---- -----Uxx
* ssssssss ssssssss ssss---- ----dU-a
* ssssssss ssssssss ssss1111 1111du-a
* xxxxxxxx xxxxxxxx xxxxxxxx xxxx-Uxx
* ssssssss ssssssss ssss1111 1111dU-a
*
* Unallocated (dirty):
* ssssssss ssssssss ssss---- ----D--a
* xxxxxxxx xxxxxxxx xxxx---- ----xxxx
* ssssssss ssssssss ssss---- ----D--a
* ssssssss ssssssss ssss1111 1111D--a
* xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
* ssssssss ssssssss ssss1111 1111D--a
*
* Small:
* pppppppp pppppppp pppp---- ----d--A
* pppppppp pppppppp pppp---- -------A
* pppppppp pppppppp pppp---- ----d--A
* pppppppp pppppppp ppppnnnn nnnnd--A
* pppppppp pppppppp ppppnnnn nnnn---A
* pppppppp pppppppp ppppnnnn nnnnd--A
*
* Large:
* ssssssss ssssssss ssss---- ----D-LA
* xxxxxxxx xxxxxxxx xxxx---- ----xxxx
* -------- -------- -------- ----D-LA
* ssssssss ssssssss ssss1111 1111D-LA
* xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
* -------- -------- ----1111 1111D-LA
*
* Large (sampled, size <= PAGE):
* ssssssss ssssssss sssscccc ccccD-LA
* ssssssss ssssssss ssssnnnn nnnnD-LA
*
* Large (not sampled, size == PAGE):
* ssssssss ssssssss ssss---- ----D-LA
* ssssssss ssssssss ssss1111 1111D-LA
*/
size_t bits;
#define CHUNK_MAP_CLASS_SHIFT 4
#define CHUNK_MAP_CLASS_MASK ((size_t)0xff0U)
#define CHUNK_MAP_FLAGS_MASK ((size_t)0xfU)
#define CHUNK_MAP_BININD_SHIFT 4
#define BININD_INVALID ((size_t)0xffU)
/* CHUNK_MAP_BININD_MASK == (BININD_INVALID << CHUNK_MAP_BININD_SHIFT) */
#define CHUNK_MAP_BININD_MASK ((size_t)0xff0U)
#define CHUNK_MAP_BININD_INVALID CHUNK_MAP_BININD_MASK
#define CHUNK_MAP_FLAGS_MASK ((size_t)0xcU)
#define CHUNK_MAP_DIRTY ((size_t)0x8U)
#define CHUNK_MAP_UNZEROED ((size_t)0x4U)
#define CHUNK_MAP_LARGE ((size_t)0x2U)
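As a concrete reading of the layout documented above, the sketch below packs and unpacks a small-run map word using the new BININD field. The constants are copied from the macros in this hunk; LG_PAGE == 12 (4 KiB pages) and CHUNK_MAP_ALLOCATED == 0x1 are assumptions taken from elsewhere in the header. This is a standalone illustration, not code from the tree.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define LG_PAGE                   12                 /* assumption: 4 KiB pages */
#define CHUNK_MAP_BININD_SHIFT    4
#define BININD_INVALID            ((size_t)0xffU)
#define CHUNK_MAP_BININD_MASK     ((size_t)0xff0U)
#define CHUNK_MAP_BININD_INVALID  CHUNK_MAP_BININD_MASK
#define CHUNK_MAP_FLAGS_MASK      ((size_t)0xcU)
#define CHUNK_MAP_DIRTY           ((size_t)0x8U)
#define CHUNK_MAP_UNZEROED        ((size_t)0x4U)
#define CHUNK_MAP_LARGE           ((size_t)0x2U)
#define CHUNK_MAP_ALLOCATED       ((size_t)0x1U)     /* defined just below this hunk */

/* "pppppppp pppppppp ppppnnnn nnnnd--A": small-run word = runind | binind | flags. */
static size_t
small_mapbits(size_t runind, size_t binind, size_t flags)
{
	assert(binind < BININD_INVALID);
	assert((flags & ~CHUNK_MAP_FLAGS_MASK) == 0);
	return ((runind << LG_PAGE) | (binind << CHUNK_MAP_BININD_SHIFT) |
	    flags | CHUNK_MAP_ALLOCATED);
}

int
main(void)
{
	size_t bits = small_mapbits(7, 23, CHUNK_MAP_DIRTY);

	/* The free() fast path can now recover binind with one mask and shift. */
	assert(((bits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT) == 23);
	assert((bits >> LG_PAGE) == 7);          /* run page offset */
	assert((bits & CHUNK_MAP_LARGE) == 0);   /* small, not large */
	assert((bits & CHUNK_MAP_ALLOCATED) != 0);

	/* Large and unallocated words store BININD_INVALID in the same field. */
	printf("binind field of an invalid word: 0x%zx\n",
	    CHUNK_MAP_BININD_INVALID >> CHUNK_MAP_BININD_SHIFT);
	return (0);
}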
@@ -409,8 +413,14 @@ void *arena_malloc_small(arena_t *arena, size_t size, bool zero);
void *arena_malloc_large(arena_t *arena, size_t size, bool zero);
void *arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero);
void arena_prof_promoted(const void *ptr, size_t size);
void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
void arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
arena_chunk_map_t *mapelm);
void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t pageind, arena_chunk_map_t *mapelm);
void arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t pageind);
void arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk,
void *ptr);
void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr);
void arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
arena_stats_t *astats, malloc_bin_stats_t *bstats,
@@ -430,6 +440,30 @@ void arena_postfork_child(arena_t *arena);
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
arena_chunk_map_t *arena_mapp_get(arena_chunk_t *chunk, size_t pageind);
size_t *arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_unallocated_size_get(arena_chunk_t *chunk,
size_t pageind);
size_t arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind);
void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind,
size_t size, size_t flags);
void arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
size_t size);
void arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind,
size_t size, size_t flags);
void arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
size_t binind);
void arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind,
size_t runind, size_t binind, size_t flags);
void arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind,
size_t unzeroed);
size_t arena_ptr_binind(const void *ptr, size_t mapbits);
size_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
unsigned arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
const void *ptr);
@@ -442,6 +476,203 @@ void arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr,
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
# ifdef JEMALLOC_ARENA_INLINE_A
JEMALLOC_INLINE arena_chunk_map_t *
arena_mapp_get(arena_chunk_t *chunk, size_t pageind)
{

assert(pageind >= map_bias);
assert(pageind < chunk_npages);

return (&chunk->map[pageind-map_bias]);
}

JEMALLOC_INLINE size_t *
arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind)
{

return (&arena_mapp_get(chunk, pageind)->bits);
}

JEMALLOC_INLINE size_t
arena_mapbits_get(arena_chunk_t *chunk, size_t pageind)
{

return (*arena_mapbitsp_get(chunk, pageind));
}

JEMALLOC_INLINE size_t
arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;

mapbits = arena_mapbits_get(chunk, pageind);
assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
return (mapbits & ~PAGE_MASK);
}

JEMALLOC_INLINE size_t
arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;

mapbits = arena_mapbits_get(chunk, pageind);
assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
(CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED));
return (mapbits & ~PAGE_MASK);
}

JEMALLOC_INLINE size_t
arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;

mapbits = arena_mapbits_get(chunk, pageind);
assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
CHUNK_MAP_ALLOCATED);
return (mapbits >> LG_PAGE);
}

JEMALLOC_INLINE size_t
arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;

mapbits = arena_mapbits_get(chunk, pageind);
return (mapbits & CHUNK_MAP_DIRTY);
}

JEMALLOC_INLINE size_t
arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;

mapbits = arena_mapbits_get(chunk, pageind);
return (mapbits & CHUNK_MAP_UNZEROED);
}

JEMALLOC_INLINE size_t
arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;

mapbits = arena_mapbits_get(chunk, pageind);
return (mapbits & CHUNK_MAP_LARGE);
}

JEMALLOC_INLINE size_t
arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;

mapbits = arena_mapbits_get(chunk, pageind);
return (mapbits & CHUNK_MAP_ALLOCATED);
}

JEMALLOC_INLINE void
arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
size_t flags)
{
size_t *mapbitsp;

mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert((size & PAGE_MASK) == 0);
assert((flags & ~CHUNK_MAP_FLAGS_MASK) == 0);
*mapbitsp = size | CHUNK_MAP_BININD_INVALID | flags;
}

JEMALLOC_INLINE void
arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
size_t size)
{
size_t *mapbitsp;

mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert((size & PAGE_MASK) == 0);
assert((*mapbitsp & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
*mapbitsp = size | (*mapbitsp & PAGE_MASK);
}

JEMALLOC_INLINE void
arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
size_t flags)
{
size_t *mapbitsp;

mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert((size & PAGE_MASK) == 0);
assert((flags & ~CHUNK_MAP_FLAGS_MASK) == 0);
*mapbitsp = size | CHUNK_MAP_BININD_INVALID | flags | CHUNK_MAP_LARGE |
CHUNK_MAP_ALLOCATED;
}

JEMALLOC_INLINE void
arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
size_t binind)
{
size_t *mapbitsp;

assert(binind <= BININD_INVALID);
mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert(arena_mapbits_large_size_get(chunk, pageind) == PAGE);
*mapbitsp = (*mapbitsp & ~CHUNK_MAP_BININD_MASK) | (binind <<
CHUNK_MAP_BININD_SHIFT);
}

JEMALLOC_INLINE void
arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
size_t binind, size_t flags)
{
size_t *mapbitsp;

assert(binind < BININD_INVALID);
mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert(pageind - runind >= map_bias);
assert((flags & ~CHUNK_MAP_FLAGS_MASK) == 0);
*mapbitsp = (runind << LG_PAGE) | (binind << CHUNK_MAP_BININD_SHIFT) |
flags | CHUNK_MAP_ALLOCATED;
}

JEMALLOC_INLINE void
arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind,
size_t unzeroed)
{
size_t *mapbitsp;

mapbitsp = arena_mapbitsp_get(chunk, pageind);
*mapbitsp = (*mapbitsp & ~CHUNK_MAP_UNZEROED) | unzeroed;
}

JEMALLOC_INLINE size_t
arena_ptr_binind(const void *ptr, size_t mapbits)
{
size_t binind;

binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;

if (config_debug) {
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
arena_t *arena = chunk->arena;
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
size_t actual_mapbits = arena_mapbits_get(chunk, pageind);
arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
(uintptr_t)((pageind - (actual_mapbits >> LG_PAGE)) <<
LG_PAGE));
arena_bin_t *bin = run->bin;
size_t actual_binind = bin - arena->bins;
arena_bin_info_t *bin_info = &arena_bin_info[actual_binind];

assert(mapbits == actual_mapbits);
assert(binind == actual_binind);
assert(((uintptr_t)ptr - ((uintptr_t)run +
(uintptr_t)bin_info->reg0_offset)) % bin_info->reg_interval
== 0);
}

return (binind);
}
# endif /* JEMALLOC_ARENA_INLINE_A */

# ifdef JEMALLOC_ARENA_INLINE_B
JEMALLOC_INLINE size_t
arena_bin_index(arena_t *arena, arena_bin_t *bin)
{
@@ -535,7 +766,7 @@ arena_prof_ctx_get(const void *ptr)

chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
mapbits = chunk->map[pageind-map_bias].bits;
mapbits = arena_mapbits_get(chunk, pageind);
assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
if ((mapbits & CHUNK_MAP_LARGE) == 0) {
if (prof_promote)
@@ -544,7 +775,7 @@ arena_prof_ctx_get(const void *ptr)
arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
(uintptr_t)((pageind - (mapbits >> LG_PAGE)) <<
LG_PAGE));
size_t binind = arena_bin_index(chunk->arena, run->bin);
size_t binind = arena_ptr_binind(ptr, mapbits);
arena_bin_info_t *bin_info = &arena_bin_info[binind];
unsigned regind;

@@ -554,7 +785,7 @@ arena_prof_ctx_get(const void *ptr)
sizeof(prof_ctx_t *)));
}
} else
ret = chunk->map[pageind-map_bias].prof_ctx;
ret = arena_mapp_get(chunk, pageind)->prof_ctx;

return (ret);
}
@@ -571,19 +802,18 @@ arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)

chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
mapbits = chunk->map[pageind-map_bias].bits;
mapbits = arena_mapbits_get(chunk, pageind);
assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
if ((mapbits & CHUNK_MAP_LARGE) == 0) {
if (prof_promote == false) {
arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
(uintptr_t)((pageind - (mapbits >> LG_PAGE)) <<
LG_PAGE));
arena_bin_t *bin = run->bin;
size_t binind;
arena_bin_info_t *bin_info;
unsigned regind;

binind = arena_bin_index(chunk->arena, bin);
binind = arena_ptr_binind(ptr, mapbits);
bin_info = &arena_bin_info[binind];
regind = arena_run_regind(run, bin_info, ptr);

@@ -592,7 +822,7 @@ arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
} else
assert((uintptr_t)ctx == (uintptr_t)1U);
} else
chunk->map[pageind-map_bias].prof_ctx = ctx;
arena_mapp_get(chunk, pageind)->prof_ctx = ctx;
}

JEMALLOC_INLINE void *
@@ -638,26 +868,24 @@ arena_salloc(const void *ptr, bool demote)

chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
mapbits = chunk->map[pageind-map_bias].bits;
assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
mapbits = arena_mapbits_get(chunk, pageind);
if ((mapbits & CHUNK_MAP_LARGE) == 0) {
arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
(uintptr_t)((pageind - (mapbits >> LG_PAGE)) << LG_PAGE));
size_t binind = arena_bin_index(chunk->arena, run->bin);
arena_bin_info_t *bin_info = &arena_bin_info[binind];
assert(((uintptr_t)ptr - ((uintptr_t)run +
(uintptr_t)bin_info->reg0_offset)) % bin_info->reg_interval
== 0);
ret = bin_info->reg_size;
size_t binind = arena_ptr_binind(ptr, mapbits);
ret = arena_bin_info[binind].reg_size;
} else {
assert(((uintptr_t)ptr & PAGE_MASK) == 0);
ret = mapbits & ~PAGE_MASK;
ret = arena_mapbits_large_size_get(chunk, pageind);
if (config_prof && demote && prof_promote && ret == PAGE &&
(mapbits & CHUNK_MAP_CLASS_MASK) != 0) {
size_t binind = ((mapbits & CHUNK_MAP_CLASS_MASK) >>
CHUNK_MAP_CLASS_SHIFT) - 1;
(mapbits & CHUNK_MAP_BININD_MASK) !=
CHUNK_MAP_BININD_MASK) {
size_t binind = ((mapbits & CHUNK_MAP_BININD_MASK) >>
CHUNK_MAP_BININD_SHIFT);
assert(binind < NBINS);
ret = arena_bin_info[binind].reg_size;
} else {
assert(demote == false || (mapbits &
CHUNK_MAP_BININD_MASK) == CHUNK_MAP_BININD_MASK);
}
assert(ret != 0);
}
@@ -668,8 +896,7 @@ arena_salloc(const void *ptr, bool demote)
JEMALLOC_INLINE void
arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr, bool try_tcache)
{
size_t pageind;
arena_chunk_map_t *mapelm;
size_t pageind, mapbits;
tcache_t *tcache;

assert(arena != NULL);
@@ -678,47 +905,31 @@ arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr, bool try_tcache)
assert(CHUNK_ADDR2BASE(ptr) != ptr);

pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
mapelm = &chunk->map[pageind-map_bias];
assert((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0);
if ((mapelm->bits & CHUNK_MAP_LARGE) == 0) {
mapbits = arena_mapbits_get(chunk, pageind);
assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
if ((mapbits & CHUNK_MAP_LARGE) == 0) {
/* Small allocation. */
if (try_tcache && (tcache = tcache_get(false)) != NULL)
tcache_dalloc_small(tcache, ptr);
else {
arena_run_t *run;
arena_bin_t *bin;
if (try_tcache && (tcache = tcache_get(false)) != NULL) {
size_t binind;

run = (arena_run_t *)((uintptr_t)chunk +
(uintptr_t)((pageind - (mapelm->bits >> LG_PAGE)) <<
LG_PAGE));
bin = run->bin;
if (config_debug) {
size_t binind = arena_bin_index(arena, bin);
UNUSED arena_bin_info_t *bin_info =
&arena_bin_info[binind];
assert(((uintptr_t)ptr - ((uintptr_t)run +
(uintptr_t)bin_info->reg0_offset)) %
bin_info->reg_interval == 0);
}
malloc_mutex_lock(&bin->lock);
arena_dalloc_bin(arena, chunk, ptr, mapelm);
malloc_mutex_unlock(&bin->lock);
}
binind = arena_ptr_binind(ptr, mapbits);
assert(binind < NBINS);
tcache_dalloc_small(tcache, ptr, binind);
} else
arena_dalloc_small(arena, chunk, ptr, pageind);
} else {
size_t size = mapelm->bits & ~PAGE_MASK;
size_t size = arena_mapbits_large_size_get(chunk, pageind);

assert(((uintptr_t)ptr & PAGE_MASK) == 0);

if (try_tcache && size <= tcache_maxclass && (tcache =
tcache_get(false)) != NULL) {
tcache_dalloc_large(tcache, ptr, size);
} else {
malloc_mutex_lock(&arena->lock);
} else
arena_dalloc_large(arena, chunk, ptr);
malloc_mutex_unlock(&arena->lock);
}
}
}
# endif /* JEMALLOC_ARENA_INLINE_B */
#endif

#endif /* JEMALLOC_H_INLINES */
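Beyond the binind embedding, the arena_dalloc() hunk above also moves lock management out of the inlined path: the inline code now either pushes to the tcache or makes a single call into arena_dalloc_small()/arena_dalloc_large(), while the new *_locked prototypes serve callers that already hold the relevant lock. The sketch below illustrates that locked/unlocked-variant pattern with generic names and a pthread mutex; it is not jemalloc code.

/*
 * Illustrative sketch of the "_locked variant" pattern: the out-of-line
 * helper owns the locking, so the inlined fast path contains no mutex code,
 * while internal callers that already hold the lock use the *_locked flavor.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t bin_lock = PTHREAD_MUTEX_INITIALIZER;
static long bin_nfree;  /* toy stand-in for per-bin state */

/* Caller must hold bin_lock (analogous to arena_dalloc_bin_locked()). */
static void
dalloc_bin_locked(void *ptr)
{
	(void)ptr;
	bin_nfree++;
}

/* Takes the lock itself (analogous to arena_dalloc_small()/arena_dalloc_bin()). */
static void
dalloc_bin(void *ptr)
{
	pthread_mutex_lock(&bin_lock);
	dalloc_bin_locked(ptr);
	pthread_mutex_unlock(&bin_lock);
}

/* The inlined fast path stays lock-free; the slow path is one helper call. */
static inline void
dalloc_fast(void *ptr, int cached_in_tcache)
{
	if (cached_in_tcache) {
		/* tcache push: no lock needed at all */
		return;
	}
	dalloc_bin(ptr);
}

int
main(void)
{
	int x;

	dalloc_fast(&x, 0);
	dalloc_fast(&x, 1);
	printf("freed to bin: %ld\n", bin_nfree);
	return (0);
}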
@@ -685,8 +685,17 @@ choose_arena(arena_t *arena)

#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
/*
* Include arena.h twice in order to resolve circular dependencies with
* tcache.h.
*/
#define JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/tcache.h"
#define JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
@@ -7,11 +7,30 @@
#define arena_boot JEMALLOC_N(arena_boot)
#define arena_dalloc JEMALLOC_N(arena_dalloc)
#define arena_dalloc_bin JEMALLOC_N(arena_dalloc_bin)
#define arena_dalloc_bin_locked JEMALLOC_N(arena_dalloc_bin_locked)
#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
#define arena_dalloc_large JEMALLOC_N(arena_dalloc_large)
#define arena_dalloc_large_locked JEMALLOC_N(arena_dalloc_large_locked)
#define arena_dalloc_small JEMALLOC_N(arena_dalloc_small)
#define arena_malloc JEMALLOC_N(arena_malloc)
#define arena_malloc_large JEMALLOC_N(arena_malloc_large)
#define arena_malloc_small JEMALLOC_N(arena_malloc_small)
#define arena_mapbitsp_get JEMALLOC_N(arena_mapbitsp_get)
#define arena_mapbits_get JEMALLOC_N(arena_mapbits_get)
#define arena_mapbits_unallocated_size_get JEMALLOC_N(arena_mapbits_unallocated_size_get)
#define arena_mapbits_large_size_get JEMALLOC_N(arena_mapbits_large_size_get)
#define arena_mapbits_small_runind_get JEMALLOC_N(arena_mapbits_small_runind_get)
#define arena_mapbits_dirty_get JEMALLOC_N(arena_mapbits_dirty_get)
#define arena_mapbits_unzeroed_get JEMALLOC_N(arena_mapbits_unzeroed_get)
#define arena_mapbits_large_get JEMALLOC_N(arena_mapbits_large_get)
#define arena_mapbits_allocated_get JEMALLOC_N(arena_mapbits_allocated_get)
#define arena_mapbits_unallocated_set JEMALLOC_N(arena_mapbits_unallocated_set)
#define arena_mapbits_unallocated_size_set JEMALLOC_N(arena_mapbits_unallocated_size_set)
#define arena_mapbits_large_set JEMALLOC_N(arena_mapbits_large_set)
#define arena_mapbits_large_binind_set JEMALLOC_N(arena_mapbits_large_binind_set)
#define arena_mapbits_small_set JEMALLOC_N(arena_mapbits_small_set)
#define arena_mapbits_unzeroed_set JEMALLOC_N(arena_mapbits_unzeroed_set)
#define arena_mapp_get JEMALLOC_N(arena_mapp_get)
#define arena_maxclass JEMALLOC_N(arena_maxclass)
#define arena_new JEMALLOC_N(arena_new)
#define arena_palloc JEMALLOC_N(arena_palloc)
@@ -22,6 +41,7 @@
#define arena_prof_ctx_get JEMALLOC_N(arena_prof_ctx_get)
#define arena_prof_ctx_set JEMALLOC_N(arena_prof_ctx_set)
#define arena_prof_promoted JEMALLOC_N(arena_prof_promoted)
#define arena_ptr_binind JEMALLOC_N(arena_ptr_binind)
#define arena_purge_all JEMALLOC_N(arena_purge_all)
#define arena_ralloc JEMALLOC_N(arena_ralloc)
#define arena_ralloc_no_move JEMALLOC_N(arena_ralloc_no_move)
@@ -296,6 +316,7 @@
#define tcache_enabled_tsd_get JEMALLOC_N(tcache_enabled_tsd_get)
#define tcache_enabled_tsd_set JEMALLOC_N(tcache_enabled_tsd_set)
#define tcache_event JEMALLOC_N(tcache_event)
#define tcache_event_hard JEMALLOC_N(tcache_event_hard)
#define tcache_initialized JEMALLOC_N(tcache_initialized)
#define tcache_flush JEMALLOC_N(tcache_flush)
#define tcache_get JEMALLOC_N(tcache_get)
@@ -101,6 +101,7 @@ extern size_t nhbins;
extern size_t tcache_maxclass;

size_t tcache_salloc(const void *ptr);
void tcache_event_hard(tcache_t *tcache);
void *tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin,
size_t binind);
void tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
@@ -132,7 +133,7 @@ void tcache_enabled_set(bool enabled);
void *tcache_alloc_easy(tcache_bin_t *tbin);
void *tcache_alloc_small(tcache_t *tcache, size_t size, bool zero);
void *tcache_alloc_large(tcache_t *tcache, size_t size, bool zero);
void tcache_dalloc_small(tcache_t *tcache, void *ptr);
void tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind);
void tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size);
#endif
@@ -266,47 +267,8 @@ tcache_event(tcache_t *tcache)

tcache->ev_cnt++;
assert(tcache->ev_cnt <= TCACHE_GC_INCR);
if (tcache->ev_cnt == TCACHE_GC_INCR) {
size_t binind = tcache->next_gc_bin;
tcache_bin_t *tbin = &tcache->tbins[binind];
tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];

if (tbin->low_water > 0) {
/*
* Flush (ceiling) 3/4 of the objects below the low
* water mark.
*/
if (binind < NBINS) {
tcache_bin_flush_small(tbin, binind,
tbin->ncached - tbin->low_water +
(tbin->low_water >> 2), tcache);
} else {
tcache_bin_flush_large(tbin, binind,
tbin->ncached - tbin->low_water +
(tbin->low_water >> 2), tcache);
}
/*
* Reduce fill count by 2X. Limit lg_fill_div such that
* the fill count is always at least 1.
*/
if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1))
>= 1)
tbin->lg_fill_div++;
} else if (tbin->low_water < 0) {
/*
* Increase fill count by 2X. Make sure lg_fill_div
* stays greater than 0.
*/
if (tbin->lg_fill_div > 1)
tbin->lg_fill_div--;
}
tbin->low_water = tbin->ncached;

tcache->next_gc_bin++;
if (tcache->next_gc_bin == nhbins)
tcache->next_gc_bin = 0;
tcache->ev_cnt = 0;
}
if (tcache->ev_cnt == TCACHE_GC_INCR)
tcache_event_hard(tcache);
}

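The hunk above shrinks the inlined tcache_event() to a counter bump and a single compare, pushing the incremental-GC body into the new out-of-line tcache_event_hard(). The standalone sketch below shows the shape of that split with generic names and a toy "collect" body; the noinline attribute is GCC/Clang syntax and the constants are arbitrary, not jemalloc's.

/*
 * Sketch of the inline/out-of-line split applied to tcache_event(): the
 * per-call cost is one increment and one branch; the rare path lives in a
 * separate, non-inlined function so it does not bloat every call site.
 */
#include <stdio.h>

#define GC_INCR 64  /* stand-in for TCACHE_GC_INCR */

struct cache {
	unsigned ev_cnt;
	unsigned next_gc_bin;
	unsigned flushes;
};

/* Rare path, kept out of line (real code flushes below-low-water objects). */
static void __attribute__((noinline))
cache_event_hard(struct cache *c)
{
	c->flushes++;
	c->next_gc_bin++;
	c->ev_cnt = 0;
}

/* Hot path, inlined into every allocation/deallocation site. */
static inline void
cache_event(struct cache *c)
{
	c->ev_cnt++;
	if (c->ev_cnt == GC_INCR)
		cache_event_hard(c);
}

int
main(void)
{
	struct cache c = {0, 0, 0};

	for (int i = 0; i < 1000; i++)
		cache_event(&c);
	printf("hard-path invocations: %u\n", c.flushes);  /* 1000/64 == 15 */
	return (0);
}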
JEMALLOC_INLINE void *
@@ -390,13 +352,13 @@ tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
if (ret == NULL)
return (NULL);
} else {
if (config_prof) {
if (config_prof && prof_promote && size == PAGE) {
arena_chunk_t *chunk =
(arena_chunk_t *)CHUNK_ADDR2BASE(ret);
size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
LG_PAGE);
chunk->map[pageind-map_bias].bits &=
~CHUNK_MAP_CLASS_MASK;
arena_mapbits_large_binind_set(chunk, pageind,
BININD_INVALID);
}
if (zero == false) {
if (config_fill) {
@@ -421,30 +383,13 @@ tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
}

JEMALLOC_INLINE void
tcache_dalloc_small(tcache_t *tcache, void *ptr)
tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind)
{
arena_t *arena;
arena_chunk_t *chunk;
arena_run_t *run;
arena_bin_t *bin;
tcache_bin_t *tbin;
tcache_bin_info_t *tbin_info;
size_t pageind, binind;
arena_chunk_map_t *mapelm;

assert(tcache_salloc(ptr) <= SMALL_MAXCLASS);

chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
arena = chunk->arena;
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
mapelm = &chunk->map[pageind-map_bias];
run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
(mapelm->bits >> LG_PAGE)) << LG_PAGE));
bin = run->bin;
binind = ((uintptr_t)bin - (uintptr_t)&arena->bins) /
sizeof(arena_bin_t);
assert(binind < NBINS);

if (config_fill && opt_junk)
arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);
src/arena.c (483 changed lines)
@@ -41,11 +41,11 @@ const uint8_t small_size2bin[] = {
/* Function prototypes for non-inline static functions. */

static void arena_run_split(arena_t *arena, arena_run_t *run, size_t size,
bool large, bool zero);
bool large, size_t binind, bool zero);
static arena_chunk_t *arena_chunk_alloc(arena_t *arena);
static void arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
static arena_run_t *arena_run_alloc(arena_t *arena, size_t size, bool large,
bool zero);
size_t binind, bool zero);
static void arena_purge(arena_t *arena, bool all);
static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty);
static void arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk,
@@ -152,7 +152,9 @@ static inline void
arena_run_reg_dalloc(arena_run_t *run, void *ptr)
{
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
size_t binind = arena_bin_index(chunk->arena, run->bin);
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
size_t mapbits = arena_mapbits_get(chunk, pageind);
size_t binind = arena_ptr_binind(ptr, mapbits);
arena_bin_info_t *bin_info = &arena_bin_info[binind];
unsigned regind = arena_run_regind(run, bin_info, ptr);
bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
@@ -184,28 +186,31 @@ arena_chunk_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)

static void
arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
bool zero)
size_t binind, bool zero)
{
arena_chunk_t *chunk;
size_t run_ind, total_pages, need_pages, rem_pages, i;
size_t flag_dirty;
arena_avail_tree_t *runs_avail;

assert((large && binind == BININD_INVALID) || (large == false && binind
!= BININD_INVALID));

chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
flag_dirty = chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY;
flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
runs_avail = (flag_dirty != 0) ? &arena->runs_avail_dirty :
&arena->runs_avail_clean;
total_pages = (chunk->map[run_ind-map_bias].bits & ~PAGE_MASK) >>
total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
LG_PAGE;
assert((chunk->map[run_ind+total_pages-1-map_bias].bits &
CHUNK_MAP_DIRTY) == flag_dirty);
assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
flag_dirty);
need_pages = (size >> LG_PAGE);
assert(need_pages > 0);
assert(need_pages <= total_pages);
rem_pages = total_pages - need_pages;

arena_avail_tree_remove(runs_avail, &chunk->map[run_ind-map_bias]);
arena_avail_tree_remove(runs_avail, arena_mapp_get(chunk, run_ind));
if (config_stats) {
/*
* Update stats_cactive if nactive is crossing a chunk
@@ -222,22 +227,23 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
/* Keep track of trailing unused pages for later use. */
if (rem_pages > 0) {
if (flag_dirty != 0) {
chunk->map[run_ind+need_pages-map_bias].bits =
(rem_pages << LG_PAGE) | CHUNK_MAP_DIRTY;
chunk->map[run_ind+total_pages-1-map_bias].bits =
(rem_pages << LG_PAGE) | CHUNK_MAP_DIRTY;
arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
(rem_pages << LG_PAGE), CHUNK_MAP_DIRTY);
arena_mapbits_unallocated_set(chunk,
run_ind+total_pages-1, (rem_pages << LG_PAGE),
CHUNK_MAP_DIRTY);
} else {
chunk->map[run_ind+need_pages-map_bias].bits =
(rem_pages << LG_PAGE) |
(chunk->map[run_ind+need_pages-map_bias].bits &
CHUNK_MAP_UNZEROED);
chunk->map[run_ind+total_pages-1-map_bias].bits =
(rem_pages << LG_PAGE) |
(chunk->map[run_ind+total_pages-1-map_bias].bits &
CHUNK_MAP_UNZEROED);
arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
(rem_pages << LG_PAGE),
arena_mapbits_unzeroed_get(chunk,
run_ind+need_pages));
arena_mapbits_unallocated_set(chunk,
run_ind+total_pages-1, (rem_pages << LG_PAGE),
arena_mapbits_unzeroed_get(chunk,
run_ind+total_pages-1));
}
arena_avail_tree_insert(runs_avail,
&chunk->map[run_ind+need_pages-map_bias]);
arena_avail_tree_insert(runs_avail, arena_mapp_get(chunk,
run_ind+need_pages));
}

/* Update dirty page accounting. */
@@ -258,8 +264,8 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
* zeroed (i.e. never before touched).
*/
for (i = 0; i < need_pages; i++) {
if ((chunk->map[run_ind+i-map_bias].bits
& CHUNK_MAP_UNZEROED) != 0) {
if (arena_mapbits_unzeroed_get(chunk,
run_ind+i) != 0) {
VALGRIND_MAKE_MEM_UNDEFINED(
(void *)((uintptr_t)
chunk + ((run_ind+i) <<
@@ -293,10 +299,9 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
* Set the last element first, in case the run only contains one
* page (i.e. both statements set the same element).
*/
chunk->map[run_ind+need_pages-1-map_bias].bits =
CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED | flag_dirty;
chunk->map[run_ind-map_bias].bits = size | flag_dirty |
CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0,
flag_dirty);
arena_mapbits_large_set(chunk, run_ind, size, flag_dirty);
} else {
assert(zero == false);
/*
@@ -304,34 +309,30 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
* small run, so that arena_dalloc_bin_run() has the ability to
* conditionally trim clean pages.
*/
chunk->map[run_ind-map_bias].bits =
(chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED) |
CHUNK_MAP_ALLOCATED | flag_dirty;
arena_mapbits_small_set(chunk, run_ind, 0, binind,
arena_mapbits_unzeroed_get(chunk, run_ind) | flag_dirty);
/*
* The first page will always be dirtied during small run
* initialization, so a validation failure here would not
* actually cause an observable failure.
*/
if (config_debug && flag_dirty == 0 &&
(chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED)
== 0)
arena_mapbits_unzeroed_get(chunk, run_ind) == 0)
arena_chunk_validate_zeroed(chunk, run_ind);
for (i = 1; i < need_pages - 1; i++) {
chunk->map[run_ind+i-map_bias].bits = (i << LG_PAGE)
| (chunk->map[run_ind+i-map_bias].bits &
CHUNK_MAP_UNZEROED) | CHUNK_MAP_ALLOCATED;
arena_mapbits_small_set(chunk, run_ind+i, i,
binind, arena_mapbits_unzeroed_get(chunk,
run_ind+i));
if (config_debug && flag_dirty == 0 &&
(chunk->map[run_ind+i-map_bias].bits &
CHUNK_MAP_UNZEROED) == 0)
arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0)
arena_chunk_validate_zeroed(chunk, run_ind+i);
}
chunk->map[run_ind+need_pages-1-map_bias].bits = ((need_pages
- 1) << LG_PAGE) |
(chunk->map[run_ind+need_pages-1-map_bias].bits &
CHUNK_MAP_UNZEROED) | CHUNK_MAP_ALLOCATED | flag_dirty;
arena_mapbits_small_set(chunk, run_ind+need_pages-1,
need_pages-1, binind, arena_mapbits_unzeroed_get(chunk,
run_ind+need_pages-1) | flag_dirty);
if (config_debug && flag_dirty == 0 &&
(chunk->map[run_ind+need_pages-1-map_bias].bits &
CHUNK_MAP_UNZEROED) == 0) {
arena_mapbits_unzeroed_get(chunk, run_ind+need_pages-1) ==
0) {
arena_chunk_validate_zeroed(chunk,
run_ind+need_pages-1);
}
@@ -351,17 +352,18 @@ arena_chunk_alloc(arena_t *arena)
arena->spare = NULL;

/* Insert the run into the appropriate runs_avail_* tree. */
if ((chunk->map[0].bits & CHUNK_MAP_DIRTY) == 0)
if (arena_mapbits_dirty_get(chunk, map_bias) == 0)
runs_avail = &arena->runs_avail_clean;
else
runs_avail = &arena->runs_avail_dirty;
assert((chunk->map[0].bits & ~PAGE_MASK) == arena_maxclass);
assert((chunk->map[chunk_npages-1-map_bias].bits & ~PAGE_MASK)
== arena_maxclass);
assert((chunk->map[0].bits & CHUNK_MAP_DIRTY) ==
(chunk->map[chunk_npages-1-map_bias].bits &
CHUNK_MAP_DIRTY));
arena_avail_tree_insert(runs_avail, &chunk->map[0]);
assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
arena_maxclass);
assert(arena_mapbits_unallocated_size_get(chunk,
chunk_npages-1) == arena_maxclass);
assert(arena_mapbits_dirty_get(chunk, map_bias) ==
arena_mapbits_dirty_get(chunk, chunk_npages-1));
arena_avail_tree_insert(runs_avail, arena_mapp_get(chunk,
map_bias));
} else {
bool zero;
size_t unzeroed;
@@ -392,24 +394,27 @@ arena_chunk_alloc(arena_t *arena)
* chunk.
*/
unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED;
chunk->map[0].bits = arena_maxclass | unzeroed;
arena_mapbits_unallocated_set(chunk, map_bias, arena_maxclass,
unzeroed);
/*
* There is no need to initialize the internal page map entries
* unless the chunk is not zeroed.
*/
if (zero == false) {
for (i = map_bias+1; i < chunk_npages-1; i++)
chunk->map[i-map_bias].bits = unzeroed;
arena_mapbits_unzeroed_set(chunk, i, unzeroed);
} else if (config_debug) {
for (i = map_bias+1; i < chunk_npages-1; i++)
assert(chunk->map[i-map_bias].bits == unzeroed);
for (i = map_bias+1; i < chunk_npages-1; i++) {
assert(arena_mapbits_unzeroed_get(chunk, i) ==
unzeroed);
}
chunk->map[chunk_npages-1-map_bias].bits = arena_maxclass |
unzeroed;
}
arena_mapbits_unallocated_set(chunk, chunk_npages-1,
arena_maxclass, unzeroed);

/* Insert the run into the runs_avail_clean tree. */
arena_avail_tree_insert(&arena->runs_avail_clean,
&chunk->map[0]);
arena_mapp_get(chunk, map_bias));
}

return (chunk);
@@ -424,11 +429,11 @@ arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
* Remove run from the appropriate runs_avail_* tree, so that the arena
* does not use it.
*/
if ((chunk->map[0].bits & CHUNK_MAP_DIRTY) == 0)
if (arena_mapbits_dirty_get(chunk, map_bias) == 0)
runs_avail = &arena->runs_avail_clean;
else
runs_avail = &arena->runs_avail_dirty;
arena_avail_tree_remove(runs_avail, &chunk->map[0]);
arena_avail_tree_remove(runs_avail, arena_mapp_get(chunk, map_bias));

if (arena->spare != NULL) {
arena_chunk_t *spare = arena->spare;
@@ -449,7 +454,8 @@ arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
}

static arena_run_t *
arena_run_alloc(arena_t *arena, size_t size, bool large, bool zero)
arena_run_alloc(arena_t *arena, size_t size, bool large, size_t binind,
bool zero)
{
arena_chunk_t *chunk;
arena_run_t *run;
@@ -457,6 +463,8 @@ arena_run_alloc(arena_t *arena, size_t size, bool large, bool zero)

assert(size <= arena_maxclass);
assert((size & PAGE_MASK) == 0);
assert((large && binind == BININD_INVALID) || (large == false && binind
!= BININD_INVALID));

/* Search the arena's chunks for the lowest best fit. */
key.bits = size | CHUNK_MAP_KEY;
@@ -469,7 +477,7 @@ arena_run_alloc(arena_t *arena, size_t size, bool large, bool zero)

run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
LG_PAGE));
arena_run_split(arena, run, size, large, zero);
arena_run_split(arena, run, size, large, binind, zero);
return (run);
}
mapelm = arena_avail_tree_nsearch(&arena->runs_avail_clean, &key);
@@ -481,7 +489,7 @@ arena_run_alloc(arena_t *arena, size_t size, bool large, bool zero)

run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
LG_PAGE));
arena_run_split(arena, run, size, large, zero);
arena_run_split(arena, run, size, large, binind, zero);
return (run);
}

@@ -491,7 +499,7 @@ arena_run_alloc(arena_t *arena, size_t size, bool large, bool zero)
chunk = arena_chunk_alloc(arena);
if (chunk != NULL) {
run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE));
arena_run_split(arena, run, size, large, zero);
arena_run_split(arena, run, size, large, binind, zero);
return (run);
}

@@ -509,7 +517,7 @@ arena_run_alloc(arena_t *arena, size_t size, bool large, bool zero)

run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
LG_PAGE));
arena_run_split(arena, run, size, large, zero);
arena_run_split(arena, run, size, large, binind, zero);
return (run);
}
mapelm = arena_avail_tree_nsearch(&arena->runs_avail_clean, &key);
@@ -521,7 +529,7 @@ arena_run_alloc(arena_t *arena, size_t size, bool large, bool zero)

run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
LG_PAGE));
arena_run_split(arena, run, size, large, zero);
arena_run_split(arena, run, size, large, binind, zero);
return (run);
}

@@ -579,40 +587,38 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
* run.
*/
if (chunk == arena->spare) {
assert((chunk->map[0].bits & CHUNK_MAP_DIRTY) != 0);
assert(arena_mapbits_dirty_get(chunk, map_bias) != 0);
arena_chunk_alloc(arena);
}

/* Temporarily allocate all free dirty runs within chunk. */
for (pageind = map_bias; pageind < chunk_npages;) {
mapelm = &chunk->map[pageind-map_bias];
if ((mapelm->bits & CHUNK_MAP_ALLOCATED) == 0) {
mapelm = arena_mapp_get(chunk, pageind);
if (arena_mapbits_allocated_get(chunk, pageind) == 0) {
size_t npages;

npages = mapelm->bits >> LG_PAGE;
npages = arena_mapbits_unallocated_size_get(chunk,
pageind) >> LG_PAGE;
assert(pageind + npages <= chunk_npages);
if (mapelm->bits & CHUNK_MAP_DIRTY) {
if (arena_mapbits_dirty_get(chunk, pageind)) {
size_t i;

arena_avail_tree_remove(
&arena->runs_avail_dirty, mapelm);

mapelm->bits = (npages << LG_PAGE) |
flag_unzeroed | CHUNK_MAP_LARGE |
CHUNK_MAP_ALLOCATED;
arena_mapbits_large_set(chunk, pageind,
(npages << LG_PAGE), flag_unzeroed);
/*
* Update internal elements in the page map, so
* that CHUNK_MAP_UNZEROED is properly set.
*/
for (i = 1; i < npages - 1; i++) {
chunk->map[pageind+i-map_bias].bits =
flag_unzeroed;
arena_mapbits_unzeroed_set(chunk,
pageind+i, flag_unzeroed);
}
if (npages > 1) {
chunk->map[
pageind+npages-1-map_bias].bits =
flag_unzeroed | CHUNK_MAP_LARGE |
CHUNK_MAP_ALLOCATED;
arena_mapbits_large_set(chunk,
pageind+npages-1, 0, flag_unzeroed);
}

if (config_stats) {
@@ -637,15 +643,17 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
pageind += npages;
} else {
/* Skip allocated run. */
if (mapelm->bits & CHUNK_MAP_LARGE)
pageind += mapelm->bits >> LG_PAGE;
if (arena_mapbits_large_get(chunk, pageind))
pageind += arena_mapbits_large_size_get(chunk,
pageind) >> LG_PAGE;
else {
size_t binind;
arena_bin_info_t *bin_info;
arena_run_t *run = (arena_run_t *)((uintptr_t)
chunk + (uintptr_t)(pageind << LG_PAGE));

assert((mapelm->bits >> LG_PAGE) == 0);
assert(arena_mapbits_small_runind_get(chunk,
pageind) == 0);
binind = arena_bin_index(arena, run->bin);
bin_info = &arena_bin_info[binind];
pageind += bin_info->run_size >> LG_PAGE;
@@ -669,7 +677,8 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
ql_foreach(mapelm, &mapelms, u.ql_link) {
size_t pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
sizeof(arena_chunk_map_t)) + map_bias;
size_t npages = mapelm->bits >> LG_PAGE;
size_t npages = arena_mapbits_large_size_get(chunk, pageind) >>
LG_PAGE;

assert(pageind + npages <= chunk_npages);
assert(ndirty >= npages);
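The arena.c hunks above thread a binind argument from arena_run_alloc() into arena_run_split(), so every page of a small run has its bin index written into the map word at split time rather than being recovered later via run->bin. The standalone sketch below mirrors that initialization loop over a plain array; LG_PAGE and the flag values are assumptions copied from the header diff, and the unzeroed flag handling is omitted.

/*
 * Standalone sketch of small-run page-map initialization at split time,
 * mirroring the arena_mapbits_small_set() calls in arena_run_split().
 */
#include <assert.h>
#include <stddef.h>

#define LG_PAGE                  12
#define CHUNK_MAP_BININD_SHIFT   4
#define CHUNK_MAP_BININD_MASK    ((size_t)0xff0U)
#define CHUNK_MAP_DIRTY          ((size_t)0x8U)
#define CHUNK_MAP_ALLOCATED      ((size_t)0x1U)

static size_t
small_mapbits(size_t runind, size_t binind, size_t flags)
{
	return ((runind << LG_PAGE) | (binind << CHUNK_MAP_BININD_SHIFT) |
	    flags | CHUNK_MAP_ALLOCATED);
}

/* Fill map[run_ind .. run_ind+need_pages) for a small run of bin `binind`. */
static void
split_small_run(size_t *map, size_t run_ind, size_t need_pages, size_t binind,
    size_t flag_dirty)
{
	/* First page carries the dirty flag, as in arena_run_split(). */
	map[run_ind] = small_mapbits(0, binind, flag_dirty);
	for (size_t i = 1; i < need_pages - 1; i++)
		map[run_ind + i] = small_mapbits(i, binind, 0);
	if (need_pages > 1) {
		map[run_ind + need_pages - 1] =
		    small_mapbits(need_pages - 1, binind, flag_dirty);
	}
}

int
main(void)
{
	size_t map[16] = {0};

	split_small_run(map, 4, 3, 7, CHUNK_MAP_DIRTY);
	/* Any page of the run can now report its bin with one mask+shift ... */
	assert(((map[5] & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT) == 7);
	/* ... and its offset back to the run head with one shift. */
	assert((map[6] >> LG_PAGE) == 2);
	return (0);
}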
@ -806,15 +815,11 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
|
||||
run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
|
||||
assert(run_ind >= map_bias);
|
||||
assert(run_ind < chunk_npages);
|
||||
if ((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_LARGE) != 0) {
|
||||
size = chunk->map[run_ind-map_bias].bits & ~PAGE_MASK;
|
||||
if (arena_mapbits_large_get(chunk, run_ind) != 0) {
|
||||
size = arena_mapbits_large_size_get(chunk, run_ind);
|
||||
assert(size == PAGE ||
|
||||
(chunk->map[run_ind+(size>>LG_PAGE)-1-map_bias].bits &
|
||||
~PAGE_MASK) == 0);
|
||||
assert((chunk->map[run_ind+(size>>LG_PAGE)-1-map_bias].bits &
|
||||
CHUNK_MAP_LARGE) != 0);
|
||||
assert((chunk->map[run_ind+(size>>LG_PAGE)-1-map_bias].bits &
|
||||
CHUNK_MAP_ALLOCATED) != 0);
|
||||
arena_mapbits_large_size_get(chunk,
|
||||
run_ind+(size>>LG_PAGE)-1) == 0);
|
||||
} else {
|
||||
size_t binind = arena_bin_index(arena, run->bin);
|
||||
arena_bin_info_t *bin_info = &arena_bin_info[binind];
|
||||
@ -837,7 +842,7 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
|
||||
* The run is dirty if the caller claims to have dirtied it, as well as
|
||||
* if it was already dirty before being allocated.
|
||||
*/
|
||||
if ((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY) != 0)
|
||||
if (arena_mapbits_dirty_get(chunk, run_ind) != 0)
|
||||
dirty = true;
|
||||
flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
|
||||
runs_avail = dirty ? &arena->runs_avail_dirty :
|
||||
@ -845,58 +850,52 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
|
||||
|
||||
/* Mark pages as unallocated in the chunk map. */
|
||||
if (dirty) {
|
||||
chunk->map[run_ind-map_bias].bits = size | CHUNK_MAP_DIRTY;
|
||||
chunk->map[run_ind+run_pages-1-map_bias].bits = size |
|
||||
CHUNK_MAP_DIRTY;
|
||||
arena_mapbits_unallocated_set(chunk, run_ind, size,
|
||||
CHUNK_MAP_DIRTY);
|
||||
arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
|
||||
CHUNK_MAP_DIRTY);
|
||||
|
||||
chunk->ndirty += run_pages;
|
||||
arena->ndirty += run_pages;
|
||||
} else {
|
||||
chunk->map[run_ind-map_bias].bits = size |
|
||||
(chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED);
|
||||
chunk->map[run_ind+run_pages-1-map_bias].bits = size |
|
||||
(chunk->map[run_ind+run_pages-1-map_bias].bits &
|
||||
CHUNK_MAP_UNZEROED);
|
||||
arena_mapbits_unallocated_set(chunk, run_ind, size,
|
||||
arena_mapbits_unzeroed_get(chunk, run_ind));
|
||||
arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
|
||||
arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
|
||||
}
|
||||
|
||||
/* Try to coalesce forward. */
|
||||
if (run_ind + run_pages < chunk_npages &&
|
||||
(chunk->map[run_ind+run_pages-map_bias].bits & CHUNK_MAP_ALLOCATED)
|
||||
== 0 && (chunk->map[run_ind+run_pages-map_bias].bits &
|
||||
CHUNK_MAP_DIRTY) == flag_dirty) {
|
||||
size_t nrun_size = chunk->map[run_ind+run_pages-map_bias].bits &
|
||||
~PAGE_MASK;
|
||||
arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
|
||||
arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty) {
|
||||
size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
|
||||
run_ind+run_pages);
|
||||
size_t nrun_pages = nrun_size >> LG_PAGE;
|
||||
|
||||
/*
|
||||
* Remove successor from runs_avail; the coalesced run is
|
||||
* inserted later.
|
||||
*/
|
||||
assert((chunk->map[run_ind+run_pages+nrun_pages-1-map_bias].bits
|
||||
& ~PAGE_MASK) == nrun_size);
|
||||
assert((chunk->map[run_ind+run_pages+nrun_pages-1-map_bias].bits
|
||||
& CHUNK_MAP_ALLOCATED) == 0);
|
||||
assert((chunk->map[run_ind+run_pages+nrun_pages-1-map_bias].bits
|
||||
& CHUNK_MAP_DIRTY) == flag_dirty);
|
||||
assert(arena_mapbits_unallocated_size_get(chunk,
|
||||
run_ind+run_pages+nrun_pages-1) == nrun_size);
|
||||
assert(arena_mapbits_dirty_get(chunk,
|
||||
run_ind+run_pages+nrun_pages-1) == flag_dirty);
|
||||
arena_avail_tree_remove(runs_avail,
|
||||
&chunk->map[run_ind+run_pages-map_bias]);
|
||||
arena_mapp_get(chunk, run_ind+run_pages));
|
||||
|
||||
size += nrun_size;
|
||||
run_pages += nrun_pages;
|
||||
|
||||
chunk->map[run_ind-map_bias].bits = size |
|
||||
(chunk->map[run_ind-map_bias].bits & CHUNK_MAP_FLAGS_MASK);
|
||||
chunk->map[run_ind+run_pages-1-map_bias].bits = size |
|
||||
(chunk->map[run_ind+run_pages-1-map_bias].bits &
|
||||
CHUNK_MAP_FLAGS_MASK);
|
||||
arena_mapbits_unallocated_size_set(chunk, run_ind, size);
|
||||
arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
|
||||
size);
|
||||
}
|
||||
|
||||
/* Try to coalesce backward. */
|
||||
if (run_ind > map_bias && (chunk->map[run_ind-1-map_bias].bits &
|
||||
CHUNK_MAP_ALLOCATED) == 0 && (chunk->map[run_ind-1-map_bias].bits &
|
||||
CHUNK_MAP_DIRTY) == flag_dirty) {
|
||||
size_t prun_size = chunk->map[run_ind-1-map_bias].bits &
|
||||
~PAGE_MASK;
|
||||
if (run_ind > map_bias && arena_mapbits_allocated_get(chunk, run_ind-1)
|
||||
== 0 && arena_mapbits_dirty_get(chunk, run_ind-1) == flag_dirty) {
|
||||
size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
|
||||
run_ind-1);
|
||||
size_t prun_pages = prun_size >> LG_PAGE;
|
||||
|
||||
run_ind -= prun_pages;
|
||||
@ -905,31 +904,26 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
|
||||
* Remove predecessor from runs_avail; the coalesced run is
|
||||
* inserted later.
|
||||
*/
|
||||
assert((chunk->map[run_ind-map_bias].bits & ~PAGE_MASK)
|
||||
== prun_size);
|
||||
assert((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_ALLOCATED)
|
||||
== 0);
|
||||
assert((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY)
|
||||
== flag_dirty);
|
||||
arena_avail_tree_remove(runs_avail,
|
||||
&chunk->map[run_ind-map_bias]);
|
||||
assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
|
||||
prun_size);
|
||||
assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
|
||||
arena_avail_tree_remove(runs_avail, arena_mapp_get(chunk,
|
||||
run_ind));
|
||||
|
||||
size += prun_size;
|
||||
run_pages += prun_pages;
|
||||
|
||||
chunk->map[run_ind-map_bias].bits = size |
|
||||
(chunk->map[run_ind-map_bias].bits & CHUNK_MAP_FLAGS_MASK);
|
||||
chunk->map[run_ind+run_pages-1-map_bias].bits = size |
|
||||
(chunk->map[run_ind+run_pages-1-map_bias].bits &
|
||||
CHUNK_MAP_FLAGS_MASK);
|
||||
arena_mapbits_unallocated_size_set(chunk, run_ind, size);
|
||||
arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
|
||||
size);
|
||||
}
|
||||
|
||||
/* Insert into runs_avail, now that coalescing is complete. */
|
||||
assert((chunk->map[run_ind-map_bias].bits & ~PAGE_MASK) ==
|
||||
(chunk->map[run_ind+run_pages-1-map_bias].bits & ~PAGE_MASK));
|
||||
assert((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY) ==
|
||||
(chunk->map[run_ind+run_pages-1-map_bias].bits & CHUNK_MAP_DIRTY));
|
||||
arena_avail_tree_insert(runs_avail, &chunk->map[run_ind-map_bias]);
|
||||
assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
|
||||
arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
|
||||
assert(arena_mapbits_dirty_get(chunk, run_ind) ==
|
||||
arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
|
||||
arena_avail_tree_insert(runs_avail, arena_mapp_get(chunk, run_ind));
|
||||
|
||||
if (dirty) {
|
||||
/*
|
||||
@ -943,14 +937,15 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Deallocate chunk if it is now completely unused. The bit
|
||||
* manipulation checks whether the first run is unallocated and extends
|
||||
* to the end of the chunk.
|
||||
*/
|
||||
if ((chunk->map[0].bits & (~PAGE_MASK | CHUNK_MAP_ALLOCATED)) ==
|
||||
arena_maxclass)
|
||||
/* Deallocate chunk if it is now completely unused. */
|
||||
if (size == arena_maxclass) {
|
||||
assert(run_ind == map_bias);
|
||||
assert(run_pages == (arena_maxclass >> LG_PAGE));
|
||||
assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
|
||||
assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
|
||||
arena_maxclass);
|
||||
arena_chunk_dealloc(arena, chunk);
|
||||
}
|
||||
|
||||
/*
|
||||
* It is okay to do dirty page processing here even if the chunk was
|
||||
@ -969,7 +964,7 @@ arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
|
||||
{
|
||||
size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
|
||||
size_t head_npages = (oldsize - newsize) >> LG_PAGE;
|
||||
size_t flag_dirty = chunk->map[pageind-map_bias].bits & CHUNK_MAP_DIRTY;
|
||||
size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
|
||||
|
||||
assert(oldsize > newsize);
|
||||
|
||||
@ -978,29 +973,21 @@ arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
|
||||
* leading run as separately allocated. Set the last element of each
|
||||
* run first, in case of single-page runs.
|
||||
*/
|
||||
assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_LARGE) != 0);
|
||||
assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_ALLOCATED) != 0);
|
||||
chunk->map[pageind+head_npages-1-map_bias].bits = flag_dirty |
|
||||
(chunk->map[pageind+head_npages-1-map_bias].bits &
|
||||
CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
|
||||
chunk->map[pageind-map_bias].bits = (oldsize - newsize)
| flag_dirty | (chunk->map[pageind-map_bias].bits &
CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
arena_mapbits_unzeroed_get(chunk, pageind+head_npages-1));
arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty |
arena_mapbits_unzeroed_get(chunk, pageind));

if (config_debug) {
UNUSED size_t tail_npages = newsize >> LG_PAGE;
assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
.bits & ~PAGE_MASK) == 0);
assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
.bits & CHUNK_MAP_DIRTY) == flag_dirty);
assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
.bits & CHUNK_MAP_LARGE) != 0);
assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
.bits & CHUNK_MAP_ALLOCATED) != 0);
assert(arena_mapbits_large_size_get(chunk,
pageind+head_npages+tail_npages-1) == 0);
assert(arena_mapbits_dirty_get(chunk,
pageind+head_npages+tail_npages-1) == flag_dirty);
}
chunk->map[pageind+head_npages-map_bias].bits = newsize | flag_dirty |
(chunk->map[pageind+head_npages-map_bias].bits &
CHUNK_MAP_FLAGS_MASK) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
arena_mapbits_large_set(chunk, pageind+head_npages, newsize, flag_dirty
| arena_mapbits_unzeroed_get(chunk, pageind+head_npages));

arena_run_dalloc(arena, run, false);
}
@ -1011,9 +998,7 @@ arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
{
size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
size_t head_npages = newsize >> LG_PAGE;
size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
size_t flag_dirty = chunk->map[pageind-map_bias].bits &
CHUNK_MAP_DIRTY;
size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);

assert(oldsize > newsize);

@ -1022,28 +1007,22 @@ arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
* trailing run as separately allocated. Set the last element of each
* run first, in case of single-page runs.
*/
assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_LARGE) != 0);
assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_ALLOCATED) != 0);
chunk->map[pageind+head_npages-1-map_bias].bits = flag_dirty |
(chunk->map[pageind+head_npages-1-map_bias].bits &
CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
chunk->map[pageind-map_bias].bits = newsize | flag_dirty |
(chunk->map[pageind-map_bias].bits & CHUNK_MAP_UNZEROED) |
CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
arena_mapbits_unzeroed_get(chunk, pageind+head_npages-1));
arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty |
arena_mapbits_unzeroed_get(chunk, pageind));

assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits &
~PAGE_MASK) == 0);
assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits &
CHUNK_MAP_LARGE) != 0);
assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits &
CHUNK_MAP_ALLOCATED) != 0);
chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits =
flag_dirty |
(chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits &
CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
chunk->map[pageind+head_npages-map_bias].bits = (oldsize - newsize) |
flag_dirty | (chunk->map[pageind+head_npages-map_bias].bits &
CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
if (config_debug) {
UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
assert(arena_mapbits_large_size_get(chunk,
pageind+head_npages+tail_npages-1) == 0);
assert(arena_mapbits_dirty_get(chunk,
pageind+head_npages+tail_npages-1) == flag_dirty);
}
arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
flag_dirty | arena_mapbits_unzeroed_get(chunk,
pageind+head_npages));

arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize),
dirty);
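The two hunks above replace open-coded reads and writes of chunk->map[...].bits with arena_mapbits_*() accessors, so the bit layout lives in one place. As a rough sketch of what the read-side helpers presumably boil down to (the real definitions are inline functions in the arena header and carry extra assertions), using only the masks already visible in this diff:

static inline size_t
arena_mapbits_get(arena_chunk_t *chunk, size_t pageind)
{
	/* One dependent load: the page map word for this page. */
	return (chunk->map[pageind-map_bias].bits);
}

static inline size_t
arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind)
{
	return (arena_mapbits_get(chunk, pageind) & CHUNK_MAP_DIRTY);
}

static inline size_t
arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind)
{
	/* Large runs keep their size in the page-aligned high bits. */
	return (arena_mapbits_get(chunk, pageind) & ~PAGE_MASK);
}

Funneling every map access through helpers like these is what makes it practical to also embed the bin index in the map word without revisiting each call site by hand.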
@ -1061,8 +1040,8 @@ arena_bin_runs_first(arena_bin_t *bin)
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm);
pageind = ((((uintptr_t)mapelm - (uintptr_t)chunk->map) /
sizeof(arena_chunk_map_t))) + map_bias;
run = (arena_run_t *)((uintptr_t)chunk +
(uintptr_t)((pageind - (mapelm->bits >> LG_PAGE)) <<
run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
arena_mapbits_small_runind_get(chunk, pageind)) <<
LG_PAGE));
return (run);
}
@ -1075,7 +1054,7 @@ arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
{
arena_chunk_t *chunk = CHUNK_ADDR2BASE(run);
size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
arena_chunk_map_t *mapelm = &chunk->map[pageind-map_bias];
arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);

assert(arena_run_tree_search(&bin->runs, mapelm) == NULL);

@ -1087,7 +1066,7 @@ arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run)
{
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
arena_chunk_map_t *mapelm = &chunk->map[pageind-map_bias];
arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);

assert(arena_run_tree_search(&bin->runs, mapelm) != NULL);

@ -1126,7 +1105,7 @@ arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
malloc_mutex_unlock(&bin->lock);
/******************************/
malloc_mutex_lock(&arena->lock);
run = arena_run_alloc(arena, bin_info->run_size, false, false);
run = arena_run_alloc(arena, bin_info->run_size, false, binind, false);
if (run != NULL) {
bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
(uintptr_t)bin_info->bitmap_offset);
@ -1384,7 +1363,7 @@ arena_malloc_large(arena_t *arena, size_t size, bool zero)
/* Large allocation. */
size = PAGE_CEILING(size);
malloc_mutex_lock(&arena->lock);
ret = (void *)arena_run_alloc(arena, size, true, zero);
ret = (void *)arena_run_alloc(arena, size, true, BININD_INVALID, zero);
if (ret == NULL) {
malloc_mutex_unlock(&arena->lock);
return (NULL);
@ -1428,7 +1407,7 @@ arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
alloc_size = size + alignment - PAGE;

malloc_mutex_lock(&arena->lock);
run = arena_run_alloc(arena, alloc_size, true, zero);
run = arena_run_alloc(arena, alloc_size, true, BININD_INVALID, zero);
if (run == NULL) {
malloc_mutex_unlock(&arena->lock);
return (NULL);
@ -1485,8 +1464,7 @@ arena_prof_promoted(const void *ptr, size_t size)
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
binind = SMALL_SIZE2BIN(size);
assert(binind < NBINS);
chunk->map[pageind-map_bias].bits = (chunk->map[pageind-map_bias].bits &
~CHUNK_MAP_CLASS_MASK) | ((binind+1) << CHUNK_MAP_CLASS_SHIFT);
arena_mapbits_large_binind_set(chunk, pageind, binind);

assert(isalloc(ptr, false) == PAGE);
assert(isalloc(ptr, true) == size);
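arena_mapbits_large_binind_set() is how a prof-promoted allocation (a small request backed by a PAGE-sized large run) records its true size class directly in the page map. The essential encode/decode is just a masked shift over the BININD field declared in the header; the helpers below use hypothetical names (mapbits_binind_encode/decode) purely for illustration, the commit's real accessors being arena_mapbits_large_binind_set() and arena_ptr_binind():

static inline size_t
mapbits_binind_encode(size_t mapbits, size_t binind)
{
	/* Clear the old field, then store binind in bits 4..11. */
	return ((mapbits & ~CHUNK_MAP_BININD_MASK) |
	    (binind << CHUNK_MAP_BININD_SHIFT));
}

static inline size_t
mapbits_binind_decode(size_t mapbits)
{
	/* Ordinary large runs decode to BININD_INVALID (0xff). */
	return ((mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT);
}

Because the index is recoverable from the map word alone, the free path can reach arena_bin_info without ever touching the run or bin structures.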
@ -1524,8 +1502,9 @@ arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
size_t npages, run_ind, past;

assert(run != bin->runcur);
assert(arena_run_tree_search(&bin->runs, &chunk->map[
(((uintptr_t)run-(uintptr_t)chunk)>>LG_PAGE)-map_bias]) == NULL);
assert(arena_run_tree_search(&bin->runs,
arena_mapp_get(chunk, ((uintptr_t)run-(uintptr_t)chunk)>>LG_PAGE))
== NULL);

binind = arena_bin_index(chunk->arena, run->bin);
bin_info = &arena_bin_info[binind];
@ -1545,18 +1524,16 @@ arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
* trim the clean pages before deallocating the dirty portion of the
* run.
*/
if ((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY) == 0 && past
- run_ind < npages) {
if (arena_mapbits_dirty_get(chunk, run_ind) == 0 && past - run_ind <
npages) {
/*
* Trim clean pages. Convert to large run beforehand. Set the
* last map element first, in case this is a one-page run.
*/
chunk->map[run_ind+npages-1-map_bias].bits = CHUNK_MAP_LARGE |
(chunk->map[run_ind+npages-1-map_bias].bits &
CHUNK_MAP_FLAGS_MASK);
chunk->map[run_ind-map_bias].bits = bin_info->run_size |
CHUNK_MAP_LARGE | (chunk->map[run_ind-map_bias].bits &
CHUNK_MAP_FLAGS_MASK);
arena_mapbits_large_set(chunk, run_ind+npages-1, 0,
arena_mapbits_unzeroed_get(chunk, run_ind+npages-1));
arena_mapbits_large_set(chunk, run_ind, bin_info->run_size,
arena_mapbits_unzeroed_get(chunk, run_ind));
arena_run_trim_tail(arena, chunk, run, (npages << LG_PAGE),
((past - run_ind) << LG_PAGE), false);
/* npages = past - run_ind; */
@ -1591,7 +1568,7 @@ arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
}

void
arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
arena_chunk_map_t *mapelm)
{
size_t pageind;
@ -1602,9 +1579,9 @@ arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,

pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
(mapelm->bits >> LG_PAGE)) << LG_PAGE));
arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
bin = run->bin;
binind = arena_bin_index(arena, bin);
binind = arena_ptr_binind(ptr, mapelm->bits);
bin_info = &arena_bin_info[binind];
if (config_fill || config_stats)
size = bin_info->reg_size;
@ -1625,6 +1602,34 @@ arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
}
}

void
arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t pageind, arena_chunk_map_t *mapelm)
{
arena_run_t *run;
arena_bin_t *bin;

run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
bin = run->bin;
malloc_mutex_lock(&bin->lock);
arena_dalloc_bin_locked(arena, chunk, ptr, mapelm);
malloc_mutex_unlock(&bin->lock);
}

void
arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t pageind)
{
arena_chunk_map_t *mapelm;

if (config_debug) {
assert(arena_ptr_binind(ptr, arena_mapbits_get(chunk, pageind))
!= BININD_INVALID);
}
mapelm = arena_mapp_get(chunk, pageind);
arena_dalloc_bin(arena, chunk, ptr, pageind, mapelm);
}
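With the bin index embedded in the map bits, arena_dalloc_small() above only needs the page index; the caller no longer has to chase mapelm->run->bin on its behalf. A sketch of how an inlined deallocation path might now dispatch off a single map read (assumed shape, not the verbatim inline from the headers):

	/* Sketch: one load of the map word decides small vs. large. */
	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	size_t mapbits = arena_mapbits_get(chunk, pageind);
	assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
	if ((mapbits & CHUNK_MAP_LARGE) == 0)
		arena_dalloc_small(arena, chunk, ptr, pageind);
	else
		arena_dalloc_large(arena, chunk, ptr);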
void
arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
arena_stats_t *astats, malloc_bin_stats_t *bstats,
@ -1673,12 +1678,12 @@ arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
}

void
arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr)
{

if (config_fill || config_stats) {
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
size_t size = chunk->map[pageind-map_bias].bits & ~PAGE_MASK;
size_t size = arena_mapbits_large_size_get(chunk, pageind);

if (config_fill && config_stats && opt_junk)
memset(ptr, 0x5a, size);
@ -1693,6 +1698,15 @@ arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
arena_run_dalloc(arena, (arena_run_t *)ptr, true);
}

void
arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
{

malloc_mutex_lock(&arena->lock);
arena_dalloc_large_locked(arena, chunk, ptr);
malloc_mutex_unlock(&arena->lock);
}
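Splitting arena_dalloc_large() into a thin locking wrapper over arena_dalloc_large_locked() mirrors the arena_dalloc_bin()/arena_dalloc_bin_locked() split above: a caller that already holds arena->lock, such as the tcache flush path later in this diff, can drain a whole batch under one lock acquisition. Illustrative only (nflush and ptrs are hypothetical names, not part of the diff):

	malloc_mutex_lock(&arena->lock);
	for (unsigned i = 0; i < nflush; i++) {
		void *ptr = ptrs[i];
		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
		arena_dalloc_large_locked(arena, chunk, ptr);
	}
	malloc_mutex_unlock(&arena->lock);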
static void
arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t oldsize, size_t size)
@ -1731,16 +1745,15 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t npages = oldsize >> LG_PAGE;
size_t followsize;

assert(oldsize == (chunk->map[pageind-map_bias].bits & ~PAGE_MASK));
assert(oldsize == arena_mapbits_large_size_get(chunk, pageind));

/* Try to extend the run. */
assert(size + extra > oldsize);
malloc_mutex_lock(&arena->lock);
if (pageind + npages < chunk_npages &&
(chunk->map[pageind+npages-map_bias].bits
& CHUNK_MAP_ALLOCATED) == 0 && (followsize =
chunk->map[pageind+npages-map_bias].bits & ~PAGE_MASK) >= size -
oldsize) {
arena_mapbits_allocated_get(chunk, pageind+npages) == 0 &&
(followsize = arena_mapbits_unallocated_size_get(chunk,
pageind+npages)) >= size - oldsize) {
/*
* The next run is available and sufficiently large. Split the
* following run, then merge the first part with the existing
@ -1750,7 +1763,8 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t splitsize = (oldsize + followsize <= size + extra)
? followsize : size + extra - oldsize;
arena_run_split(arena, (arena_run_t *)((uintptr_t)chunk +
((pageind+npages) << LG_PAGE)), splitsize, true, zero);
((pageind+npages) << LG_PAGE)), splitsize, true,
BININD_INVALID, zero);

size = oldsize + splitsize;
npages = size >> LG_PAGE;
@ -1763,29 +1777,22 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
* arena_run_dalloc() with the dirty argument set to false
* (which is when dirty flag consistency would really matter).
*/
flag_dirty = (chunk->map[pageind-map_bias].bits &
CHUNK_MAP_DIRTY) |
(chunk->map[pageind+npages-1-map_bias].bits &
CHUNK_MAP_DIRTY);
chunk->map[pageind-map_bias].bits = size | flag_dirty
| CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
chunk->map[pageind+npages-1-map_bias].bits = flag_dirty |
CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
arena_mapbits_dirty_get(chunk, pageind+npages-1);
arena_mapbits_large_set(chunk, pageind, size, flag_dirty);
arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty);

if (config_stats) {
arena->stats.ndalloc_large++;
arena->stats.allocated_large -= oldsize;
arena->stats.lstats[(oldsize >> LG_PAGE)
- 1].ndalloc++;
arena->stats.lstats[(oldsize >> LG_PAGE)
- 1].curruns--;
arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;

arena->stats.nmalloc_large++;
arena->stats.nrequests_large++;
arena->stats.allocated_large += size;
arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
arena->stats.lstats[(size >> LG_PAGE)
- 1].nrequests++;
arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
}
malloc_mutex_unlock(&arena->lock);
src/tcache.c
@ -24,6 +24,46 @@ size_t tcache_salloc(const void *ptr)
return (arena_salloc(ptr, false));
}

void
tcache_event_hard(tcache_t *tcache)
{
size_t binind = tcache->next_gc_bin;
tcache_bin_t *tbin = &tcache->tbins[binind];
tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];

if (tbin->low_water > 0) {
/*
* Flush (ceiling) 3/4 of the objects below the low water mark.
*/
if (binind < NBINS) {
tcache_bin_flush_small(tbin, binind, tbin->ncached -
tbin->low_water + (tbin->low_water >> 2), tcache);
} else {
tcache_bin_flush_large(tbin, binind, tbin->ncached -
tbin->low_water + (tbin->low_water >> 2), tcache);
}
/*
* Reduce fill count by 2X. Limit lg_fill_div such that the
* fill count is always at least 1.
*/
if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1)) >= 1)
tbin->lg_fill_div++;
} else if (tbin->low_water < 0) {
/*
* Increase fill count by 2X. Make sure lg_fill_div stays
* greater than 0.
*/
if (tbin->lg_fill_div > 1)
tbin->lg_fill_div--;
}
tbin->low_water = tbin->ncached;

tcache->next_gc_bin++;
if (tcache->next_gc_bin == nhbins)
tcache->next_gc_bin = 0;
tcache->ev_cnt = 0;
}
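tcache_event_hard() is the slow half of the incremental tcache GC that this commit moves out of the inlined call chain; the part that stays inline only has to bump a counter and branch. A plausible sketch of that inlined counterpart, assuming the existing TCACHE_GC_INCR threshold and the ev_cnt/next_gc_bin fields used above (not the verbatim header code):

JEMALLOC_INLINE void
tcache_event(tcache_t *tcache)
{

	if (TCACHE_GC_INCR == 0)
		return;

	tcache->ev_cnt++;
	assert(tcache->ev_cnt <= TCACHE_GC_INCR);
	if (tcache->ev_cnt == TCACHE_GC_INCR)
		tcache_event_hard(tcache);
}

Keeping only the counter check inline shrinks the code duplicated into every malloc()/free() fast path.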
void *
tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin, size_t binind)
{
@ -80,12 +120,13 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
size_t pageind = ((uintptr_t)ptr -
(uintptr_t)chunk) >> LG_PAGE;
arena_chunk_map_t *mapelm =
&chunk->map[pageind-map_bias];
arena_mapp_get(chunk, pageind);
if (config_fill && opt_junk) {
arena_alloc_junk_small(ptr,
&arena_bin_info[binind], true);
}
arena_dalloc_bin(arena, chunk, ptr, mapelm);
arena_dalloc_bin_locked(arena, chunk, ptr,
mapelm);
} else {
/*
* This object was allocated via a different
@ -158,7 +199,7 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk->arena == arena)
arena_dalloc_large(arena, chunk, ptr);
arena_dalloc_large_locked(arena, chunk, ptr);
else {
/*
* This object was allocated via a different
@ -314,22 +355,14 @@ tcache_destroy(tcache_t *tcache)
arena_t *arena = chunk->arena;
size_t pageind = ((uintptr_t)tcache - (uintptr_t)chunk) >>
LG_PAGE;
arena_chunk_map_t *mapelm = &chunk->map[pageind-map_bias];
arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
(uintptr_t)((pageind - (mapelm->bits >> LG_PAGE)) <<
LG_PAGE));
arena_bin_t *bin = run->bin;
arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);

malloc_mutex_lock(&bin->lock);
arena_dalloc_bin(arena, chunk, tcache, mapelm);
malloc_mutex_unlock(&bin->lock);
arena_dalloc_bin(arena, chunk, tcache, pageind, mapelm);
} else if (tcache_size <= tcache_maxclass) {
arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
arena_t *arena = chunk->arena;

malloc_mutex_lock(&arena->lock);
arena_dalloc_large(arena, chunk, tcache);
malloc_mutex_unlock(&arena->lock);
} else
idalloc(tcache);
}