Optimize malloc() and free() fast paths.

Embed the bin index for small page runs into the chunk page map, in
order to omit the bracketed loads in the following dependent load
sequence:
  ptr-->mapelm-->[run-->bin-->]bin_info

Move various non-critical code out of the inlined function chain into
helper functions (tcache_event_hard(), arena_dalloc_small(), and
locking).
Jason Evans 2012-05-02 00:30:36 -07:00
parent fd97b1dfc7
commit 203484e2ea
7 changed files with 613 additions and 387 deletions
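
To make the first point concrete, here is a minimal standalone sketch (not code from this commit; the struct layout and constants below are simplified stand-ins for jemalloc's) contrasting the old pointer-chasing lookup with reading the bin index straight out of the page-map word:

/*
 * Illustrative sketch only: a toy model of the old vs. new lookup.
 * MAP_BININD_SHIFT/MASK mirror the new CHUNK_MAP_BININD_* macros;
 * everything else here is simplified.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define MAP_BININD_SHIFT 4
#define MAP_BININD_MASK  ((size_t)0xff0U)

typedef struct { size_t reg_size; } toy_bin_info_t;
typedef struct { size_t binind; } toy_bin_t;
typedef struct { toy_bin_t *bin; } toy_run_t;

static const toy_bin_info_t toy_bin_info[] = { {8}, {16}, {32}, {48} };

/* Old path: mapbits -> run -> bin -> binind -> bin_info (dependent loads). */
static size_t
old_reg_size(const toy_run_t *run)
{
    return (toy_bin_info[run->bin->binind].reg_size);
}

/* New path: binind is packed into the map word itself (one load). */
static size_t
new_reg_size(size_t mapbits)
{
    size_t binind = (mapbits & MAP_BININD_MASK) >> MAP_BININD_SHIFT;
    return (toy_bin_info[binind].reg_size);
}

int
main(void)
{
    toy_bin_t bin = {2};
    toy_run_t run = {&bin};
    size_t mapbits = (bin.binind << MAP_BININD_SHIFT) | 0x1; /* 0x1 ~ "A" bit */

    assert(old_reg_size(&run) == new_reg_size(mapbits));
    printf("reg_size = %zu\n", new_reg_size(mapbits));
    return (0);
}

The old path needs three dependent loads before the size class is known; the new path needs only the map word that the deallocation path has to read anyway.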


@@ -109,7 +109,8 @@ struct arena_chunk_map_s {
  *
  * p : run page offset
  * s : run size
- * c : (binind+1) for size class (used only if prof_promote is true)
+ * n : binind for size class; large objects set these to BININD_INVALID
+ *     except for promoted allocations (see prof_promote)
  * x : don't care
  * - : 0
  * + : 1
@@ -117,35 +118,38 @@ struct arena_chunk_map_s {
  * [dula] : bit unset
  *
  * Unallocated (clean):
- *   ssssssss ssssssss ssss---- ----du-a
- *   xxxxxxxx xxxxxxxx xxxx---- -----Uxx
- *   ssssssss ssssssss ssss---- ----dU-a
+ *   ssssssss ssssssss ssss1111 1111du-a
+ *   xxxxxxxx xxxxxxxx xxxxxxxx xxxx-Uxx
+ *   ssssssss ssssssss ssss1111 1111dU-a
  *
  * Unallocated (dirty):
- *   ssssssss ssssssss ssss---- ----D--a
- *   xxxxxxxx xxxxxxxx xxxx---- ----xxxx
- *   ssssssss ssssssss ssss---- ----D--a
+ *   ssssssss ssssssss ssss1111 1111D--a
+ *   xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
+ *   ssssssss ssssssss ssss1111 1111D--a
  *
  * Small:
- *   pppppppp pppppppp pppp---- ----d--A
- *   pppppppp pppppppp pppp---- -------A
- *   pppppppp pppppppp pppp---- ----d--A
+ *   pppppppp pppppppp ppppnnnn nnnnd--A
+ *   pppppppp pppppppp ppppnnnn nnnn---A
+ *   pppppppp pppppppp ppppnnnn nnnnd--A
  *
  * Large:
- *   ssssssss ssssssss ssss---- ----D-LA
- *   xxxxxxxx xxxxxxxx xxxx---- ----xxxx
- *   -------- -------- -------- ----D-LA
+ *   ssssssss ssssssss ssss1111 1111D-LA
+ *   xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
+ *   -------- -------- ----1111 1111D-LA
  *
  * Large (sampled, size <= PAGE):
- *   ssssssss ssssssss sssscccc ccccD-LA
+ *   ssssssss ssssssss ssssnnnn nnnnD-LA
  *
  * Large (not sampled, size == PAGE):
- *   ssssssss ssssssss ssss---- ----D-LA
+ *   ssssssss ssssssss ssss1111 1111D-LA
  */
 size_t bits;
-#define CHUNK_MAP_CLASS_SHIFT 4
-#define CHUNK_MAP_CLASS_MASK ((size_t)0xff0U)
-#define CHUNK_MAP_FLAGS_MASK ((size_t)0xfU)
+#define CHUNK_MAP_BININD_SHIFT 4
+#define BININD_INVALID ((size_t)0xffU)
+/* CHUNK_MAP_BININD_MASK == (BININD_INVALID << CHUNK_MAP_BININD_SHIFT) */
+#define CHUNK_MAP_BININD_MASK ((size_t)0xff0U)
+#define CHUNK_MAP_BININD_INVALID CHUNK_MAP_BININD_MASK
+#define CHUNK_MAP_FLAGS_MASK ((size_t)0xcU)
 #define CHUNK_MAP_DIRTY ((size_t)0x8U)
 #define CHUNK_MAP_UNZEROED ((size_t)0x4U)
 #define CHUNK_MAP_LARGE ((size_t)0x2U)
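
A minimal sketch of how a small-run map word is packed under the layout documented above, assuming 4 KiB pages; the masks mirror the new CHUNK_MAP_BININD_* macros, everything else is illustrative:

/* Sketch only; mirrors the documented layout, not jemalloc's actual code. */
#include <assert.h>
#include <stddef.h>

#define LG_PAGE                12          /* assumed 4 KiB pages */
#define CHUNK_MAP_BININD_SHIFT 4
#define BININD_INVALID         ((size_t)0xffU)
#define CHUNK_MAP_BININD_MASK  ((size_t)0xff0U)
#define CHUNK_MAP_DIRTY        ((size_t)0x8U)
#define CHUNK_MAP_ALLOCATED    ((size_t)0x1U)

/* Pack "pppp...p nnnnnnnn d--A" for a page inside a small run. */
static size_t
small_mapbits_pack(size_t runind, size_t binind, size_t flags)
{
    assert(binind < BININD_INVALID);
    return ((runind << LG_PAGE) | (binind << CHUNK_MAP_BININD_SHIFT) |
        flags | CHUNK_MAP_ALLOCATED);
}

int
main(void)
{
    size_t bits = small_mapbits_pack(3, 7, CHUNK_MAP_DIRTY);

    assert((bits >> LG_PAGE) == 3);                   /* run page offset */
    assert(((bits & CHUNK_MAP_BININD_MASK) >>
        CHUNK_MAP_BININD_SHIFT) == 7);                /* embedded binind */
    assert((bits & CHUNK_MAP_DIRTY) != 0);
    /* Small runs never carry the BININD_INVALID sentinel. */
    assert((bits & CHUNK_MAP_BININD_MASK) != CHUNK_MAP_BININD_MASK);
    return (0);
}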
@ -409,8 +413,14 @@ void *arena_malloc_small(arena_t *arena, size_t size, bool zero);
void *arena_malloc_large(arena_t *arena, size_t size, bool zero); void *arena_malloc_large(arena_t *arena, size_t size, bool zero);
void *arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero); void *arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero);
void arena_prof_promoted(const void *ptr, size_t size); void arena_prof_promoted(const void *ptr, size_t size);
void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr, void arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
arena_chunk_map_t *mapelm); arena_chunk_map_t *mapelm);
void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t pageind, arena_chunk_map_t *mapelm);
void arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t pageind);
void arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk,
void *ptr);
void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr); void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr);
void arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty, void arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
arena_stats_t *astats, malloc_bin_stats_t *bstats, arena_stats_t *astats, malloc_bin_stats_t *bstats,
@ -430,6 +440,30 @@ void arena_postfork_child(arena_t *arena);
#ifdef JEMALLOC_H_INLINES #ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE #ifndef JEMALLOC_ENABLE_INLINE
arena_chunk_map_t *arena_mapp_get(arena_chunk_t *chunk, size_t pageind);
size_t *arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_unallocated_size_get(arena_chunk_t *chunk,
size_t pageind);
size_t arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind);
void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind,
size_t size, size_t flags);
void arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
size_t size);
void arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind,
size_t size, size_t flags);
void arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
size_t binind);
void arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind,
size_t runind, size_t binind, size_t flags);
void arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind,
size_t unzeroed);
size_t arena_ptr_binind(const void *ptr, size_t mapbits);
size_t arena_bin_index(arena_t *arena, arena_bin_t *bin); size_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
unsigned arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, unsigned arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
const void *ptr); const void *ptr);
@ -442,6 +476,203 @@ void arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr,
#endif #endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_)) #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
# ifdef JEMALLOC_ARENA_INLINE_A
JEMALLOC_INLINE arena_chunk_map_t *
arena_mapp_get(arena_chunk_t *chunk, size_t pageind)
{
assert(pageind >= map_bias);
assert(pageind < chunk_npages);
return (&chunk->map[pageind-map_bias]);
}
JEMALLOC_INLINE size_t *
arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind)
{
return (&arena_mapp_get(chunk, pageind)->bits);
}
JEMALLOC_INLINE size_t
arena_mapbits_get(arena_chunk_t *chunk, size_t pageind)
{
return (*arena_mapbitsp_get(chunk, pageind));
}
JEMALLOC_INLINE size_t
arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
mapbits = arena_mapbits_get(chunk, pageind);
assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
return (mapbits & ~PAGE_MASK);
}
JEMALLOC_INLINE size_t
arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
mapbits = arena_mapbits_get(chunk, pageind);
assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
(CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED));
return (mapbits & ~PAGE_MASK);
}
JEMALLOC_INLINE size_t
arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
mapbits = arena_mapbits_get(chunk, pageind);
assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
CHUNK_MAP_ALLOCATED);
return (mapbits >> LG_PAGE);
}
JEMALLOC_INLINE size_t
arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
mapbits = arena_mapbits_get(chunk, pageind);
return (mapbits & CHUNK_MAP_DIRTY);
}
JEMALLOC_INLINE size_t
arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
mapbits = arena_mapbits_get(chunk, pageind);
return (mapbits & CHUNK_MAP_UNZEROED);
}
JEMALLOC_INLINE size_t
arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
mapbits = arena_mapbits_get(chunk, pageind);
return (mapbits & CHUNK_MAP_LARGE);
}
JEMALLOC_INLINE size_t
arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
mapbits = arena_mapbits_get(chunk, pageind);
return (mapbits & CHUNK_MAP_ALLOCATED);
}
JEMALLOC_INLINE void
arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
size_t flags)
{
size_t *mapbitsp;
mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert((size & PAGE_MASK) == 0);
assert((flags & ~CHUNK_MAP_FLAGS_MASK) == 0);
*mapbitsp = size | CHUNK_MAP_BININD_INVALID | flags;
}
JEMALLOC_INLINE void
arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
size_t size)
{
size_t *mapbitsp;
mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert((size & PAGE_MASK) == 0);
assert((*mapbitsp & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
*mapbitsp = size | (*mapbitsp & PAGE_MASK);
}
JEMALLOC_INLINE void
arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
size_t flags)
{
size_t *mapbitsp;
mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert((size & PAGE_MASK) == 0);
assert((flags & ~CHUNK_MAP_FLAGS_MASK) == 0);
*mapbitsp = size | CHUNK_MAP_BININD_INVALID | flags | CHUNK_MAP_LARGE |
CHUNK_MAP_ALLOCATED;
}
JEMALLOC_INLINE void
arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
size_t binind)
{
size_t *mapbitsp;
assert(binind <= BININD_INVALID);
mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert(arena_mapbits_large_size_get(chunk, pageind) == PAGE);
*mapbitsp = (*mapbitsp & ~CHUNK_MAP_BININD_MASK) | (binind <<
CHUNK_MAP_BININD_SHIFT);
}
JEMALLOC_INLINE void
arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
size_t binind, size_t flags)
{
size_t *mapbitsp;
assert(binind < BININD_INVALID);
mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert(pageind - runind >= map_bias);
assert((flags & ~CHUNK_MAP_FLAGS_MASK) == 0);
*mapbitsp = (runind << LG_PAGE) | (binind << CHUNK_MAP_BININD_SHIFT) |
flags | CHUNK_MAP_ALLOCATED;
}
JEMALLOC_INLINE void
arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind,
size_t unzeroed)
{
size_t *mapbitsp;
mapbitsp = arena_mapbitsp_get(chunk, pageind);
*mapbitsp = (*mapbitsp & ~CHUNK_MAP_UNZEROED) | unzeroed;
}
JEMALLOC_INLINE size_t
arena_ptr_binind(const void *ptr, size_t mapbits)
{
size_t binind;
binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
if (config_debug) {
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
arena_t *arena = chunk->arena;
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
size_t actual_mapbits = arena_mapbits_get(chunk, pageind);
arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
(uintptr_t)((pageind - (actual_mapbits >> LG_PAGE)) <<
LG_PAGE)); arena_bin_t *bin = run->bin;
size_t actual_binind = bin - arena->bins;
arena_bin_info_t *bin_info = &arena_bin_info[actual_binind];
assert(mapbits == actual_mapbits);
assert(binind == actual_binind);
assert(((uintptr_t)ptr - ((uintptr_t)run +
(uintptr_t)bin_info->reg0_offset)) % bin_info->reg_interval
== 0);
}
return (binind);
}
# endif /* JEMALLOC_ARENA_INLINE_A */
# ifdef JEMALLOC_ARENA_INLINE_B
JEMALLOC_INLINE size_t JEMALLOC_INLINE size_t
arena_bin_index(arena_t *arena, arena_bin_t *bin) arena_bin_index(arena_t *arena, arena_bin_t *bin)
{ {
@ -535,7 +766,7 @@ arena_prof_ctx_get(const void *ptr)
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
mapbits = chunk->map[pageind-map_bias].bits; mapbits = arena_mapbits_get(chunk, pageind);
assert((mapbits & CHUNK_MAP_ALLOCATED) != 0); assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
if ((mapbits & CHUNK_MAP_LARGE) == 0) { if ((mapbits & CHUNK_MAP_LARGE) == 0) {
if (prof_promote) if (prof_promote)
@ -544,7 +775,7 @@ arena_prof_ctx_get(const void *ptr)
arena_run_t *run = (arena_run_t *)((uintptr_t)chunk + arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
(uintptr_t)((pageind - (mapbits >> LG_PAGE)) << (uintptr_t)((pageind - (mapbits >> LG_PAGE)) <<
LG_PAGE)); LG_PAGE));
size_t binind = arena_bin_index(chunk->arena, run->bin); size_t binind = arena_ptr_binind(ptr, mapbits);
arena_bin_info_t *bin_info = &arena_bin_info[binind]; arena_bin_info_t *bin_info = &arena_bin_info[binind];
unsigned regind; unsigned regind;
@ -554,7 +785,7 @@ arena_prof_ctx_get(const void *ptr)
sizeof(prof_ctx_t *))); sizeof(prof_ctx_t *)));
} }
} else } else
ret = chunk->map[pageind-map_bias].prof_ctx; ret = arena_mapp_get(chunk, pageind)->prof_ctx;
return (ret); return (ret);
} }
@ -571,19 +802,18 @@ arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
mapbits = chunk->map[pageind-map_bias].bits; mapbits = arena_mapbits_get(chunk, pageind);
assert((mapbits & CHUNK_MAP_ALLOCATED) != 0); assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
if ((mapbits & CHUNK_MAP_LARGE) == 0) { if ((mapbits & CHUNK_MAP_LARGE) == 0) {
if (prof_promote == false) { if (prof_promote == false) {
arena_run_t *run = (arena_run_t *)((uintptr_t)chunk + arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
(uintptr_t)((pageind - (mapbits >> LG_PAGE)) << (uintptr_t)((pageind - (mapbits >> LG_PAGE)) <<
LG_PAGE)); LG_PAGE));
arena_bin_t *bin = run->bin;
size_t binind; size_t binind;
arena_bin_info_t *bin_info; arena_bin_info_t *bin_info;
unsigned regind; unsigned regind;
binind = arena_bin_index(chunk->arena, bin); binind = arena_ptr_binind(ptr, mapbits);
bin_info = &arena_bin_info[binind]; bin_info = &arena_bin_info[binind];
regind = arena_run_regind(run, bin_info, ptr); regind = arena_run_regind(run, bin_info, ptr);
@ -592,7 +822,7 @@ arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
} else } else
assert((uintptr_t)ctx == (uintptr_t)1U); assert((uintptr_t)ctx == (uintptr_t)1U);
} else } else
chunk->map[pageind-map_bias].prof_ctx = ctx; arena_mapp_get(chunk, pageind)->prof_ctx = ctx;
} }
JEMALLOC_INLINE void * JEMALLOC_INLINE void *
@@ -638,26 +868,24 @@ arena_salloc(const void *ptr, bool demote)
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
 	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
-	mapbits = chunk->map[pageind-map_bias].bits;
-	assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
+	assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
+	mapbits = arena_mapbits_get(chunk, pageind);
 	if ((mapbits & CHUNK_MAP_LARGE) == 0) {
-		arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
-		    (uintptr_t)((pageind - (mapbits >> LG_PAGE)) << LG_PAGE));
-		size_t binind = arena_bin_index(chunk->arena, run->bin);
-		arena_bin_info_t *bin_info = &arena_bin_info[binind];
-
-		assert(((uintptr_t)ptr - ((uintptr_t)run +
-		    (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_interval
-		    == 0);
-		ret = bin_info->reg_size;
+		size_t binind = arena_ptr_binind(ptr, mapbits);
+		ret = arena_bin_info[binind].reg_size;
 	} else {
 		assert(((uintptr_t)ptr & PAGE_MASK) == 0);
-		ret = mapbits & ~PAGE_MASK;
+		ret = arena_mapbits_large_size_get(chunk, pageind);
 		if (config_prof && demote && prof_promote && ret == PAGE &&
-		    (mapbits & CHUNK_MAP_CLASS_MASK) != 0) {
-			size_t binind = ((mapbits & CHUNK_MAP_CLASS_MASK) >>
-			    CHUNK_MAP_CLASS_SHIFT) - 1;
+		    (mapbits & CHUNK_MAP_BININD_MASK) !=
+		    CHUNK_MAP_BININD_MASK) {
+			size_t binind = ((mapbits & CHUNK_MAP_BININD_MASK) >>
+			    CHUNK_MAP_BININD_SHIFT);
 			assert(binind < NBINS);
 			ret = arena_bin_info[binind].reg_size;
+		} else {
+			assert(demote == false || (mapbits &
+			    CHUNK_MAP_BININD_MASK) == CHUNK_MAP_BININD_MASK);
 		}
 		assert(ret != 0);
 	}
@@ -668,8 +896,7 @@ arena_salloc(const void *ptr, bool demote)
 JEMALLOC_INLINE void
 arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr, bool try_tcache)
 {
-	size_t pageind;
-	arena_chunk_map_t *mapelm;
+	size_t pageind, mapbits;
 	tcache_t *tcache;
 
 	assert(arena != NULL);
@@ -678,47 +905,31 @@ arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr, bool try_tcache)
 	assert(CHUNK_ADDR2BASE(ptr) != ptr);
 
 	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
-	mapelm = &chunk->map[pageind-map_bias];
-	assert((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0);
-	if ((mapelm->bits & CHUNK_MAP_LARGE) == 0) {
+	mapbits = arena_mapbits_get(chunk, pageind);
+	assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
+	if ((mapbits & CHUNK_MAP_LARGE) == 0) {
 		/* Small allocation. */
-		if (try_tcache && (tcache = tcache_get(false)) != NULL)
-			tcache_dalloc_small(tcache, ptr);
-		else {
-			arena_run_t *run;
-			arena_bin_t *bin;
-
-			run = (arena_run_t *)((uintptr_t)chunk +
-			    (uintptr_t)((pageind - (mapelm->bits >> LG_PAGE)) <<
-			    LG_PAGE));
-			bin = run->bin;
-			if (config_debug) {
-				size_t binind = arena_bin_index(arena, bin);
-				UNUSED arena_bin_info_t *bin_info =
-				    &arena_bin_info[binind];
-
-				assert(((uintptr_t)ptr - ((uintptr_t)run +
-				    (uintptr_t)bin_info->reg0_offset)) %
-				    bin_info->reg_interval == 0);
-			}
-			malloc_mutex_lock(&bin->lock);
-			arena_dalloc_bin(arena, chunk, ptr, mapelm);
-			malloc_mutex_unlock(&bin->lock);
-		}
+		if (try_tcache && (tcache = tcache_get(false)) != NULL) {
+			size_t binind;
+
+			binind = arena_ptr_binind(ptr, mapbits);
+			assert(binind < NBINS);
+			tcache_dalloc_small(tcache, ptr, binind);
+		} else
+			arena_dalloc_small(arena, chunk, ptr, pageind);
 	} else {
-		size_t size = mapelm->bits & ~PAGE_MASK;
+		size_t size = arena_mapbits_large_size_get(chunk, pageind);
 
 		assert(((uintptr_t)ptr & PAGE_MASK) == 0);
 		if (try_tcache && size <= tcache_maxclass && (tcache =
 		    tcache_get(false)) != NULL) {
 			tcache_dalloc_large(tcache, ptr, size);
-		} else {
-			malloc_mutex_lock(&arena->lock);
+		} else
 			arena_dalloc_large(arena, chunk, ptr);
-			malloc_mutex_unlock(&arena->lock);
-		}
 	}
 }
+# endif /* JEMALLOC_ARENA_INLINE_B */
 #endif
 #endif /* JEMALLOC_H_INLINES */


@ -685,8 +685,17 @@ choose_arena(arena_t *arena)
#include "jemalloc/internal/bitmap.h" #include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/rtree.h" #include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h" /*
* Include arena.h twice in order to resolve circular dependencies with
* tcache.h.
*/
#define JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/arena.h" #include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/tcache.h"
#define JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/hash.h" #include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h" #include "jemalloc/internal/quarantine.h"


@ -7,11 +7,30 @@
#define arena_boot JEMALLOC_N(arena_boot) #define arena_boot JEMALLOC_N(arena_boot)
#define arena_dalloc JEMALLOC_N(arena_dalloc) #define arena_dalloc JEMALLOC_N(arena_dalloc)
#define arena_dalloc_bin JEMALLOC_N(arena_dalloc_bin) #define arena_dalloc_bin JEMALLOC_N(arena_dalloc_bin)
#define arena_dalloc_bin_locked JEMALLOC_N(arena_dalloc_bin_locked)
#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small) #define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
#define arena_dalloc_large JEMALLOC_N(arena_dalloc_large) #define arena_dalloc_large JEMALLOC_N(arena_dalloc_large)
#define arena_dalloc_large_locked JEMALLOC_N(arena_dalloc_large_locked)
#define arena_dalloc_small JEMALLOC_N(arena_dalloc_small)
#define arena_malloc JEMALLOC_N(arena_malloc) #define arena_malloc JEMALLOC_N(arena_malloc)
#define arena_malloc_large JEMALLOC_N(arena_malloc_large) #define arena_malloc_large JEMALLOC_N(arena_malloc_large)
#define arena_malloc_small JEMALLOC_N(arena_malloc_small) #define arena_malloc_small JEMALLOC_N(arena_malloc_small)
#define arena_mapbitsp_get JEMALLOC_N(arena_mapbitsp_get)
#define arena_mapbits_get JEMALLOC_N(arena_mapbits_get)
#define arena_mapbits_unallocated_size_get JEMALLOC_N(arena_mapbits_unallocated_size_get)
#define arena_mapbits_large_size_get JEMALLOC_N(arena_mapbits_large_size_get)
#define arena_mapbits_small_runind_get JEMALLOC_N(arena_mapbits_small_runind_get)
#define arena_mapbits_dirty_get JEMALLOC_N(arena_mapbits_dirty_get)
#define arena_mapbits_unzeroed_get JEMALLOC_N(arena_mapbits_unzeroed_get)
#define arena_mapbits_large_get JEMALLOC_N(arena_mapbits_large_get)
#define arena_mapbits_allocated_get JEMALLOC_N(arena_mapbits_allocated_get)
#define arena_mapbits_unallocated_set JEMALLOC_N(arena_mapbits_unallocated_set)
#define arena_mapbits_unallocated_size_set JEMALLOC_N(arena_mapbits_unallocated_size_set)
#define arena_mapbits_large_set JEMALLOC_N(arena_mapbits_large_set)
#define arena_mapbits_large_binind_set JEMALLOC_N(arena_mapbits_large_binind_set)
#define arena_mapbits_small_set JEMALLOC_N(arena_mapbits_small_set)
#define arena_mapbits_unzeroed_set JEMALLOC_N(arena_mapbits_unzeroed_set)
#define arena_mapp_get JEMALLOC_N(arena_mapp_get)
#define arena_maxclass JEMALLOC_N(arena_maxclass) #define arena_maxclass JEMALLOC_N(arena_maxclass)
#define arena_new JEMALLOC_N(arena_new) #define arena_new JEMALLOC_N(arena_new)
#define arena_palloc JEMALLOC_N(arena_palloc) #define arena_palloc JEMALLOC_N(arena_palloc)
@ -22,6 +41,7 @@
#define arena_prof_ctx_get JEMALLOC_N(arena_prof_ctx_get) #define arena_prof_ctx_get JEMALLOC_N(arena_prof_ctx_get)
#define arena_prof_ctx_set JEMALLOC_N(arena_prof_ctx_set) #define arena_prof_ctx_set JEMALLOC_N(arena_prof_ctx_set)
#define arena_prof_promoted JEMALLOC_N(arena_prof_promoted) #define arena_prof_promoted JEMALLOC_N(arena_prof_promoted)
#define arena_ptr_binind JEMALLOC_N(arena_ptr_binind)
#define arena_purge_all JEMALLOC_N(arena_purge_all) #define arena_purge_all JEMALLOC_N(arena_purge_all)
#define arena_ralloc JEMALLOC_N(arena_ralloc) #define arena_ralloc JEMALLOC_N(arena_ralloc)
#define arena_ralloc_no_move JEMALLOC_N(arena_ralloc_no_move) #define arena_ralloc_no_move JEMALLOC_N(arena_ralloc_no_move)
@ -296,6 +316,7 @@
#define tcache_enabled_tsd_get JEMALLOC_N(tcache_enabled_tsd_get) #define tcache_enabled_tsd_get JEMALLOC_N(tcache_enabled_tsd_get)
#define tcache_enabled_tsd_set JEMALLOC_N(tcache_enabled_tsd_set) #define tcache_enabled_tsd_set JEMALLOC_N(tcache_enabled_tsd_set)
#define tcache_event JEMALLOC_N(tcache_event) #define tcache_event JEMALLOC_N(tcache_event)
#define tcache_event_hard JEMALLOC_N(tcache_event_hard)
#define tcache_initialized JEMALLOC_N(tcache_initialized) #define tcache_initialized JEMALLOC_N(tcache_initialized)
#define tcache_flush JEMALLOC_N(tcache_flush) #define tcache_flush JEMALLOC_N(tcache_flush)
#define tcache_get JEMALLOC_N(tcache_get) #define tcache_get JEMALLOC_N(tcache_get)


@ -101,6 +101,7 @@ extern size_t nhbins;
extern size_t tcache_maxclass; extern size_t tcache_maxclass;
size_t tcache_salloc(const void *ptr); size_t tcache_salloc(const void *ptr);
void tcache_event_hard(tcache_t *tcache);
void *tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin, void *tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin,
size_t binind); size_t binind);
void tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem, void tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
@ -132,7 +133,7 @@ void tcache_enabled_set(bool enabled);
void *tcache_alloc_easy(tcache_bin_t *tbin); void *tcache_alloc_easy(tcache_bin_t *tbin);
void *tcache_alloc_small(tcache_t *tcache, size_t size, bool zero); void *tcache_alloc_small(tcache_t *tcache, size_t size, bool zero);
void *tcache_alloc_large(tcache_t *tcache, size_t size, bool zero); void *tcache_alloc_large(tcache_t *tcache, size_t size, bool zero);
void tcache_dalloc_small(tcache_t *tcache, void *ptr); void tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind);
void tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size); void tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size);
#endif #endif
@ -266,47 +267,8 @@ tcache_event(tcache_t *tcache)
tcache->ev_cnt++; tcache->ev_cnt++;
assert(tcache->ev_cnt <= TCACHE_GC_INCR); assert(tcache->ev_cnt <= TCACHE_GC_INCR);
if (tcache->ev_cnt == TCACHE_GC_INCR) { if (tcache->ev_cnt == TCACHE_GC_INCR)
size_t binind = tcache->next_gc_bin; tcache_event_hard(tcache);
tcache_bin_t *tbin = &tcache->tbins[binind];
tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];
if (tbin->low_water > 0) {
/*
* Flush (ceiling) 3/4 of the objects below the low
* water mark.
*/
if (binind < NBINS) {
tcache_bin_flush_small(tbin, binind,
tbin->ncached - tbin->low_water +
(tbin->low_water >> 2), tcache);
} else {
tcache_bin_flush_large(tbin, binind,
tbin->ncached - tbin->low_water +
(tbin->low_water >> 2), tcache);
}
/*
* Reduce fill count by 2X. Limit lg_fill_div such that
* the fill count is always at least 1.
*/
if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1))
>= 1)
tbin->lg_fill_div++;
} else if (tbin->low_water < 0) {
/*
* Increase fill count by 2X. Make sure lg_fill_div
* stays greater than 0.
*/
if (tbin->lg_fill_div > 1)
tbin->lg_fill_div--;
}
tbin->low_water = tbin->ncached;
tcache->next_gc_bin++;
if (tcache->next_gc_bin == nhbins)
tcache->next_gc_bin = 0;
tcache->ev_cnt = 0;
}
} }
JEMALLOC_INLINE void * JEMALLOC_INLINE void *
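
The restructuring above follows a common pattern: keep only the counter check inline and push the rare, bulky work into an out-of-line *_hard() helper. A generic sketch of that shape (invented names, not the jemalloc code):

/* Generic sketch of the inline-fast-path / outlined-slow-path split. */
#include <stdio.h>

#define GC_INCR 64

typedef struct {
    unsigned ev_cnt;
    unsigned gc_runs;
} cache_t;

/* Out of line: the rare, bulky work stays off the fast path. */
static void
cache_event_hard(cache_t *c)
{
    c->gc_runs++;	/* stands in for the flush/fill-tuning logic */
    c->ev_cnt = 0;
}

/* Inlined: the common case is a single increment and compare. */
static inline void
cache_event(cache_t *c)
{
    c->ev_cnt++;
    if (c->ev_cnt == GC_INCR)
        cache_event_hard(c);
}

int
main(void)
{
    cache_t c = {0, 0};

    for (int i = 0; i < 1000; i++)
        cache_event(&c);
    printf("gc runs: %u\n", c.gc_runs);	/* 1000 / 64 == 15 */
    return (0);
}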
@ -390,13 +352,13 @@ tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
if (ret == NULL) if (ret == NULL)
return (NULL); return (NULL);
} else { } else {
if (config_prof) { if (config_prof && prof_promote && size == PAGE) {
arena_chunk_t *chunk = arena_chunk_t *chunk =
(arena_chunk_t *)CHUNK_ADDR2BASE(ret); (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >> size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
LG_PAGE); LG_PAGE);
chunk->map[pageind-map_bias].bits &= arena_mapbits_large_binind_set(chunk, pageind,
~CHUNK_MAP_CLASS_MASK; BININD_INVALID);
} }
if (zero == false) { if (zero == false) {
if (config_fill) { if (config_fill) {
@ -421,30 +383,13 @@ tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
} }
JEMALLOC_INLINE void JEMALLOC_INLINE void
tcache_dalloc_small(tcache_t *tcache, void *ptr) tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind)
{ {
arena_t *arena;
arena_chunk_t *chunk;
arena_run_t *run;
arena_bin_t *bin;
tcache_bin_t *tbin; tcache_bin_t *tbin;
tcache_bin_info_t *tbin_info; tcache_bin_info_t *tbin_info;
size_t pageind, binind;
arena_chunk_map_t *mapelm;
assert(tcache_salloc(ptr) <= SMALL_MAXCLASS); assert(tcache_salloc(ptr) <= SMALL_MAXCLASS);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
arena = chunk->arena;
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
mapelm = &chunk->map[pageind-map_bias];
run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
(mapelm->bits >> LG_PAGE)) << LG_PAGE));
bin = run->bin;
binind = ((uintptr_t)bin - (uintptr_t)&arena->bins) /
sizeof(arena_bin_t);
assert(binind < NBINS);
if (config_fill && opt_junk) if (config_fill && opt_junk)
arena_dalloc_junk_small(ptr, &arena_bin_info[binind]); arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);
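
Passing binind in from the caller is what lets all of the deleted lookup code above go away: the cache-side free becomes a bounds check plus a push into the right bin. A toy model under that assumption (simplified types, not the real tcache_bin_t):

/* Toy model of a per-size-class free cache; not jemalloc's tcache. */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define TOY_NBINS       4
#define TOY_NCACHED_MAX 8

typedef struct {
    void  *slots[TOY_NCACHED_MAX];
    size_t ncached;
} toy_tbin_t;

typedef struct {
    toy_tbin_t tbins[TOY_NBINS];
} toy_tcache_t;

/*
 * The caller already decoded binind from the page map, so the cache-side
 * free is just a bounds check and a push; no chunk/run/bin loads remain.
 */
static int
toy_dalloc_small(toy_tcache_t *tc, void *ptr, size_t binind)
{
    toy_tbin_t *tbin;

    assert(binind < TOY_NBINS);
    tbin = &tc->tbins[binind];
    if (tbin->ncached == TOY_NCACHED_MAX)
        return (-1);	/* the real code flushes part of the bin here */
    tbin->slots[tbin->ncached++] = ptr;
    return (0);
}

int
main(void)
{
    toy_tcache_t tc;
    int object;	/* stand-in for a small heap object */

    memset(&tc, 0, sizeof(tc));
    toy_dalloc_small(&tc, &object, 2);
    printf("bin 2 now caches %zu object(s)\n", tc.tbins[2].ncached);
    return (0);
}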


@ -41,11 +41,11 @@ const uint8_t small_size2bin[] = {
/* Function prototypes for non-inline static functions. */ /* Function prototypes for non-inline static functions. */
static void arena_run_split(arena_t *arena, arena_run_t *run, size_t size, static void arena_run_split(arena_t *arena, arena_run_t *run, size_t size,
bool large, bool zero); bool large, size_t binind, bool zero);
static arena_chunk_t *arena_chunk_alloc(arena_t *arena); static arena_chunk_t *arena_chunk_alloc(arena_t *arena);
static void arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk); static void arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
static arena_run_t *arena_run_alloc(arena_t *arena, size_t size, bool large, static arena_run_t *arena_run_alloc(arena_t *arena, size_t size, bool large,
bool zero); size_t binind, bool zero);
static void arena_purge(arena_t *arena, bool all); static void arena_purge(arena_t *arena, bool all);
static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty); static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty);
static void arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, static void arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk,
@ -152,7 +152,9 @@ static inline void
arena_run_reg_dalloc(arena_run_t *run, void *ptr) arena_run_reg_dalloc(arena_run_t *run, void *ptr)
{ {
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
size_t binind = arena_bin_index(chunk->arena, run->bin); size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
size_t mapbits = arena_mapbits_get(chunk, pageind);
size_t binind = arena_ptr_binind(ptr, mapbits);
arena_bin_info_t *bin_info = &arena_bin_info[binind]; arena_bin_info_t *bin_info = &arena_bin_info[binind];
unsigned regind = arena_run_regind(run, bin_info, ptr); unsigned regind = arena_run_regind(run, bin_info, ptr);
bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run + bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
@ -184,28 +186,31 @@ arena_chunk_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
static void static void
arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large, arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
bool zero) size_t binind, bool zero)
{ {
arena_chunk_t *chunk; arena_chunk_t *chunk;
size_t run_ind, total_pages, need_pages, rem_pages, i; size_t run_ind, total_pages, need_pages, rem_pages, i;
size_t flag_dirty; size_t flag_dirty;
arena_avail_tree_t *runs_avail; arena_avail_tree_t *runs_avail;
assert((large && binind == BININD_INVALID) || (large == false && binind
!= BININD_INVALID));
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE); run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
flag_dirty = chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY; flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
runs_avail = (flag_dirty != 0) ? &arena->runs_avail_dirty : runs_avail = (flag_dirty != 0) ? &arena->runs_avail_dirty :
&arena->runs_avail_clean; &arena->runs_avail_clean;
total_pages = (chunk->map[run_ind-map_bias].bits & ~PAGE_MASK) >> total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
LG_PAGE; LG_PAGE;
assert((chunk->map[run_ind+total_pages-1-map_bias].bits & assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
CHUNK_MAP_DIRTY) == flag_dirty); flag_dirty);
need_pages = (size >> LG_PAGE); need_pages = (size >> LG_PAGE);
assert(need_pages > 0); assert(need_pages > 0);
assert(need_pages <= total_pages); assert(need_pages <= total_pages);
rem_pages = total_pages - need_pages; rem_pages = total_pages - need_pages;
arena_avail_tree_remove(runs_avail, &chunk->map[run_ind-map_bias]); arena_avail_tree_remove(runs_avail, arena_mapp_get(chunk, run_ind));
if (config_stats) { if (config_stats) {
/* /*
* Update stats_cactive if nactive is crossing a chunk * Update stats_cactive if nactive is crossing a chunk
@ -222,22 +227,23 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
/* Keep track of trailing unused pages for later use. */ /* Keep track of trailing unused pages for later use. */
if (rem_pages > 0) { if (rem_pages > 0) {
if (flag_dirty != 0) { if (flag_dirty != 0) {
chunk->map[run_ind+need_pages-map_bias].bits = arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
(rem_pages << LG_PAGE) | CHUNK_MAP_DIRTY; (rem_pages << LG_PAGE), CHUNK_MAP_DIRTY);
chunk->map[run_ind+total_pages-1-map_bias].bits = arena_mapbits_unallocated_set(chunk,
(rem_pages << LG_PAGE) | CHUNK_MAP_DIRTY; run_ind+total_pages-1, (rem_pages << LG_PAGE),
CHUNK_MAP_DIRTY);
} else { } else {
chunk->map[run_ind+need_pages-map_bias].bits = arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
(rem_pages << LG_PAGE) | (rem_pages << LG_PAGE),
(chunk->map[run_ind+need_pages-map_bias].bits & arena_mapbits_unzeroed_get(chunk,
CHUNK_MAP_UNZEROED); run_ind+need_pages));
chunk->map[run_ind+total_pages-1-map_bias].bits = arena_mapbits_unallocated_set(chunk,
(rem_pages << LG_PAGE) | run_ind+total_pages-1, (rem_pages << LG_PAGE),
(chunk->map[run_ind+total_pages-1-map_bias].bits & arena_mapbits_unzeroed_get(chunk,
CHUNK_MAP_UNZEROED); run_ind+total_pages-1));
} }
arena_avail_tree_insert(runs_avail, arena_avail_tree_insert(runs_avail, arena_mapp_get(chunk,
&chunk->map[run_ind+need_pages-map_bias]); run_ind+need_pages));
} }
/* Update dirty page accounting. */ /* Update dirty page accounting. */
@ -258,8 +264,8 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
* zeroed (i.e. never before touched). * zeroed (i.e. never before touched).
*/ */
for (i = 0; i < need_pages; i++) { for (i = 0; i < need_pages; i++) {
if ((chunk->map[run_ind+i-map_bias].bits if (arena_mapbits_unzeroed_get(chunk,
& CHUNK_MAP_UNZEROED) != 0) { run_ind+i) != 0) {
VALGRIND_MAKE_MEM_UNDEFINED( VALGRIND_MAKE_MEM_UNDEFINED(
(void *)((uintptr_t) (void *)((uintptr_t)
chunk + ((run_ind+i) << chunk + ((run_ind+i) <<
@ -293,10 +299,9 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
* Set the last element first, in case the run only contains one * Set the last element first, in case the run only contains one
* page (i.e. both statements set the same element). * page (i.e. both statements set the same element).
*/ */
chunk->map[run_ind+need_pages-1-map_bias].bits = arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0,
CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED | flag_dirty; flag_dirty);
chunk->map[run_ind-map_bias].bits = size | flag_dirty | arena_mapbits_large_set(chunk, run_ind, size, flag_dirty);
CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
} else { } else {
assert(zero == false); assert(zero == false);
/* /*
@ -304,34 +309,30 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
* small run, so that arena_dalloc_bin_run() has the ability to * small run, so that arena_dalloc_bin_run() has the ability to
* conditionally trim clean pages. * conditionally trim clean pages.
*/ */
chunk->map[run_ind-map_bias].bits = arena_mapbits_small_set(chunk, run_ind, 0, binind,
(chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED) | arena_mapbits_unzeroed_get(chunk, run_ind) | flag_dirty);
CHUNK_MAP_ALLOCATED | flag_dirty;
/* /*
* The first page will always be dirtied during small run * The first page will always be dirtied during small run
* initialization, so a validation failure here would not * initialization, so a validation failure here would not
* actually cause an observable failure. * actually cause an observable failure.
*/ */
if (config_debug && flag_dirty == 0 && if (config_debug && flag_dirty == 0 &&
(chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED) arena_mapbits_unzeroed_get(chunk, run_ind) == 0)
== 0)
arena_chunk_validate_zeroed(chunk, run_ind); arena_chunk_validate_zeroed(chunk, run_ind);
for (i = 1; i < need_pages - 1; i++) { for (i = 1; i < need_pages - 1; i++) {
chunk->map[run_ind+i-map_bias].bits = (i << LG_PAGE) arena_mapbits_small_set(chunk, run_ind+i, i,
| (chunk->map[run_ind+i-map_bias].bits & binind, arena_mapbits_unzeroed_get(chunk,
CHUNK_MAP_UNZEROED) | CHUNK_MAP_ALLOCATED; run_ind+i));
if (config_debug && flag_dirty == 0 && if (config_debug && flag_dirty == 0 &&
(chunk->map[run_ind+i-map_bias].bits & arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0)
CHUNK_MAP_UNZEROED) == 0)
arena_chunk_validate_zeroed(chunk, run_ind+i); arena_chunk_validate_zeroed(chunk, run_ind+i);
} }
chunk->map[run_ind+need_pages-1-map_bias].bits = ((need_pages arena_mapbits_small_set(chunk, run_ind+need_pages-1,
- 1) << LG_PAGE) | need_pages-1, binind, arena_mapbits_unzeroed_get(chunk,
(chunk->map[run_ind+need_pages-1-map_bias].bits & run_ind+need_pages-1) | flag_dirty);
CHUNK_MAP_UNZEROED) | CHUNK_MAP_ALLOCATED | flag_dirty;
if (config_debug && flag_dirty == 0 && if (config_debug && flag_dirty == 0 &&
(chunk->map[run_ind+need_pages-1-map_bias].bits & arena_mapbits_unzeroed_get(chunk, run_ind+need_pages-1) ==
CHUNK_MAP_UNZEROED) == 0) { 0) {
arena_chunk_validate_zeroed(chunk, arena_chunk_validate_zeroed(chunk,
run_ind+need_pages-1); run_ind+need_pages-1);
} }
@ -351,17 +352,18 @@ arena_chunk_alloc(arena_t *arena)
arena->spare = NULL; arena->spare = NULL;
/* Insert the run into the appropriate runs_avail_* tree. */ /* Insert the run into the appropriate runs_avail_* tree. */
if ((chunk->map[0].bits & CHUNK_MAP_DIRTY) == 0) if (arena_mapbits_dirty_get(chunk, map_bias) == 0)
runs_avail = &arena->runs_avail_clean; runs_avail = &arena->runs_avail_clean;
else else
runs_avail = &arena->runs_avail_dirty; runs_avail = &arena->runs_avail_dirty;
assert((chunk->map[0].bits & ~PAGE_MASK) == arena_maxclass); assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
assert((chunk->map[chunk_npages-1-map_bias].bits & ~PAGE_MASK) arena_maxclass);
== arena_maxclass); assert(arena_mapbits_unallocated_size_get(chunk,
assert((chunk->map[0].bits & CHUNK_MAP_DIRTY) == chunk_npages-1) == arena_maxclass);
(chunk->map[chunk_npages-1-map_bias].bits & assert(arena_mapbits_dirty_get(chunk, map_bias) ==
CHUNK_MAP_DIRTY)); arena_mapbits_dirty_get(chunk, chunk_npages-1));
arena_avail_tree_insert(runs_avail, &chunk->map[0]); arena_avail_tree_insert(runs_avail, arena_mapp_get(chunk,
map_bias));
} else { } else {
bool zero; bool zero;
size_t unzeroed; size_t unzeroed;
@ -392,24 +394,27 @@ arena_chunk_alloc(arena_t *arena)
* chunk. * chunk.
*/ */
unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED; unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED;
chunk->map[0].bits = arena_maxclass | unzeroed; arena_mapbits_unallocated_set(chunk, map_bias, arena_maxclass,
unzeroed);
/* /*
* There is no need to initialize the internal page map entries * There is no need to initialize the internal page map entries
* unless the chunk is not zeroed. * unless the chunk is not zeroed.
*/ */
if (zero == false) { if (zero == false) {
for (i = map_bias+1; i < chunk_npages-1; i++) for (i = map_bias+1; i < chunk_npages-1; i++)
chunk->map[i-map_bias].bits = unzeroed; arena_mapbits_unzeroed_set(chunk, i, unzeroed);
} else if (config_debug) { } else if (config_debug) {
for (i = map_bias+1; i < chunk_npages-1; i++) for (i = map_bias+1; i < chunk_npages-1; i++) {
assert(chunk->map[i-map_bias].bits == unzeroed); assert(arena_mapbits_unzeroed_get(chunk, i) ==
unzeroed);
} }
chunk->map[chunk_npages-1-map_bias].bits = arena_maxclass | }
unzeroed; arena_mapbits_unallocated_set(chunk, chunk_npages-1,
arena_maxclass, unzeroed);
/* Insert the run into the runs_avail_clean tree. */ /* Insert the run into the runs_avail_clean tree. */
arena_avail_tree_insert(&arena->runs_avail_clean, arena_avail_tree_insert(&arena->runs_avail_clean,
&chunk->map[0]); arena_mapp_get(chunk, map_bias));
} }
return (chunk); return (chunk);
@ -424,11 +429,11 @@ arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
* Remove run from the appropriate runs_avail_* tree, so that the arena * Remove run from the appropriate runs_avail_* tree, so that the arena
* does not use it. * does not use it.
*/ */
if ((chunk->map[0].bits & CHUNK_MAP_DIRTY) == 0) if (arena_mapbits_dirty_get(chunk, map_bias) == 0)
runs_avail = &arena->runs_avail_clean; runs_avail = &arena->runs_avail_clean;
else else
runs_avail = &arena->runs_avail_dirty; runs_avail = &arena->runs_avail_dirty;
arena_avail_tree_remove(runs_avail, &chunk->map[0]); arena_avail_tree_remove(runs_avail, arena_mapp_get(chunk, map_bias));
if (arena->spare != NULL) { if (arena->spare != NULL) {
arena_chunk_t *spare = arena->spare; arena_chunk_t *spare = arena->spare;
@ -449,7 +454,8 @@ arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
} }
static arena_run_t * static arena_run_t *
arena_run_alloc(arena_t *arena, size_t size, bool large, bool zero) arena_run_alloc(arena_t *arena, size_t size, bool large, size_t binind,
bool zero)
{ {
arena_chunk_t *chunk; arena_chunk_t *chunk;
arena_run_t *run; arena_run_t *run;
@ -457,6 +463,8 @@ arena_run_alloc(arena_t *arena, size_t size, bool large, bool zero)
assert(size <= arena_maxclass); assert(size <= arena_maxclass);
assert((size & PAGE_MASK) == 0); assert((size & PAGE_MASK) == 0);
assert((large && binind == BININD_INVALID) || (large == false && binind
!= BININD_INVALID));
/* Search the arena's chunks for the lowest best fit. */ /* Search the arena's chunks for the lowest best fit. */
key.bits = size | CHUNK_MAP_KEY; key.bits = size | CHUNK_MAP_KEY;
@ -469,7 +477,7 @@ arena_run_alloc(arena_t *arena, size_t size, bool large, bool zero)
run = (arena_run_t *)((uintptr_t)run_chunk + (pageind << run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
LG_PAGE)); LG_PAGE));
arena_run_split(arena, run, size, large, zero); arena_run_split(arena, run, size, large, binind, zero);
return (run); return (run);
} }
mapelm = arena_avail_tree_nsearch(&arena->runs_avail_clean, &key); mapelm = arena_avail_tree_nsearch(&arena->runs_avail_clean, &key);
@ -481,7 +489,7 @@ arena_run_alloc(arena_t *arena, size_t size, bool large, bool zero)
run = (arena_run_t *)((uintptr_t)run_chunk + (pageind << run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
LG_PAGE)); LG_PAGE));
arena_run_split(arena, run, size, large, zero); arena_run_split(arena, run, size, large, binind, zero);
return (run); return (run);
} }
@ -491,7 +499,7 @@ arena_run_alloc(arena_t *arena, size_t size, bool large, bool zero)
chunk = arena_chunk_alloc(arena); chunk = arena_chunk_alloc(arena);
if (chunk != NULL) { if (chunk != NULL) {
run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE)); run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE));
arena_run_split(arena, run, size, large, zero); arena_run_split(arena, run, size, large, binind, zero);
return (run); return (run);
} }
@ -509,7 +517,7 @@ arena_run_alloc(arena_t *arena, size_t size, bool large, bool zero)
run = (arena_run_t *)((uintptr_t)run_chunk + (pageind << run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
LG_PAGE)); LG_PAGE));
arena_run_split(arena, run, size, large, zero); arena_run_split(arena, run, size, large, binind, zero);
return (run); return (run);
} }
mapelm = arena_avail_tree_nsearch(&arena->runs_avail_clean, &key); mapelm = arena_avail_tree_nsearch(&arena->runs_avail_clean, &key);
@ -521,7 +529,7 @@ arena_run_alloc(arena_t *arena, size_t size, bool large, bool zero)
run = (arena_run_t *)((uintptr_t)run_chunk + (pageind << run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
LG_PAGE)); LG_PAGE));
arena_run_split(arena, run, size, large, zero); arena_run_split(arena, run, size, large, binind, zero);
return (run); return (run);
} }
@ -579,40 +587,38 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
* run. * run.
*/ */
if (chunk == arena->spare) { if (chunk == arena->spare) {
assert((chunk->map[0].bits & CHUNK_MAP_DIRTY) != 0); assert(arena_mapbits_dirty_get(chunk, map_bias) != 0);
arena_chunk_alloc(arena); arena_chunk_alloc(arena);
} }
/* Temporarily allocate all free dirty runs within chunk. */ /* Temporarily allocate all free dirty runs within chunk. */
for (pageind = map_bias; pageind < chunk_npages;) { for (pageind = map_bias; pageind < chunk_npages;) {
mapelm = &chunk->map[pageind-map_bias]; mapelm = arena_mapp_get(chunk, pageind);
if ((mapelm->bits & CHUNK_MAP_ALLOCATED) == 0) { if (arena_mapbits_allocated_get(chunk, pageind) == 0) {
size_t npages; size_t npages;
npages = mapelm->bits >> LG_PAGE; npages = arena_mapbits_unallocated_size_get(chunk,
pageind) >> LG_PAGE;
assert(pageind + npages <= chunk_npages); assert(pageind + npages <= chunk_npages);
if (mapelm->bits & CHUNK_MAP_DIRTY) { if (arena_mapbits_dirty_get(chunk, pageind)) {
size_t i; size_t i;
arena_avail_tree_remove( arena_avail_tree_remove(
&arena->runs_avail_dirty, mapelm); &arena->runs_avail_dirty, mapelm);
mapelm->bits = (npages << LG_PAGE) | arena_mapbits_large_set(chunk, pageind,
flag_unzeroed | CHUNK_MAP_LARGE | (npages << LG_PAGE), flag_unzeroed);
CHUNK_MAP_ALLOCATED;
/* /*
* Update internal elements in the page map, so * Update internal elements in the page map, so
* that CHUNK_MAP_UNZEROED is properly set. * that CHUNK_MAP_UNZEROED is properly set.
*/ */
for (i = 1; i < npages - 1; i++) { for (i = 1; i < npages - 1; i++) {
chunk->map[pageind+i-map_bias].bits = arena_mapbits_unzeroed_set(chunk,
flag_unzeroed; pageind+i, flag_unzeroed);
} }
if (npages > 1) { if (npages > 1) {
chunk->map[ arena_mapbits_large_set(chunk,
pageind+npages-1-map_bias].bits = pageind+npages-1, 0, flag_unzeroed);
flag_unzeroed | CHUNK_MAP_LARGE |
CHUNK_MAP_ALLOCATED;
} }
if (config_stats) { if (config_stats) {
@ -637,15 +643,17 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
pageind += npages; pageind += npages;
} else { } else {
/* Skip allocated run. */ /* Skip allocated run. */
if (mapelm->bits & CHUNK_MAP_LARGE) if (arena_mapbits_large_get(chunk, pageind))
pageind += mapelm->bits >> LG_PAGE; pageind += arena_mapbits_large_size_get(chunk,
pageind) >> LG_PAGE;
else { else {
size_t binind; size_t binind;
arena_bin_info_t *bin_info; arena_bin_info_t *bin_info;
arena_run_t *run = (arena_run_t *)((uintptr_t) arena_run_t *run = (arena_run_t *)((uintptr_t)
chunk + (uintptr_t)(pageind << LG_PAGE)); chunk + (uintptr_t)(pageind << LG_PAGE));
assert((mapelm->bits >> LG_PAGE) == 0); assert(arena_mapbits_small_runind_get(chunk,
pageind) == 0);
binind = arena_bin_index(arena, run->bin); binind = arena_bin_index(arena, run->bin);
bin_info = &arena_bin_info[binind]; bin_info = &arena_bin_info[binind];
pageind += bin_info->run_size >> LG_PAGE; pageind += bin_info->run_size >> LG_PAGE;
@ -669,7 +677,8 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
ql_foreach(mapelm, &mapelms, u.ql_link) { ql_foreach(mapelm, &mapelms, u.ql_link) {
size_t pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) / size_t pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
sizeof(arena_chunk_map_t)) + map_bias; sizeof(arena_chunk_map_t)) + map_bias;
size_t npages = mapelm->bits >> LG_PAGE; size_t npages = arena_mapbits_large_size_get(chunk, pageind) >>
LG_PAGE;
assert(pageind + npages <= chunk_npages); assert(pageind + npages <= chunk_npages);
assert(ndirty >= npages); assert(ndirty >= npages);
@ -806,15 +815,11 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE); run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
assert(run_ind >= map_bias); assert(run_ind >= map_bias);
assert(run_ind < chunk_npages); assert(run_ind < chunk_npages);
if ((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_LARGE) != 0) { if (arena_mapbits_large_get(chunk, run_ind) != 0) {
size = chunk->map[run_ind-map_bias].bits & ~PAGE_MASK; size = arena_mapbits_large_size_get(chunk, run_ind);
assert(size == PAGE || assert(size == PAGE ||
(chunk->map[run_ind+(size>>LG_PAGE)-1-map_bias].bits & arena_mapbits_large_size_get(chunk,
~PAGE_MASK) == 0); run_ind+(size>>LG_PAGE)-1) == 0);
assert((chunk->map[run_ind+(size>>LG_PAGE)-1-map_bias].bits &
CHUNK_MAP_LARGE) != 0);
assert((chunk->map[run_ind+(size>>LG_PAGE)-1-map_bias].bits &
CHUNK_MAP_ALLOCATED) != 0);
} else { } else {
size_t binind = arena_bin_index(arena, run->bin); size_t binind = arena_bin_index(arena, run->bin);
arena_bin_info_t *bin_info = &arena_bin_info[binind]; arena_bin_info_t *bin_info = &arena_bin_info[binind];
@ -837,7 +842,7 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
* The run is dirty if the caller claims to have dirtied it, as well as * The run is dirty if the caller claims to have dirtied it, as well as
* if it was already dirty before being allocated. * if it was already dirty before being allocated.
*/ */
if ((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY) != 0) if (arena_mapbits_dirty_get(chunk, run_ind) != 0)
dirty = true; dirty = true;
flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0; flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
runs_avail = dirty ? &arena->runs_avail_dirty : runs_avail = dirty ? &arena->runs_avail_dirty :
@ -845,58 +850,52 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
/* Mark pages as unallocated in the chunk map. */ /* Mark pages as unallocated in the chunk map. */
if (dirty) { if (dirty) {
chunk->map[run_ind-map_bias].bits = size | CHUNK_MAP_DIRTY; arena_mapbits_unallocated_set(chunk, run_ind, size,
chunk->map[run_ind+run_pages-1-map_bias].bits = size | CHUNK_MAP_DIRTY);
CHUNK_MAP_DIRTY; arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
CHUNK_MAP_DIRTY);
chunk->ndirty += run_pages; chunk->ndirty += run_pages;
arena->ndirty += run_pages; arena->ndirty += run_pages;
} else { } else {
chunk->map[run_ind-map_bias].bits = size | arena_mapbits_unallocated_set(chunk, run_ind, size,
(chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED); arena_mapbits_unzeroed_get(chunk, run_ind));
chunk->map[run_ind+run_pages-1-map_bias].bits = size | arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
(chunk->map[run_ind+run_pages-1-map_bias].bits & arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
CHUNK_MAP_UNZEROED);
} }
/* Try to coalesce forward. */ /* Try to coalesce forward. */
if (run_ind + run_pages < chunk_npages && if (run_ind + run_pages < chunk_npages &&
(chunk->map[run_ind+run_pages-map_bias].bits & CHUNK_MAP_ALLOCATED) arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
== 0 && (chunk->map[run_ind+run_pages-map_bias].bits & arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty) {
CHUNK_MAP_DIRTY) == flag_dirty) { size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
size_t nrun_size = chunk->map[run_ind+run_pages-map_bias].bits & run_ind+run_pages);
~PAGE_MASK;
size_t nrun_pages = nrun_size >> LG_PAGE; size_t nrun_pages = nrun_size >> LG_PAGE;
/* /*
* Remove successor from runs_avail; the coalesced run is * Remove successor from runs_avail; the coalesced run is
* inserted later. * inserted later.
*/ */
assert((chunk->map[run_ind+run_pages+nrun_pages-1-map_bias].bits assert(arena_mapbits_unallocated_size_get(chunk,
& ~PAGE_MASK) == nrun_size); run_ind+run_pages+nrun_pages-1) == nrun_size);
assert((chunk->map[run_ind+run_pages+nrun_pages-1-map_bias].bits assert(arena_mapbits_dirty_get(chunk,
& CHUNK_MAP_ALLOCATED) == 0); run_ind+run_pages+nrun_pages-1) == flag_dirty);
assert((chunk->map[run_ind+run_pages+nrun_pages-1-map_bias].bits
& CHUNK_MAP_DIRTY) == flag_dirty);
arena_avail_tree_remove(runs_avail, arena_avail_tree_remove(runs_avail,
&chunk->map[run_ind+run_pages-map_bias]); arena_mapp_get(chunk, run_ind+run_pages));
size += nrun_size; size += nrun_size;
run_pages += nrun_pages; run_pages += nrun_pages;
chunk->map[run_ind-map_bias].bits = size | arena_mapbits_unallocated_size_set(chunk, run_ind, size);
(chunk->map[run_ind-map_bias].bits & CHUNK_MAP_FLAGS_MASK); arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
chunk->map[run_ind+run_pages-1-map_bias].bits = size | size);
(chunk->map[run_ind+run_pages-1-map_bias].bits &
CHUNK_MAP_FLAGS_MASK);
} }
/* Try to coalesce backward. */ /* Try to coalesce backward. */
if (run_ind > map_bias && (chunk->map[run_ind-1-map_bias].bits & if (run_ind > map_bias && arena_mapbits_allocated_get(chunk, run_ind-1)
CHUNK_MAP_ALLOCATED) == 0 && (chunk->map[run_ind-1-map_bias].bits & == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) == flag_dirty) {
CHUNK_MAP_DIRTY) == flag_dirty) { size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
size_t prun_size = chunk->map[run_ind-1-map_bias].bits & run_ind-1);
~PAGE_MASK;
size_t prun_pages = prun_size >> LG_PAGE; size_t prun_pages = prun_size >> LG_PAGE;
run_ind -= prun_pages; run_ind -= prun_pages;
@ -905,31 +904,26 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
* Remove predecessor from runs_avail; the coalesced run is * Remove predecessor from runs_avail; the coalesced run is
* inserted later. * inserted later.
*/ */
assert((chunk->map[run_ind-map_bias].bits & ~PAGE_MASK) assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
== prun_size); prun_size);
assert((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_ALLOCATED) assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
== 0); arena_avail_tree_remove(runs_avail, arena_mapp_get(chunk,
assert((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY) run_ind));
== flag_dirty);
arena_avail_tree_remove(runs_avail,
&chunk->map[run_ind-map_bias]);
size += prun_size; size += prun_size;
run_pages += prun_pages; run_pages += prun_pages;
chunk->map[run_ind-map_bias].bits = size | arena_mapbits_unallocated_size_set(chunk, run_ind, size);
(chunk->map[run_ind-map_bias].bits & CHUNK_MAP_FLAGS_MASK); arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
chunk->map[run_ind+run_pages-1-map_bias].bits = size | size);
(chunk->map[run_ind+run_pages-1-map_bias].bits &
CHUNK_MAP_FLAGS_MASK);
} }
/* Insert into runs_avail, now that coalescing is complete. */ /* Insert into runs_avail, now that coalescing is complete. */
assert((chunk->map[run_ind-map_bias].bits & ~PAGE_MASK) == assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
(chunk->map[run_ind+run_pages-1-map_bias].bits & ~PAGE_MASK)); arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
assert((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY) == assert(arena_mapbits_dirty_get(chunk, run_ind) ==
(chunk->map[run_ind+run_pages-1-map_bias].bits & CHUNK_MAP_DIRTY)); arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
arena_avail_tree_insert(runs_avail, &chunk->map[run_ind-map_bias]); arena_avail_tree_insert(runs_avail, arena_mapp_get(chunk, run_ind));
if (dirty) { if (dirty) {
/* /*
@ -943,14 +937,15 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
} }
} }
-	/*
-	 * Deallocate chunk if it is now completely unused.  The bit
-	 * manipulation checks whether the first run is unallocated and extends
-	 * to the end of the chunk.
-	 */
-	if ((chunk->map[0].bits & (~PAGE_MASK | CHUNK_MAP_ALLOCATED)) ==
-	    arena_maxclass)
+	/* Deallocate chunk if it is now completely unused. */
+	if (size == arena_maxclass) {
+		assert(run_ind == map_bias);
+		assert(run_pages == (arena_maxclass >> LG_PAGE));
+		assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
+		assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
+		    arena_maxclass);
 		arena_chunk_dealloc(arena, chunk);
+	}
 
 	/*
 	 * It is okay to do dirty page processing here even if the chunk was
@ -969,7 +964,7 @@ arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
{ {
size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE; size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
size_t head_npages = (oldsize - newsize) >> LG_PAGE; size_t head_npages = (oldsize - newsize) >> LG_PAGE;
size_t flag_dirty = chunk->map[pageind-map_bias].bits & CHUNK_MAP_DIRTY; size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
assert(oldsize > newsize); assert(oldsize > newsize);
@@ -978,29 +973,21 @@ arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
      * leading run as separately allocated.  Set the last element of each
      * run first, in case of single-page runs.
      */
-    assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_LARGE) != 0);
-    assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_ALLOCATED) != 0);
-    chunk->map[pageind+head_npages-1-map_bias].bits = flag_dirty |
-        (chunk->map[pageind+head_npages-1-map_bias].bits &
-        CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
-    chunk->map[pageind-map_bias].bits = (oldsize - newsize)
-        | flag_dirty | (chunk->map[pageind-map_bias].bits &
-        CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
+    assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
+    arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
+        arena_mapbits_unzeroed_get(chunk, pageind+head_npages-1));
+    arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty |
+        arena_mapbits_unzeroed_get(chunk, pageind));

     if (config_debug) {
         UNUSED size_t tail_npages = newsize >> LG_PAGE;
-        assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
-            .bits & ~PAGE_MASK) == 0);
-        assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
-            .bits & CHUNK_MAP_DIRTY) == flag_dirty);
-        assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
-            .bits & CHUNK_MAP_LARGE) != 0);
-        assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
-            .bits & CHUNK_MAP_ALLOCATED) != 0);
+        assert(arena_mapbits_large_size_get(chunk,
+            pageind+head_npages+tail_npages-1) == 0);
+        assert(arena_mapbits_dirty_get(chunk,
+            pageind+head_npages+tail_npages-1) == flag_dirty);
     }
-    chunk->map[pageind+head_npages-map_bias].bits = newsize | flag_dirty |
-        (chunk->map[pageind+head_npages-map_bias].bits &
-        CHUNK_MAP_FLAGS_MASK) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
+    arena_mapbits_large_set(chunk, pageind+head_npages, newsize, flag_dirty
+        | arena_mapbits_unzeroed_get(chunk, pageind+head_npages));

     arena_run_dalloc(arena, run, false);
 }
@@ -1011,9 +998,7 @@ arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
 {
     size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
     size_t head_npages = newsize >> LG_PAGE;
-    size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
-    size_t flag_dirty = chunk->map[pageind-map_bias].bits &
-        CHUNK_MAP_DIRTY;
+    size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);

     assert(oldsize > newsize);
@@ -1022,28 +1007,22 @@ arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
      * trailing run as separately allocated.  Set the last element of each
      * run first, in case of single-page runs.
      */
-    assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_LARGE) != 0);
-    assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_ALLOCATED) != 0);
-    chunk->map[pageind+head_npages-1-map_bias].bits = flag_dirty |
-        (chunk->map[pageind+head_npages-1-map_bias].bits &
-        CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
-    chunk->map[pageind-map_bias].bits = newsize | flag_dirty |
-        (chunk->map[pageind-map_bias].bits & CHUNK_MAP_UNZEROED) |
-        CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
+    assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
+    arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
+        arena_mapbits_unzeroed_get(chunk, pageind+head_npages-1));
+    arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty |
+        arena_mapbits_unzeroed_get(chunk, pageind));

-    assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits &
-        ~PAGE_MASK) == 0);
-    assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits &
-        CHUNK_MAP_LARGE) != 0);
-    assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits &
-        CHUNK_MAP_ALLOCATED) != 0);
-    chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits =
-        flag_dirty |
-        (chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits &
-        CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
-    chunk->map[pageind+head_npages-map_bias].bits = (oldsize - newsize) |
-        flag_dirty | (chunk->map[pageind+head_npages-map_bias].bits &
-        CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
+    if (config_debug) {
+        UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
+        assert(arena_mapbits_large_size_get(chunk,
+            pageind+head_npages+tail_npages-1) == 0);
+        assert(arena_mapbits_dirty_get(chunk,
+            pageind+head_npages+tail_npages-1) == flag_dirty);
+    }
+    arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
+        flag_dirty | arena_mapbits_unzeroed_get(chunk,
+        pageind+head_npages));

     arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize),
         dirty);
@@ -1061,8 +1040,8 @@ arena_bin_runs_first(arena_bin_t *bin)
         chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm);
         pageind = ((((uintptr_t)mapelm - (uintptr_t)chunk->map) /
             sizeof(arena_chunk_map_t))) + map_bias;
-        run = (arena_run_t *)((uintptr_t)chunk +
-            (uintptr_t)((pageind - (mapelm->bits >> LG_PAGE)) <<
-            LG_PAGE));
+        run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
+            arena_mapbits_small_runind_get(chunk, pageind)) <<
+            LG_PAGE));
         return (run);
     }
@@ -1075,7 +1054,7 @@ arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
 {
     arena_chunk_t *chunk = CHUNK_ADDR2BASE(run);
     size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
-    arena_chunk_map_t *mapelm = &chunk->map[pageind-map_bias];
+    arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);

     assert(arena_run_tree_search(&bin->runs, mapelm) == NULL);
@@ -1087,7 +1066,7 @@ arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run)
 {
     arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
     size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
-    arena_chunk_map_t *mapelm = &chunk->map[pageind-map_bias];
+    arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);

     assert(arena_run_tree_search(&bin->runs, mapelm) != NULL);
@@ -1126,7 +1105,7 @@ arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
     malloc_mutex_unlock(&bin->lock);
     /******************************/
     malloc_mutex_lock(&arena->lock);
-    run = arena_run_alloc(arena, bin_info->run_size, false, false);
+    run = arena_run_alloc(arena, bin_info->run_size, false, binind, false);
     if (run != NULL) {
         bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
             (uintptr_t)bin_info->bitmap_offset);
@@ -1384,7 +1363,7 @@ arena_malloc_large(arena_t *arena, size_t size, bool zero)
     /* Large allocation. */
     size = PAGE_CEILING(size);
     malloc_mutex_lock(&arena->lock);
-    ret = (void *)arena_run_alloc(arena, size, true, zero);
+    ret = (void *)arena_run_alloc(arena, size, true, BININD_INVALID, zero);
     if (ret == NULL) {
         malloc_mutex_unlock(&arena->lock);
         return (NULL);
@@ -1428,7 +1407,7 @@ arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
     alloc_size = size + alignment - PAGE;

     malloc_mutex_lock(&arena->lock);
-    run = arena_run_alloc(arena, alloc_size, true, zero);
+    run = arena_run_alloc(arena, alloc_size, true, BININD_INVALID, zero);
     if (run == NULL) {
         malloc_mutex_unlock(&arena->lock);
         return (NULL);
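Both large call sites above now pass BININD_INVALID, whereas arena_bin_nonfull_run_get() passes the real bin index; arena_run_split() presumably uses that argument to stamp the index into the map bits of every page in a small run. A minimal sketch of the invariant the new parameter carries (illustrative only; the wrapper name is invented for this note, and the real argument checking lives elsewhere):

/*
 * Sketch, not part of the patch: small runs carry a real bin index in
 * their page map entries, large runs carry BININD_INVALID, so the
 * deallocation path can tell the two apart from the map bits alone.
 */
static arena_run_t *
run_alloc_checked_sketch(arena_t *arena, size_t size, bool large,
    size_t binind, bool zero)
{

	assert(large == (binind == BININD_INVALID));
	return (arena_run_alloc(arena, size, large, binind, zero));
}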
@@ -1485,8 +1464,7 @@ arena_prof_promoted(const void *ptr, size_t size)
     pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
     binind = SMALL_SIZE2BIN(size);
     assert(binind < NBINS);
-    chunk->map[pageind-map_bias].bits = (chunk->map[pageind-map_bias].bits &
-        ~CHUNK_MAP_CLASS_MASK) | ((binind+1) << CHUNK_MAP_CLASS_SHIFT);
+    arena_mapbits_large_binind_set(chunk, pageind, binind);

     assert(isalloc(ptr, false) == PAGE);
     assert(isalloc(ptr, true) == size);
@@ -1524,8 +1502,9 @@ arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
     size_t npages, run_ind, past;

     assert(run != bin->runcur);
-    assert(arena_run_tree_search(&bin->runs, &chunk->map[
-        (((uintptr_t)run-(uintptr_t)chunk)>>LG_PAGE)-map_bias]) == NULL);
+    assert(arena_run_tree_search(&bin->runs,
+        arena_mapp_get(chunk, ((uintptr_t)run-(uintptr_t)chunk)>>LG_PAGE))
+        == NULL);

     binind = arena_bin_index(chunk->arena, run->bin);
     bin_info = &arena_bin_info[binind];
@@ -1545,18 +1524,16 @@ arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
      * trim the clean pages before deallocating the dirty portion of the
      * run.
      */
-    if ((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY) == 0 && past
-        - run_ind < npages) {
+    if (arena_mapbits_dirty_get(chunk, run_ind) == 0 && past - run_ind <
+        npages) {
         /*
          * Trim clean pages.  Convert to large run beforehand.  Set the
          * last map element first, in case this is a one-page run.
          */
-        chunk->map[run_ind+npages-1-map_bias].bits = CHUNK_MAP_LARGE |
-            (chunk->map[run_ind+npages-1-map_bias].bits &
-            CHUNK_MAP_FLAGS_MASK);
-        chunk->map[run_ind-map_bias].bits = bin_info->run_size |
-            CHUNK_MAP_LARGE | (chunk->map[run_ind-map_bias].bits &
-            CHUNK_MAP_FLAGS_MASK);
+        arena_mapbits_large_set(chunk, run_ind+npages-1, 0,
+            arena_mapbits_unzeroed_get(chunk, run_ind+npages-1));
+        arena_mapbits_large_set(chunk, run_ind, bin_info->run_size,
+            arena_mapbits_unzeroed_get(chunk, run_ind));
         arena_run_trim_tail(arena, chunk, run, (npages << LG_PAGE),
             ((past - run_ind) << LG_PAGE), false);
         /* npages = past - run_ind; */
@@ -1591,7 +1568,7 @@ arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
 }

 void
-arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
     arena_chunk_map_t *mapelm)
 {
     size_t pageind;
@@ -1602,9 +1579,9 @@ arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
     pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
     run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
-        (mapelm->bits >> LG_PAGE)) << LG_PAGE));
+        arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
     bin = run->bin;
-    binind = arena_bin_index(arena, bin);
+    binind = arena_ptr_binind(ptr, mapelm->bits);
     bin_info = &arena_bin_info[binind];
     if (config_fill || config_stats)
         size = bin_info->reg_size;
@@ -1625,6 +1602,34 @@ arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
     }
 }

+void
+arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+    size_t pageind, arena_chunk_map_t *mapelm)
+{
+    arena_run_t *run;
+    arena_bin_t *bin;
+
+    run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
+        arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
+    bin = run->bin;
+    malloc_mutex_lock(&bin->lock);
+    arena_dalloc_bin_locked(arena, chunk, ptr, mapelm);
+    malloc_mutex_unlock(&bin->lock);
+}
+
+void
+arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+    size_t pageind)
+{
+    arena_chunk_map_t *mapelm;
+
+    if (config_debug) {
+        assert(arena_ptr_binind(ptr, arena_mapbits_get(chunk, pageind))
+            != BININD_INVALID);
+    }
+    mapelm = arena_mapp_get(chunk, pageind);
+    arena_dalloc_bin(arena, chunk, ptr, pageind, mapelm);
+}
+
 void
 arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
     arena_stats_t *astats, malloc_bin_stats_t *bstats,
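arena_dalloc_small() and arena_dalloc_bin() above are the out-of-line halves of the small free path; the payoff of embedding binind in the map bits is that a small deallocation can reach its bin_info without first loading run->bin. A simplified sketch of the resulting lookup (illustrative only; the real decode is done by the arena_mapbits_*() and arena_ptr_binind() helpers and additionally handles prof-promoted large allocations):

/*
 * Sketch, not part of the patch: the size-class index is read straight
 * from the page map entry, so the run and bin structures are never
 * touched on this path.  CHUNK_MAP_BININD_* and BININD_INVALID are the
 * constants this commit introduces; assertions are omitted.
 */
static size_t
salloc_sketch(arena_chunk_t *chunk, const void *ptr)
{
	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	size_t mapbits = arena_mapbits_get(chunk, pageind);
	size_t binind = (mapbits & CHUNK_MAP_BININD_MASK) >>
	    CHUNK_MAP_BININD_SHIFT;

	if (binind != BININD_INVALID) {
		/* Small: the size class comes straight from the page map. */
		return (arena_bin_info[binind].reg_size);
	}
	/* Large: the run size is stored directly in the map bits. */
	return (mapbits & ~PAGE_MASK);
}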
@@ -1673,12 +1678,12 @@ arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
 }

 void
-arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
+arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr)
 {

     if (config_fill || config_stats) {
         size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
-        size_t size = chunk->map[pageind-map_bias].bits & ~PAGE_MASK;
+        size_t size = arena_mapbits_large_size_get(chunk, pageind);

         if (config_fill && config_stats && opt_junk)
             memset(ptr, 0x5a, size);
@@ -1693,6 +1698,15 @@ arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
     arena_run_dalloc(arena, (arena_run_t *)ptr, true);
 }

+void
+arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
+{
+
+    malloc_mutex_lock(&arena->lock);
+    arena_dalloc_large_locked(arena, chunk, ptr);
+    malloc_mutex_unlock(&arena->lock);
+}
+
 static void
 arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
     size_t oldsize, size_t size)
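The locked/unlocked split mirrors arena_dalloc_bin_locked() above: callers that already hold arena->lock (notably the tcache flush loops) invoke the _locked variant repeatedly, while a one-off large free goes through the thin wrapper that takes the lock itself. A hedged usage sketch (illustrative only; the batch function and its parameters are invented for this note):

/*
 * Sketch, not part of the patch: batching frees under a single lock
 * acquisition is the point of the *_locked variant; the real callers
 * are the flush loops in the tcache code.
 */
static void
dalloc_large_batch_sketch(arena_t *arena, arena_chunk_t *chunk, void **ptrs,
    unsigned nptrs)
{
	unsigned i;

	malloc_mutex_lock(&arena->lock);
	for (i = 0; i < nptrs; i++)
		arena_dalloc_large_locked(arena, chunk, ptrs[i]);
	malloc_mutex_unlock(&arena->lock);
}

A single free outside such a loop simply calls arena_dalloc_large(), which acquires and releases arena->lock around the same work.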
@@ -1731,16 +1745,15 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
     size_t npages = oldsize >> LG_PAGE;
     size_t followsize;

-    assert(oldsize == (chunk->map[pageind-map_bias].bits & ~PAGE_MASK));
+    assert(oldsize == arena_mapbits_large_size_get(chunk, pageind));

     /* Try to extend the run. */
     assert(size + extra > oldsize);
     malloc_mutex_lock(&arena->lock);
     if (pageind + npages < chunk_npages &&
-        (chunk->map[pageind+npages-map_bias].bits
-        & CHUNK_MAP_ALLOCATED) == 0 && (followsize =
-        chunk->map[pageind+npages-map_bias].bits & ~PAGE_MASK) >= size -
-        oldsize) {
+        arena_mapbits_allocated_get(chunk, pageind+npages) == 0 &&
+        (followsize = arena_mapbits_unallocated_size_get(chunk,
+        pageind+npages)) >= size - oldsize) {
         /*
          * The next run is available and sufficiently large.  Split the
          * following run, then merge the first part with the existing
@@ -1750,7 +1763,8 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
         size_t splitsize = (oldsize + followsize <= size + extra)
             ? followsize : size + extra - oldsize;
         arena_run_split(arena, (arena_run_t *)((uintptr_t)chunk +
-            ((pageind+npages) << LG_PAGE)), splitsize, true, zero);
+            ((pageind+npages) << LG_PAGE)), splitsize, true,
+            BININD_INVALID, zero);

         size = oldsize + splitsize;
         npages = size >> LG_PAGE;
@@ -1763,29 +1777,22 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
          * arena_run_dalloc() with the dirty argument set to false
          * (which is when dirty flag consistency would really matter).
          */
-        flag_dirty = (chunk->map[pageind-map_bias].bits &
-            CHUNK_MAP_DIRTY) |
-            (chunk->map[pageind+npages-1-map_bias].bits &
-            CHUNK_MAP_DIRTY);
-        chunk->map[pageind-map_bias].bits = size | flag_dirty
-            | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
-        chunk->map[pageind+npages-1-map_bias].bits = flag_dirty |
-            CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
+        flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
+            arena_mapbits_dirty_get(chunk, pageind+npages-1);
+        arena_mapbits_large_set(chunk, pageind, size, flag_dirty);
+        arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty);

         if (config_stats) {
             arena->stats.ndalloc_large++;
             arena->stats.allocated_large -= oldsize;
-            arena->stats.lstats[(oldsize >> LG_PAGE)
-                - 1].ndalloc++;
-            arena->stats.lstats[(oldsize >> LG_PAGE)
-                - 1].curruns--;
+            arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
+            arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;

             arena->stats.nmalloc_large++;
             arena->stats.nrequests_large++;
             arena->stats.allocated_large += size;
             arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
-            arena->stats.lstats[(size >> LG_PAGE)
-                - 1].nrequests++;
+            arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
             arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
         }
         malloc_mutex_unlock(&arena->lock);

--- a/src/tcache.c
+++ b/src/tcache.c

@@ -24,6 +24,46 @@ size_t tcache_salloc(const void *ptr)
     return (arena_salloc(ptr, false));
 }

+void
+tcache_event_hard(tcache_t *tcache)
+{
+    size_t binind = tcache->next_gc_bin;
+    tcache_bin_t *tbin = &tcache->tbins[binind];
+    tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];
+
+    if (tbin->low_water > 0) {
+        /*
+         * Flush (ceiling) 3/4 of the objects below the low water mark.
+         */
+        if (binind < NBINS) {
+            tcache_bin_flush_small(tbin, binind, tbin->ncached -
+                tbin->low_water + (tbin->low_water >> 2), tcache);
+        } else {
+            tcache_bin_flush_large(tbin, binind, tbin->ncached -
+                tbin->low_water + (tbin->low_water >> 2), tcache);
+        }
+        /*
+         * Reduce fill count by 2X.  Limit lg_fill_div such that the
+         * fill count is always at least 1.
+         */
+        if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1)) >= 1)
+            tbin->lg_fill_div++;
+    } else if (tbin->low_water < 0) {
+        /*
+         * Increase fill count by 2X.  Make sure lg_fill_div stays
+         * greater than 0.
+         */
+        if (tbin->lg_fill_div > 1)
+            tbin->lg_fill_div--;
+    }
+    tbin->low_water = tbin->ncached;
+
+    tcache->next_gc_bin++;
+    if (tcache->next_gc_bin == nhbins)
+        tcache->next_gc_bin = 0;
+    tcache->ev_cnt = 0;
+}
+
 void *
 tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin, size_t binind)
 {
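tcache_event_hard() is the slow half of the incremental tcache GC that this commit moves out of the inlined call chain. As a worked example of the flush arithmetic above: with ncached = 20 and low_water = 8, the rem argument is 20 - 8 + (8 >> 2) = 14, so 6 objects are flushed, i.e. the ceiling of 3/4 of the low-water mark. A minimal sketch of the inlined counterpart that presumably remains in the tcache header (TCACHE_GC_INCR is assumed to be the existing GC period constant; the real inline may differ):

/*
 * Sketch, not part of this hunk: the fast path only bumps a counter and
 * defers all of the work above to tcache_event_hard().
 */
JEMALLOC_INLINE void
tcache_event_sketch(tcache_t *tcache)
{

	if (TCACHE_GC_INCR == 0)
		return;

	tcache->ev_cnt++;
	assert(tcache->ev_cnt <= TCACHE_GC_INCR);
	if (tcache->ev_cnt == TCACHE_GC_INCR)
		tcache_event_hard(tcache);
}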
@@ -80,12 +120,13 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
             size_t pageind = ((uintptr_t)ptr -
                 (uintptr_t)chunk) >> LG_PAGE;
             arena_chunk_map_t *mapelm =
-                &chunk->map[pageind-map_bias];
+                arena_mapp_get(chunk, pageind);
             if (config_fill && opt_junk) {
                 arena_alloc_junk_small(ptr,
                     &arena_bin_info[binind], true);
             }
-            arena_dalloc_bin(arena, chunk, ptr, mapelm);
+            arena_dalloc_bin_locked(arena, chunk, ptr,
+                mapelm);
         } else {
             /*
              * This object was allocated via a different
@@ -158,7 +199,7 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
         assert(ptr != NULL);
         chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
         if (chunk->arena == arena)
-            arena_dalloc_large(arena, chunk, ptr);
+            arena_dalloc_large_locked(arena, chunk, ptr);
         else {
             /*
              * This object was allocated via a different
@@ -314,22 +355,14 @@ tcache_destroy(tcache_t *tcache)
         arena_t *arena = chunk->arena;
         size_t pageind = ((uintptr_t)tcache - (uintptr_t)chunk) >>
             LG_PAGE;
-        arena_chunk_map_t *mapelm = &chunk->map[pageind-map_bias];
-        arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
-            (uintptr_t)((pageind - (mapelm->bits >> LG_PAGE)) <<
-            LG_PAGE));
-        arena_bin_t *bin = run->bin;
+        arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);

-        malloc_mutex_lock(&bin->lock);
-        arena_dalloc_bin(arena, chunk, tcache, mapelm);
-        malloc_mutex_unlock(&bin->lock);
+        arena_dalloc_bin(arena, chunk, tcache, pageind, mapelm);
     } else if (tcache_size <= tcache_maxclass) {
         arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
         arena_t *arena = chunk->arena;
-        malloc_mutex_lock(&arena->lock);
         arena_dalloc_large(arena, chunk, tcache);
-        malloc_mutex_unlock(&arena->lock);
     } else
         idalloc(tcache);
 }

--- a/src/tsd.c
+++ b/src/tsd.c

@@ -14,7 +14,7 @@ malloc_tsd_malloc(size_t size)
 {

     /* Avoid choose_arena() in order to dodge bootstrapping issues. */
-    return arena_malloc(arenas[0], size, false, false);
+    return (arena_malloc(arenas[0], size, false, false));
 }

 void