Rename extent_node_t to extent_t.
commit a7a6f5bc96
parent 3aea827f5e
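
This commit mechanically renames the extent "node" namespace: struct extent_node_s becomes extent_s, the extent_node_t typedef becomes extent_t, the en_* field prefix becomes e_*, and every extent_node_* accessor drops the node_ infix (extent_node_size_get() becomes extent_size_get(), and so on). Callers and the embedded arena_chunk_s field are updated to match. A before/after sketch of a typical call site, illustrative only (the chunk variable is assumed context, not part of the diff):

    /* Before: the chunk header embeds an extent_node_t named "node". */
    arena_t *a = extent_node_arena_get(&chunk->node);

    /* After: the same field is an extent_t named "extent". */
    arena_t *a = extent_arena_get(&chunk->extent);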

include/jemalloc/internal/arena.h

@@ -177,11 +177,11 @@ typedef ph(arena_chunk_map_misc_t) arena_run_heap_t;
 /* Arena chunk header. */
 struct arena_chunk_s {
     /*
-     * A pointer to the arena that owns the chunk is stored within the node.
-     * This field as a whole is used by chunks_rtree to support both
-     * ivsalloc() and core-based debugging.
+     * A pointer to the arena that owns the chunk is stored within the
+     * extent structure. This field as a whole is used by chunks_rtree to
+     * support both ivsalloc() and core-based debugging.
      */
-    extent_node_t node;
+    extent_t extent;
 
     /*
      * Map of pages within chunk that keeps track of free/large/small. The
@@ -303,7 +303,7 @@ struct arena_s {
 
 
     /* Extant arena chunks. */
-    ql_head(extent_node_t) achunks;
+    ql_head(extent_t) achunks;
 
     /*
      * In order to avoid rapid chunk allocation/deallocation when an arena
@@ -345,25 +345,25 @@ struct arena_s {
      *        /-- arena ---\
      *        |            |
      *        |            |
-     *        |------------|                            /- chunk -\
-     *   ...->|chunks_cache|<--------------------------->|  /----\  |<--...
-     *        |------------|                             |  |node|  |
-     *        |            |                             |  |    |  |
-     *        |            |    /- run -\    /- run -\   |  |    |  |
-     *        |            |    |       |    |       |   |  |    |  |
-     *        |            |    |       |    |       |   |  |    |  |
-     *        |------------|    |-------|    |-------|   |  |----|  |
-     *   ...->|runs_dirty  |<-->|rd     |<-->|rd     |<---->|rd  |<----...
-     *        |------------|    |-------|    |-------|   |  |----|  |
-     *        |            |    |       |    |       |   |  |    |  |
-     *        |            |    |       |    |       |   |  \----/  |
-     *        |            |    \-------/    \-------/   |          |
-     *        |            |                             |          |
-     *        |            |                             |          |
-     *        \------------/                             \---------/
+     *        |------------|                           /-- chunk --\
+     *   ...->|chunks_cache|<--------------------------->|  /------\  |<--...
+     *        |------------|                             |  |extent|  |
+     *        |            |                             |  |      |  |
+     *        |            |    /- run -\    /- run -\   |  |      |  |
+     *        |            |    |       |    |       |   |  |      |  |
+     *        |            |    |       |    |       |   |  |      |  |
+     *        |------------|    |-------|    |-------|   |  |------|  |
+     *   ...->|runs_dirty  |<-->|rd     |<-->|rd     |<---->|rd    |<----...
+     *        |------------|    |-------|    |-------|   |  |------|  |
+     *        |            |    |       |    |       |   |  |      |  |
+     *        |            |    |       |    |       |   |  \------/  |
+     *        |            |    \-------/    \-------/   |            |
+     *        |            |                             |            |
+     *        |            |                             |            |
+     *        \------------/                             \-----------/
      */
     arena_runs_dirty_link_t runs_dirty;
-    extent_node_t chunks_cache;
+    extent_t chunks_cache;
 
     /*
      * Approximate time in seconds from the creation of a set of unused
@@ -413,16 +413,16 @@ struct arena_s {
     size_t decay_backlog[SMOOTHSTEP_NSTEPS];
 
     /* Extant huge allocations. */
-    ql_head(extent_node_t) huge;
+    ql_head(extent_t) huge;
     /* Synchronizes all huge allocation/update/deallocation. */
     malloc_mutex_t huge_mtx;
 
     /*
      * Trees of chunks that were previously allocated (trees differ only in
-     * node ordering). These are used when allocating chunks, in an attempt
-     * to re-use address space. Depending on function, different tree
-     * orderings are needed, which is why there are two trees with the same
-     * contents.
+     * extent ordering). These are used when allocating chunks, in an
+     * attempt to re-use address space. Depending on function, different
+     * tree orderings are needed, which is why there are two trees with the
+     * same contents.
      */
     extent_tree_t chunks_szad_cached;
     extent_tree_t chunks_ad_cached;
@@ -430,9 +430,9 @@ struct arena_s {
     extent_tree_t chunks_ad_retained;
 
     malloc_mutex_t chunks_mtx;
-    /* Cache of nodes that were allocated via base_alloc(). */
-    ql_head(extent_node_t) node_cache;
-    malloc_mutex_t node_cache_mtx;
+    /* Cache of extent structures that were allocated via base_alloc(). */
+    ql_head(extent_t) extent_cache;
+    malloc_mutex_t extent_cache_mtx;
 
     /* User-configurable chunk hook functions. */
     chunk_hooks_t chunk_hooks;
@@ -486,12 +486,12 @@ typedef size_t (run_quantize_t)(size_t);
 extern run_quantize_t *run_quantize_floor;
 extern run_quantize_t *run_quantize_ceil;
 #endif
-void arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node,
+void arena_chunk_cache_maybe_insert(arena_t *arena, extent_t *extent,
     bool cache);
-void arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node,
+void arena_chunk_cache_maybe_remove(arena_t *arena, extent_t *extent,
     bool cache);
-extent_node_t *arena_node_alloc(tsdn_t *tsdn, arena_t *arena);
-void arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node);
+extent_t *arena_extent_alloc(tsdn_t *tsdn, arena_t *arena);
+void arena_extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
 void *arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
     size_t alignment, bool *zero);
 void arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk,
@@ -1066,7 +1066,7 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
         assert(binind != BININD_INVALID);
         assert(binind < NBINS);
         chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-        arena = extent_node_arena_get(&chunk->node);
+        arena = extent_arena_get(&chunk->extent);
         pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
         actual_mapbits = arena_mapbits_get(chunk, pageind);
         assert(mapbits == actual_mapbits);
@@ -1317,7 +1317,7 @@ arena_aalloc(const void *ptr)
 
     chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
     if (likely(chunk != ptr))
-        return (extent_node_arena_get(&chunk->node));
+        return (extent_arena_get(&chunk->extent));
     else
         return (huge_aalloc(ptr));
 }
@@ -1395,7 +1395,7 @@ arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path)
                     binind, slow_path);
             } else {
                 arena_dalloc_small(tsdn,
-                    extent_node_arena_get(&chunk->node), chunk,
+                    extent_arena_get(&chunk->extent), chunk,
                     ptr, pageind);
             }
         } else {
@@ -1411,7 +1411,7 @@ arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path)
                     size - large_pad, slow_path);
             } else {
                 arena_dalloc_large(tsdn,
-                    extent_node_arena_get(&chunk->node), chunk,
+                    extent_arena_get(&chunk->extent), chunk,
                     ptr);
             }
         }
@@ -1455,7 +1455,7 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
                 size_t pageind = ((uintptr_t)ptr -
                     (uintptr_t)chunk) >> LG_PAGE;
                 arena_dalloc_small(tsdn,
-                    extent_node_arena_get(&chunk->node), chunk,
+                    extent_arena_get(&chunk->extent), chunk,
                     ptr, pageind);
             }
         } else {
@@ -1467,7 +1467,7 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
                 size, slow_path);
             } else {
                 arena_dalloc_large(tsdn,
-                    extent_node_arena_get(&chunk->node), chunk,
+                    extent_arena_get(&chunk->extent), chunk,
                     ptr);
             }
         }

include/jemalloc/internal/chunk.h

@@ -52,13 +52,12 @@ chunk_hooks_t chunk_hooks_get(tsdn_t *tsdn, arena_t *arena);
 chunk_hooks_t chunk_hooks_set(tsdn_t *tsdn, arena_t *arena,
     const chunk_hooks_t *chunk_hooks);
 
-bool chunk_register(tsdn_t *tsdn, const void *chunk,
-    const extent_node_t *node);
-void chunk_deregister(const void *chunk, const extent_node_t *node);
+bool chunk_register(tsdn_t *tsdn, const void *chunk, const extent_t *extent);
+void chunk_deregister(const void *chunk, const extent_t *extent);
 void *chunk_alloc_base(size_t size);
 void *chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena,
     chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
-    bool *zero, bool dalloc_node);
+    bool *zero, bool dalloc_extent);
 void *chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
     chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
     bool *zero, bool *commit);
@@ -80,11 +79,11 @@ void chunk_postfork_child(tsdn_t *tsdn);
 #ifdef JEMALLOC_H_INLINES
 
 #ifndef JEMALLOC_ENABLE_INLINE
-extent_node_t *chunk_lookup(const void *chunk, bool dependent);
+extent_t *chunk_lookup(const void *chunk, bool dependent);
 #endif
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_CHUNK_C_))
-JEMALLOC_INLINE extent_node_t *
+JEMALLOC_INLINE extent_t *
 chunk_lookup(const void *ptr, bool dependent)
 {
 

include/jemalloc/internal/extent.h

@@ -1,237 +1,236 @@
 /******************************************************************************/
 #ifdef JEMALLOC_H_TYPES
 
-typedef struct extent_node_s extent_node_t;
+typedef struct extent_s extent_t;
 
 #endif /* JEMALLOC_H_TYPES */
 /******************************************************************************/
 #ifdef JEMALLOC_H_STRUCTS
 
-/* Tree of extents. Use accessor functions for en_* fields. */
-struct extent_node_s {
+/* Extent (span of pages). Use accessor functions for e_* fields. */
+struct extent_s {
     /* Arena from which this extent came, if any. */
-    arena_t *en_arena;
+    arena_t *e_arena;
 
-    /* Pointer to the extent that this tree node is responsible for. */
-    void *en_addr;
+    /* Pointer to the extent that this structure is responsible for. */
+    void *e_addr;
 
     /* Total region size. */
-    size_t en_size;
+    size_t e_size;
 
     /*
      * The zeroed flag is used by chunk recycling code to track whether
      * memory is zero-filled.
      */
-    bool en_zeroed;
+    bool e_zeroed;
 
     /*
      * True if physical memory is committed to the extent, whether
      * explicitly or implicitly as on a system that overcommits and
      * satisfies physical memory needs on demand via soft page faults.
      */
-    bool en_committed;
+    bool e_committed;
 
     /*
      * The achunk flag is used to validate that huge allocation lookups
      * don't return arena chunks.
      */
-    bool en_achunk;
+    bool e_achunk;
 
     /* Profile counters, used for huge objects. */
-    prof_tctx_t *en_prof_tctx;
+    prof_tctx_t *e_prof_tctx;
 
     /* Linkage for arena's runs_dirty and chunks_cache rings. */
     arena_runs_dirty_link_t rd;
-    qr(extent_node_t) cc_link;
+    qr(extent_t) cc_link;
 
     union {
         /* Linkage for the size/address-ordered tree. */
-        rb_node(extent_node_t) szad_link;
+        rb_node(extent_t) szad_link;
 
         /* Linkage for arena's achunks, huge, and node_cache lists. */
-        ql_elm(extent_node_t) ql_link;
+        ql_elm(extent_t) ql_link;
     };
 
     /* Linkage for the address-ordered tree. */
-    rb_node(extent_node_t) ad_link;
+    rb_node(extent_t) ad_link;
 };
-typedef rb_tree(extent_node_t) extent_tree_t;
+typedef rb_tree(extent_t) extent_tree_t;
 
 #endif /* JEMALLOC_H_STRUCTS */
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
-rb_proto(, extent_tree_szad_, extent_tree_t, extent_node_t)
+rb_proto(, extent_tree_szad_, extent_tree_t, extent_t)
 
-rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)
+rb_proto(, extent_tree_ad_, extent_tree_t, extent_t)
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
 #ifdef JEMALLOC_H_INLINES
 
 #ifndef JEMALLOC_ENABLE_INLINE
-arena_t *extent_node_arena_get(const extent_node_t *node);
-void *extent_node_addr_get(const extent_node_t *node);
-size_t extent_node_size_get(const extent_node_t *node);
-bool extent_node_zeroed_get(const extent_node_t *node);
-bool extent_node_committed_get(const extent_node_t *node);
-bool extent_node_achunk_get(const extent_node_t *node);
-prof_tctx_t *extent_node_prof_tctx_get(const extent_node_t *node);
-void extent_node_arena_set(extent_node_t *node, arena_t *arena);
-void extent_node_addr_set(extent_node_t *node, void *addr);
-void extent_node_size_set(extent_node_t *node, size_t size);
-void extent_node_zeroed_set(extent_node_t *node, bool zeroed);
-void extent_node_committed_set(extent_node_t *node, bool committed);
-void extent_node_achunk_set(extent_node_t *node, bool achunk);
-void extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx);
-void extent_node_init(extent_node_t *node, arena_t *arena, void *addr,
+arena_t *extent_arena_get(const extent_t *extent);
+void *extent_addr_get(const extent_t *extent);
+size_t extent_size_get(const extent_t *extent);
+bool extent_zeroed_get(const extent_t *extent);
+bool extent_committed_get(const extent_t *extent);
+bool extent_achunk_get(const extent_t *extent);
+prof_tctx_t *extent_prof_tctx_get(const extent_t *extent);
+void extent_arena_set(extent_t *extent, arena_t *arena);
+void extent_addr_set(extent_t *extent, void *addr);
+void extent_size_set(extent_t *extent, size_t size);
+void extent_zeroed_set(extent_t *extent, bool zeroed);
+void extent_committed_set(extent_t *extent, bool committed);
+void extent_achunk_set(extent_t *extent, bool achunk);
+void extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx);
+void extent_init(extent_t *extent, arena_t *arena, void *addr,
     size_t size, bool zeroed, bool committed);
-void extent_node_dirty_linkage_init(extent_node_t *node);
-void extent_node_dirty_insert(extent_node_t *node,
-    arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty);
-void extent_node_dirty_remove(extent_node_t *node);
+void extent_dirty_linkage_init(extent_t *extent);
+void extent_dirty_insert(extent_t *extent,
+    arena_runs_dirty_link_t *runs_dirty, extent_t *chunks_dirty);
+void extent_dirty_remove(extent_t *extent);
 #endif
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_EXTENT_C_))
 JEMALLOC_INLINE arena_t *
-extent_node_arena_get(const extent_node_t *node)
+extent_arena_get(const extent_t *extent)
 {
 
-    return (node->en_arena);
+    return (extent->e_arena);
 }
 
 JEMALLOC_INLINE void *
-extent_node_addr_get(const extent_node_t *node)
+extent_addr_get(const extent_t *extent)
 {
 
-    return (node->en_addr);
+    return (extent->e_addr);
 }
 
 JEMALLOC_INLINE size_t
-extent_node_size_get(const extent_node_t *node)
+extent_size_get(const extent_t *extent)
 {
 
-    return (node->en_size);
+    return (extent->e_size);
 }
 
 JEMALLOC_INLINE bool
-extent_node_zeroed_get(const extent_node_t *node)
+extent_zeroed_get(const extent_t *extent)
 {
 
-    return (node->en_zeroed);
+    return (extent->e_zeroed);
 }
 
 JEMALLOC_INLINE bool
-extent_node_committed_get(const extent_node_t *node)
+extent_committed_get(const extent_t *extent)
 {
 
-    assert(!node->en_achunk);
-    return (node->en_committed);
+    assert(!extent->e_achunk);
+    return (extent->e_committed);
 }
 
 JEMALLOC_INLINE bool
-extent_node_achunk_get(const extent_node_t *node)
+extent_achunk_get(const extent_t *extent)
 {
 
-    return (node->en_achunk);
+    return (extent->e_achunk);
 }
 
 JEMALLOC_INLINE prof_tctx_t *
-extent_node_prof_tctx_get(const extent_node_t *node)
+extent_prof_tctx_get(const extent_t *extent)
 {
 
-    return (node->en_prof_tctx);
+    return (extent->e_prof_tctx);
 }
 
 JEMALLOC_INLINE void
-extent_node_arena_set(extent_node_t *node, arena_t *arena)
+extent_arena_set(extent_t *extent, arena_t *arena)
 {
 
-    node->en_arena = arena;
+    extent->e_arena = arena;
 }
 
 JEMALLOC_INLINE void
-extent_node_addr_set(extent_node_t *node, void *addr)
+extent_addr_set(extent_t *extent, void *addr)
 {
 
-    node->en_addr = addr;
+    extent->e_addr = addr;
 }
 
 JEMALLOC_INLINE void
-extent_node_size_set(extent_node_t *node, size_t size)
+extent_size_set(extent_t *extent, size_t size)
 {
 
-    node->en_size = size;
+    extent->e_size = size;
 }
 
 JEMALLOC_INLINE void
-extent_node_zeroed_set(extent_node_t *node, bool zeroed)
+extent_zeroed_set(extent_t *extent, bool zeroed)
 {
 
-    node->en_zeroed = zeroed;
+    extent->e_zeroed = zeroed;
 }
 
 JEMALLOC_INLINE void
-extent_node_committed_set(extent_node_t *node, bool committed)
+extent_committed_set(extent_t *extent, bool committed)
 {
 
-    node->en_committed = committed;
+    extent->e_committed = committed;
 }
 
 JEMALLOC_INLINE void
-extent_node_achunk_set(extent_node_t *node, bool achunk)
+extent_achunk_set(extent_t *extent, bool achunk)
 {
 
-    node->en_achunk = achunk;
+    extent->e_achunk = achunk;
 }
 
 JEMALLOC_INLINE void
-extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx)
+extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx)
 {
 
-    node->en_prof_tctx = tctx;
+    extent->e_prof_tctx = tctx;
 }
 
 JEMALLOC_INLINE void
-extent_node_init(extent_node_t *node, arena_t *arena, void *addr, size_t size,
+extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
     bool zeroed, bool committed)
 {
 
-    extent_node_arena_set(node, arena);
-    extent_node_addr_set(node, addr);
-    extent_node_size_set(node, size);
-    extent_node_zeroed_set(node, zeroed);
-    extent_node_committed_set(node, committed);
-    extent_node_achunk_set(node, false);
+    extent_arena_set(extent, arena);
+    extent_addr_set(extent, addr);
+    extent_size_set(extent, size);
+    extent_zeroed_set(extent, zeroed);
+    extent_committed_set(extent, committed);
+    extent_achunk_set(extent, false);
     if (config_prof)
-        extent_node_prof_tctx_set(node, NULL);
+        extent_prof_tctx_set(extent, NULL);
 }
 
 JEMALLOC_INLINE void
-extent_node_dirty_linkage_init(extent_node_t *node)
+extent_dirty_linkage_init(extent_t *extent)
 {
 
-    qr_new(&node->rd, rd_link);
-    qr_new(node, cc_link);
+    qr_new(&extent->rd, rd_link);
+    qr_new(extent, cc_link);
 }
 
 JEMALLOC_INLINE void
-extent_node_dirty_insert(extent_node_t *node,
-    arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty)
+extent_dirty_insert(extent_t *extent,
+    arena_runs_dirty_link_t *runs_dirty, extent_t *chunks_dirty)
 {
 
-    qr_meld(runs_dirty, &node->rd, rd_link);
-    qr_meld(chunks_dirty, node, cc_link);
+    qr_meld(runs_dirty, &extent->rd, rd_link);
+    qr_meld(chunks_dirty, extent, cc_link);
 }
 
 JEMALLOC_INLINE void
-extent_node_dirty_remove(extent_node_t *node)
+extent_dirty_remove(extent_t *extent)
 {
 
-    qr_remove(&node->rd, rd_link);
-    qr_remove(node, cc_link);
+    qr_remove(&extent->rd, rd_link);
+    qr_remove(extent, cc_link);
 }
 
 #endif
 
 #endif /* JEMALLOC_H_INLINES */
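
The renamed accessors keep the old get/set pairing, and extent_init() still populates every field, clears the achunk flag, and resets the profiling context when profiling is compiled in. A minimal usage sketch under the new names; the arena, addr, and chunksize values here are assumed context rather than part of the diff:

    extent_t extent;

    /* Describe a span: owner arena, base address, size, zeroed, committed. */
    extent_init(&extent, arena, addr, chunksize, false, true);
    assert(extent_size_get(&extent) == chunksize);
    assert(!extent_achunk_get(&extent));    /* Cleared by extent_init(). */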

include/jemalloc/internal/jemalloc_internal.h.in

@@ -966,6 +966,7 @@ decay_ticker_get(tsd_t *tsd, unsigned ind)
 #include "jemalloc/internal/hash.h"
 
 #ifndef JEMALLOC_ENABLE_INLINE
+extent_t *iealloc(const void *ptr);
 arena_t *iaalloc(const void *ptr);
 size_t isalloc(tsdn_t *tsdn, const void *ptr, bool demote);
 void *iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero,
@@ -995,6 +996,13 @@ bool ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
 #endif
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
+JEMALLOC_ALWAYS_INLINE extent_t *
+iealloc(const void *ptr)
+{
+
+    return (chunk_lookup(ptr, true));
+}
+
 JEMALLOC_ALWAYS_INLINE arena_t *
 iaalloc(const void *ptr)
 {
@@ -1086,15 +1094,15 @@ ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero)
 JEMALLOC_ALWAYS_INLINE size_t
 ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote)
 {
-    extent_node_t *node;
+    extent_t *extent;
 
     /* Return 0 if ptr is not within a chunk managed by jemalloc. */
-    node = chunk_lookup(ptr, false);
-    if (node == NULL)
+    extent = chunk_lookup(ptr, false);
+    if (extent == NULL)
         return (0);
     /* Only arena chunks should be looked up via interior pointers. */
-    assert(extent_node_addr_get(node) == ptr ||
-        extent_node_achunk_get(node));
+    assert(extent_addr_get(extent) == ptr ||
+        extent_achunk_get(extent));
 
     return (isalloc(tsdn, ptr, demote));
 }
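
The new iealloc() wrapper is a dependent chunk_lookup(): the caller asserts that ptr is jemalloc-managed, so the lookup is assumed to succeed and the result is not checked. ivsalloc() above is the non-dependent case and must tolerate a NULL result for foreign pointers. A sketch of the two idioms (hypothetical caller, not from the diff):

    extent_t *extent;

    extent = iealloc(ptr);              /* ptr known to be jemalloc-managed. */
    extent = chunk_lookup(ptr, false);  /* May be NULL for a foreign ptr... */
    if (extent == NULL)
        return (0);                     /* ...as in ivsalloc(). */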

include/jemalloc/internal/private_symbols.txt

@@ -35,6 +35,8 @@ arena_decay_time_get
 arena_decay_time_set
 arena_dss_prec_get
 arena_dss_prec_set
+arena_extent_alloc
+arena_extent_dalloc
 arena_get
 arena_ichoose
 arena_init
@@ -78,8 +80,6 @@ arena_miscelm_get_mutable
 arena_miscelm_to_pageind
 arena_miscelm_to_rpages
 arena_new
-arena_node_alloc
-arena_node_dalloc
 arena_nthreads_dec
 arena_nthreads_get
 arena_nthreads_inc
@@ -204,24 +204,22 @@ ctl_postfork_parent
 ctl_prefork
 decay_ticker_get
 dss_prec_names
-extent_node_achunk_get
-extent_node_achunk_set
-extent_node_addr_get
-extent_node_addr_set
-extent_node_arena_get
-extent_node_arena_set
-extent_node_committed_get
-extent_node_committed_set
-extent_node_dirty_insert
-extent_node_dirty_linkage_init
-extent_node_dirty_remove
-extent_node_init
-extent_node_prof_tctx_get
-extent_node_prof_tctx_set
-extent_node_size_get
-extent_node_size_set
-extent_node_zeroed_get
-extent_node_zeroed_set
+extent_achunk_get
+extent_achunk_set
+extent_addr_get
+extent_addr_set
+extent_arena_get
+extent_arena_set
+extent_committed_get
+extent_committed_set
+extent_dirty_insert
+extent_dirty_linkage_init
+extent_dirty_remove
+extent_init
+extent_prof_tctx_get
+extent_prof_tctx_set
+extent_size_get
+extent_size_set
 extent_tree_ad_destroy
 extent_tree_ad_destroy_recurse
 extent_tree_ad_empty
@@ -260,6 +258,8 @@ extent_tree_szad_reverse_iter
 extent_tree_szad_reverse_iter_recurse
 extent_tree_szad_reverse_iter_start
 extent_tree_szad_search
+extent_zeroed_get
+extent_zeroed_set
 ffs_llu
 ffs_lu
 ffs_u
@@ -294,6 +294,7 @@ iallocztm
 iarena_cleanup
 idalloc
 idalloctm
+iealloc
 index2size
 index2size_compute
 index2size_lookup

include/jemalloc/internal/rtree.h

@@ -39,7 +39,7 @@ struct rtree_node_elm_s {
     union {
         void *pun;
        rtree_node_elm_t *child;
-        extent_node_t *val;
+        extent_t *val;
     };
 };
 
@@ -116,17 +116,17 @@ rtree_node_elm_t *rtree_child_tryread(rtree_node_elm_t *elm,
     bool dependent);
 rtree_node_elm_t *rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm,
     unsigned level, bool dependent);
-extent_node_t *rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm,
+extent_t *rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm,
     bool dependent);
 void rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm,
-    const extent_node_t *val);
+    const extent_t *val);
 rtree_node_elm_t *rtree_subtree_tryread(rtree_t *rtree, unsigned level,
     bool dependent);
 rtree_node_elm_t *rtree_subtree_read(rtree_t *rtree, unsigned level,
     bool dependent);
 
-extent_node_t *rtree_get(rtree_t *rtree, uintptr_t key, bool dependent);
-bool rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val);
+extent_t *rtree_get(rtree_t *rtree, uintptr_t key, bool dependent);
+bool rtree_set(rtree_t *rtree, uintptr_t key, const extent_t *val);
 #endif
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
@@ -186,7 +186,7 @@ rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level,
     return (child);
 }
 
-JEMALLOC_ALWAYS_INLINE extent_node_t *
+JEMALLOC_ALWAYS_INLINE extent_t *
 rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm, bool dependent)
 {
 
@@ -209,7 +209,7 @@ rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm, bool dependent)
 }
 
 JEMALLOC_INLINE void
-rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, const extent_node_t *val)
+rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, const extent_t *val)
 {
 
     atomic_write_p(&elm->pun, val);
@@ -240,7 +240,7 @@ rtree_subtree_read(rtree_t *rtree, unsigned level, bool dependent)
     return (subtree);
 }
 
-JEMALLOC_ALWAYS_INLINE extent_node_t *
+JEMALLOC_ALWAYS_INLINE extent_t *
 rtree_get(rtree_t *rtree, uintptr_t key, bool dependent)
 {
     uintptr_t subkey;
@@ -332,7 +332,7 @@ rtree_get(rtree_t *rtree, uintptr_t key, bool dependent)
 }
 
 JEMALLOC_INLINE bool
-rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val)
+rtree_set(rtree_t *rtree, uintptr_t key, const extent_t *val)
 {
     uintptr_t subkey;
     unsigned i, start_level;
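
rtree_val_write() stores the extent pointer through the elm->pun union member with atomic_write_p(), so a reader racing with registration observes either the old value or the fully written pointer, never a torn one. A simplified sketch of how chunk registration presumably drives these primitives (the chunks_rtree name comes from the arena.h comment above; error handling is omitted):

    /* Map a chunk's base address to its extent descriptor. */
    if (rtree_set(&chunks_rtree, (uintptr_t)chunk, extent))
        return (true);    /* Failed to build the rtree path. */

    /* Translate a pointer back to its extent. */
    extent_t *extent = rtree_get(&chunks_rtree, (uintptr_t)ptr, dependent);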

include/jemalloc/internal/witness.h

@@ -24,7 +24,7 @@ typedef int witness_comp_t (const witness_t *, const witness_t *);
 
 #define WITNESS_RANK_ARENA 8U
 #define WITNESS_RANK_ARENA_CHUNKS 9U
-#define WITNESS_RANK_ARENA_NODE_CACHE 10
+#define WITNESS_RANK_ARENA_EXTENT_CACHE 10
 
 #define WITNESS_RANK_BASE 11U
 

src/arena.c (175 changed lines)
@ -214,32 +214,32 @@ arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
|
|||||||
}
|
}
|
||||||
|
|
||||||
static size_t
|
static size_t
|
||||||
arena_chunk_dirty_npages(const extent_node_t *node)
|
arena_chunk_dirty_npages(const extent_t *extent)
|
||||||
{
|
{
|
||||||
|
|
||||||
return (extent_node_size_get(node) >> LG_PAGE);
|
return (extent_size_get(extent) >> LG_PAGE);
|
||||||
}
|
}
|
||||||
|
|
||||||
void
|
void
|
||||||
arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, bool cache)
|
arena_chunk_cache_maybe_insert(arena_t *arena, extent_t *extent, bool cache)
|
||||||
{
|
{
|
||||||
|
|
||||||
if (cache) {
|
if (cache) {
|
||||||
extent_node_dirty_linkage_init(node);
|
extent_dirty_linkage_init(extent);
|
||||||
extent_node_dirty_insert(node, &arena->runs_dirty,
|
extent_dirty_insert(extent, &arena->runs_dirty,
|
||||||
&arena->chunks_cache);
|
&arena->chunks_cache);
|
||||||
arena->ndirty += arena_chunk_dirty_npages(node);
|
arena->ndirty += arena_chunk_dirty_npages(extent);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void
|
void
|
||||||
arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, bool dirty)
|
arena_chunk_cache_maybe_remove(arena_t *arena, extent_t *extent, bool dirty)
|
||||||
{
|
{
|
||||||
|
|
||||||
if (dirty) {
|
if (dirty) {
|
||||||
extent_node_dirty_remove(node);
|
extent_dirty_remove(extent);
|
||||||
assert(arena->ndirty >= arena_chunk_dirty_npages(node));
|
assert(arena->ndirty >= arena_chunk_dirty_npages(extent));
|
||||||
arena->ndirty -= arena_chunk_dirty_npages(node);
|
arena->ndirty -= arena_chunk_dirty_npages(extent);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -516,14 +516,14 @@ arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
|
|||||||
{
|
{
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* The extent node notion of "committed" doesn't directly apply to
|
* The extent notion of "committed" doesn't directly apply to arena
|
||||||
* arena chunks. Arbitrarily mark them as committed. The commit state
|
* chunks. Arbitrarily mark them as committed. The commit state of
|
||||||
* of runs is tracked individually, and upon chunk deallocation the
|
* runs is tracked individually, and upon chunk deallocation the entire
|
||||||
* entire chunk is in a consistent commit state.
|
* chunk is in a consistent commit state.
|
||||||
*/
|
*/
|
||||||
extent_node_init(&chunk->node, arena, chunk, chunksize, zero, true);
|
extent_init(&chunk->extent, arena, chunk, chunksize, zero, true);
|
||||||
extent_node_achunk_set(&chunk->node, true);
|
extent_achunk_set(&chunk->extent, true);
|
||||||
return (chunk_register(tsdn, chunk, &chunk->node));
|
return (chunk_register(tsdn, chunk, &chunk->extent));
|
||||||
}
|
}
|
||||||
|
|
||||||
static arena_chunk_t *
|
static arena_chunk_t *
|
||||||
@ -648,8 +648,8 @@ arena_chunk_alloc(tsdn_t *tsdn, arena_t *arena)
|
|||||||
return (NULL);
|
return (NULL);
|
||||||
}
|
}
|
||||||
|
|
||||||
ql_elm_new(&chunk->node, ql_link);
|
ql_elm_new(&chunk->extent, ql_link);
|
||||||
ql_tail_insert(&arena->achunks, &chunk->node, ql_link);
|
ql_tail_insert(&arena->achunks, &chunk->extent, ql_link);
|
||||||
arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias);
|
arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias);
|
||||||
|
|
||||||
return (chunk);
|
return (chunk);
|
||||||
@ -661,7 +661,7 @@ arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
|
|||||||
bool committed;
|
bool committed;
|
||||||
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
|
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
|
||||||
|
|
||||||
chunk_deregister(chunk, &chunk->node);
|
chunk_deregister(chunk, &chunk->extent);
|
||||||
|
|
||||||
committed = (arena_mapbits_decommitted_get(chunk, map_bias) == 0);
|
committed = (arena_mapbits_decommitted_get(chunk, map_bias) == 0);
|
||||||
if (!committed) {
|
if (!committed) {
|
||||||
@ -718,7 +718,7 @@ arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
|
|||||||
/* Remove run from runs_avail, so that the arena does not use it. */
|
/* Remove run from runs_avail, so that the arena does not use it. */
|
||||||
arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias);
|
arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias);
|
||||||
|
|
||||||
ql_remove(&arena->achunks, &chunk->node, ql_link);
|
ql_remove(&arena->achunks, &chunk->extent, ql_link);
|
||||||
spare = arena->spare;
|
spare = arena->spare;
|
||||||
arena->spare = chunk;
|
arena->spare = chunk;
|
||||||
if (spare != NULL)
|
if (spare != NULL)
|
||||||
@ -805,30 +805,30 @@ arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize,
|
|||||||
arena_huge_malloc_stats_update_undo(arena, usize);
|
arena_huge_malloc_stats_update_undo(arena, usize);
|
||||||
}
|
}
|
||||||
|
|
||||||
extent_node_t *
|
extent_t *
|
||||||
arena_node_alloc(tsdn_t *tsdn, arena_t *arena)
|
arena_extent_alloc(tsdn_t *tsdn, arena_t *arena)
|
||||||
{
|
{
|
||||||
extent_node_t *node;
|
extent_t *extent;
|
||||||
|
|
||||||
malloc_mutex_lock(tsdn, &arena->node_cache_mtx);
|
malloc_mutex_lock(tsdn, &arena->extent_cache_mtx);
|
||||||
node = ql_last(&arena->node_cache, ql_link);
|
extent = ql_last(&arena->extent_cache, ql_link);
|
||||||
if (node == NULL) {
|
if (extent == NULL) {
|
||||||
malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
|
malloc_mutex_unlock(tsdn, &arena->extent_cache_mtx);
|
||||||
return (base_alloc(tsdn, sizeof(extent_node_t)));
|
return (base_alloc(tsdn, sizeof(extent_t)));
|
||||||
}
|
}
|
||||||
ql_tail_remove(&arena->node_cache, extent_node_t, ql_link);
|
ql_tail_remove(&arena->extent_cache, extent_t, ql_link);
|
||||||
malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
|
malloc_mutex_unlock(tsdn, &arena->extent_cache_mtx);
|
||||||
return (node);
|
return (extent);
|
||||||
}
|
}
|
||||||
|
|
||||||
void
|
void
|
||||||
arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node)
|
arena_extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent)
|
||||||
{
|
{
|
||||||
|
|
||||||
malloc_mutex_lock(tsdn, &arena->node_cache_mtx);
|
malloc_mutex_lock(tsdn, &arena->extent_cache_mtx);
|
||||||
ql_elm_new(node, ql_link);
|
ql_elm_new(extent, ql_link);
|
||||||
ql_tail_insert(&arena->node_cache, node, ql_link);
|
ql_tail_insert(&arena->extent_cache, extent, ql_link);
|
||||||
malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
|
malloc_mutex_unlock(tsdn, &arena->extent_cache_mtx);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void *
|
static void *
|
||||||
@ -1424,7 +1424,7 @@ arena_dirty_count(arena_t *arena)
|
|||||||
{
|
{
|
||||||
size_t ndirty = 0;
|
size_t ndirty = 0;
|
||||||
arena_runs_dirty_link_t *rdelm;
|
arena_runs_dirty_link_t *rdelm;
|
||||||
extent_node_t *chunkselm;
|
extent_t *chunkselm;
|
||||||
|
|
||||||
for (rdelm = qr_next(&arena->runs_dirty, rd_link),
|
for (rdelm = qr_next(&arena->runs_dirty, rd_link),
|
||||||
chunkselm = qr_next(&arena->chunks_cache, cc_link);
|
chunkselm = qr_next(&arena->chunks_cache, cc_link);
|
||||||
@ -1432,7 +1432,7 @@ arena_dirty_count(arena_t *arena)
|
|||||||
size_t npages;
|
size_t npages;
|
||||||
|
|
||||||
if (rdelm == &chunkselm->rd) {
|
if (rdelm == &chunkselm->rd) {
|
||||||
npages = extent_node_size_get(chunkselm) >> LG_PAGE;
|
npages = extent_size_get(chunkselm) >> LG_PAGE;
|
||||||
chunkselm = qr_next(chunkselm, cc_link);
|
chunkselm = qr_next(chunkselm, cc_link);
|
||||||
} else {
|
} else {
|
||||||
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
|
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
|
||||||
@ -1456,10 +1456,10 @@ arena_dirty_count(arena_t *arena)
|
|||||||
static size_t
|
static size_t
|
||||||
arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
|
arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
|
||||||
size_t ndirty_limit, arena_runs_dirty_link_t *purge_runs_sentinel,
|
size_t ndirty_limit, arena_runs_dirty_link_t *purge_runs_sentinel,
|
||||||
extent_node_t *purge_chunks_sentinel)
|
extent_t *purge_chunks_sentinel)
|
||||||
{
|
{
|
||||||
arena_runs_dirty_link_t *rdelm, *rdelm_next;
|
arena_runs_dirty_link_t *rdelm, *rdelm_next;
|
||||||
extent_node_t *chunkselm;
|
extent_t *chunkselm;
|
||||||
size_t nstashed = 0;
|
size_t nstashed = 0;
|
||||||
|
|
||||||
/* Stash runs/chunks according to ndirty_limit. */
|
/* Stash runs/chunks according to ndirty_limit. */
|
||||||
@ -1470,11 +1470,11 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
|
|||||||
rdelm_next = qr_next(rdelm, rd_link);
|
rdelm_next = qr_next(rdelm, rd_link);
|
||||||
|
|
||||||
if (rdelm == &chunkselm->rd) {
|
if (rdelm == &chunkselm->rd) {
|
||||||
extent_node_t *chunkselm_next;
|
extent_t *chunkselm_next;
|
||||||
bool zero;
|
bool zero;
|
||||||
UNUSED void *chunk;
|
UNUSED void *chunk;
|
||||||
|
|
||||||
npages = extent_node_size_get(chunkselm) >> LG_PAGE;
|
npages = extent_size_get(chunkselm) >> LG_PAGE;
|
||||||
if (opt_purge == purge_mode_decay && arena->ndirty -
|
if (opt_purge == purge_mode_decay && arena->ndirty -
|
||||||
(nstashed + npages) < ndirty_limit)
|
(nstashed + npages) < ndirty_limit)
|
||||||
break;
|
break;
|
||||||
@ -1482,18 +1482,18 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
|
|||||||
chunkselm_next = qr_next(chunkselm, cc_link);
|
chunkselm_next = qr_next(chunkselm, cc_link);
|
||||||
/*
|
/*
|
||||||
* Allocate. chunkselm remains valid due to the
|
* Allocate. chunkselm remains valid due to the
|
||||||
* dalloc_node=false argument to chunk_alloc_cache().
|
* dalloc_extent=false argument to chunk_alloc_cache().
|
||||||
*/
|
*/
|
||||||
zero = false;
|
zero = false;
|
||||||
chunk = chunk_alloc_cache(tsdn, arena, chunk_hooks,
|
chunk = chunk_alloc_cache(tsdn, arena, chunk_hooks,
|
||||||
extent_node_addr_get(chunkselm),
|
extent_addr_get(chunkselm),
|
||||||
extent_node_size_get(chunkselm), chunksize, &zero,
|
extent_size_get(chunkselm), chunksize, &zero,
|
||||||
false);
|
false);
|
||||||
assert(chunk == extent_node_addr_get(chunkselm));
|
assert(chunk == extent_addr_get(chunkselm));
|
||||||
assert(zero == extent_node_zeroed_get(chunkselm));
|
assert(zero == extent_zeroed_get(chunkselm));
|
||||||
extent_node_dirty_insert(chunkselm, purge_runs_sentinel,
|
extent_dirty_insert(chunkselm, purge_runs_sentinel,
|
||||||
purge_chunks_sentinel);
|
purge_chunks_sentinel);
|
||||||
assert(npages == (extent_node_size_get(chunkselm) >>
|
assert(npages == (extent_size_get(chunkselm) >>
|
||||||
LG_PAGE));
|
LG_PAGE));
|
||||||
chunkselm = chunkselm_next;
|
chunkselm = chunkselm_next;
|
||||||
} else {
|
} else {
|
||||||
@ -1546,11 +1546,11 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
|
|||||||
static size_t
|
static size_t
|
||||||
arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
|
arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
|
||||||
arena_runs_dirty_link_t *purge_runs_sentinel,
|
arena_runs_dirty_link_t *purge_runs_sentinel,
|
||||||
extent_node_t *purge_chunks_sentinel)
|
extent_t *purge_chunks_sentinel)
|
||||||
{
|
{
|
||||||
size_t npurged, nmadvise;
|
size_t npurged, nmadvise;
|
||||||
arena_runs_dirty_link_t *rdelm;
|
arena_runs_dirty_link_t *rdelm;
|
||||||
extent_node_t *chunkselm;
|
extent_t *chunkselm;
|
||||||
|
|
||||||
if (config_stats)
|
if (config_stats)
|
||||||
nmadvise = 0;
|
nmadvise = 0;
|
||||||
@ -1571,7 +1571,7 @@ arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
|
|||||||
* decommitted, or purged, depending on chunk
|
* decommitted, or purged, depending on chunk
|
||||||
* deallocation policy.
|
* deallocation policy.
|
||||||
*/
|
*/
|
||||||
size_t size = extent_node_size_get(chunkselm);
|
size_t size = extent_size_get(chunkselm);
|
||||||
npages = size >> LG_PAGE;
|
npages = size >> LG_PAGE;
|
||||||
chunkselm = qr_next(chunkselm, cc_link);
|
chunkselm = qr_next(chunkselm, cc_link);
|
||||||
} else {
|
} else {
|
||||||
@ -1639,10 +1639,10 @@ arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
|
|||||||
static void
|
static void
|
||||||
arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
|
arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
|
||||||
arena_runs_dirty_link_t *purge_runs_sentinel,
|
arena_runs_dirty_link_t *purge_runs_sentinel,
|
||||||
extent_node_t *purge_chunks_sentinel)
|
extent_t *purge_chunks_sentinel)
|
||||||
{
|
{
|
||||||
arena_runs_dirty_link_t *rdelm, *rdelm_next;
|
arena_runs_dirty_link_t *rdelm, *rdelm_next;
|
||||||
extent_node_t *chunkselm;
|
extent_t *chunkselm;
|
||||||
|
|
||||||
/* Deallocate chunks/runs. */
|
/* Deallocate chunks/runs. */
|
||||||
for (rdelm = qr_next(purge_runs_sentinel, rd_link),
|
for (rdelm = qr_next(purge_runs_sentinel, rd_link),
|
||||||
@ -1650,14 +1650,13 @@ arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
|
|||||||
rdelm != purge_runs_sentinel; rdelm = rdelm_next) {
|
rdelm != purge_runs_sentinel; rdelm = rdelm_next) {
|
||||||
rdelm_next = qr_next(rdelm, rd_link);
|
rdelm_next = qr_next(rdelm, rd_link);
|
||||||
if (rdelm == &chunkselm->rd) {
|
if (rdelm == &chunkselm->rd) {
|
||||||
extent_node_t *chunkselm_next = qr_next(chunkselm,
|
extent_t *chunkselm_next = qr_next(chunkselm, cc_link);
|
||||||
cc_link);
|
void *addr = extent_addr_get(chunkselm);
|
||||||
void *addr = extent_node_addr_get(chunkselm);
|
size_t size = extent_size_get(chunkselm);
|
||||||
size_t size = extent_node_size_get(chunkselm);
|
bool zeroed = extent_zeroed_get(chunkselm);
|
||||||
bool zeroed = extent_node_zeroed_get(chunkselm);
|
bool committed = extent_committed_get(chunkselm);
|
||||||
bool committed = extent_node_committed_get(chunkselm);
|
extent_dirty_remove(chunkselm);
|
||||||
extent_node_dirty_remove(chunkselm);
|
arena_extent_dalloc(tsdn, arena, chunkselm);
|
||||||
arena_node_dalloc(tsdn, arena, chunkselm);
|
|
||||||
chunkselm = chunkselm_next;
|
chunkselm = chunkselm_next;
|
||||||
chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, addr,
|
chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, addr,
|
||||||
size, zeroed, committed);
|
size, zeroed, committed);
|
||||||
@ -1692,7 +1691,7 @@ arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit)
|
|||||||
chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
|
chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
|
||||||
size_t npurge, npurged;
|
size_t npurge, npurged;
|
||||||
arena_runs_dirty_link_t purge_runs_sentinel;
|
arena_runs_dirty_link_t purge_runs_sentinel;
|
||||||
extent_node_t purge_chunks_sentinel;
|
extent_t purge_chunks_sentinel;
|
||||||
|
|
||||||
arena->purging = true;
|
arena->purging = true;
|
||||||
|
|
||||||
@ -1708,7 +1707,7 @@ arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit)
|
|||||||
arena->lg_dirty_mult) < arena->ndirty || ndirty_limit == 0);
|
arena->lg_dirty_mult) < arena->ndirty || ndirty_limit == 0);
|
||||||
|
|
||||||
qr_new(&purge_runs_sentinel, rd_link);
|
qr_new(&purge_runs_sentinel, rd_link);
|
||||||
extent_node_dirty_linkage_init(&purge_chunks_sentinel);
|
extent_dirty_linkage_init(&purge_chunks_sentinel);
|
||||||
|
|
||||||
npurge = arena_stash_dirty(tsdn, arena, &chunk_hooks, ndirty_limit,
|
npurge = arena_stash_dirty(tsdn, arena, &chunk_hooks, ndirty_limit,
|
||||||
&purge_runs_sentinel, &purge_chunks_sentinel);
|
&purge_runs_sentinel, &purge_chunks_sentinel);
|
||||||
@ -1783,7 +1782,7 @@ void
|
|||||||
arena_reset(tsd_t *tsd, arena_t *arena)
|
arena_reset(tsd_t *tsd, arena_t *arena)
|
||||||
{
|
{
|
||||||
unsigned i;
|
unsigned i;
|
||||||
extent_node_t *node;
|
extent_t *extent;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Locking in this function is unintuitive. The caller guarantees that
|
* Locking in this function is unintuitive. The caller guarantees that
|
||||||
@ -1801,9 +1800,9 @@ arena_reset(tsd_t *tsd, arena_t *arena)
|
|||||||
|
|
||||||
/* Remove large allocations from prof sample set. */
|
/* Remove large allocations from prof sample set. */
|
||||||
if (config_prof && opt_prof) {
|
if (config_prof && opt_prof) {
|
||||||
ql_foreach(node, &arena->achunks, ql_link) {
|
ql_foreach(extent, &arena->achunks, ql_link) {
|
||||||
arena_achunk_prof_reset(tsd, arena,
|
arena_achunk_prof_reset(tsd, arena,
|
||||||
extent_node_addr_get(node));
|
extent_addr_get(extent));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1815,9 +1814,9 @@ arena_reset(tsd_t *tsd, arena_t *arena)
|
|||||||
|
|
||||||
/* Huge allocations. */
|
/* Huge allocations. */
|
||||||
malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
|
malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
|
||||||
for (node = ql_last(&arena->huge, ql_link); node != NULL; node =
|
for (extent = ql_last(&arena->huge, ql_link); extent != NULL; extent =
|
||||||
ql_last(&arena->huge, ql_link)) {
|
ql_last(&arena->huge, ql_link)) {
|
||||||
void *ptr = extent_node_addr_get(node);
|
void *ptr = extent_addr_get(extent);
|
||||||
size_t usize;
|
size_t usize;
|
||||||
|
|
||||||
malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);
|
malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);
|
||||||
@ -1854,18 +1853,18 @@ arena_reset(tsd_t *tsd, arena_t *arena)
|
|||||||
* chains directly correspond.
|
* chains directly correspond.
|
||||||
*/
|
*/
|
||||||
qr_new(&arena->runs_dirty, rd_link);
|
qr_new(&arena->runs_dirty, rd_link);
|
||||||
for (node = qr_next(&arena->chunks_cache, cc_link);
|
for (extent = qr_next(&arena->chunks_cache, cc_link);
|
||||||
node != &arena->chunks_cache; node = qr_next(node, cc_link)) {
|
extent != &arena->chunks_cache; extent = qr_next(extent, cc_link)) {
|
||||||
qr_new(&node->rd, rd_link);
|
qr_new(&extent->rd, rd_link);
|
||||||
qr_meld(&arena->runs_dirty, &node->rd, rd_link);
|
qr_meld(&arena->runs_dirty, &extent->rd, rd_link);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Arena chunks. */
|
/* Arena chunks. */
|
||||||
for (node = ql_last(&arena->achunks, ql_link); node != NULL; node =
|
for (extent = ql_last(&arena->achunks, ql_link); extent != NULL; extent
|
||||||
ql_last(&arena->achunks, ql_link)) {
|
= ql_last(&arena->achunks, ql_link)) {
|
||||||
ql_remove(&arena->achunks, node, ql_link);
|
ql_remove(&arena->achunks, extent, ql_link);
|
||||||
arena_chunk_discard(tsd_tsdn(tsd), arena,
|
arena_chunk_discard(tsd_tsdn(tsd), arena,
|
||||||
extent_node_addr_get(node));
|
extent_addr_get(extent));
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Spare. */
|
/* Spare. */
|
||||||
@ -2649,8 +2648,8 @@ arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
|
|||||||
if (run == bin->runcur)
|
if (run == bin->runcur)
|
||||||
bin->runcur = NULL;
|
bin->runcur = NULL;
|
||||||
else {
|
else {
|
||||||
szind_t binind = arena_bin_index(extent_node_arena_get(
|
szind_t binind = arena_bin_index(extent_arena_get(
|
||||||
&chunk->node), bin);
|
&chunk->extent), bin);
|
||||||
const arena_bin_info_t *bin_info = &arena_bin_info[binind];
|
     const arena_bin_info_t *bin_info = &arena_bin_info[binind];

     /*
@@ -3018,7 +3017,7 @@ arena_ralloc_large(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
     }

     chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-    arena = extent_node_arena_get(&chunk->node);
+    arena = extent_arena_get(&chunk->extent);

     if (oldsize < usize_max) {
         bool ret = arena_ralloc_large_grow(tsdn, arena, chunk, ptr,
@@ -3080,7 +3079,7 @@ arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
         }

         chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-        arena_decay_tick(tsdn, extent_node_arena_get(&chunk->node));
+        arena_decay_tick(tsdn, extent_arena_get(&chunk->extent));
         return (false);
     } else {
         return (huge_ralloc_no_move(tsdn, ptr, oldsize, usize_min,
@@ -3404,9 +3403,9 @@ arena_new(tsdn_t *tsdn, unsigned ind)
     if (malloc_mutex_init(&arena->chunks_mtx, "arena_chunks",
         WITNESS_RANK_ARENA_CHUNKS))
         return (NULL);
-    ql_new(&arena->node_cache);
-    if (malloc_mutex_init(&arena->node_cache_mtx, "arena_node_cache",
-        WITNESS_RANK_ARENA_NODE_CACHE))
+    ql_new(&arena->extent_cache);
+    if (malloc_mutex_init(&arena->extent_cache_mtx, "arena_extent_cache",
+        WITNESS_RANK_ARENA_EXTENT_CACHE))
         return (NULL);

     arena->chunk_hooks = chunk_hooks_default;
@@ -3492,7 +3491,7 @@ void
 arena_prefork2(tsdn_t *tsdn, arena_t *arena)
 {

-    malloc_mutex_prefork(tsdn, &arena->node_cache_mtx);
+    malloc_mutex_prefork(tsdn, &arena->extent_cache_mtx);
 }

 void
@@ -3513,7 +3512,7 @@ arena_postfork_parent(tsdn_t *tsdn, arena_t *arena)
     malloc_mutex_postfork_parent(tsdn, &arena->huge_mtx);
     for (i = 0; i < NBINS; i++)
         malloc_mutex_postfork_parent(tsdn, &arena->bins[i].lock);
-    malloc_mutex_postfork_parent(tsdn, &arena->node_cache_mtx);
+    malloc_mutex_postfork_parent(tsdn, &arena->extent_cache_mtx);
     malloc_mutex_postfork_parent(tsdn, &arena->chunks_mtx);
     malloc_mutex_postfork_parent(tsdn, &arena->lock);
 }
@@ -3526,7 +3525,7 @@ arena_postfork_child(tsdn_t *tsdn, arena_t *arena)
     malloc_mutex_postfork_child(tsdn, &arena->huge_mtx);
     for (i = 0; i < NBINS; i++)
         malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock);
-    malloc_mutex_postfork_child(tsdn, &arena->node_cache_mtx);
+    malloc_mutex_postfork_child(tsdn, &arena->extent_cache_mtx);
     malloc_mutex_postfork_child(tsdn, &arena->chunks_mtx);
     malloc_mutex_postfork_child(tsdn, &arena->lock);
 }
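The three fork hooks above only rename extent_cache_mtx, but they encode a discipline worth noting: every arena mutex is acquired in prefork and released on both sides of fork(), in a fixed order, so a forked child never inherits a lock that some now-nonexistent thread held. A minimal sketch of the same pattern using plain pthreads rather than jemalloc's malloc_mutex wrappers; cache_mtx and install_fork_handlers are hypothetical names for illustration:

#include <pthread.h>

static pthread_mutex_t cache_mtx = PTHREAD_MUTEX_INITIALIZER;

/* Acquire before fork() so no thread can hold the lock across the fork. */
static void prefork(void) { pthread_mutex_lock(&cache_mtx); }
/* Both sides of the fork release the lock they now provably own. */
static void postfork_parent(void) { pthread_mutex_unlock(&cache_mtx); }
static void postfork_child(void) { pthread_mutex_unlock(&cache_mtx); }

static void
install_fork_handlers(void)
{
    pthread_atfork(prefork, postfork_parent, postfork_child);
}

Some allocators reinitialize the mutex in the child instead of unlocking it; either way, the invariant is that the child starts with every lock free.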
src/base.c (74 lines changed)
@@ -6,59 +6,59 @@

 static malloc_mutex_t base_mtx;
 static extent_tree_t base_avail_szad;
-static extent_node_t *base_nodes;
+static extent_t *base_extents;
 static size_t base_allocated;
 static size_t base_resident;
 static size_t base_mapped;

 /******************************************************************************/

-static extent_node_t *
-base_node_try_alloc(tsdn_t *tsdn)
+static extent_t *
+base_extent_try_alloc(tsdn_t *tsdn)
 {
-    extent_node_t *node;
+    extent_t *extent;

     malloc_mutex_assert_owner(tsdn, &base_mtx);

-    if (base_nodes == NULL)
+    if (base_extents == NULL)
         return (NULL);
-    node = base_nodes;
-    base_nodes = *(extent_node_t **)node;
-    return (node);
+    extent = base_extents;
+    base_extents = *(extent_t **)extent;
+    return (extent);
 }

 static void
-base_node_dalloc(tsdn_t *tsdn, extent_node_t *node)
+base_extent_dalloc(tsdn_t *tsdn, extent_t *extent)
 {

     malloc_mutex_assert_owner(tsdn, &base_mtx);

-    *(extent_node_t **)node = base_nodes;
-    base_nodes = node;
+    *(extent_t **)extent = base_extents;
+    base_extents = extent;
 }

-static extent_node_t *
+static extent_t *
 base_chunk_alloc(tsdn_t *tsdn, size_t minsize)
 {
-    extent_node_t *node;
+    extent_t *extent;
     size_t csize, nsize;
     void *addr;

     malloc_mutex_assert_owner(tsdn, &base_mtx);
     assert(minsize != 0);
-    node = base_node_try_alloc(tsdn);
-    /* Allocate enough space to also carve a node out if necessary. */
-    nsize = (node == NULL) ? CACHELINE_CEILING(sizeof(extent_node_t)) : 0;
+    extent = base_extent_try_alloc(tsdn);
+    /* Allocate enough space to also carve an extent out if necessary. */
+    nsize = (extent == NULL) ? CACHELINE_CEILING(sizeof(extent_t)) : 0;
     csize = CHUNK_CEILING(minsize + nsize);
     addr = chunk_alloc_base(csize);
     if (addr == NULL) {
-        if (node != NULL)
-            base_node_dalloc(tsdn, node);
+        if (extent != NULL)
+            base_extent_dalloc(tsdn, extent);
         return (NULL);
     }
     base_mapped += csize;
-    if (node == NULL) {
-        node = (extent_node_t *)addr;
+    if (extent == NULL) {
+        extent = (extent_t *)addr;
         addr = (void *)((uintptr_t)addr + nsize);
         csize -= nsize;
         if (config_stats) {
@@ -66,8 +66,8 @@ base_chunk_alloc(tsdn_t *tsdn, size_t minsize)
             base_resident += PAGE_CEILING(nsize);
         }
     }
-    extent_node_init(node, NULL, addr, csize, true, true);
-    return (node);
+    extent_init(extent, NULL, addr, csize, true, true);
+    return (extent);
 }

 /*
@@ -80,8 +80,8 @@ base_alloc(tsdn_t *tsdn, size_t size)
 {
     void *ret;
     size_t csize, usize;
-    extent_node_t *node;
-    extent_node_t key;
+    extent_t *extent;
+    extent_t key;

     /*
      * Round size up to nearest multiple of the cacheline size, so that
@@ -90,28 +90,28 @@ base_alloc(tsdn_t *tsdn, size_t size)
     csize = CACHELINE_CEILING(size);

     usize = s2u(csize);
-    extent_node_init(&key, NULL, NULL, usize, false, false);
+    extent_init(&key, NULL, NULL, usize, false, false);
     malloc_mutex_lock(tsdn, &base_mtx);
-    node = extent_tree_szad_nsearch(&base_avail_szad, &key);
-    if (node != NULL) {
+    extent = extent_tree_szad_nsearch(&base_avail_szad, &key);
+    if (extent != NULL) {
         /* Use existing space. */
-        extent_tree_szad_remove(&base_avail_szad, node);
+        extent_tree_szad_remove(&base_avail_szad, extent);
     } else {
         /* Try to allocate more space. */
-        node = base_chunk_alloc(tsdn, csize);
+        extent = base_chunk_alloc(tsdn, csize);
     }
-    if (node == NULL) {
+    if (extent == NULL) {
         ret = NULL;
         goto label_return;
     }

-    ret = extent_node_addr_get(node);
-    if (extent_node_size_get(node) > csize) {
-        extent_node_addr_set(node, (void *)((uintptr_t)ret + csize));
-        extent_node_size_set(node, extent_node_size_get(node) - csize);
-        extent_tree_szad_insert(&base_avail_szad, node);
+    ret = extent_addr_get(extent);
+    if (extent_size_get(extent) > csize) {
+        extent_addr_set(extent, (void *)((uintptr_t)ret + csize));
+        extent_size_set(extent, extent_size_get(extent) - csize);
+        extent_tree_szad_insert(&base_avail_szad, extent);
     } else
-        base_node_dalloc(tsdn, node);
+        base_extent_dalloc(tsdn, extent);
     if (config_stats) {
         base_allocated += csize;
         /*
@@ -147,7 +147,7 @@ base_boot(void)
     if (malloc_mutex_init(&base_mtx, "base", WITNESS_RANK_BASE))
         return (true);
     extent_tree_szad_new(&base_avail_szad);
-    base_nodes = NULL;
+    base_extents = NULL;

     return (false);
 }
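base_extent_try_alloc() and base_extent_dalloc() above implement a zero-overhead LIFO freelist: a freed extent_t's own first word is overwritten with the next pointer (*(extent_t **)extent = base_extents), so the list needs no auxiliary storage. A self-contained sketch of the trick on a stand-in type; obj_t and the function names are illustrative, not jemalloc's:

#include <stddef.h>
#include <stdio.h>

/* Any type at least as large as a pointer can carry the link in itself. */
typedef struct { char storage[64]; } obj_t;

static obj_t *free_head = NULL;

/* Push: reuse the object's first word as the "next" link. */
static void
obj_dalloc(obj_t *obj)
{
    *(obj_t **)obj = free_head;
    free_head = obj;
}

/* Pop: return the most recently freed object, or NULL. */
static obj_t *
obj_try_alloc(void)
{
    obj_t *obj = free_head;
    if (obj != NULL)
        free_head = *(obj_t **)obj;
    return (obj);
}

int
main(void)
{
    obj_t a, b;
    obj_dalloc(&a);
    obj_dalloc(&b);
    printf("%d %d %d\n", obj_try_alloc() == &b, obj_try_alloc() == &a,
        obj_try_alloc() == NULL);    /* prints: 1 1 1 */
    return (0);
}

This is safe only because base extents are never returned to the OS while on the freelist, so their memory remains valid to read.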
src/chunk.c (186 lines changed)
@@ -141,15 +141,15 @@ chunk_hooks_assure_initialized(tsdn_t *tsdn, arena_t *arena,
 }

 bool
-chunk_register(tsdn_t *tsdn, const void *chunk, const extent_node_t *node)
+chunk_register(tsdn_t *tsdn, const void *chunk, const extent_t *extent)
 {

-    assert(extent_node_addr_get(node) == chunk);
+    assert(extent_addr_get(extent) == chunk);

-    if (rtree_set(&chunks_rtree, (uintptr_t)chunk, node))
+    if (rtree_set(&chunks_rtree, (uintptr_t)chunk, extent))
         return (true);
     if (config_prof && opt_prof) {
-        size_t size = extent_node_size_get(node);
+        size_t size = extent_size_get(extent);
         size_t nadd = (size == 0) ? 1 : size / chunksize;
         size_t cur = atomic_add_z(&curchunks, nadd);
         size_t high = atomic_read_z(&highchunks);
@@ -168,14 +168,14 @@ chunk_register(tsdn_t *tsdn, const void *chunk, const extent_node_t *node)
 }

 void
-chunk_deregister(const void *chunk, const extent_node_t *node)
+chunk_deregister(const void *chunk, const extent_t *extent)
 {
     bool err;

     err = rtree_set(&chunks_rtree, (uintptr_t)chunk, NULL);
     assert(!err);
     if (config_prof && opt_prof) {
-        size_t size = extent_node_size_get(node);
+        size_t size = extent_size_get(extent);
         size_t nsub = (size == 0) ? 1 : size / chunksize;
         assert(atomic_read_z(&curchunks) >= nsub);
         atomic_sub_z(&curchunks, nsub);
@@ -186,15 +186,15 @@ chunk_deregister(const void *chunk, const extent_node_t *node)
  * Do first-best-fit chunk selection, i.e. select the lowest chunk that best
  * fits.
  */
-static extent_node_t *
+static extent_t *
 chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad,
     extent_tree_t *chunks_ad, size_t size)
 {
-    extent_node_t key;
+    extent_t key;

     assert(size == CHUNK_CEILING(size));

-    extent_node_init(&key, arena, NULL, size, false, false);
+    extent_init(&key, arena, NULL, size, false, false);
     return (extent_tree_szad_nsearch(chunks_szad, &key));
 }

@@ -202,20 +202,20 @@ static void *
 chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
     extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
     void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
-    bool dalloc_node)
+    bool dalloc_extent)
 {
     void *ret;
-    extent_node_t *node;
+    extent_t *extent;
     size_t alloc_size, leadsize, trailsize;
     bool zeroed, committed;

     assert(new_addr == NULL || alignment == chunksize);
     /*
-     * Cached chunks use the node linkage embedded in their headers, in
-     * which case dalloc_node is true, and new_addr is non-NULL because
+     * Cached chunks use the extent linkage embedded in their headers, in
+     * which case dalloc_extent is true, and new_addr is non-NULL because
      * we're operating on a specific chunk.
      */
-    assert(dalloc_node || new_addr != NULL);
+    assert(dalloc_extent || new_addr != NULL);

     alloc_size = CHUNK_CEILING(s2u(size + alignment - chunksize));
     /* Beware size_t wrap-around. */
@@ -224,56 +224,55 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
     malloc_mutex_lock(tsdn, &arena->chunks_mtx);
     chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
     if (new_addr != NULL) {
-        extent_node_t key;
-        extent_node_init(&key, arena, new_addr, alloc_size, false,
-            false);
-        node = extent_tree_ad_search(chunks_ad, &key);
+        extent_t key;
+        extent_init(&key, arena, new_addr, alloc_size, false, false);
+        extent = extent_tree_ad_search(chunks_ad, &key);
     } else {
-        node = chunk_first_best_fit(arena, chunks_szad, chunks_ad,
+        extent = chunk_first_best_fit(arena, chunks_szad, chunks_ad,
             alloc_size);
     }
-    if (node == NULL || (new_addr != NULL && extent_node_size_get(node) <
+    if (extent == NULL || (new_addr != NULL && extent_size_get(extent) <
         size)) {
         malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
         return (NULL);
     }
-    leadsize = ALIGNMENT_CEILING((uintptr_t)extent_node_addr_get(node),
-        alignment) - (uintptr_t)extent_node_addr_get(node);
+    leadsize = ALIGNMENT_CEILING((uintptr_t)extent_addr_get(extent),
+        alignment) - (uintptr_t)extent_addr_get(extent);
     assert(new_addr == NULL || leadsize == 0);
-    assert(extent_node_size_get(node) >= leadsize + size);
-    trailsize = extent_node_size_get(node) - leadsize - size;
-    ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize);
-    zeroed = extent_node_zeroed_get(node);
+    assert(extent_size_get(extent) >= leadsize + size);
+    trailsize = extent_size_get(extent) - leadsize - size;
+    ret = (void *)((uintptr_t)extent_addr_get(extent) + leadsize);
+    zeroed = extent_zeroed_get(extent);
     if (zeroed)
         *zero = true;
-    committed = extent_node_committed_get(node);
+    committed = extent_committed_get(extent);
     if (committed)
         *commit = true;
     /* Split the lead. */
     if (leadsize != 0 &&
-        chunk_hooks->split(extent_node_addr_get(node),
-        extent_node_size_get(node), leadsize, size, false, arena->ind)) {
+        chunk_hooks->split(extent_addr_get(extent),
+        extent_size_get(extent), leadsize, size, false, arena->ind)) {
         malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
         return (NULL);
     }
-    /* Remove node from the tree. */
-    extent_tree_szad_remove(chunks_szad, node);
-    extent_tree_ad_remove(chunks_ad, node);
-    arena_chunk_cache_maybe_remove(arena, node, cache);
+    /* Remove extent from the tree. */
+    extent_tree_szad_remove(chunks_szad, extent);
+    extent_tree_ad_remove(chunks_ad, extent);
+    arena_chunk_cache_maybe_remove(arena, extent, cache);
     if (leadsize != 0) {
         /* Insert the leading space as a smaller chunk. */
-        extent_node_size_set(node, leadsize);
-        extent_tree_szad_insert(chunks_szad, node);
-        extent_tree_ad_insert(chunks_ad, node);
-        arena_chunk_cache_maybe_insert(arena, node, cache);
-        node = NULL;
+        extent_size_set(extent, leadsize);
+        extent_tree_szad_insert(chunks_szad, extent);
+        extent_tree_ad_insert(chunks_ad, extent);
+        arena_chunk_cache_maybe_insert(arena, extent, cache);
+        extent = NULL;
     }
     if (trailsize != 0) {
         /* Split the trail. */
         if (chunk_hooks->split(ret, size + trailsize, size,
             trailsize, false, arena->ind)) {
-            if (dalloc_node && node != NULL)
-                arena_node_dalloc(tsdn, arena, node);
+            if (dalloc_extent && extent != NULL)
+                arena_extent_dalloc(tsdn, arena, extent);
             malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
             chunk_record(tsdn, arena, chunk_hooks, chunks_szad,
                 chunks_ad, cache, ret, size + trailsize, zeroed,
@@ -281,9 +280,9 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
             return (NULL);
         }
         /* Insert the trailing space as a smaller chunk. */
-        if (node == NULL) {
-            node = arena_node_alloc(tsdn, arena);
-            if (node == NULL) {
+        if (extent == NULL) {
+            extent = arena_extent_alloc(tsdn, arena);
+            if (extent == NULL) {
                 malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
                 chunk_record(tsdn, arena, chunk_hooks,
                     chunks_szad, chunks_ad, cache, ret, size +
@@ -291,12 +290,12 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
                 return (NULL);
             }
         }
-        extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size),
+        extent_init(extent, arena, (void *)((uintptr_t)(ret) + size),
             trailsize, zeroed, committed);
-        extent_tree_szad_insert(chunks_szad, node);
-        extent_tree_ad_insert(chunks_ad, node);
-        arena_chunk_cache_maybe_insert(arena, node, cache);
-        node = NULL;
+        extent_tree_szad_insert(chunks_szad, extent);
+        extent_tree_ad_insert(chunks_ad, extent);
+        arena_chunk_cache_maybe_insert(arena, extent, cache);
+        extent = NULL;
     }
     if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) {
         malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
@@ -306,9 +305,9 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
     }
     malloc_mutex_unlock(tsdn, &arena->chunks_mtx);

-    assert(dalloc_node || node != NULL);
-    if (dalloc_node && node != NULL)
-        arena_node_dalloc(tsdn, arena, node);
+    assert(dalloc_extent || extent != NULL);
+    if (dalloc_extent && extent != NULL)
+        arena_extent_dalloc(tsdn, arena, extent);
     if (*zero) {
         if (!zeroed)
             memset(ret, 0, size);
@@ -381,7 +380,8 @@ chunk_alloc_base(size_t size)

 void *
 chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
-    void *new_addr, size_t size, size_t alignment, bool *zero, bool dalloc_node)
+    void *new_addr, size_t size, size_t alignment, bool *zero,
+    bool dalloc_extent)
 {
     void *ret;
     bool commit;
@@ -394,7 +394,7 @@ chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
     commit = true;
     ret = chunk_recycle(tsdn, arena, chunk_hooks,
         &arena->chunks_szad_cached, &arena->chunks_ad_cached, true,
-        new_addr, size, alignment, zero, &commit, dalloc_node);
+        new_addr, size, alignment, zero, &commit, dalloc_extent);
     if (ret == NULL)
         return (NULL);
     assert(commit);
@@ -480,40 +480,39 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
     void *chunk, size_t size, bool zeroed, bool committed)
 {
     bool unzeroed;
-    extent_node_t *node, *prev;
-    extent_node_t key;
+    extent_t *extent, *prev;
+    extent_t key;

     assert(!cache || !zeroed);
     unzeroed = cache || !zeroed;

     malloc_mutex_lock(tsdn, &arena->chunks_mtx);
     chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
-    extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0,
-        false, false);
-    node = extent_tree_ad_nsearch(chunks_ad, &key);
+    extent_init(&key, arena, (void *)((uintptr_t)chunk + size), 0, false,
+        false);
+    extent = extent_tree_ad_nsearch(chunks_ad, &key);
     /* Try to coalesce forward. */
-    if (node != NULL && extent_node_addr_get(node) ==
-        extent_node_addr_get(&key) && extent_node_committed_get(node) ==
-        committed && !chunk_hooks->merge(chunk, size,
-        extent_node_addr_get(node), extent_node_size_get(node), false,
-        arena->ind)) {
+    if (extent != NULL && extent_addr_get(extent) == extent_addr_get(&key)
+        && extent_committed_get(extent) == committed &&
+        !chunk_hooks->merge(chunk, size, extent_addr_get(extent),
+        extent_size_get(extent), false, arena->ind)) {
         /*
          * Coalesce chunk with the following address range. This does
          * not change the position within chunks_ad, so only
          * remove/insert from/into chunks_szad.
          */
-        extent_tree_szad_remove(chunks_szad, node);
-        arena_chunk_cache_maybe_remove(arena, node, cache);
-        extent_node_addr_set(node, chunk);
-        extent_node_size_set(node, size + extent_node_size_get(node));
-        extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
-            !unzeroed);
-        extent_tree_szad_insert(chunks_szad, node);
-        arena_chunk_cache_maybe_insert(arena, node, cache);
+        extent_tree_szad_remove(chunks_szad, extent);
+        arena_chunk_cache_maybe_remove(arena, extent, cache);
+        extent_addr_set(extent, chunk);
+        extent_size_set(extent, size + extent_size_get(extent));
+        extent_zeroed_set(extent, extent_zeroed_get(extent) &&
+            !unzeroed);
+        extent_tree_szad_insert(chunks_szad, extent);
+        arena_chunk_cache_maybe_insert(arena, extent, cache);
     } else {
-        /* Coalescing forward failed, so insert a new node. */
-        node = arena_node_alloc(tsdn, arena);
-        if (node == NULL) {
+        /* Coalescing forward failed, so insert a new extent. */
+        extent = arena_extent_alloc(tsdn, arena);
+        if (extent == NULL) {
            /*
             * Node allocation failed, which is an exceedingly
             * unlikely failure.  Leak chunk after making sure its
@@ -526,39 +525,38 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
             }
             goto label_return;
         }
-        extent_node_init(node, arena, chunk, size, !unzeroed,
-            committed);
-        extent_tree_ad_insert(chunks_ad, node);
-        extent_tree_szad_insert(chunks_szad, node);
-        arena_chunk_cache_maybe_insert(arena, node, cache);
+        extent_init(extent, arena, chunk, size, !unzeroed,
+            committed);
+        extent_tree_ad_insert(chunks_ad, extent);
+        extent_tree_szad_insert(chunks_szad, extent);
+        arena_chunk_cache_maybe_insert(arena, extent, cache);
     }

     /* Try to coalesce backward. */
-    prev = extent_tree_ad_prev(chunks_ad, node);
-    if (prev != NULL && (void *)((uintptr_t)extent_node_addr_get(prev) +
-        extent_node_size_get(prev)) == chunk &&
-        extent_node_committed_get(prev) == committed &&
-        !chunk_hooks->merge(extent_node_addr_get(prev),
-        extent_node_size_get(prev), chunk, size, false, arena->ind)) {
+    prev = extent_tree_ad_prev(chunks_ad, extent);
+    if (prev != NULL && (void *)((uintptr_t)extent_addr_get(prev) +
+        extent_size_get(prev)) == chunk && extent_committed_get(prev) ==
+        committed && !chunk_hooks->merge(extent_addr_get(prev),
+        extent_size_get(prev), chunk, size, false, arena->ind)) {
         /*
          * Coalesce chunk with the previous address range. This does
          * not change the position within chunks_ad, so only
-         * remove/insert node from/into chunks_szad.
+         * remove/insert extent from/into chunks_szad.
          */
         extent_tree_szad_remove(chunks_szad, prev);
         extent_tree_ad_remove(chunks_ad, prev);
         arena_chunk_cache_maybe_remove(arena, prev, cache);
-        extent_tree_szad_remove(chunks_szad, node);
-        arena_chunk_cache_maybe_remove(arena, node, cache);
-        extent_node_addr_set(node, extent_node_addr_get(prev));
-        extent_node_size_set(node, extent_node_size_get(prev) +
-            extent_node_size_get(node));
-        extent_node_zeroed_set(node, extent_node_zeroed_get(prev) &&
-            extent_node_zeroed_get(node));
-        extent_tree_szad_insert(chunks_szad, node);
-        arena_chunk_cache_maybe_insert(arena, node, cache);
+        extent_tree_szad_remove(chunks_szad, extent);
+        arena_chunk_cache_maybe_remove(arena, extent, cache);
+        extent_addr_set(extent, extent_addr_get(prev));
+        extent_size_set(extent, extent_size_get(prev) +
+            extent_size_get(extent));
+        extent_zeroed_set(extent, extent_zeroed_get(prev) &&
+            extent_zeroed_get(extent));
+        extent_tree_szad_insert(chunks_szad, extent);
+        arena_chunk_cache_maybe_insert(arena, extent, cache);

-        arena_node_dalloc(tsdn, arena, prev);
+        arena_extent_dalloc(tsdn, arena, prev);
     }

 label_return:
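Beyond the renames, the chunk_recycle() hunks above preserve the routine's core arithmetic: a recycled extent is cut into an optional leading pad (to reach the requested alignment), the region handed back to the caller, and an optional trailing remainder, with both pads reinserted into the szad/ad trees. A sketch of just that split, with made-up example values; align_ceiling mirrors jemalloc's ALIGNMENT_CEILING for power-of-two alignments:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Round addr up to a power-of-two alignment. */
static uintptr_t
align_ceiling(uintptr_t addr, uintptr_t alignment)
{
    return ((addr + alignment - 1) & ~(alignment - 1));
}

int
main(void)
{
    uintptr_t ext_addr = 0x201000;    /* hypothetical recycled extent */
    size_t ext_size = 6 << 20;        /* 6 MiB available */
    size_t size = 2 << 20;            /* 2 MiB requested */
    uintptr_t alignment = 1 << 21;    /* 2 MiB alignment */

    size_t leadsize = align_ceiling(ext_addr, alignment) - ext_addr;
    assert(ext_size >= leadsize + size);
    size_t trailsize = ext_size - leadsize - size;
    uintptr_t ret = ext_addr + leadsize;

    /* The lead and trail would go back into the trees as smaller extents. */
    printf("ret=%#lx lead=%#zx trail=%#zx\n", (unsigned long)ret, leadsize,
        trailsize);
    return (0);
}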
src/extent.c (20 lines changed)
@@ -15,11 +15,11 @@ extent_quantize(size_t size)
 }

 JEMALLOC_INLINE_C int
-extent_szad_comp(const extent_node_t *a, const extent_node_t *b)
+extent_szad_comp(const extent_t *a, const extent_t *b)
 {
     int ret;
-    size_t a_qsize = extent_quantize(extent_node_size_get(a));
-    size_t b_qsize = extent_quantize(extent_node_size_get(b));
+    size_t a_qsize = extent_quantize(extent_size_get(a));
+    size_t b_qsize = extent_quantize(extent_size_get(b));

     /*
      * Compare based on quantized size rather than size, in order to sort
@@ -27,8 +27,8 @@ extent_szad_comp(const extent_node_t *a, const extent_node_t *b)
      */
     ret = (a_qsize > b_qsize) - (a_qsize < b_qsize);
     if (ret == 0) {
-        uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
-        uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);
+        uintptr_t a_addr = (uintptr_t)extent_addr_get(a);
+        uintptr_t b_addr = (uintptr_t)extent_addr_get(b);

         ret = (a_addr > b_addr) - (a_addr < b_addr);
     }
@@ -37,17 +37,17 @@ extent_szad_comp(const extent_node_t *a, const extent_node_t *b)
 }

 /* Generate red-black tree functions. */
-rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, szad_link,
+rb_gen(, extent_tree_szad_, extent_tree_t, extent_t, szad_link,
     extent_szad_comp)

 JEMALLOC_INLINE_C int
-extent_ad_comp(const extent_node_t *a, const extent_node_t *b)
+extent_ad_comp(const extent_t *a, const extent_t *b)
 {
-    uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
-    uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);
+    uintptr_t a_addr = (uintptr_t)extent_addr_get(a);
+    uintptr_t b_addr = (uintptr_t)extent_addr_get(b);

     return ((a_addr > b_addr) - (a_addr < b_addr));
 }

 /* Generate red-black tree functions. */
-rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, ad_link, extent_ad_comp)
+rb_gen(, extent_tree_ad_, extent_tree_t, extent_t, ad_link, extent_ad_comp)
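The two comparators above are the entire ordering policy for the extent trees: the szad tree sorts by quantized size with address as tiebreaker (so first-best-fit finds the lowest-addressed extent of the best-fitting size), while the ad tree sorts by address alone. Both use the branchless (a > b) - (a < b) idiom, which yields -1/0/1 without the overflow risk of subtraction. A reduced sketch on a toy struct; jemalloc's version additionally quantizes sizes through extent_quantize(), omitted here:

#include <stdint.h>
#include <stdio.h>

typedef struct { size_t size; void *addr; } ext_t;    /* toy extent */

/* Branchless three-way comparison: -1, 0, or 1. */
static int
cmp3(uintptr_t a, uintptr_t b)
{
    return ((a > b) - (a < b));
}

/* Size-then-address ordering, as in extent_szad_comp(). */
static int
szad_comp(const ext_t *a, const ext_t *b)
{
    int ret = cmp3(a->size, b->size);
    if (ret == 0)
        ret = cmp3((uintptr_t)a->addr, (uintptr_t)b->addr);
    return (ret);
}

int
main(void)
{
    ext_t x = {4096, (void *)0x1000}, y = {4096, (void *)0x2000};
    printf("%d\n", szad_comp(&x, &y));    /* -1: same size, lower address */
    return (0);
}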
src/huge.c (147 lines changed)
@@ -3,40 +3,40 @@

 /******************************************************************************/

-static extent_node_t *
-huge_node_get(const void *ptr)
+static extent_t *
+huge_extent_get(const void *ptr)
 {
-    extent_node_t *node;
+    extent_t *extent;

-    node = chunk_lookup(ptr, true);
-    assert(!extent_node_achunk_get(node));
+    extent = chunk_lookup(ptr, true);
+    assert(!extent_achunk_get(extent));

-    return (node);
+    return (extent);
 }

 static bool
-huge_node_set(tsdn_t *tsdn, const void *ptr, extent_node_t *node)
+huge_extent_set(tsdn_t *tsdn, const void *ptr, extent_t *extent)
 {

-    assert(extent_node_addr_get(node) == ptr);
-    assert(!extent_node_achunk_get(node));
-    return (chunk_register(tsdn, ptr, node));
+    assert(extent_addr_get(extent) == ptr);
+    assert(!extent_achunk_get(extent));
+    return (chunk_register(tsdn, ptr, extent));
 }

 static void
-huge_node_reset(tsdn_t *tsdn, const void *ptr, extent_node_t *node)
+huge_extent_reset(tsdn_t *tsdn, const void *ptr, extent_t *extent)
 {
     bool err;

-    err = huge_node_set(tsdn, ptr, node);
+    err = huge_extent_set(tsdn, ptr, extent);
     assert(!err);
 }

 static void
-huge_node_unset(const void *ptr, const extent_node_t *node)
+huge_extent_unset(const void *ptr, const extent_t *extent)
 {

-    chunk_deregister(ptr, node);
+    chunk_deregister(ptr, extent);
 }

 void *
@@ -54,7 +54,7 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
 {
     void *ret;
     size_t ausize;
-    extent_node_t *node;
+    extent_t *extent;
     bool is_zeroed;

     /* Allocate one or more contiguous chunks for this request. */
@@ -66,10 +66,10 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
         return (NULL);
     assert(ausize >= chunksize);

-    /* Allocate an extent node with which to track the chunk. */
-    node = ipallocztm(tsdn, CACHELINE_CEILING(sizeof(extent_node_t)),
+    /* Allocate an extent with which to track the chunk. */
+    extent = ipallocztm(tsdn, CACHELINE_CEILING(sizeof(extent_t)),
         CACHELINE, false, NULL, true, arena_ichoose(tsdn, arena));
-    if (node == NULL)
+    if (extent == NULL)
         return (NULL);

     /*
@@ -81,22 +81,22 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
     arena = arena_choose(tsdn_tsd(tsdn), arena);
     if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(tsdn,
         arena, usize, alignment, &is_zeroed)) == NULL) {
-        idalloctm(tsdn, node, NULL, true, true);
+        idalloctm(tsdn, extent, NULL, true, true);
         return (NULL);
     }

-    extent_node_init(node, arena, ret, usize, is_zeroed, true);
+    extent_init(extent, arena, ret, usize, is_zeroed, true);

-    if (huge_node_set(tsdn, ret, node)) {
+    if (huge_extent_set(tsdn, ret, extent)) {
         arena_chunk_dalloc_huge(tsdn, arena, ret, usize);
-        idalloctm(tsdn, node, NULL, true, true);
+        idalloctm(tsdn, extent, NULL, true, true);
         return (NULL);
     }

-    /* Insert node into huge. */
+    /* Insert extent into huge. */
     malloc_mutex_lock(tsdn, &arena->huge_mtx);
-    ql_elm_new(node, ql_link);
-    ql_tail_insert(&arena->huge, node, ql_link);
+    ql_elm_new(extent, ql_link);
+    ql_tail_insert(&arena->huge, extent, ql_link);
     malloc_mutex_unlock(tsdn, &arena->huge_mtx);

     if (zero || (config_fill && unlikely(opt_zero))) {
@@ -137,7 +137,7 @@ huge_ralloc_no_move_similar(tsdn_t *tsdn, void *ptr, size_t oldsize,
     size_t usize_min, size_t usize_max, bool zero)
 {
     size_t usize, usize_next;
-    extent_node_t *node;
+    extent_t *extent;
     arena_t *arena;
     chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
     bool pre_zeroed, post_zeroed;
@@ -150,9 +150,9 @@ huge_ralloc_no_move_similar(tsdn_t *tsdn, void *ptr, size_t oldsize,
     if (oldsize == usize)
         return;

-    node = huge_node_get(ptr);
-    arena = extent_node_arena_get(node);
-    pre_zeroed = extent_node_zeroed_get(node);
+    extent = huge_extent_get(ptr);
+    arena = extent_arena_get(extent);
+    pre_zeroed = extent_zeroed_get(extent);

     /* Fill if necessary (shrinking). */
     if (oldsize > usize) {
@@ -171,12 +171,12 @@ huge_ralloc_no_move_similar(tsdn_t *tsdn, void *ptr, size_t oldsize,

     malloc_mutex_lock(tsdn, &arena->huge_mtx);
     /* Update the size of the huge allocation. */
-    huge_node_unset(ptr, node);
-    assert(extent_node_size_get(node) != usize);
-    extent_node_size_set(node, usize);
-    huge_node_reset(tsdn, ptr, node);
+    assert(extent_size_get(extent) != usize);
+    huge_extent_unset(ptr, extent);
+    extent_size_set(extent, usize);
+    huge_extent_reset(tsdn, ptr, extent);
     /* Update zeroed. */
-    extent_node_zeroed_set(node, post_zeroed);
+    extent_zeroed_set(extent, post_zeroed);
     malloc_mutex_unlock(tsdn, &arena->huge_mtx);

     arena_chunk_ralloc_huge_similar(tsdn, arena, ptr, oldsize, usize);
@@ -199,15 +199,15 @@ static bool
 huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize,
     size_t usize)
 {
-    extent_node_t *node;
+    extent_t *extent;
     arena_t *arena;
     chunk_hooks_t chunk_hooks;
     size_t cdiff;
     bool pre_zeroed, post_zeroed;

-    node = huge_node_get(ptr);
-    arena = extent_node_arena_get(node);
-    pre_zeroed = extent_node_zeroed_get(node);
+    extent = huge_extent_get(ptr);
+    arena = extent_arena_get(extent);
+    pre_zeroed = extent_zeroed_get(extent);
     chunk_hooks = chunk_hooks_get(tsdn, arena);

     assert(oldsize > usize);
@@ -235,11 +235,11 @@ huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize,

     malloc_mutex_lock(tsdn, &arena->huge_mtx);
     /* Update the size of the huge allocation. */
-    huge_node_unset(ptr, node);
-    extent_node_size_set(node, usize);
-    huge_node_reset(tsdn, ptr, node);
+    huge_extent_unset(ptr, extent);
+    extent_size_set(extent, usize);
+    huge_extent_reset(tsdn, ptr, extent);
     /* Update zeroed. */
-    extent_node_zeroed_set(node, post_zeroed);
+    extent_zeroed_set(extent, post_zeroed);
     malloc_mutex_unlock(tsdn, &arena->huge_mtx);

     /* Zap the excess chunks. */
@@ -250,15 +250,16 @@ huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize,

 static bool
 huge_ralloc_no_move_expand(tsdn_t *tsdn, void *ptr, size_t oldsize,
-    size_t usize, bool zero) {
-    extent_node_t *node;
+    size_t usize, bool zero)
+{
+    extent_t *extent;
     arena_t *arena;
     bool is_zeroed_subchunk, is_zeroed_chunk;

-    node = huge_node_get(ptr);
-    arena = extent_node_arena_get(node);
+    extent = huge_extent_get(ptr);
+    arena = extent_arena_get(extent);
     malloc_mutex_lock(tsdn, &arena->huge_mtx);
-    is_zeroed_subchunk = extent_node_zeroed_get(node);
+    is_zeroed_subchunk = extent_zeroed_get(extent);
     malloc_mutex_unlock(tsdn, &arena->huge_mtx);

     /*
@@ -273,9 +274,9 @@ huge_ralloc_no_move_expand(tsdn_t *tsdn, void *ptr, size_t oldsize,

     malloc_mutex_lock(tsdn, &arena->huge_mtx);
     /* Update the size of the huge allocation. */
-    huge_node_unset(ptr, node);
-    extent_node_size_set(node, usize);
-    huge_node_reset(tsdn, ptr, node);
+    huge_extent_unset(ptr, extent);
+    extent_size_set(extent, usize);
+    huge_extent_reset(tsdn, ptr, extent);
     malloc_mutex_unlock(tsdn, &arena->huge_mtx);

     if (zero || (config_fill && unlikely(opt_zero))) {
@@ -390,21 +391,21 @@ huge_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
 void
 huge_dalloc(tsdn_t *tsdn, void *ptr)
 {
-    extent_node_t *node;
+    extent_t *extent;
     arena_t *arena;

-    node = huge_node_get(ptr);
-    arena = extent_node_arena_get(node);
-    huge_node_unset(ptr, node);
+    extent = huge_extent_get(ptr);
+    arena = extent_arena_get(extent);
+    huge_extent_unset(ptr, extent);
     malloc_mutex_lock(tsdn, &arena->huge_mtx);
-    ql_remove(&arena->huge, node, ql_link);
+    ql_remove(&arena->huge, extent, ql_link);
     malloc_mutex_unlock(tsdn, &arena->huge_mtx);

-    huge_dalloc_junk(tsdn, extent_node_addr_get(node),
-        extent_node_size_get(node));
-    arena_chunk_dalloc_huge(tsdn, extent_node_arena_get(node),
-        extent_node_addr_get(node), extent_node_size_get(node));
-    idalloctm(tsdn, node, NULL, true, true);
+    huge_dalloc_junk(tsdn, extent_addr_get(extent),
+        extent_size_get(extent));
+    arena_chunk_dalloc_huge(tsdn, extent_arena_get(extent),
+        extent_addr_get(extent), extent_size_get(extent));
+    idalloctm(tsdn, extent, NULL, true, true);

     arena_decay_tick(tsdn, arena);
 }
@@ -413,20 +414,20 @@ arena_t *
 huge_aalloc(const void *ptr)
 {

-    return (extent_node_arena_get(huge_node_get(ptr)));
+    return (extent_arena_get(huge_extent_get(ptr)));
 }

 size_t
 huge_salloc(tsdn_t *tsdn, const void *ptr)
 {
     size_t size;
-    extent_node_t *node;
+    extent_t *extent;
     arena_t *arena;

-    node = huge_node_get(ptr);
-    arena = extent_node_arena_get(node);
+    extent = huge_extent_get(ptr);
+    arena = extent_arena_get(extent);
     malloc_mutex_lock(tsdn, &arena->huge_mtx);
-    size = extent_node_size_get(node);
+    size = extent_size_get(extent);
     malloc_mutex_unlock(tsdn, &arena->huge_mtx);

     return (size);
@@ -436,13 +437,13 @@ prof_tctx_t *
 huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr)
 {
     prof_tctx_t *tctx;
-    extent_node_t *node;
+    extent_t *extent;
     arena_t *arena;

-    node = huge_node_get(ptr);
-    arena = extent_node_arena_get(node);
+    extent = huge_extent_get(ptr);
+    arena = extent_arena_get(extent);
     malloc_mutex_lock(tsdn, &arena->huge_mtx);
-    tctx = extent_node_prof_tctx_get(node);
+    tctx = extent_prof_tctx_get(extent);
     malloc_mutex_unlock(tsdn, &arena->huge_mtx);

     return (tctx);
@@ -451,13 +452,13 @@ huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr)
 void
 huge_prof_tctx_set(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx)
 {
-    extent_node_t *node;
+    extent_t *extent;
     arena_t *arena;

-    node = huge_node_get(ptr);
-    arena = extent_node_arena_get(node);
+    extent = huge_extent_get(ptr);
+    arena = extent_arena_get(extent);
     malloc_mutex_lock(tsdn, &arena->huge_mtx);
-    extent_node_prof_tctx_set(node, tctx);
+    extent_prof_tctx_set(extent, tctx);
     malloc_mutex_unlock(tsdn, &arena->huge_mtx);
 }
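Every huge_* routine above follows the same shape: huge_extent_get() maps the user pointer to its out-of-band extent_t (a chunks_rtree lookup keyed on the chunk address), then the owning arena's huge_mtx guards the metadata read or update. A toy pointer-keyed registry showing just the lookup half; the real code uses a radix tree, and meta_t/registry_* are hypothetical names:

#include <stddef.h>
#include <stdio.h>

typedef struct { void *addr; size_t size; } meta_t;    /* toy extent_t */

#define NSLOTS 64
static meta_t *slots[NSLOTS];    /* toy stand-in for chunks_rtree */

/* Key on the 2 MiB chunk base so interior pointers hit the same slot. */
static size_t
slot_of(const void *ptr)
{
    return (((size_t)ptr >> 21) % NSLOTS);
}

static void
registry_set(void *ptr, meta_t *m)
{
    slots[slot_of(ptr)] = m;
}

static meta_t *
registry_get(const void *ptr)
{
    return (slots[slot_of(ptr)]);
}

int
main(void)
{
    static meta_t m = {(void *)0x40000000, (size_t)4 << 20};
    registry_set(m.addr, &m);
    /* As in huge_salloc(): pointer -> metadata -> size. */
    printf("size=%zu\n", registry_get(m.addr)->size);    /* 4194304 */
    return (0);
}

The modulo table collides where a real radix tree would not; it is only meant to show the pointer-to-metadata indirection that distinguishes huge allocations from small ones, whose metadata lives in the chunk header itself.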
src/tcache.c

@@ -103,7 +103,7 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
         /* Lock the arena bin associated with the first object. */
         arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
             *(tbin->avail - 1));
-        arena_t *bin_arena = extent_node_arena_get(&chunk->node);
+        arena_t *bin_arena = extent_arena_get(&chunk->extent);
         arena_bin_t *bin = &bin_arena->bins[binind];

         if (config_prof && bin_arena == arena) {
@@ -126,7 +126,7 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
             ptr = *(tbin->avail - 1 - i);
             assert(ptr != NULL);
             chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-            if (extent_node_arena_get(&chunk->node) == bin_arena) {
+            if (extent_arena_get(&chunk->extent) == bin_arena) {
                 size_t pageind = ((uintptr_t)ptr -
                     (uintptr_t)chunk) >> LG_PAGE;
                 arena_chunk_map_bits_t *bitselm =
@@ -185,7 +185,7 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
         /* Lock the arena associated with the first object. */
         arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
             *(tbin->avail - 1));
-        arena_t *locked_arena = extent_node_arena_get(&chunk->node);
+        arena_t *locked_arena = extent_arena_get(&chunk->extent);
         UNUSED bool idump;

         if (config_prof)
@@ -211,8 +211,7 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
             ptr = *(tbin->avail - 1 - i);
             assert(ptr != NULL);
             chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-            if (extent_node_arena_get(&chunk->node) ==
-                locked_arena) {
+            if (extent_arena_get(&chunk->extent) == locked_arena) {
                 arena_dalloc_large_junked_locked(tsd_tsdn(tsd),
                     locked_arena, chunk, ptr);
             } else {
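Both flush paths above recover the owning arena from a cached pointer with CHUNK_ADDR2BASE(): because chunks are naturally aligned to the chunk size, masking off the low bits of any interior pointer lands on the chunk header, whose embedded extent (formerly node) names the arena. A minimal sketch of the masking; the LG_CHUNK value here is illustrative:

#include <stdint.h>
#include <stdio.h>

#define LG_CHUNK 21    /* hypothetical 2 MiB chunks */
#define CHUNK_ADDR2BASE(p)                                        \
    ((void *)((uintptr_t)(p) & ~(((uintptr_t)1 << LG_CHUNK) - 1)))

int
main(void)
{
    void *ptr = (void *)0x40012345;
    /* One mask takes any object pointer to its chunk header. */
    printf("%p -> %p\n", ptr, CHUNK_ADDR2BASE(ptr));    /* -> 0x40000000 */
    return (0);
}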
test/unit/rtree.c

@@ -32,21 +32,22 @@ TEST_END
 TEST_BEGIN(test_rtree_extrema)
 {
     unsigned i;
-    extent_node_t node_a, node_b;
+    extent_t extent_a, extent_b;

     for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) {
         rtree_t rtree;
         assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc),
             "Unexpected rtree_new() failure");

-        assert_false(rtree_set(&rtree, 0, &node_a),
+        assert_false(rtree_set(&rtree, 0, &extent_a),
             "Unexpected rtree_set() failure");
-        assert_ptr_eq(rtree_get(&rtree, 0, true), &node_a,
+        assert_ptr_eq(rtree_get(&rtree, 0, true), &extent_a,
             "rtree_get() should return previously set value");

-        assert_false(rtree_set(&rtree, ~((uintptr_t)0), &node_b),
+        assert_false(rtree_set(&rtree, ~((uintptr_t)0), &extent_b),
             "Unexpected rtree_set() failure");
-        assert_ptr_eq(rtree_get(&rtree, ~((uintptr_t)0), true), &node_b,
+        assert_ptr_eq(rtree_get(&rtree, ~((uintptr_t)0), true),
+            &extent_b,
             "rtree_get() should return previously set value");

         rtree_delete(&rtree);
@@ -61,18 +62,18 @@ TEST_BEGIN(test_rtree_bits)
     for (i = 1; i < (sizeof(uintptr_t) << 3); i++) {
         uintptr_t keys[] = {0, 1,
             (((uintptr_t)1) << (sizeof(uintptr_t)*8-i)) - 1};
-        extent_node_t node;
+        extent_t extent;
         rtree_t rtree;

         assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc),
             "Unexpected rtree_new() failure");

         for (j = 0; j < sizeof(keys)/sizeof(uintptr_t); j++) {
-            assert_false(rtree_set(&rtree, keys[j], &node),
+            assert_false(rtree_set(&rtree, keys[j], &extent),
                 "Unexpected rtree_set() failure");
             for (k = 0; k < sizeof(keys)/sizeof(uintptr_t); k++) {
                 assert_ptr_eq(rtree_get(&rtree, keys[k], true),
-                    &node, "rtree_get() should return "
+                    &extent, "rtree_get() should return "
                     "previously set value and ignore "
                     "insignificant key bits; i=%u, j=%u, k=%u, "
                     "set key=%#"FMTxPTR", get key=%#"FMTxPTR, i,
@@ -101,7 +102,7 @@ TEST_BEGIN(test_rtree_random)
     sfmt = init_gen_rand(SEED);
     for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) {
         uintptr_t keys[NSET];
-        extent_node_t node;
+        extent_t extent;
         unsigned j;
         rtree_t rtree;

@@ -110,13 +111,13 @@ TEST_BEGIN(test_rtree_random)

         for (j = 0; j < NSET; j++) {
             keys[j] = (uintptr_t)gen_rand64(sfmt);
-            assert_false(rtree_set(&rtree, keys[j], &node),
+            assert_false(rtree_set(&rtree, keys[j], &extent),
                 "Unexpected rtree_set() failure");
-            assert_ptr_eq(rtree_get(&rtree, keys[j], true), &node,
+            assert_ptr_eq(rtree_get(&rtree, keys[j], true), &extent,
                 "rtree_get() should return previously set value");
         }
         for (j = 0; j < NSET; j++) {
-            assert_ptr_eq(rtree_get(&rtree, keys[j], true), &node,
+            assert_ptr_eq(rtree_get(&rtree, keys[j], true), &extent,
                 "rtree_get() should return previously set value");
         }
