Rename extent_node_t to extent_t.

Jason Evans 2016-03-23 21:09:28 -07:00
parent 3aea827f5e
commit a7a6f5bc96
14 changed files with 490 additions and 485 deletions

include/jemalloc/internal/arena.h View File

@ -177,11 +177,11 @@ typedef ph(arena_chunk_map_misc_t) arena_run_heap_t;
/* Arena chunk header. */
struct arena_chunk_s {
/*
* A pointer to the arena that owns the chunk is stored within the node.
* This field as a whole is used by chunks_rtree to support both
* ivsalloc() and core-based debugging.
* A pointer to the arena that owns the chunk is stored within the
* extent structure. This field as a whole is used by chunks_rtree to
* support both ivsalloc() and core-based debugging.
*/
extent_node_t node;
extent_t extent;
/*
* Map of pages within chunk that keeps track of free/large/small. The
@ -303,7 +303,7 @@ struct arena_s {
/* Extant arena chunks. */
ql_head(extent_node_t) achunks;
ql_head(extent_t) achunks;
/*
* In order to avoid rapid chunk allocation/deallocation when an arena
@ -345,25 +345,25 @@ struct arena_s {
* /-- arena ---\
* | |
* | |
* |------------| /- chunk -\
* ...->|chunks_cache|<--------------------------->| /----\ |<--...
* |------------| | |node| |
* | | | | | |
* | | /- run -\ /- run -\ | | | |
* | | | | | | | | | |
* | | | | | | | | | |
* |------------| |-------| |-------| | |----| |
* ...->|runs_dirty |<-->|rd |<-->|rd |<---->|rd |<----...
* |------------| |-------| |-------| | |----| |
* | | | | | | | | | |
* | | | | | | | \----/ |
* | | \-------/ \-------/ | |
* | | | |
* | | | |
* \------------/ \---------/
* |------------| /-- chunk --\
* ...->|chunks_cache|<--------------------------->| /------\ |<--...
* |------------| | |extent| |
* | | | | | |
* | | /- run -\ /- run -\ | | | |
* | | | | | | | | | |
* | | | | | | | | | |
* |------------| |-------| |-------| | |------| |
* ...->|runs_dirty |<-->|rd |<-->|rd |<---->|rd |<----...
* |------------| |-------| |-------| | |------| |
* | | | | | | | | | |
* | | | | | | | \------/ |
* | | \-------/ \-------/ | |
* | | | |
* | | | |
* \------------/ \-----------/
*/
arena_runs_dirty_link_t runs_dirty;
extent_node_t chunks_cache;
extent_t chunks_cache;
/*
* Approximate time in seconds from the creation of a set of unused
@ -413,16 +413,16 @@ struct arena_s {
size_t decay_backlog[SMOOTHSTEP_NSTEPS];
/* Extant huge allocations. */
ql_head(extent_node_t) huge;
ql_head(extent_t) huge;
/* Synchronizes all huge allocation/update/deallocation. */
malloc_mutex_t huge_mtx;
/*
* Trees of chunks that were previously allocated (trees differ only in
* node ordering). These are used when allocating chunks, in an attempt
* to re-use address space. Depending on function, different tree
* orderings are needed, which is why there are two trees with the same
* contents.
* extent ordering). These are used when allocating chunks, in an
* attempt to re-use address space. Depending on function, different
* tree orderings are needed, which is why there are two trees with the
* same contents.
*/
extent_tree_t chunks_szad_cached;
extent_tree_t chunks_ad_cached;
@ -430,9 +430,9 @@ struct arena_s {
extent_tree_t chunks_ad_retained;
malloc_mutex_t chunks_mtx;
/* Cache of nodes that were allocated via base_alloc(). */
ql_head(extent_node_t) node_cache;
malloc_mutex_t node_cache_mtx;
/* Cache of extent structures that were allocated via base_alloc(). */
ql_head(extent_t) extent_cache;
malloc_mutex_t extent_cache_mtx;
/* User-configurable chunk hook functions. */
chunk_hooks_t chunk_hooks;
@ -486,12 +486,12 @@ typedef size_t (run_quantize_t)(size_t);
extern run_quantize_t *run_quantize_floor;
extern run_quantize_t *run_quantize_ceil;
#endif
void arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node,
void arena_chunk_cache_maybe_insert(arena_t *arena, extent_t *extent,
bool cache);
void arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node,
void arena_chunk_cache_maybe_remove(arena_t *arena, extent_t *extent,
bool cache);
extent_node_t *arena_node_alloc(tsdn_t *tsdn, arena_t *arena);
void arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node);
extent_t *arena_extent_alloc(tsdn_t *tsdn, arena_t *arena);
void arena_extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
void *arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
size_t alignment, bool *zero);
void arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk,
@ -1066,7 +1066,7 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
assert(binind != BININD_INVALID);
assert(binind < NBINS);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
arena = extent_node_arena_get(&chunk->node);
arena = extent_arena_get(&chunk->extent);
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
actual_mapbits = arena_mapbits_get(chunk, pageind);
assert(mapbits == actual_mapbits);
@ -1317,7 +1317,7 @@ arena_aalloc(const void *ptr)
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (likely(chunk != ptr))
return (extent_node_arena_get(&chunk->node));
return (extent_arena_get(&chunk->extent));
else
return (huge_aalloc(ptr));
}
@ -1395,7 +1395,7 @@ arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path)
binind, slow_path);
} else {
arena_dalloc_small(tsdn,
extent_node_arena_get(&chunk->node), chunk,
extent_arena_get(&chunk->extent), chunk,
ptr, pageind);
}
} else {
@ -1411,7 +1411,7 @@ arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path)
size - large_pad, slow_path);
} else {
arena_dalloc_large(tsdn,
extent_node_arena_get(&chunk->node), chunk,
extent_arena_get(&chunk->extent), chunk,
ptr);
}
}
@ -1455,7 +1455,7 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
size_t pageind = ((uintptr_t)ptr -
(uintptr_t)chunk) >> LG_PAGE;
arena_dalloc_small(tsdn,
extent_node_arena_get(&chunk->node), chunk,
extent_arena_get(&chunk->extent), chunk,
ptr, pageind);
}
} else {
@ -1467,7 +1467,7 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
size, slow_path);
} else {
arena_dalloc_large(tsdn,
extent_node_arena_get(&chunk->node), chunk,
extent_arena_get(&chunk->extent), chunk,
ptr);
}
}
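
A side note on the pattern the inlines above rely on: every arena chunk embeds an extent_t (formerly extent_node_t) in its header, so the owning arena can be recovered from any interior pointer by masking down to the chunk base. A minimal standalone sketch of that idea follows; arena_t, extent_t, and CHUNK_ADDR2BASE are simplified stand-ins here, not jemalloc's real definitions.

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define LG_CHUNK 20 /* assume 1 MiB chunks for the sketch */
#define CHUNKSIZE ((size_t)1 << LG_CHUNK)
#define CHUNK_ADDR2BASE(p) ((void *)((uintptr_t)(p) & ~(CHUNKSIZE - 1)))

typedef struct arena_s { unsigned ind; } arena_t;
typedef struct extent_s { arena_t *e_arena; } extent_t;
typedef struct arena_chunk_s { extent_t extent; /* page map follows */ } arena_chunk_t;

static arena_t *
extent_arena_get(const extent_t *extent)
{
    return (extent->e_arena);
}

int
main(void)
{
    arena_t arena = {0};
    arena_chunk_t *chunk = aligned_alloc(CHUNKSIZE, CHUNKSIZE);
    void *ptr;

    assert(chunk != NULL);
    chunk->extent.e_arena = &arena;
    /* Chunks are chunk-aligned, so masking an interior pointer finds the
     * chunk header, whose embedded extent names the owning arena. */
    ptr = (void *)((uintptr_t)chunk + 4096);
    assert(extent_arena_get(&((arena_chunk_t *)CHUNK_ADDR2BASE(ptr))->extent)
        == &arena);
    free(chunk);
    return (0);
}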

include/jemalloc/internal/chunk.h View File

@ -52,13 +52,12 @@ chunk_hooks_t chunk_hooks_get(tsdn_t *tsdn, arena_t *arena);
chunk_hooks_t chunk_hooks_set(tsdn_t *tsdn, arena_t *arena,
const chunk_hooks_t *chunk_hooks);
bool chunk_register(tsdn_t *tsdn, const void *chunk,
const extent_node_t *node);
void chunk_deregister(const void *chunk, const extent_node_t *node);
bool chunk_register(tsdn_t *tsdn, const void *chunk, const extent_t *extent);
void chunk_deregister(const void *chunk, const extent_t *extent);
void *chunk_alloc_base(size_t size);
void *chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
bool *zero, bool dalloc_node);
bool *zero, bool dalloc_extent);
void *chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
bool *zero, bool *commit);
@ -80,11 +79,11 @@ void chunk_postfork_child(tsdn_t *tsdn);
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
extent_node_t *chunk_lookup(const void *chunk, bool dependent);
extent_t *chunk_lookup(const void *chunk, bool dependent);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_CHUNK_C_))
JEMALLOC_INLINE extent_node_t *
JEMALLOC_INLINE extent_t *
chunk_lookup(const void *ptr, bool dependent)
{

include/jemalloc/internal/extent.h View File

@ -1,237 +1,236 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct extent_node_s extent_node_t;
typedef struct extent_s extent_t;
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
/* Tree of extents. Use accessor functions for en_* fields. */
struct extent_node_s {
/* Extent (span of pages). Use accessor functions for e_* fields. */
struct extent_s {
/* Arena from which this extent came, if any. */
arena_t *en_arena;
arena_t *e_arena;
/* Pointer to the extent that this tree node is responsible for. */
void *en_addr;
/* Pointer to the extent that this structure is responsible for. */
void *e_addr;
/* Total region size. */
size_t en_size;
size_t e_size;
/*
* The zeroed flag is used by chunk recycling code to track whether
* memory is zero-filled.
*/
bool en_zeroed;
bool e_zeroed;
/*
* True if physical memory is committed to the extent, whether
* explicitly or implicitly as on a system that overcommits and
* satisfies physical memory needs on demand via soft page faults.
*/
bool en_committed;
bool e_committed;
/*
* The achunk flag is used to validate that huge allocation lookups
* don't return arena chunks.
*/
bool en_achunk;
bool e_achunk;
/* Profile counters, used for huge objects. */
prof_tctx_t *en_prof_tctx;
prof_tctx_t *e_prof_tctx;
/* Linkage for arena's runs_dirty and chunks_cache rings. */
arena_runs_dirty_link_t rd;
qr(extent_node_t) cc_link;
qr(extent_t) cc_link;
union {
/* Linkage for the size/address-ordered tree. */
rb_node(extent_node_t) szad_link;
rb_node(extent_t) szad_link;
/* Linkage for arena's achunks, huge, and node_cache lists. */
ql_elm(extent_node_t) ql_link;
ql_elm(extent_t) ql_link;
};
/* Linkage for the address-ordered tree. */
rb_node(extent_node_t) ad_link;
rb_node(extent_t) ad_link;
};
typedef rb_tree(extent_node_t) extent_tree_t;
typedef rb_tree(extent_t) extent_tree_t;
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
rb_proto(, extent_tree_szad_, extent_tree_t, extent_node_t)
rb_proto(, extent_tree_szad_, extent_tree_t, extent_t)
rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)
rb_proto(, extent_tree_ad_, extent_tree_t, extent_t)
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
arena_t *extent_node_arena_get(const extent_node_t *node);
void *extent_node_addr_get(const extent_node_t *node);
size_t extent_node_size_get(const extent_node_t *node);
bool extent_node_zeroed_get(const extent_node_t *node);
bool extent_node_committed_get(const extent_node_t *node);
bool extent_node_achunk_get(const extent_node_t *node);
prof_tctx_t *extent_node_prof_tctx_get(const extent_node_t *node);
void extent_node_arena_set(extent_node_t *node, arena_t *arena);
void extent_node_addr_set(extent_node_t *node, void *addr);
void extent_node_size_set(extent_node_t *node, size_t size);
void extent_node_zeroed_set(extent_node_t *node, bool zeroed);
void extent_node_committed_set(extent_node_t *node, bool committed);
void extent_node_achunk_set(extent_node_t *node, bool achunk);
void extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx);
void extent_node_init(extent_node_t *node, arena_t *arena, void *addr,
arena_t *extent_arena_get(const extent_t *extent);
void *extent_addr_get(const extent_t *extent);
size_t extent_size_get(const extent_t *extent);
bool extent_zeroed_get(const extent_t *extent);
bool extent_committed_get(const extent_t *extent);
bool extent_achunk_get(const extent_t *extent);
prof_tctx_t *extent_prof_tctx_get(const extent_t *extent);
void extent_arena_set(extent_t *extent, arena_t *arena);
void extent_addr_set(extent_t *extent, void *addr);
void extent_size_set(extent_t *extent, size_t size);
void extent_zeroed_set(extent_t *extent, bool zeroed);
void extent_committed_set(extent_t *extent, bool committed);
void extent_achunk_set(extent_t *extent, bool achunk);
void extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx);
void extent_init(extent_t *extent, arena_t *arena, void *addr,
size_t size, bool zeroed, bool committed);
void extent_node_dirty_linkage_init(extent_node_t *node);
void extent_node_dirty_insert(extent_node_t *node,
arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty);
void extent_node_dirty_remove(extent_node_t *node);
void extent_dirty_linkage_init(extent_t *extent);
void extent_dirty_insert(extent_t *extent,
arena_runs_dirty_link_t *runs_dirty, extent_t *chunks_dirty);
void extent_dirty_remove(extent_t *extent);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_EXTENT_C_))
JEMALLOC_INLINE arena_t *
extent_node_arena_get(const extent_node_t *node)
extent_arena_get(const extent_t *extent)
{
return (node->en_arena);
return (extent->e_arena);
}
JEMALLOC_INLINE void *
extent_node_addr_get(const extent_node_t *node)
extent_addr_get(const extent_t *extent)
{
return (node->en_addr);
return (extent->e_addr);
}
JEMALLOC_INLINE size_t
extent_node_size_get(const extent_node_t *node)
extent_size_get(const extent_t *extent)
{
return (node->en_size);
return (extent->e_size);
}
JEMALLOC_INLINE bool
extent_node_zeroed_get(const extent_node_t *node)
extent_zeroed_get(const extent_t *extent)
{
return (node->en_zeroed);
return (extent->e_zeroed);
}
JEMALLOC_INLINE bool
extent_node_committed_get(const extent_node_t *node)
extent_committed_get(const extent_t *extent)
{
assert(!node->en_achunk);
return (node->en_committed);
assert(!extent->e_achunk);
return (extent->e_committed);
}
JEMALLOC_INLINE bool
extent_node_achunk_get(const extent_node_t *node)
extent_achunk_get(const extent_t *extent)
{
return (node->en_achunk);
return (extent->e_achunk);
}
JEMALLOC_INLINE prof_tctx_t *
extent_node_prof_tctx_get(const extent_node_t *node)
extent_prof_tctx_get(const extent_t *extent)
{
return (node->en_prof_tctx);
return (extent->e_prof_tctx);
}
JEMALLOC_INLINE void
extent_node_arena_set(extent_node_t *node, arena_t *arena)
extent_arena_set(extent_t *extent, arena_t *arena)
{
node->en_arena = arena;
extent->e_arena = arena;
}
JEMALLOC_INLINE void
extent_node_addr_set(extent_node_t *node, void *addr)
extent_addr_set(extent_t *extent, void *addr)
{
node->en_addr = addr;
extent->e_addr = addr;
}
JEMALLOC_INLINE void
extent_node_size_set(extent_node_t *node, size_t size)
extent_size_set(extent_t *extent, size_t size)
{
node->en_size = size;
extent->e_size = size;
}
JEMALLOC_INLINE void
extent_node_zeroed_set(extent_node_t *node, bool zeroed)
extent_zeroed_set(extent_t *extent, bool zeroed)
{
node->en_zeroed = zeroed;
extent->e_zeroed = zeroed;
}
JEMALLOC_INLINE void
extent_node_committed_set(extent_node_t *node, bool committed)
extent_committed_set(extent_t *extent, bool committed)
{
node->en_committed = committed;
extent->e_committed = committed;
}
JEMALLOC_INLINE void
extent_node_achunk_set(extent_node_t *node, bool achunk)
extent_achunk_set(extent_t *extent, bool achunk)
{
node->en_achunk = achunk;
extent->e_achunk = achunk;
}
JEMALLOC_INLINE void
extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx)
extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx)
{
node->en_prof_tctx = tctx;
extent->e_prof_tctx = tctx;
}
JEMALLOC_INLINE void
extent_node_init(extent_node_t *node, arena_t *arena, void *addr, size_t size,
extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
bool zeroed, bool committed)
{
extent_node_arena_set(node, arena);
extent_node_addr_set(node, addr);
extent_node_size_set(node, size);
extent_node_zeroed_set(node, zeroed);
extent_node_committed_set(node, committed);
extent_node_achunk_set(node, false);
extent_arena_set(extent, arena);
extent_addr_set(extent, addr);
extent_size_set(extent, size);
extent_zeroed_set(extent, zeroed);
extent_committed_set(extent, committed);
extent_achunk_set(extent, false);
if (config_prof)
extent_node_prof_tctx_set(node, NULL);
extent_prof_tctx_set(extent, NULL);
}
JEMALLOC_INLINE void
extent_node_dirty_linkage_init(extent_node_t *node)
extent_dirty_linkage_init(extent_t *extent)
{
qr_new(&node->rd, rd_link);
qr_new(node, cc_link);
qr_new(&extent->rd, rd_link);
qr_new(extent, cc_link);
}
JEMALLOC_INLINE void
extent_node_dirty_insert(extent_node_t *node,
arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty)
extent_dirty_insert(extent_t *extent,
arena_runs_dirty_link_t *runs_dirty, extent_t *chunks_dirty)
{
qr_meld(runs_dirty, &node->rd, rd_link);
qr_meld(chunks_dirty, node, cc_link);
qr_meld(runs_dirty, &extent->rd, rd_link);
qr_meld(chunks_dirty, extent, cc_link);
}
JEMALLOC_INLINE void
extent_node_dirty_remove(extent_node_t *node)
extent_dirty_remove(extent_t *extent)
{
qr_remove(&node->rd, rd_link);
qr_remove(node, cc_link);
qr_remove(&extent->rd, rd_link);
qr_remove(extent, cc_link);
}
#endif
#endif /* JEMALLOC_H_INLINES */
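
A standalone usage sketch of the accessor discipline above: e_* fields are only read and written through extent_*_get()/extent_*_set()-style functions, never directly. The definitions below are simplified stand-ins (the arena, profiling, and linkage fields are elided), not the real inlines.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct extent_s {
    void *e_addr;
    size_t e_size;
    bool e_zeroed;
    bool e_committed;
} extent_t;

static void *extent_addr_get(const extent_t *extent) { return (extent->e_addr); }
static size_t extent_size_get(const extent_t *extent) { return (extent->e_size); }
static bool extent_zeroed_get(const extent_t *extent) { return (extent->e_zeroed); }

static void
extent_init(extent_t *extent, void *addr, size_t size, bool zeroed,
    bool committed)
{
    extent->e_addr = addr;
    extent->e_size = size;
    extent->e_zeroed = zeroed;
    extent->e_committed = committed;
}

int
main(void)
{
    static char chunk[4096];
    extent_t extent;

    extent_init(&extent, chunk, sizeof(chunk), false, true);
    assert(extent_addr_get(&extent) == chunk);
    assert(extent_size_get(&extent) == sizeof(chunk));
    assert(!extent_zeroed_get(&extent));
    return (0);
}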

include/jemalloc/internal/jemalloc_internal.h.in View File

@ -966,6 +966,7 @@ decay_ticker_get(tsd_t *tsd, unsigned ind)
#include "jemalloc/internal/hash.h"
#ifndef JEMALLOC_ENABLE_INLINE
extent_t *iealloc(const void *ptr);
arena_t *iaalloc(const void *ptr);
size_t isalloc(tsdn_t *tsdn, const void *ptr, bool demote);
void *iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero,
@ -995,6 +996,13 @@ bool ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_ALWAYS_INLINE extent_t *
iealloc(const void *ptr)
{
return (chunk_lookup(ptr, true));
}
JEMALLOC_ALWAYS_INLINE arena_t *
iaalloc(const void *ptr)
{
@ -1086,15 +1094,15 @@ ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero)
JEMALLOC_ALWAYS_INLINE size_t
ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote)
{
extent_node_t *node;
extent_t *extent;
/* Return 0 if ptr is not within a chunk managed by jemalloc. */
node = chunk_lookup(ptr, false);
if (node == NULL)
extent = chunk_lookup(ptr, false);
if (extent == NULL)
return (0);
/* Only arena chunks should be looked up via interior pointers. */
assert(extent_node_addr_get(node) == ptr ||
extent_node_achunk_get(node));
assert(extent_addr_get(extent) == ptr ||
extent_achunk_get(extent));
return (isalloc(tsdn, ptr, demote));
}
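
The shape of ivsalloc() above, as a standalone sketch: look the pointer up, and a NULL result means the pointer is not jemalloc-managed, so report 0. Here chunk_lookup() is mocked with a single registered extent (the real code consults chunks_rtree), and isalloc() is collapsed into returning the extent size.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef struct extent_s { void *e_addr; size_t e_size; } extent_t;

static extent_t registered; /* single-entry mock of chunks_rtree */

static extent_t *
chunk_lookup(const void *ptr, bool dependent)
{
    uintptr_t p = (uintptr_t)ptr, base = (uintptr_t)registered.e_addr;

    (void)dependent;
    if (registered.e_addr != NULL && p >= base && p < base + registered.e_size)
        return (&registered);
    return (NULL);
}

static size_t
ivsalloc(const void *ptr)
{
    extent_t *extent;

    /* Return 0 if ptr is not within a chunk managed by jemalloc. */
    extent = chunk_lookup(ptr, false);
    if (extent == NULL)
        return (0);
    return (extent->e_size); /* the real code defers to isalloc() */
}

int
main(void)
{
    static char buf[64];
    int unmanaged;

    registered.e_addr = buf;
    registered.e_size = sizeof(buf);
    assert(ivsalloc(buf) == sizeof(buf));
    assert(ivsalloc(&unmanaged) == 0);
    return (0);
}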

include/jemalloc/internal/private_symbols.txt View File

@ -35,6 +35,8 @@ arena_decay_time_get
arena_decay_time_set
arena_dss_prec_get
arena_dss_prec_set
arena_extent_alloc
arena_extent_dalloc
arena_get
arena_ichoose
arena_init
@ -78,8 +80,6 @@ arena_miscelm_get_mutable
arena_miscelm_to_pageind
arena_miscelm_to_rpages
arena_new
arena_node_alloc
arena_node_dalloc
arena_nthreads_dec
arena_nthreads_get
arena_nthreads_inc
@ -204,24 +204,22 @@ ctl_postfork_parent
ctl_prefork
decay_ticker_get
dss_prec_names
extent_node_achunk_get
extent_node_achunk_set
extent_node_addr_get
extent_node_addr_set
extent_node_arena_get
extent_node_arena_set
extent_node_committed_get
extent_node_committed_set
extent_node_dirty_insert
extent_node_dirty_linkage_init
extent_node_dirty_remove
extent_node_init
extent_node_prof_tctx_get
extent_node_prof_tctx_set
extent_node_size_get
extent_node_size_set
extent_node_zeroed_get
extent_node_zeroed_set
extent_achunk_get
extent_achunk_set
extent_addr_get
extent_addr_set
extent_arena_get
extent_arena_set
extent_committed_get
extent_committed_set
extent_dirty_insert
extent_dirty_linkage_init
extent_dirty_remove
extent_init
extent_prof_tctx_get
extent_prof_tctx_set
extent_size_get
extent_size_set
extent_tree_ad_destroy
extent_tree_ad_destroy_recurse
extent_tree_ad_empty
@ -260,6 +258,8 @@ extent_tree_szad_reverse_iter
extent_tree_szad_reverse_iter_recurse
extent_tree_szad_reverse_iter_start
extent_tree_szad_search
extent_zeroed_get
extent_zeroed_set
ffs_llu
ffs_lu
ffs_u
@ -294,6 +294,7 @@ iallocztm
iarena_cleanup
idalloc
idalloctm
iealloc
index2size
index2size_compute
index2size_lookup

include/jemalloc/internal/rtree.h View File

@ -39,7 +39,7 @@ struct rtree_node_elm_s {
union {
void *pun;
rtree_node_elm_t *child;
extent_node_t *val;
extent_t *val;
};
};
@ -116,17 +116,17 @@ rtree_node_elm_t *rtree_child_tryread(rtree_node_elm_t *elm,
bool dependent);
rtree_node_elm_t *rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm,
unsigned level, bool dependent);
extent_node_t *rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm,
extent_t *rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm,
bool dependent);
void rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm,
const extent_node_t *val);
const extent_t *val);
rtree_node_elm_t *rtree_subtree_tryread(rtree_t *rtree, unsigned level,
bool dependent);
rtree_node_elm_t *rtree_subtree_read(rtree_t *rtree, unsigned level,
bool dependent);
extent_node_t *rtree_get(rtree_t *rtree, uintptr_t key, bool dependent);
bool rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val);
extent_t *rtree_get(rtree_t *rtree, uintptr_t key, bool dependent);
bool rtree_set(rtree_t *rtree, uintptr_t key, const extent_t *val);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
@ -186,7 +186,7 @@ rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level,
return (child);
}
JEMALLOC_ALWAYS_INLINE extent_node_t *
JEMALLOC_ALWAYS_INLINE extent_t *
rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm, bool dependent)
{
@ -209,7 +209,7 @@ rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm, bool dependent)
}
JEMALLOC_INLINE void
rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, const extent_node_t *val)
rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, const extent_t *val)
{
atomic_write_p(&elm->pun, val);
@ -240,7 +240,7 @@ rtree_subtree_read(rtree_t *rtree, unsigned level, bool dependent)
return (subtree);
}
JEMALLOC_ALWAYS_INLINE extent_node_t *
JEMALLOC_ALWAYS_INLINE extent_t *
rtree_get(rtree_t *rtree, uintptr_t key, bool dependent)
{
uintptr_t subkey;
@ -332,7 +332,7 @@ rtree_get(rtree_t *rtree, uintptr_t key, bool dependent)
}
JEMALLOC_INLINE bool
rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val)
rtree_set(rtree_t *rtree, uintptr_t key, const extent_t *val)
{
uintptr_t subkey;
unsigned i, start_level;

include/jemalloc/internal/witness.h View File

@ -24,7 +24,7 @@ typedef int witness_comp_t (const witness_t *, const witness_t *);
#define WITNESS_RANK_ARENA 8U
#define WITNESS_RANK_ARENA_CHUNKS 9U
#define WITNESS_RANK_ARENA_NODE_CACHE 10
#define WITNESS_RANK_ARENA_EXTENT_CACHE 10
#define WITNESS_RANK_BASE 11U

src/arena.c View File

@ -214,32 +214,32 @@ arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
}
static size_t
arena_chunk_dirty_npages(const extent_node_t *node)
arena_chunk_dirty_npages(const extent_t *extent)
{
return (extent_node_size_get(node) >> LG_PAGE);
return (extent_size_get(extent) >> LG_PAGE);
}
void
arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, bool cache)
arena_chunk_cache_maybe_insert(arena_t *arena, extent_t *extent, bool cache)
{
if (cache) {
extent_node_dirty_linkage_init(node);
extent_node_dirty_insert(node, &arena->runs_dirty,
extent_dirty_linkage_init(extent);
extent_dirty_insert(extent, &arena->runs_dirty,
&arena->chunks_cache);
arena->ndirty += arena_chunk_dirty_npages(node);
arena->ndirty += arena_chunk_dirty_npages(extent);
}
}
void
arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, bool dirty)
arena_chunk_cache_maybe_remove(arena_t *arena, extent_t *extent, bool dirty)
{
if (dirty) {
extent_node_dirty_remove(node);
assert(arena->ndirty >= arena_chunk_dirty_npages(node));
arena->ndirty -= arena_chunk_dirty_npages(node);
extent_dirty_remove(extent);
assert(arena->ndirty >= arena_chunk_dirty_npages(extent));
arena->ndirty -= arena_chunk_dirty_npages(extent);
}
}
@ -516,14 +516,14 @@ arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
{
/*
* The extent node notion of "committed" doesn't directly apply to
* arena chunks. Arbitrarily mark them as committed. The commit state
* of runs is tracked individually, and upon chunk deallocation the
* entire chunk is in a consistent commit state.
* The extent notion of "committed" doesn't directly apply to arena
* chunks. Arbitrarily mark them as committed. The commit state of
* runs is tracked individually, and upon chunk deallocation the entire
* chunk is in a consistent commit state.
*/
extent_node_init(&chunk->node, arena, chunk, chunksize, zero, true);
extent_node_achunk_set(&chunk->node, true);
return (chunk_register(tsdn, chunk, &chunk->node));
extent_init(&chunk->extent, arena, chunk, chunksize, zero, true);
extent_achunk_set(&chunk->extent, true);
return (chunk_register(tsdn, chunk, &chunk->extent));
}
static arena_chunk_t *
@ -648,8 +648,8 @@ arena_chunk_alloc(tsdn_t *tsdn, arena_t *arena)
return (NULL);
}
ql_elm_new(&chunk->node, ql_link);
ql_tail_insert(&arena->achunks, &chunk->node, ql_link);
ql_elm_new(&chunk->extent, ql_link);
ql_tail_insert(&arena->achunks, &chunk->extent, ql_link);
arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias);
return (chunk);
@ -661,7 +661,7 @@ arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
bool committed;
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
chunk_deregister(chunk, &chunk->node);
chunk_deregister(chunk, &chunk->extent);
committed = (arena_mapbits_decommitted_get(chunk, map_bias) == 0);
if (!committed) {
@ -718,7 +718,7 @@ arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
/* Remove run from runs_avail, so that the arena does not use it. */
arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias);
ql_remove(&arena->achunks, &chunk->node, ql_link);
ql_remove(&arena->achunks, &chunk->extent, ql_link);
spare = arena->spare;
arena->spare = chunk;
if (spare != NULL)
@ -805,30 +805,30 @@ arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize,
arena_huge_malloc_stats_update_undo(arena, usize);
}
extent_node_t *
arena_node_alloc(tsdn_t *tsdn, arena_t *arena)
extent_t *
arena_extent_alloc(tsdn_t *tsdn, arena_t *arena)
{
extent_node_t *node;
extent_t *extent;
malloc_mutex_lock(tsdn, &arena->node_cache_mtx);
node = ql_last(&arena->node_cache, ql_link);
if (node == NULL) {
malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
return (base_alloc(tsdn, sizeof(extent_node_t)));
malloc_mutex_lock(tsdn, &arena->extent_cache_mtx);
extent = ql_last(&arena->extent_cache, ql_link);
if (extent == NULL) {
malloc_mutex_unlock(tsdn, &arena->extent_cache_mtx);
return (base_alloc(tsdn, sizeof(extent_t)));
}
ql_tail_remove(&arena->node_cache, extent_node_t, ql_link);
malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
return (node);
ql_tail_remove(&arena->extent_cache, extent_t, ql_link);
malloc_mutex_unlock(tsdn, &arena->extent_cache_mtx);
return (extent);
}
void
arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node)
arena_extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent)
{
malloc_mutex_lock(tsdn, &arena->node_cache_mtx);
ql_elm_new(node, ql_link);
ql_tail_insert(&arena->node_cache, node, ql_link);
malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
malloc_mutex_lock(tsdn, &arena->extent_cache_mtx);
ql_elm_new(extent, ql_link);
ql_tail_insert(&arena->extent_cache, extent, ql_link);
malloc_mutex_unlock(tsdn, &arena->extent_cache_mtx);
}
static void *
@ -1424,7 +1424,7 @@ arena_dirty_count(arena_t *arena)
{
size_t ndirty = 0;
arena_runs_dirty_link_t *rdelm;
extent_node_t *chunkselm;
extent_t *chunkselm;
for (rdelm = qr_next(&arena->runs_dirty, rd_link),
chunkselm = qr_next(&arena->chunks_cache, cc_link);
@ -1432,7 +1432,7 @@ arena_dirty_count(arena_t *arena)
size_t npages;
if (rdelm == &chunkselm->rd) {
npages = extent_node_size_get(chunkselm) >> LG_PAGE;
npages = extent_size_get(chunkselm) >> LG_PAGE;
chunkselm = qr_next(chunkselm, cc_link);
} else {
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
@ -1456,10 +1456,10 @@ arena_dirty_count(arena_t *arena)
static size_t
arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
size_t ndirty_limit, arena_runs_dirty_link_t *purge_runs_sentinel,
extent_node_t *purge_chunks_sentinel)
extent_t *purge_chunks_sentinel)
{
arena_runs_dirty_link_t *rdelm, *rdelm_next;
extent_node_t *chunkselm;
extent_t *chunkselm;
size_t nstashed = 0;
/* Stash runs/chunks according to ndirty_limit. */
@ -1470,11 +1470,11 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
rdelm_next = qr_next(rdelm, rd_link);
if (rdelm == &chunkselm->rd) {
extent_node_t *chunkselm_next;
extent_t *chunkselm_next;
bool zero;
UNUSED void *chunk;
npages = extent_node_size_get(chunkselm) >> LG_PAGE;
npages = extent_size_get(chunkselm) >> LG_PAGE;
if (opt_purge == purge_mode_decay && arena->ndirty -
(nstashed + npages) < ndirty_limit)
break;
@ -1482,18 +1482,18 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
chunkselm_next = qr_next(chunkselm, cc_link);
/*
* Allocate. chunkselm remains valid due to the
* dalloc_node=false argument to chunk_alloc_cache().
* dalloc_extent=false argument to chunk_alloc_cache().
*/
zero = false;
chunk = chunk_alloc_cache(tsdn, arena, chunk_hooks,
extent_node_addr_get(chunkselm),
extent_node_size_get(chunkselm), chunksize, &zero,
extent_addr_get(chunkselm),
extent_size_get(chunkselm), chunksize, &zero,
false);
assert(chunk == extent_node_addr_get(chunkselm));
assert(zero == extent_node_zeroed_get(chunkselm));
extent_node_dirty_insert(chunkselm, purge_runs_sentinel,
assert(chunk == extent_addr_get(chunkselm));
assert(zero == extent_zeroed_get(chunkselm));
extent_dirty_insert(chunkselm, purge_runs_sentinel,
purge_chunks_sentinel);
assert(npages == (extent_node_size_get(chunkselm) >>
assert(npages == (extent_size_get(chunkselm) >>
LG_PAGE));
chunkselm = chunkselm_next;
} else {
@ -1546,11 +1546,11 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
static size_t
arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
arena_runs_dirty_link_t *purge_runs_sentinel,
extent_node_t *purge_chunks_sentinel)
extent_t *purge_chunks_sentinel)
{
size_t npurged, nmadvise;
arena_runs_dirty_link_t *rdelm;
extent_node_t *chunkselm;
extent_t *chunkselm;
if (config_stats)
nmadvise = 0;
@ -1571,7 +1571,7 @@ arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
* decommitted, or purged, depending on chunk
* deallocation policy.
*/
size_t size = extent_node_size_get(chunkselm);
size_t size = extent_size_get(chunkselm);
npages = size >> LG_PAGE;
chunkselm = qr_next(chunkselm, cc_link);
} else {
@ -1639,10 +1639,10 @@ arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
static void
arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
arena_runs_dirty_link_t *purge_runs_sentinel,
extent_node_t *purge_chunks_sentinel)
extent_t *purge_chunks_sentinel)
{
arena_runs_dirty_link_t *rdelm, *rdelm_next;
extent_node_t *chunkselm;
extent_t *chunkselm;
/* Deallocate chunks/runs. */
for (rdelm = qr_next(purge_runs_sentinel, rd_link),
@ -1650,14 +1650,13 @@ arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
rdelm != purge_runs_sentinel; rdelm = rdelm_next) {
rdelm_next = qr_next(rdelm, rd_link);
if (rdelm == &chunkselm->rd) {
extent_node_t *chunkselm_next = qr_next(chunkselm,
cc_link);
void *addr = extent_node_addr_get(chunkselm);
size_t size = extent_node_size_get(chunkselm);
bool zeroed = extent_node_zeroed_get(chunkselm);
bool committed = extent_node_committed_get(chunkselm);
extent_node_dirty_remove(chunkselm);
arena_node_dalloc(tsdn, arena, chunkselm);
extent_t *chunkselm_next = qr_next(chunkselm, cc_link);
void *addr = extent_addr_get(chunkselm);
size_t size = extent_size_get(chunkselm);
bool zeroed = extent_zeroed_get(chunkselm);
bool committed = extent_committed_get(chunkselm);
extent_dirty_remove(chunkselm);
arena_extent_dalloc(tsdn, arena, chunkselm);
chunkselm = chunkselm_next;
chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, addr,
size, zeroed, committed);
@ -1692,7 +1691,7 @@ arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit)
chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
size_t npurge, npurged;
arena_runs_dirty_link_t purge_runs_sentinel;
extent_node_t purge_chunks_sentinel;
extent_t purge_chunks_sentinel;
arena->purging = true;
@ -1708,7 +1707,7 @@ arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit)
arena->lg_dirty_mult) < arena->ndirty || ndirty_limit == 0);
qr_new(&purge_runs_sentinel, rd_link);
extent_node_dirty_linkage_init(&purge_chunks_sentinel);
extent_dirty_linkage_init(&purge_chunks_sentinel);
npurge = arena_stash_dirty(tsdn, arena, &chunk_hooks, ndirty_limit,
&purge_runs_sentinel, &purge_chunks_sentinel);
@ -1783,7 +1782,7 @@ void
arena_reset(tsd_t *tsd, arena_t *arena)
{
unsigned i;
extent_node_t *node;
extent_t *extent;
/*
* Locking in this function is unintuitive. The caller guarantees that
@ -1801,9 +1800,9 @@ arena_reset(tsd_t *tsd, arena_t *arena)
/* Remove large allocations from prof sample set. */
if (config_prof && opt_prof) {
ql_foreach(node, &arena->achunks, ql_link) {
ql_foreach(extent, &arena->achunks, ql_link) {
arena_achunk_prof_reset(tsd, arena,
extent_node_addr_get(node));
extent_addr_get(extent));
}
}
@ -1815,9 +1814,9 @@ arena_reset(tsd_t *tsd, arena_t *arena)
/* Huge allocations. */
malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
for (node = ql_last(&arena->huge, ql_link); node != NULL; node =
for (extent = ql_last(&arena->huge, ql_link); extent != NULL; extent =
ql_last(&arena->huge, ql_link)) {
void *ptr = extent_node_addr_get(node);
void *ptr = extent_addr_get(extent);
size_t usize;
malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);
@ -1854,18 +1853,18 @@ arena_reset(tsd_t *tsd, arena_t *arena)
* chains directly correspond.
*/
qr_new(&arena->runs_dirty, rd_link);
for (node = qr_next(&arena->chunks_cache, cc_link);
node != &arena->chunks_cache; node = qr_next(node, cc_link)) {
qr_new(&node->rd, rd_link);
qr_meld(&arena->runs_dirty, &node->rd, rd_link);
for (extent = qr_next(&arena->chunks_cache, cc_link);
extent != &arena->chunks_cache; extent = qr_next(extent, cc_link)) {
qr_new(&extent->rd, rd_link);
qr_meld(&arena->runs_dirty, &extent->rd, rd_link);
}
/* Arena chunks. */
for (node = ql_last(&arena->achunks, ql_link); node != NULL; node =
ql_last(&arena->achunks, ql_link)) {
ql_remove(&arena->achunks, node, ql_link);
for (extent = ql_last(&arena->achunks, ql_link); extent != NULL; extent
= ql_last(&arena->achunks, ql_link)) {
ql_remove(&arena->achunks, extent, ql_link);
arena_chunk_discard(tsd_tsdn(tsd), arena,
extent_node_addr_get(node));
extent_addr_get(extent));
}
/* Spare. */
@ -2649,8 +2648,8 @@ arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
if (run == bin->runcur)
bin->runcur = NULL;
else {
szind_t binind = arena_bin_index(extent_node_arena_get(
&chunk->node), bin);
szind_t binind = arena_bin_index(extent_arena_get(
&chunk->extent), bin);
const arena_bin_info_t *bin_info = &arena_bin_info[binind];
/*
@ -3018,7 +3017,7 @@ arena_ralloc_large(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
}
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
arena = extent_node_arena_get(&chunk->node);
arena = extent_arena_get(&chunk->extent);
if (oldsize < usize_max) {
bool ret = arena_ralloc_large_grow(tsdn, arena, chunk, ptr,
@ -3080,7 +3079,7 @@ arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
}
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
arena_decay_tick(tsdn, extent_node_arena_get(&chunk->node));
arena_decay_tick(tsdn, extent_arena_get(&chunk->extent));
return (false);
} else {
return (huge_ralloc_no_move(tsdn, ptr, oldsize, usize_min,
@ -3404,9 +3403,9 @@ arena_new(tsdn_t *tsdn, unsigned ind)
if (malloc_mutex_init(&arena->chunks_mtx, "arena_chunks",
WITNESS_RANK_ARENA_CHUNKS))
return (NULL);
ql_new(&arena->node_cache);
if (malloc_mutex_init(&arena->node_cache_mtx, "arena_node_cache",
WITNESS_RANK_ARENA_NODE_CACHE))
ql_new(&arena->extent_cache);
if (malloc_mutex_init(&arena->extent_cache_mtx, "arena_extent_cache",
WITNESS_RANK_ARENA_EXTENT_CACHE))
return (NULL);
arena->chunk_hooks = chunk_hooks_default;
@ -3492,7 +3491,7 @@ void
arena_prefork2(tsdn_t *tsdn, arena_t *arena)
{
malloc_mutex_prefork(tsdn, &arena->node_cache_mtx);
malloc_mutex_prefork(tsdn, &arena->extent_cache_mtx);
}
void
@ -3513,7 +3512,7 @@ arena_postfork_parent(tsdn_t *tsdn, arena_t *arena)
malloc_mutex_postfork_parent(tsdn, &arena->huge_mtx);
for (i = 0; i < NBINS; i++)
malloc_mutex_postfork_parent(tsdn, &arena->bins[i].lock);
malloc_mutex_postfork_parent(tsdn, &arena->node_cache_mtx);
malloc_mutex_postfork_parent(tsdn, &arena->extent_cache_mtx);
malloc_mutex_postfork_parent(tsdn, &arena->chunks_mtx);
malloc_mutex_postfork_parent(tsdn, &arena->lock);
}
@ -3526,7 +3525,7 @@ arena_postfork_child(tsdn_t *tsdn, arena_t *arena)
malloc_mutex_postfork_child(tsdn, &arena->huge_mtx);
for (i = 0; i < NBINS; i++)
malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock);
malloc_mutex_postfork_child(tsdn, &arena->node_cache_mtx);
malloc_mutex_postfork_child(tsdn, &arena->extent_cache_mtx);
malloc_mutex_postfork_child(tsdn, &arena->chunks_mtx);
malloc_mutex_postfork_child(tsdn, &arena->lock);
}
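
A standalone sketch of the arena_extent_alloc()/arena_extent_dalloc() cache above: pop an extent_t from a mutex-guarded free list, falling back to a permanent allocation when the list is empty. malloc() stands in for base_alloc(), a plain pointer for the ql_* linkage, and the tsdn plumbing is elided.

#include <pthread.h>
#include <stdlib.h>

typedef struct extent_s {
    struct extent_s *ql_link; /* stand-in for the ql_elm linkage */
} extent_t;

static pthread_mutex_t extent_cache_mtx = PTHREAD_MUTEX_INITIALIZER;
static extent_t *extent_cache;

static extent_t *
arena_extent_alloc(void)
{
    extent_t *extent;

    pthread_mutex_lock(&extent_cache_mtx);
    extent = extent_cache;
    if (extent == NULL) {
        pthread_mutex_unlock(&extent_cache_mtx);
        /* Cache empty: fall back to a permanent allocation. */
        return (malloc(sizeof(extent_t)));
    }
    extent_cache = extent->ql_link;
    pthread_mutex_unlock(&extent_cache_mtx);
    return (extent);
}

static void
arena_extent_dalloc(extent_t *extent)
{
    pthread_mutex_lock(&extent_cache_mtx);
    extent->ql_link = extent_cache;
    extent_cache = extent;
    pthread_mutex_unlock(&extent_cache_mtx);
}

int
main(void)
{
    extent_t *a, *b;
    int reused;

    a = arena_extent_alloc();
    arena_extent_dalloc(a);
    b = arena_extent_alloc(); /* reuses the cached structure */
    reused = (a == b);
    free(b);
    return (reused ? 0 : 1);
}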

src/base.c View File

@ -6,59 +6,59 @@
static malloc_mutex_t base_mtx;
static extent_tree_t base_avail_szad;
static extent_node_t *base_nodes;
static extent_t *base_extents;
static size_t base_allocated;
static size_t base_resident;
static size_t base_mapped;
/******************************************************************************/
static extent_node_t *
base_node_try_alloc(tsdn_t *tsdn)
static extent_t *
base_extent_try_alloc(tsdn_t *tsdn)
{
extent_node_t *node;
extent_t *extent;
malloc_mutex_assert_owner(tsdn, &base_mtx);
if (base_nodes == NULL)
if (base_extents == NULL)
return (NULL);
node = base_nodes;
base_nodes = *(extent_node_t **)node;
return (node);
extent = base_extents;
base_extents = *(extent_t **)extent;
return (extent);
}
static void
base_node_dalloc(tsdn_t *tsdn, extent_node_t *node)
base_extent_dalloc(tsdn_t *tsdn, extent_t *extent)
{
malloc_mutex_assert_owner(tsdn, &base_mtx);
*(extent_node_t **)node = base_nodes;
base_nodes = node;
*(extent_t **)extent = base_extents;
base_extents = extent;
}
static extent_node_t *
static extent_t *
base_chunk_alloc(tsdn_t *tsdn, size_t minsize)
{
extent_node_t *node;
extent_t *extent;
size_t csize, nsize;
void *addr;
malloc_mutex_assert_owner(tsdn, &base_mtx);
assert(minsize != 0);
node = base_node_try_alloc(tsdn);
/* Allocate enough space to also carve a node out if necessary. */
nsize = (node == NULL) ? CACHELINE_CEILING(sizeof(extent_node_t)) : 0;
extent = base_extent_try_alloc(tsdn);
/* Allocate enough space to also carve an extent out if necessary. */
nsize = (extent == NULL) ? CACHELINE_CEILING(sizeof(extent_t)) : 0;
csize = CHUNK_CEILING(minsize + nsize);
addr = chunk_alloc_base(csize);
if (addr == NULL) {
if (node != NULL)
base_node_dalloc(tsdn, node);
if (extent != NULL)
base_extent_dalloc(tsdn, extent);
return (NULL);
}
base_mapped += csize;
if (node == NULL) {
node = (extent_node_t *)addr;
if (extent == NULL) {
extent = (extent_t *)addr;
addr = (void *)((uintptr_t)addr + nsize);
csize -= nsize;
if (config_stats) {
@ -66,8 +66,8 @@ base_chunk_alloc(tsdn_t *tsdn, size_t minsize)
base_resident += PAGE_CEILING(nsize);
}
}
extent_node_init(node, NULL, addr, csize, true, true);
return (node);
extent_init(extent, NULL, addr, csize, true, true);
return (extent);
}
/*
@ -80,8 +80,8 @@ base_alloc(tsdn_t *tsdn, size_t size)
{
void *ret;
size_t csize, usize;
extent_node_t *node;
extent_node_t key;
extent_t *extent;
extent_t key;
/*
* Round size up to nearest multiple of the cacheline size, so that
@ -90,28 +90,28 @@ base_alloc(tsdn_t *tsdn, size_t size)
csize = CACHELINE_CEILING(size);
usize = s2u(csize);
extent_node_init(&key, NULL, NULL, usize, false, false);
extent_init(&key, NULL, NULL, usize, false, false);
malloc_mutex_lock(tsdn, &base_mtx);
node = extent_tree_szad_nsearch(&base_avail_szad, &key);
if (node != NULL) {
extent = extent_tree_szad_nsearch(&base_avail_szad, &key);
if (extent != NULL) {
/* Use existing space. */
extent_tree_szad_remove(&base_avail_szad, node);
extent_tree_szad_remove(&base_avail_szad, extent);
} else {
/* Try to allocate more space. */
node = base_chunk_alloc(tsdn, csize);
extent = base_chunk_alloc(tsdn, csize);
}
if (node == NULL) {
if (extent == NULL) {
ret = NULL;
goto label_return;
}
ret = extent_node_addr_get(node);
if (extent_node_size_get(node) > csize) {
extent_node_addr_set(node, (void *)((uintptr_t)ret + csize));
extent_node_size_set(node, extent_node_size_get(node) - csize);
extent_tree_szad_insert(&base_avail_szad, node);
ret = extent_addr_get(extent);
if (extent_size_get(extent) > csize) {
extent_addr_set(extent, (void *)((uintptr_t)ret + csize));
extent_size_set(extent, extent_size_get(extent) - csize);
extent_tree_szad_insert(&base_avail_szad, extent);
} else
base_node_dalloc(tsdn, node);
base_extent_dalloc(tsdn, extent);
if (config_stats) {
base_allocated += csize;
/*
@ -147,7 +147,7 @@ base_boot(void)
if (malloc_mutex_init(&base_mtx, "base", WITNESS_RANK_BASE))
return (true);
extent_tree_szad_new(&base_avail_szad);
base_nodes = NULL;
base_extents = NULL;
return (false);
}
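
The free-list trick in base_extent_try_alloc()/base_extent_dalloc() above, as a standalone sketch: the next-pointer is threaded through the first bytes of each recycled extent_t itself, so no dedicated link field is spent on it. base_mtx locking and the base-chunk carving are elided.

#include <assert.h>
#include <stddef.h>

typedef struct extent_s { void *e_addr; size_t e_size; } extent_t;

static extent_t *base_extents; /* head of the punned free list */

static extent_t *
base_extent_try_alloc(void)
{
    extent_t *extent;

    if (base_extents == NULL)
        return (NULL);
    extent = base_extents;
    base_extents = *(extent_t **)extent; /* pop: the first bytes hold next */
    return (extent);
}

static void
base_extent_dalloc(extent_t *extent)
{
    *(extent_t **)extent = base_extents; /* push: overwrite with next */
    base_extents = extent;
}

int
main(void)
{
    extent_t slot;

    assert(base_extent_try_alloc() == NULL);
    base_extent_dalloc(&slot);
    assert(base_extent_try_alloc() == &slot);
    assert(base_extent_try_alloc() == NULL);
    return (0);
}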

src/chunk.c View File

@ -141,15 +141,15 @@ chunk_hooks_assure_initialized(tsdn_t *tsdn, arena_t *arena,
}
bool
chunk_register(tsdn_t *tsdn, const void *chunk, const extent_node_t *node)
chunk_register(tsdn_t *tsdn, const void *chunk, const extent_t *extent)
{
assert(extent_node_addr_get(node) == chunk);
assert(extent_addr_get(extent) == chunk);
if (rtree_set(&chunks_rtree, (uintptr_t)chunk, node))
if (rtree_set(&chunks_rtree, (uintptr_t)chunk, extent))
return (true);
if (config_prof && opt_prof) {
size_t size = extent_node_size_get(node);
size_t size = extent_size_get(extent);
size_t nadd = (size == 0) ? 1 : size / chunksize;
size_t cur = atomic_add_z(&curchunks, nadd);
size_t high = atomic_read_z(&highchunks);
@ -168,14 +168,14 @@ chunk_register(tsdn_t *tsdn, const void *chunk, const extent_node_t *node)
}
void
chunk_deregister(const void *chunk, const extent_node_t *node)
chunk_deregister(const void *chunk, const extent_t *extent)
{
bool err;
err = rtree_set(&chunks_rtree, (uintptr_t)chunk, NULL);
assert(!err);
if (config_prof && opt_prof) {
size_t size = extent_node_size_get(node);
size_t size = extent_size_get(extent);
size_t nsub = (size == 0) ? 1 : size / chunksize;
assert(atomic_read_z(&curchunks) >= nsub);
atomic_sub_z(&curchunks, nsub);
@ -186,15 +186,15 @@ chunk_deregister(const void *chunk, const extent_node_t *node)
* Do first-best-fit chunk selection, i.e. select the lowest chunk that best
* fits.
*/
static extent_node_t *
static extent_t *
chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad,
extent_tree_t *chunks_ad, size_t size)
{
extent_node_t key;
extent_t key;
assert(size == CHUNK_CEILING(size));
extent_node_init(&key, arena, NULL, size, false, false);
extent_init(&key, arena, NULL, size, false, false);
return (extent_tree_szad_nsearch(chunks_szad, &key));
}
@ -202,20 +202,20 @@ static void *
chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
bool dalloc_node)
bool dalloc_extent)
{
void *ret;
extent_node_t *node;
extent_t *extent;
size_t alloc_size, leadsize, trailsize;
bool zeroed, committed;
assert(new_addr == NULL || alignment == chunksize);
/*
* Cached chunks use the node linkage embedded in their headers, in
* which case dalloc_node is true, and new_addr is non-NULL because
* Cached chunks use the extent linkage embedded in their headers, in
* which case dalloc_extent is true, and new_addr is non-NULL because
* we're operating on a specific chunk.
*/
assert(dalloc_node || new_addr != NULL);
assert(dalloc_extent || new_addr != NULL);
alloc_size = CHUNK_CEILING(s2u(size + alignment - chunksize));
/* Beware size_t wrap-around. */
@ -224,56 +224,55 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
malloc_mutex_lock(tsdn, &arena->chunks_mtx);
chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
if (new_addr != NULL) {
extent_node_t key;
extent_node_init(&key, arena, new_addr, alloc_size, false,
false);
node = extent_tree_ad_search(chunks_ad, &key);
extent_t key;
extent_init(&key, arena, new_addr, alloc_size, false, false);
extent = extent_tree_ad_search(chunks_ad, &key);
} else {
node = chunk_first_best_fit(arena, chunks_szad, chunks_ad,
extent = chunk_first_best_fit(arena, chunks_szad, chunks_ad,
alloc_size);
}
if (node == NULL || (new_addr != NULL && extent_node_size_get(node) <
if (extent == NULL || (new_addr != NULL && extent_size_get(extent) <
size)) {
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
return (NULL);
}
leadsize = ALIGNMENT_CEILING((uintptr_t)extent_node_addr_get(node),
alignment) - (uintptr_t)extent_node_addr_get(node);
leadsize = ALIGNMENT_CEILING((uintptr_t)extent_addr_get(extent),
alignment) - (uintptr_t)extent_addr_get(extent);
assert(new_addr == NULL || leadsize == 0);
assert(extent_node_size_get(node) >= leadsize + size);
trailsize = extent_node_size_get(node) - leadsize - size;
ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize);
zeroed = extent_node_zeroed_get(node);
assert(extent_size_get(extent) >= leadsize + size);
trailsize = extent_size_get(extent) - leadsize - size;
ret = (void *)((uintptr_t)extent_addr_get(extent) + leadsize);
zeroed = extent_zeroed_get(extent);
if (zeroed)
*zero = true;
committed = extent_node_committed_get(node);
committed = extent_committed_get(extent);
if (committed)
*commit = true;
/* Split the lead. */
if (leadsize != 0 &&
chunk_hooks->split(extent_node_addr_get(node),
extent_node_size_get(node), leadsize, size, false, arena->ind)) {
chunk_hooks->split(extent_addr_get(extent),
extent_size_get(extent), leadsize, size, false, arena->ind)) {
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
return (NULL);
}
/* Remove node from the tree. */
extent_tree_szad_remove(chunks_szad, node);
extent_tree_ad_remove(chunks_ad, node);
arena_chunk_cache_maybe_remove(arena, node, cache);
/* Remove extent from the tree. */
extent_tree_szad_remove(chunks_szad, extent);
extent_tree_ad_remove(chunks_ad, extent);
arena_chunk_cache_maybe_remove(arena, extent, cache);
if (leadsize != 0) {
/* Insert the leading space as a smaller chunk. */
extent_node_size_set(node, leadsize);
extent_tree_szad_insert(chunks_szad, node);
extent_tree_ad_insert(chunks_ad, node);
arena_chunk_cache_maybe_insert(arena, node, cache);
node = NULL;
extent_size_set(extent, leadsize);
extent_tree_szad_insert(chunks_szad, extent);
extent_tree_ad_insert(chunks_ad, extent);
arena_chunk_cache_maybe_insert(arena, extent, cache);
extent = NULL;
}
if (trailsize != 0) {
/* Split the trail. */
if (chunk_hooks->split(ret, size + trailsize, size,
trailsize, false, arena->ind)) {
if (dalloc_node && node != NULL)
arena_node_dalloc(tsdn, arena, node);
if (dalloc_extent && extent != NULL)
arena_extent_dalloc(tsdn, arena, extent);
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
chunk_record(tsdn, arena, chunk_hooks, chunks_szad,
chunks_ad, cache, ret, size + trailsize, zeroed,
@ -281,9 +280,9 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
return (NULL);
}
/* Insert the trailing space as a smaller chunk. */
if (node == NULL) {
node = arena_node_alloc(tsdn, arena);
if (node == NULL) {
if (extent == NULL) {
extent = arena_extent_alloc(tsdn, arena);
if (extent == NULL) {
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
chunk_record(tsdn, arena, chunk_hooks,
chunks_szad, chunks_ad, cache, ret, size +
@ -291,12 +290,12 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
return (NULL);
}
}
extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size),
extent_init(extent, arena, (void *)((uintptr_t)(ret) + size),
trailsize, zeroed, committed);
extent_tree_szad_insert(chunks_szad, node);
extent_tree_ad_insert(chunks_ad, node);
arena_chunk_cache_maybe_insert(arena, node, cache);
node = NULL;
extent_tree_szad_insert(chunks_szad, extent);
extent_tree_ad_insert(chunks_ad, extent);
arena_chunk_cache_maybe_insert(arena, extent, cache);
extent = NULL;
}
if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) {
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
@ -306,9 +305,9 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
}
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
assert(dalloc_node || node != NULL);
if (dalloc_node && node != NULL)
arena_node_dalloc(tsdn, arena, node);
assert(dalloc_extent || extent != NULL);
if (dalloc_extent && extent != NULL)
arena_extent_dalloc(tsdn, arena, extent);
if (*zero) {
if (!zeroed)
memset(ret, 0, size);
@ -381,7 +380,8 @@ chunk_alloc_base(size_t size)
void *
chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
void *new_addr, size_t size, size_t alignment, bool *zero, bool dalloc_node)
void *new_addr, size_t size, size_t alignment, bool *zero,
bool dalloc_extent)
{
void *ret;
bool commit;
@ -394,7 +394,7 @@ chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
commit = true;
ret = chunk_recycle(tsdn, arena, chunk_hooks,
&arena->chunks_szad_cached, &arena->chunks_ad_cached, true,
new_addr, size, alignment, zero, &commit, dalloc_node);
new_addr, size, alignment, zero, &commit, dalloc_extent);
if (ret == NULL)
return (NULL);
assert(commit);
@ -480,40 +480,39 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
void *chunk, size_t size, bool zeroed, bool committed)
{
bool unzeroed;
extent_node_t *node, *prev;
extent_node_t key;
extent_t *extent, *prev;
extent_t key;
assert(!cache || !zeroed);
unzeroed = cache || !zeroed;
malloc_mutex_lock(tsdn, &arena->chunks_mtx);
chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0,
false, false);
node = extent_tree_ad_nsearch(chunks_ad, &key);
extent_init(&key, arena, (void *)((uintptr_t)chunk + size), 0, false,
false);
extent = extent_tree_ad_nsearch(chunks_ad, &key);
/* Try to coalesce forward. */
if (node != NULL && extent_node_addr_get(node) ==
extent_node_addr_get(&key) && extent_node_committed_get(node) ==
committed && !chunk_hooks->merge(chunk, size,
extent_node_addr_get(node), extent_node_size_get(node), false,
arena->ind)) {
if (extent != NULL && extent_addr_get(extent) == extent_addr_get(&key)
&& extent_committed_get(extent) == committed &&
!chunk_hooks->merge(chunk, size, extent_addr_get(extent),
extent_size_get(extent), false, arena->ind)) {
/*
* Coalesce chunk with the following address range. This does
* not change the position within chunks_ad, so only
* remove/insert from/into chunks_szad.
*/
extent_tree_szad_remove(chunks_szad, node);
arena_chunk_cache_maybe_remove(arena, node, cache);
extent_node_addr_set(node, chunk);
extent_node_size_set(node, size + extent_node_size_get(node));
extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
extent_tree_szad_remove(chunks_szad, extent);
arena_chunk_cache_maybe_remove(arena, extent, cache);
extent_addr_set(extent, chunk);
extent_size_set(extent, size + extent_size_get(extent));
extent_zeroed_set(extent, extent_zeroed_get(extent) &&
!unzeroed);
extent_tree_szad_insert(chunks_szad, node);
arena_chunk_cache_maybe_insert(arena, node, cache);
extent_tree_szad_insert(chunks_szad, extent);
arena_chunk_cache_maybe_insert(arena, extent, cache);
} else {
/* Coalescing forward failed, so insert a new node. */
node = arena_node_alloc(tsdn, arena);
if (node == NULL) {
/* Coalescing forward failed, so insert a new extent. */
extent = arena_extent_alloc(tsdn, arena);
if (extent == NULL) {
/*
* Node allocation failed, which is an exceedingly
* unlikely failure. Leak chunk after making sure its
@ -526,39 +525,38 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
}
goto label_return;
}
extent_node_init(node, arena, chunk, size, !unzeroed,
extent_init(extent, arena, chunk, size, !unzeroed,
committed);
extent_tree_ad_insert(chunks_ad, node);
extent_tree_szad_insert(chunks_szad, node);
arena_chunk_cache_maybe_insert(arena, node, cache);
extent_tree_ad_insert(chunks_ad, extent);
extent_tree_szad_insert(chunks_szad, extent);
arena_chunk_cache_maybe_insert(arena, extent, cache);
}
/* Try to coalesce backward. */
prev = extent_tree_ad_prev(chunks_ad, node);
if (prev != NULL && (void *)((uintptr_t)extent_node_addr_get(prev) +
extent_node_size_get(prev)) == chunk &&
extent_node_committed_get(prev) == committed &&
!chunk_hooks->merge(extent_node_addr_get(prev),
extent_node_size_get(prev), chunk, size, false, arena->ind)) {
prev = extent_tree_ad_prev(chunks_ad, extent);
if (prev != NULL && (void *)((uintptr_t)extent_addr_get(prev) +
extent_size_get(prev)) == chunk && extent_committed_get(prev) ==
committed && !chunk_hooks->merge(extent_addr_get(prev),
extent_size_get(prev), chunk, size, false, arena->ind)) {
/*
* Coalesce chunk with the previous address range. This does
* not change the position within chunks_ad, so only
* remove/insert node from/into chunks_szad.
* remove/insert extent from/into chunks_szad.
*/
extent_tree_szad_remove(chunks_szad, prev);
extent_tree_ad_remove(chunks_ad, prev);
arena_chunk_cache_maybe_remove(arena, prev, cache);
extent_tree_szad_remove(chunks_szad, node);
arena_chunk_cache_maybe_remove(arena, node, cache);
extent_node_addr_set(node, extent_node_addr_get(prev));
extent_node_size_set(node, extent_node_size_get(prev) +
extent_node_size_get(node));
extent_node_zeroed_set(node, extent_node_zeroed_get(prev) &&
extent_node_zeroed_get(node));
extent_tree_szad_insert(chunks_szad, node);
arena_chunk_cache_maybe_insert(arena, node, cache);
extent_tree_szad_remove(chunks_szad, extent);
arena_chunk_cache_maybe_remove(arena, extent, cache);
extent_addr_set(extent, extent_addr_get(prev));
extent_size_set(extent, extent_size_get(prev) +
extent_size_get(extent));
extent_zeroed_set(extent, extent_zeroed_get(prev) &&
extent_zeroed_get(extent));
extent_tree_szad_insert(chunks_szad, extent);
arena_chunk_cache_maybe_insert(arena, extent, cache);
arena_node_dalloc(tsdn, arena, prev);
arena_extent_dalloc(tsdn, arena, prev);
}
label_return:

src/extent.c View File

@ -15,11 +15,11 @@ extent_quantize(size_t size)
}
JEMALLOC_INLINE_C int
extent_szad_comp(const extent_node_t *a, const extent_node_t *b)
extent_szad_comp(const extent_t *a, const extent_t *b)
{
int ret;
size_t a_qsize = extent_quantize(extent_node_size_get(a));
size_t b_qsize = extent_quantize(extent_node_size_get(b));
size_t a_qsize = extent_quantize(extent_size_get(a));
size_t b_qsize = extent_quantize(extent_size_get(b));
/*
* Compare based on quantized size rather than size, in order to sort
@ -27,8 +27,8 @@ extent_szad_comp(const extent_node_t *a, const extent_node_t *b)
*/
ret = (a_qsize > b_qsize) - (a_qsize < b_qsize);
if (ret == 0) {
uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);
uintptr_t a_addr = (uintptr_t)extent_addr_get(a);
uintptr_t b_addr = (uintptr_t)extent_addr_get(b);
ret = (a_addr > b_addr) - (a_addr < b_addr);
}
@ -37,17 +37,17 @@ extent_szad_comp(const extent_node_t *a, const extent_node_t *b)
}
/* Generate red-black tree functions. */
rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, szad_link,
rb_gen(, extent_tree_szad_, extent_tree_t, extent_t, szad_link,
extent_szad_comp)
JEMALLOC_INLINE_C int
extent_ad_comp(const extent_node_t *a, const extent_node_t *b)
extent_ad_comp(const extent_t *a, const extent_t *b)
{
uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);
uintptr_t a_addr = (uintptr_t)extent_addr_get(a);
uintptr_t b_addr = (uintptr_t)extent_addr_get(b);
return ((a_addr > b_addr) - (a_addr < b_addr));
}
/* Generate red-black tree functions. */
rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, ad_link, extent_ad_comp)
rb_gen(, extent_tree_ad_, extent_tree_t, extent_t, ad_link, extent_ad_comp)
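
A standalone sketch of the szad ordering generated above: compare by quantized size first and by address second, so an nsearch of the tree for a given size lands on the lowest-addressed extent that is large enough (first best fit). extent_quantize() is mocked as the identity here; the real one rounds to run/chunk size quanta.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef struct extent_s { void *e_addr; size_t e_size; } extent_t;

static size_t
extent_quantize(size_t size)
{
    return (size); /* identity mock */
}

static int
extent_szad_comp(const extent_t *a, const extent_t *b)
{
    int ret;
    size_t a_qsize = extent_quantize(a->e_size);
    size_t b_qsize = extent_quantize(b->e_size);

    /* Quantized size is the primary key... */
    ret = (a_qsize > b_qsize) - (a_qsize < b_qsize);
    if (ret == 0) {
        /* ...and address breaks ties, giving a total order. */
        uintptr_t a_addr = (uintptr_t)a->e_addr;
        uintptr_t b_addr = (uintptr_t)b->e_addr;

        ret = (a_addr > b_addr) - (a_addr < b_addr);
    }
    return (ret);
}

int
main(void)
{
    char base[2];
    extent_t lo = {&base[0], 8}, hi = {&base[1], 8}, big = {&base[0], 16};

    assert(extent_szad_comp(&lo, &hi) < 0);  /* equal size: lower address first */
    assert(extent_szad_comp(&hi, &big) < 0); /* smaller size sorts first */
    return (0);
}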

src/huge.c View File

@ -3,40 +3,40 @@
/******************************************************************************/
static extent_node_t *
huge_node_get(const void *ptr)
static extent_t *
huge_extent_get(const void *ptr)
{
extent_node_t *node;
extent_t *extent;
node = chunk_lookup(ptr, true);
assert(!extent_node_achunk_get(node));
extent = chunk_lookup(ptr, true);
assert(!extent_achunk_get(extent));
return (node);
return (extent);
}
static bool
huge_node_set(tsdn_t *tsdn, const void *ptr, extent_node_t *node)
huge_extent_set(tsdn_t *tsdn, const void *ptr, extent_t *extent)
{
assert(extent_node_addr_get(node) == ptr);
assert(!extent_node_achunk_get(node));
return (chunk_register(tsdn, ptr, node));
assert(extent_addr_get(extent) == ptr);
assert(!extent_achunk_get(extent));
return (chunk_register(tsdn, ptr, extent));
}
static void
huge_node_reset(tsdn_t *tsdn, const void *ptr, extent_node_t *node)
huge_extent_reset(tsdn_t *tsdn, const void *ptr, extent_t *extent)
{
bool err;
err = huge_node_set(tsdn, ptr, node);
err = huge_extent_set(tsdn, ptr, extent);
assert(!err);
}
static void
huge_node_unset(const void *ptr, const extent_node_t *node)
huge_extent_unset(const void *ptr, const extent_t *extent)
{
chunk_deregister(ptr, node);
chunk_deregister(ptr, extent);
}
void *
@ -54,7 +54,7 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
{
void *ret;
size_t ausize;
extent_node_t *node;
extent_t *extent;
bool is_zeroed;
/* Allocate one or more contiguous chunks for this request. */
@ -66,10 +66,10 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
return (NULL);
assert(ausize >= chunksize);
/* Allocate an extent node with which to track the chunk. */
node = ipallocztm(tsdn, CACHELINE_CEILING(sizeof(extent_node_t)),
/* Allocate an extent with which to track the chunk. */
extent = ipallocztm(tsdn, CACHELINE_CEILING(sizeof(extent_t)),
CACHELINE, false, NULL, true, arena_ichoose(tsdn, arena));
if (node == NULL)
if (extent == NULL)
return (NULL);
/*
@ -81,22 +81,22 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
arena = arena_choose(tsdn_tsd(tsdn), arena);
if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(tsdn,
arena, usize, alignment, &is_zeroed)) == NULL) {
idalloctm(tsdn, node, NULL, true, true);
idalloctm(tsdn, extent, NULL, true, true);
return (NULL);
}
extent_node_init(node, arena, ret, usize, is_zeroed, true);
extent_init(extent, arena, ret, usize, is_zeroed, true);
if (huge_node_set(tsdn, ret, node)) {
if (huge_extent_set(tsdn, ret, extent)) {
arena_chunk_dalloc_huge(tsdn, arena, ret, usize);
idalloctm(tsdn, node, NULL, true, true);
idalloctm(tsdn, extent, NULL, true, true);
return (NULL);
}
/* Insert node into huge. */
/* Insert extent into huge. */
malloc_mutex_lock(tsdn, &arena->huge_mtx);
ql_elm_new(node, ql_link);
ql_tail_insert(&arena->huge, node, ql_link);
ql_elm_new(extent, ql_link);
ql_tail_insert(&arena->huge, extent, ql_link);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
if (zero || (config_fill && unlikely(opt_zero))) {
@ -137,7 +137,7 @@ huge_ralloc_no_move_similar(tsdn_t *tsdn, void *ptr, size_t oldsize,
size_t usize_min, size_t usize_max, bool zero)
{
size_t usize, usize_next;
extent_node_t *node;
extent_t *extent;
arena_t *arena;
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
bool pre_zeroed, post_zeroed;
@ -150,9 +150,9 @@ huge_ralloc_no_move_similar(tsdn_t *tsdn, void *ptr, size_t oldsize,
if (oldsize == usize)
return;
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
pre_zeroed = extent_node_zeroed_get(node);
extent = huge_extent_get(ptr);
arena = extent_arena_get(extent);
pre_zeroed = extent_zeroed_get(extent);
/* Fill if necessary (shrinking). */
if (oldsize > usize) {
@ -171,12 +171,12 @@ huge_ralloc_no_move_similar(tsdn_t *tsdn, void *ptr, size_t oldsize,
malloc_mutex_lock(tsdn, &arena->huge_mtx);
/* Update the size of the huge allocation. */
huge_node_unset(ptr, node);
assert(extent_node_size_get(node) != usize);
extent_node_size_set(node, usize);
huge_node_reset(tsdn, ptr, node);
assert(extent_size_get(extent) != usize);
huge_extent_unset(ptr, extent);
extent_size_set(extent, usize);
huge_extent_reset(tsdn, ptr, extent);
/* Update zeroed. */
extent_node_zeroed_set(node, post_zeroed);
extent_zeroed_set(extent, post_zeroed);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
arena_chunk_ralloc_huge_similar(tsdn, arena, ptr, oldsize, usize);
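
Note the ordering above, repeated in the shrink and expand paths below: the assert reads the old size, the extent is deregistered, only then is the size mutated, and re-registration follows, all under huge_mtx. Presumably this is because registration covers every page of [addr, addr+size), so deregistration must use exactly the size that was registered. A standalone model of that invariant:

#include <assert.h>
#include <stddef.h>

#define PAGE  4096
#define NSLOT 64

typedef struct {
    size_t base;    /* first page index */
    size_t size;    /* bytes, page-aligned here */
} mini_extent_t;

/* Page-indexed stand-in for the chunks rtree. */
static mini_extent_t *slots[NSLOT];

static void
mini_register(mini_extent_t *e)
{
    for (size_t i = 0; i < e->size / PAGE; i++)
        slots[e->base + i] = e;
}

static void
mini_deregister(mini_extent_t *e)
{
    for (size_t i = 0; i < e->size / PAGE; i++)
        slots[e->base + i] = NULL;
}

int
main(void)
{
    mini_extent_t e = {8, 4 * PAGE};

    mini_register(&e);
    /* The discipline above: deregister old size, mutate, re-register. */
    mini_deregister(&e);
    e.size = 2 * PAGE;
    mini_register(&e);

    /* Shrinking the size first would have left pages 10-11 stale. */
    assert(slots[8] == &e && slots[9] == &e);
    assert(slots[10] == NULL && slots[11] == NULL);
    return (0);
}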
@ -199,15 +199,15 @@ static bool
huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize,
size_t usize)
{
extent_node_t *node;
extent_t *extent;
arena_t *arena;
chunk_hooks_t chunk_hooks;
size_t cdiff;
bool pre_zeroed, post_zeroed;
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
pre_zeroed = extent_node_zeroed_get(node);
extent = huge_extent_get(ptr);
arena = extent_arena_get(extent);
pre_zeroed = extent_zeroed_get(extent);
chunk_hooks = chunk_hooks_get(tsdn, arena);
assert(oldsize > usize);
@ -235,11 +235,11 @@ huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize,
malloc_mutex_lock(tsdn, &arena->huge_mtx);
/* Update the size of the huge allocation. */
huge_node_unset(ptr, node);
extent_node_size_set(node, usize);
huge_node_reset(tsdn, ptr, node);
huge_extent_unset(ptr, extent);
extent_size_set(extent, usize);
huge_extent_reset(tsdn, ptr, extent);
/* Update zeroed. */
extent_node_zeroed_set(node, post_zeroed);
extent_zeroed_set(extent, post_zeroed);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
/* Zap the excess chunks. */
@ -250,15 +250,16 @@ huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize,
static bool
huge_ralloc_no_move_expand(tsdn_t *tsdn, void *ptr, size_t oldsize,
size_t usize, bool zero) {
extent_node_t *node;
size_t usize, bool zero)
{
extent_t *extent;
arena_t *arena;
bool is_zeroed_subchunk, is_zeroed_chunk;
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
extent = huge_extent_get(ptr);
arena = extent_arena_get(extent);
malloc_mutex_lock(tsdn, &arena->huge_mtx);
is_zeroed_subchunk = extent_node_zeroed_get(node);
is_zeroed_subchunk = extent_zeroed_get(extent);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
/*
@ -273,9 +274,9 @@ huge_ralloc_no_move_expand(tsdn_t *tsdn, void *ptr, size_t oldsize,
malloc_mutex_lock(tsdn, &arena->huge_mtx);
/* Update the size of the huge allocation. */
huge_node_unset(ptr, node);
extent_node_size_set(node, usize);
huge_node_reset(tsdn, ptr, node);
huge_extent_unset(ptr, extent);
extent_size_set(extent, usize);
huge_extent_reset(tsdn, ptr, extent);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
if (zero || (config_fill && unlikely(opt_zero))) {
@ -390,21 +391,21 @@ huge_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
void
huge_dalloc(tsdn_t *tsdn, void *ptr)
{
extent_node_t *node;
extent_t *extent;
arena_t *arena;
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
huge_node_unset(ptr, node);
extent = huge_extent_get(ptr);
arena = extent_arena_get(extent);
huge_extent_unset(ptr, extent);
malloc_mutex_lock(tsdn, &arena->huge_mtx);
ql_remove(&arena->huge, node, ql_link);
ql_remove(&arena->huge, extent, ql_link);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
huge_dalloc_junk(tsdn, extent_node_addr_get(node),
extent_node_size_get(node));
arena_chunk_dalloc_huge(tsdn, extent_node_arena_get(node),
extent_node_addr_get(node), extent_node_size_get(node));
idalloctm(tsdn, node, NULL, true, true);
huge_dalloc_junk(tsdn, extent_addr_get(extent),
extent_size_get(extent));
arena_chunk_dalloc_huge(tsdn, extent_arena_get(extent),
extent_addr_get(extent), extent_size_get(extent));
idalloctm(tsdn, extent, NULL, true, true);
arena_decay_tick(tsdn, arena);
}
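
huge_dalloc() tears down in roughly the reverse order of huge_palloc(): deregister, unlink from arena->huge under the mutex, junk-fill, return the chunk, and free the extent structure last (its fields are read right up to the arena_chunk_dalloc_huge() call). A toy version of the junk-fill step follows; 0x5a matches the pattern jemalloc's junk option writes over freed memory, and everything else here is illustrative.

#include <assert.h>
#include <stdlib.h>
#include <string.h>

#define MINI_FREE_JUNK 0x5a

static void
mini_dalloc_junk(void *ptr, size_t usize)
{
    memset(ptr, MINI_FREE_JUNK, usize);
}

int
main(void)
{
    size_t usize = 4096;
    unsigned char *p = malloc(usize);

    if (p == NULL)
        return (1);
    p[0] = 7;
    mini_dalloc_junk(p, usize);
    /* A use-after-free now reads the poison pattern, not stale data. */
    assert(p[0] == MINI_FREE_JUNK && p[usize - 1] == MINI_FREE_JUNK);
    free(p);
    return (0);
}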
@ -413,20 +414,20 @@ arena_t *
huge_aalloc(const void *ptr)
{
return (extent_node_arena_get(huge_node_get(ptr)));
return (extent_arena_get(huge_extent_get(ptr)));
}
size_t
huge_salloc(tsdn_t *tsdn, const void *ptr)
{
size_t size;
extent_node_t *node;
extent_t *extent;
arena_t *arena;
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
extent = huge_extent_get(ptr);
arena = extent_arena_get(extent);
malloc_mutex_lock(tsdn, &arena->huge_mtx);
size = extent_node_size_get(node);
size = extent_size_get(extent);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
return (size);
@ -436,13 +437,13 @@ prof_tctx_t *
huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr)
{
prof_tctx_t *tctx;
extent_node_t *node;
extent_t *extent;
arena_t *arena;
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
extent = huge_extent_get(ptr);
arena = extent_arena_get(extent);
malloc_mutex_lock(tsdn, &arena->huge_mtx);
tctx = extent_node_prof_tctx_get(node);
tctx = extent_prof_tctx_get(extent);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
return (tctx);
@ -451,13 +452,13 @@ huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr)
void
huge_prof_tctx_set(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx)
{
extent_node_t *node;
extent_t *extent;
arena_t *arena;
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
extent = huge_extent_get(ptr);
arena = extent_arena_get(extent);
malloc_mutex_lock(tsdn, &arena->huge_mtx);
extent_node_prof_tctx_set(node, tctx);
extent_prof_tctx_set(extent, tctx);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
}

View File

@ -103,7 +103,7 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
/* Lock the arena bin associated with the first object. */
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
*(tbin->avail - 1));
arena_t *bin_arena = extent_node_arena_get(&chunk->node);
arena_t *bin_arena = extent_arena_get(&chunk->extent);
arena_bin_t *bin = &bin_arena->bins[binind];
if (config_prof && bin_arena == arena) {
@ -126,7 +126,7 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
ptr = *(tbin->avail - 1 - i);
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (extent_node_arena_get(&chunk->node) == bin_arena) {
if (extent_arena_get(&chunk->extent) == bin_arena) {
size_t pageind = ((uintptr_t)ptr -
(uintptr_t)chunk) >> LG_PAGE;
arena_chunk_map_bits_t *bitselm =
@ -185,7 +185,7 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
/* Lock the arena associated with the first object. */
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
*(tbin->avail - 1));
arena_t *locked_arena = extent_node_arena_get(&chunk->node);
arena_t *locked_arena = extent_arena_get(&chunk->extent);
UNUSED bool idump;
if (config_prof)
@ -211,8 +211,7 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
ptr = *(tbin->avail - 1 - i);
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (extent_node_arena_get(&chunk->node) ==
locked_arena) {
if (extent_arena_get(&chunk->extent) == locked_arena) {
arena_dalloc_large_junked_locked(tsd_tsdn(tsd),
locked_arena, chunk, ptr);
} else {
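
Both flush paths recover the owning arena without consulting any lookup table: arena chunks are chunksize-aligned, so masking an object pointer with CHUNK_ADDR2BASE() lands on the chunk header, whose embedded extent (formerly node) names the arena. A self-contained sketch of that trick; the 2 MiB constant and struct layout are illustrative, not jemalloc's exact definitions.

#define _POSIX_C_SOURCE 200112L
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define MINI_CHUNKSIZE ((size_t)1 << 21)    /* 2 MiB; illustrative */
#define ADDR2BASE(a) \
    ((void *)((uintptr_t)(a) & ~((uintptr_t)MINI_CHUNKSIZE - 1)))

typedef struct { int id; } mini_arena_t;
typedef struct { mini_arena_t *arena; } mini_extent_t;
typedef struct { mini_extent_t extent; /* ...page map, runs... */ } mini_chunk_t;

int
main(void)
{
    mini_arena_t arena = {42};
    void *mem;

    /* Arena chunks are chunksize-aligned; emulate that here. */
    if (posix_memalign(&mem, MINI_CHUNKSIZE, MINI_CHUNKSIZE) != 0)
        return (1);
    mini_chunk_t *chunk = mem;
    chunk->extent.arena = &arena;

    /* Any interior object pointer masks back to the chunk header... */
    void *ptr = (char *)mem + 12345;
    mini_chunk_t *base = ADDR2BASE(ptr);

    /* ...whose embedded extent names the owning arena. */
    assert(base == chunk && base->extent.arena->id == 42);
    free(mem);
    return (0);
}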

View File

@ -32,21 +32,22 @@ TEST_END
TEST_BEGIN(test_rtree_extrema)
{
unsigned i;
extent_node_t node_a, node_b;
extent_t extent_a, extent_b;
for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) {
rtree_t rtree;
assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc),
"Unexpected rtree_new() failure");
assert_false(rtree_set(&rtree, 0, &node_a),
assert_false(rtree_set(&rtree, 0, &extent_a),
"Unexpected rtree_set() failure");
assert_ptr_eq(rtree_get(&rtree, 0, true), &node_a,
assert_ptr_eq(rtree_get(&rtree, 0, true), &extent_a,
"rtree_get() should return previously set value");
assert_false(rtree_set(&rtree, ~((uintptr_t)0), &node_b),
assert_false(rtree_set(&rtree, ~((uintptr_t)0), &extent_b),
"Unexpected rtree_set() failure");
assert_ptr_eq(rtree_get(&rtree, ~((uintptr_t)0), true), &node_b,
assert_ptr_eq(rtree_get(&rtree, ~((uintptr_t)0), true),
&extent_b,
"rtree_get() should return previously set value");
rtree_delete(&rtree);
@ -61,18 +62,18 @@ TEST_BEGIN(test_rtree_bits)
for (i = 1; i < (sizeof(uintptr_t) << 3); i++) {
uintptr_t keys[] = {0, 1,
(((uintptr_t)1) << (sizeof(uintptr_t)*8-i)) - 1};
extent_node_t node;
extent_t extent;
rtree_t rtree;
assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc),
"Unexpected rtree_new() failure");
for (j = 0; j < sizeof(keys)/sizeof(uintptr_t); j++) {
assert_false(rtree_set(&rtree, keys[j], &node),
assert_false(rtree_set(&rtree, keys[j], &extent),
"Unexpected rtree_set() failure");
for (k = 0; k < sizeof(keys)/sizeof(uintptr_t); k++) {
assert_ptr_eq(rtree_get(&rtree, keys[k], true),
&node, "rtree_get() should return "
&extent, "rtree_get() should return "
"previously set value and ignore "
"insignificant key bits; i=%u, j=%u, k=%u, "
"set key=%#"FMTxPTR", get key=%#"FMTxPTR, i,
@ -101,7 +102,7 @@ TEST_BEGIN(test_rtree_random)
sfmt = init_gen_rand(SEED);
for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) {
uintptr_t keys[NSET];
extent_node_t node;
extent_t extent;
unsigned j;
rtree_t rtree;
@ -110,13 +111,13 @@ TEST_BEGIN(test_rtree_random)
for (j = 0; j < NSET; j++) {
keys[j] = (uintptr_t)gen_rand64(sfmt);
assert_false(rtree_set(&rtree, keys[j], &node),
assert_false(rtree_set(&rtree, keys[j], &extent),
"Unexpected rtree_set() failure");
assert_ptr_eq(rtree_get(&rtree, keys[j], true), &node,
assert_ptr_eq(rtree_get(&rtree, keys[j], true), &extent,
"rtree_get() should return previously set value");
}
for (j = 0; j < NSET; j++) {
assert_ptr_eq(rtree_get(&rtree, keys[j], true), &node,
assert_ptr_eq(rtree_get(&rtree, keys[j], true), &extent,
"rtree_get() should return previously set value");
}
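
The property test_rtree_bits exercises above is worth spelling out: an rtree configured for i significant key bits must treat keys that agree in their top i bits as identical, which is exactly why keys[] mixes 0, 1, and an all-low-bits value. A standalone sketch of the subkey view that property implies (a masking model only, not the real multi-level traversal):

#include <assert.h>
#include <stdint.h>

/* Top-i-bits view of a key; an i-bit rtree sees nothing below. */
static uintptr_t
significant_bits(uintptr_t key, unsigned i)
{
    unsigned ptr_bits = sizeof(uintptr_t) << 3;

    return (key >> (ptr_bits - i));
}

int
main(void)
{
    unsigned i = 20;
    unsigned ptr_bits = sizeof(uintptr_t) << 3;
    uintptr_t key = (uintptr_t)0x1234 << (ptr_bits - 16);
    /* Same top i bits, maximal garbage below: the keys[] pattern. */
    uintptr_t alias = key | ((((uintptr_t)1) << (ptr_bits - i)) - 1);

    assert(key != alias);
    assert(significant_bits(key, i) == significant_bits(alias, i));
    return (0);
}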