Integrate whole chunks into unused dirty page purging machinery.

Extend per arena unused dirty page purging to manage unused dirty chunks in addition to unused dirty runs. Rather than immediately unmapping deallocated chunks (or purging them in the --disable-munmap case), store them in a separate set of trees, chunks_[sz]ad_dirty. Preferentially allocate dirty chunks. When excessive unused dirty pages accumulate, purge runs and chunks in integrated LRU order (and unmap chunks in the --enable-munmap case).

Refactor extent_node_t to provide accessor functions.
commit ee41ad409a (parent 40ab8f98e4)
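Before the diff, a brief orientation may help. The purging machinery described above keeps dirty runs and dirty chunks on one logical LRU but splits the linkage across two rings (runs_dirty and chunks_dirty); each chunk node embeds a placeholder element that is spliced into the runs ring, so a walker can tell "this position is a chunk" by comparing the current runs-ring element against that placeholder. The program below is a minimal, self-contained sketch of that traversal pattern and nothing more: ring_t, dirty_run_t, dirty_chunk_t, and all helper names are invented for illustration, and only the shape of the loop mirrors arena_dirty_count()/arena_stash_dirty() in the diff.

#include <stddef.h>
#include <stdio.h>

/* Intrusive circular doubly-linked list with a sentinel element. */
typedef struct ring_s ring_t;
struct ring_s {
    ring_t *prev, *next;
};

static void
ring_init(ring_t *r)
{

    r->prev = r->next = r;
}

static void
ring_insert_mru(ring_t *sentinel, ring_t *elm)
{

    elm->prev = sentinel->prev;
    elm->next = sentinel;
    sentinel->prev->next = elm;
    sentinel->prev = elm;
}

typedef struct {
    ring_t rd_link;    /* Linkage in the runs ring. */
    size_t npages;
} dirty_run_t;

typedef struct {
    ring_t cd_link;    /* Linkage in the chunks ring. */
    ring_t runs_dirty; /* Placeholder spliced into the runs ring. */
    size_t npages;
} dirty_chunk_t;

#define CHUNK_OF(cd_elm) \
    ((dirty_chunk_t *)((char *)(cd_elm) - offsetof(dirty_chunk_t, cd_link)))

int
main(void)
{
    ring_t runs_dirty, chunks_dirty; /* Per-arena sentinels. */
    dirty_run_t r1 = { .npages = 4 }, r2 = { .npages = 2 };
    dirty_chunk_t c1 = { .npages = 256 };
    ring_t *runselm, *chunkselm;
    size_t ndirty = 0;

    ring_init(&runs_dirty);
    ring_init(&chunks_dirty);

    /* Build LRU order: run r1, then whole chunk c1, then run r2. */
    ring_insert_mru(&runs_dirty, &r1.rd_link);
    ring_insert_mru(&chunks_dirty, &c1.cd_link);
    ring_insert_mru(&runs_dirty, &c1.runs_dirty);
    ring_insert_mru(&runs_dirty, &r2.rd_link);

    /*
     * Integrated walk over both rings: the chunk cursor advances only when
     * the current runs-ring element is that chunk's embedded placeholder.
     */
    chunkselm = chunks_dirty.next;
    for (runselm = runs_dirty.next; runselm != &runs_dirty;
        runselm = runselm->next) {
        if (chunkselm != &chunks_dirty &&
            runselm == &CHUNK_OF(chunkselm)->runs_dirty) {
            ndirty += CHUNK_OF(chunkselm)->npages;
            chunkselm = chunkselm->next;
        } else
            ndirty += ((dirty_run_t *)runselm)->npages;
    }

    printf("ndirty = %zu\n", ndirty); /* 4 + 256 + 2 = 262. */
    return (0);
}

The design point this illustrates is that a dirty chunk costs only one extra embedded link rather than a full arena_chunk_map_misc_t, which is what the comment block added to struct arena_s below calls "semi-duplicated" linkage.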
@@ -35,6 +35,7 @@ typedef struct arena_s arena_t;
 /******************************************************************************/
 #ifdef JEMALLOC_H_STRUCTS
 
+#ifdef JEMALLOC_ARENA_STRUCTS_A
 struct arena_run_s {
     /* Index of bin this run is associated with. */
     index_t     binind;
@@ -136,7 +137,7 @@ struct arena_chunk_map_misc_s {
 
     union {
         /* Linkage for list of dirty runs. */
-        ql_elm(arena_chunk_map_misc_t)  dr_link;
+        qr(arena_chunk_map_misc_t)      rd_link;
 
         /* Profile counters, used for large object runs. */
         prof_tctx_t     *prof_tctx;
@@ -147,14 +148,16 @@ struct arena_chunk_map_misc_s {
 };
 typedef rb_tree(arena_chunk_map_misc_t) arena_avail_tree_t;
 typedef rb_tree(arena_chunk_map_misc_t) arena_run_tree_t;
-typedef ql_head(arena_chunk_map_misc_t) arena_chunk_miscelms_t;
+typedef qr(arena_chunk_map_misc_t) arena_chunk_miscelms_t;
+#endif /* JEMALLOC_ARENA_STRUCTS_A */
 
+#ifdef JEMALLOC_ARENA_STRUCTS_B
 /* Arena chunk header. */
 struct arena_chunk_s {
     /*
-     * The arena that owns the chunk is node.arena. This field as a whole
-     * is used by chunks_rtree to support both ivsalloc() and core-based
-     * debugging.
+     * A pointer to the arena that owns the chunk is stored within the node.
+     * This field as a whole is used by chunks_rtree to support both
+     * ivsalloc() and core-based debugging.
      */
     extent_node_t   node;
 
@@ -309,13 +312,29 @@ struct arena_s {
     size_t          ndirty;
 
     /*
-     * Size/address-ordered trees of this arena's available runs. The trees
-     * are used for first-best-fit run allocation.
+     * Size/address-ordered tree of this arena's available runs. The tree
+     * is used for first-best-fit run allocation.
      */
     arena_avail_tree_t  runs_avail;
 
-    /* List of dirty runs this arena manages. */
-    arena_chunk_miscelms_t  runs_dirty;
+    /*
+     * Unused dirty memory this arena manages. Dirty memory is conceptually
+     * tracked as an arbitrarily interleaved LRU of runs and chunks, but the
+     * list linkage is actually semi-duplicated in order to avoid extra
+     * arena_chunk_map_misc_t space overhead.
+     *
+     *   LRU-----------------------------------------------------------MRU
+     *
+     *       ______________           ___                      ___
+     * ...-->|chunks_dirty|<--------->|c|<-------------------->|c|<--...
+     *       --------------           |h|                      |h|
+     *       ____________    _____    |u|    _____    _____    |u|
+     * ...-->|runs_dirty|<-->|run|<-->|n|<-->|run|<-->|run|<-->|n|<--...
+     *       ------------    -----    |k|    -----    -----    |k|
+     *                                ---                      ---
+     */
+    arena_chunk_map_misc_t  runs_dirty;
+    extent_node_t       chunks_dirty;
 
     /* Extant huge allocations. */
     ql_head(extent_node_t)  huge;
@@ -329,6 +348,8 @@ struct arena_s {
     * orderings are needed, which is why there are two trees with the same
     * contents.
     */
+    extent_tree_t   chunks_szad_dirty;
+    extent_tree_t   chunks_ad_dirty;
    extent_tree_t   chunks_szad_mmap;
    extent_tree_t   chunks_ad_mmap;
    extent_tree_t   chunks_szad_dss;
@@ -347,6 +368,7 @@ struct arena_s {
     /* bins is used to store trees of free regions. */
     arena_bin_t     bins[NBINS];
 };
+#endif /* JEMALLOC_ARENA_STRUCTS_B */
 
 #endif /* JEMALLOC_H_STRUCTS */
 /******************************************************************************/
@@ -363,6 +385,10 @@ extern size_t arena_maxclass; /* Max size class for arenas. */
 extern unsigned nlclasses; /* Number of large size classes. */
 extern unsigned nhclasses; /* Number of huge size classes. */
 
+void    arena_chunk_dirty_maybe_insert(arena_t *arena, extent_node_t *node,
+    bool dirty);
+void    arena_chunk_dirty_maybe_remove(arena_t *arena, extent_node_t *node,
+    bool dirty);
 extent_node_t   *arena_node_alloc(arena_t *arena);
 void    arena_node_dalloc(arena_t *arena, extent_node_t *node);
 void    *arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
@@ -818,7 +844,7 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
        assert(binind != BININD_INVALID);
        assert(binind < NBINS);
        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-       arena = chunk->node.arena;
+       arena = extent_node_arena_get(&chunk->node);
        pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
        actual_mapbits = arena_mapbits_get(chunk, pageind);
        assert(mapbits == actual_mapbits);
@@ -1013,7 +1039,7 @@ arena_aalloc(const void *ptr)
 
    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    if (likely(chunk != ptr))
-       return (chunk->node.arena);
+       return (extent_node_arena_get(&chunk->node));
    else
        return (huge_aalloc(ptr));
 }
@@ -1085,8 +1111,8 @@ arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
                mapbits);
            tcache_dalloc_small(tsd, tcache, ptr, binind);
        } else {
-           arena_dalloc_small(chunk->node.arena, chunk,
-               ptr, pageind);
+           arena_dalloc_small(extent_node_arena_get(
+               &chunk->node), chunk, ptr, pageind);
        }
    } else {
        size_t size = arena_mapbits_large_size_get(chunk,
@@ -1097,8 +1123,8 @@ arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
        if (likely(tcache != NULL) && size <= tcache_maxclass)
            tcache_dalloc_large(tsd, tcache, ptr, size);
        else {
-           arena_dalloc_large(chunk->node.arena, chunk,
-               ptr);
+           arena_dalloc_large(extent_node_arena_get(
+               &chunk->node), chunk, ptr);
        }
    }
 } else
@@ -1136,8 +1162,8 @@ arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
        } else {
            size_t pageind = ((uintptr_t)ptr -
                (uintptr_t)chunk) >> LG_PAGE;
-           arena_dalloc_small(chunk->node.arena, chunk,
-               ptr, pageind);
+           arena_dalloc_small(extent_node_arena_get(
+               &chunk->node), chunk, ptr, pageind);
        }
    } else {
        assert(((uintptr_t)ptr & PAGE_MASK) == 0);
@@ -1145,8 +1171,8 @@ arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
        if (likely(tcache != NULL) && size <= tcache_maxclass)
            tcache_dalloc_large(tsd, tcache, ptr, size);
        else {
-           arena_dalloc_large(chunk->node.arena, chunk,
-               ptr);
+           arena_dalloc_large(extent_node_arena_get(
+               &chunk->node), chunk, ptr);
        }
    }
 } else
@@ -44,8 +44,10 @@ void *chunk_alloc_arena(chunk_alloc_t *chunk_alloc,
     size_t size, size_t alignment, bool *zero);
 void    *chunk_alloc_default(void *new_addr, size_t size, size_t alignment,
     bool *zero, unsigned arena_ind);
-void    chunk_unmap(arena_t *arena, void *chunk, size_t size);
+void    chunk_record(arena_t *arena, extent_tree_t *chunks_szad,
+    extent_tree_t *chunks_ad, bool dirty, void *chunk, size_t size);
 bool    chunk_dalloc_default(void *chunk, size_t size, unsigned arena_ind);
+void    chunk_unmap(arena_t *arena, void *chunk, size_t size);
 bool    chunk_boot(void);
 void    chunk_prefork(void);
 void    chunk_postfork_parent(void);
@@ -7,36 +7,48 @@ typedef struct extent_node_s extent_node_t;
 /******************************************************************************/
 #ifdef JEMALLOC_H_STRUCTS
 
-/* Tree of extents. */
+/* Tree of extents.  Use accessor functions for en_* fields. */
 struct extent_node_s {
    /* Arena from which this extent came, if any. */
-   arena_t     *arena;
+   arena_t     *en_arena;
 
    /* Pointer to the extent that this tree node is responsible for. */
-   void        *addr;
+   void        *en_addr;
 
+   /* Total region size. */
+   size_t      en_size;
+
    /*
-    * Total region size, or 0 if this node corresponds to an arena chunk.
+    * The zeroed flag is used by chunk recycling code to track whether
+    * memory is zero-filled.
     */
-   size_t      size;
+   bool        en_zeroed;
 
    /*
-    * 'prof_tctx' and 'zeroed' are never needed at the same time, so
-    * overlay them in order to fit extent_node_t in one cache line.
+    * The achunk flag is used to validate that huge allocation lookups
+    * don't return arena chunks.
     */
+   bool        en_achunk;
+
    union {
        /* Profile counters, used for huge objects. */
-       prof_tctx_t *prof_tctx;
+       prof_tctx_t *en_prof_tctx;
 
-       /* True if zero-filled; used by chunk recycling code. */
-       bool        zeroed;
+       struct {
+           /*
+            * Linkage for arena's runs_dirty and chunks_dirty
+            * rings.
+            */
+           qr(extent_node_t)       cd_link;
+           arena_chunk_map_misc_t  runs_dirty;
+       };
    };
 
    union {
        /* Linkage for the size/address-ordered tree. */
        rb_node(extent_node_t)  szad_link;
 
-       /* Linkage for huge allocations and cached chunks nodes. */
+       /* Linkage for arena's huge and node_cache lists. */
        ql_elm(extent_node_t)   ql_link;
    };
 
@@ -57,6 +69,107 @@ rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)
 /******************************************************************************/
 #ifdef JEMALLOC_H_INLINES
 
+#ifndef JEMALLOC_ENABLE_INLINE
+arena_t    *extent_node_arena_get(const extent_node_t *node);
+void   *extent_node_addr_get(const extent_node_t *node);
+size_t extent_node_size_get(const extent_node_t *node);
+bool   extent_node_zeroed_get(const extent_node_t *node);
+bool   extent_node_achunk_get(const extent_node_t *node);
+prof_tctx_t    *extent_node_prof_tctx_get(const extent_node_t *node);
+void   extent_node_arena_set(extent_node_t *node, arena_t *arena);
+void   extent_node_addr_set(extent_node_t *node, void *addr);
+void   extent_node_size_set(extent_node_t *node, size_t size);
+void   extent_node_zeroed_set(extent_node_t *node, bool zeroed);
+void   extent_node_achunk_set(extent_node_t *node, bool achunk);
+void   extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_EXTENT_C_))
+JEMALLOC_INLINE arena_t *
+extent_node_arena_get(const extent_node_t *node)
+{
+
+   return (node->en_arena);
+}
+
+JEMALLOC_INLINE void *
+extent_node_addr_get(const extent_node_t *node)
+{
+
+   return (node->en_addr);
+}
+
+JEMALLOC_INLINE size_t
+extent_node_size_get(const extent_node_t *node)
+{
+
+   return (node->en_size);
+}
+
+JEMALLOC_INLINE bool
+extent_node_zeroed_get(const extent_node_t *node)
+{
+
+   return (node->en_zeroed);
+}
+
+JEMALLOC_INLINE bool
+extent_node_achunk_get(const extent_node_t *node)
+{
+
+   return (node->en_achunk);
+}
+
+JEMALLOC_INLINE prof_tctx_t *
+extent_node_prof_tctx_get(const extent_node_t *node)
+{
+
+   return (node->en_prof_tctx);
+}
+
+JEMALLOC_INLINE void
+extent_node_arena_set(extent_node_t *node, arena_t *arena)
+{
+
+   node->en_arena = arena;
+}
+
+JEMALLOC_INLINE void
+extent_node_addr_set(extent_node_t *node, void *addr)
+{
+
+   node->en_addr = addr;
+}
+
+JEMALLOC_INLINE void
+extent_node_size_set(extent_node_t *node, size_t size)
+{
+
+   node->en_size = size;
+}
+
+JEMALLOC_INLINE void
+extent_node_zeroed_set(extent_node_t *node, bool zeroed)
+{
+
+   node->en_zeroed = zeroed;
+}
+
+JEMALLOC_INLINE void
+extent_node_achunk_set(extent_node_t *node, bool achunk)
+{
+
+   node->en_achunk = achunk;
+}
+
+JEMALLOC_INLINE void
+extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx)
+{
+
+   node->en_prof_tctx = tctx;
+}
+#endif
+
 #endif /* JEMALLOC_H_INLINES */
 /******************************************************************************/
 
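As a quick orientation for the accessor API reconstructed above, here is a hedged usage fragment. It is not part of the commit and assumes jemalloc's internal headers are on the include path; copy_node_metadata is a hypothetical helper whose calls simply mirror the tnode setup performed later in arena_stash_dirty().

/* Hypothetical illustration only: duplicate one node's metadata into another
 * via the accessors instead of touching en_* fields directly. */
static void
copy_node_metadata(extent_node_t *dst, const extent_node_t *src, arena_t *arena)
{

    extent_node_arena_set(dst, arena);
    extent_node_addr_set(dst, extent_node_addr_get(src));
    extent_node_size_set(dst, extent_node_size_get(src));
    extent_node_zeroed_set(dst, extent_node_zeroed_get(src));
    extent_node_achunk_set(dst, false); /* Not an arena chunk header. */
}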
@@ -368,8 +368,13 @@ typedef unsigned index_t;
 #include "jemalloc/internal/mutex.h"
 #include "jemalloc/internal/mb.h"
 #include "jemalloc/internal/bitmap.h"
-#include "jemalloc/internal/extent.h"
+#define JEMALLOC_ARENA_STRUCTS_A
 #include "jemalloc/internal/arena.h"
+#undef JEMALLOC_ARENA_STRUCTS_A
+#include "jemalloc/internal/extent.h"
+#define JEMALLOC_ARENA_STRUCTS_B
+#include "jemalloc/internal/arena.h"
+#undef JEMALLOC_ARENA_STRUCTS_B
 #include "jemalloc/internal/base.h"
 #include "jemalloc/internal/rtree.h"
 #include "jemalloc/internal/chunk.h"
@@ -933,7 +938,8 @@ ivsalloc(const void *ptr, bool demote)
    if (node == NULL)
        return (0);
    /* Only arena chunks should be looked up via interior pointers. */
-   assert(node->addr == ptr || node->size == 0);
+   assert(extent_node_addr_get(node) == ptr ||
+       extent_node_achunk_get(node));
 
    return (isalloc(ptr, demote));
 }
@@ -13,6 +13,8 @@ arena_choose
 arena_choose_hard
 arena_chunk_alloc_huge
 arena_chunk_dalloc_huge
+arena_chunk_dirty_maybe_insert
+arena_chunk_dirty_maybe_remove
 arena_chunk_ralloc_huge_expand
 arena_chunk_ralloc_huge_shrink
 arena_chunk_ralloc_huge_similar
@@ -143,6 +145,7 @@ chunk_npages
 chunk_postfork_child
 chunk_postfork_parent
 chunk_prefork
+chunk_record
 chunk_register
 chunk_unmap
 chunks_rtree
@@ -173,6 +176,18 @@ ctl_postfork_child
 ctl_postfork_parent
 ctl_prefork
 dss_prec_names
+extent_node_achunk_get
+extent_node_achunk_set
+extent_node_addr_get
+extent_node_addr_set
+extent_node_arena_get
+extent_node_arena_set
+extent_node_prof_tctx_get
+extent_node_prof_tctx_set
+extent_node_size_get
+extent_node_size_set
+extent_node_zeroed_get
+extent_node_zeroed_set
 extent_tree_ad_empty
 extent_tree_ad_first
 extent_tree_ad_insert

src/arena.c (399)

@@ -112,34 +112,94 @@ arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
 }
 
 static void
-arena_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
+arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
     size_t npages)
 {
    arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
 
    assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
        LG_PAGE));
    assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
    assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
        CHUNK_MAP_DIRTY);
-   ql_elm_new(miscelm, dr_link);
-   ql_tail_insert(&arena->runs_dirty, miscelm, dr_link);
+   qr_new(miscelm, rd_link);
+   qr_meld(&arena->runs_dirty, miscelm, rd_link);
    arena->ndirty += npages;
 }
 
 static void
-arena_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
+arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
     size_t npages)
 {
    arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
 
    assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
        LG_PAGE));
    assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
    assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
        CHUNK_MAP_DIRTY);
-   ql_remove(&arena->runs_dirty, miscelm, dr_link);
+   qr_remove(miscelm, rd_link);
+   assert(arena->ndirty >= npages);
    arena->ndirty -= npages;
 }
 
+static size_t
+arena_chunk_dirty_npages(const extent_node_t *node)
+{
+
+   return (extent_node_size_get(node) >> LG_PAGE);
+}
+
+static void
+arena_chunk_dirty_node_init(extent_node_t *node)
+{
+
+   qr_new(node, cd_link);
+   qr_new(&node->runs_dirty, rd_link);
+}
+
+static void
+arena_chunk_dirty_insert(arena_chunk_map_misc_t *runs_dirty,
+    extent_node_t *chunks_dirty, extent_node_t *node)
+{
+
+   qr_meld(chunks_dirty, node, cd_link);
+   qr_meld(runs_dirty, &node->runs_dirty, rd_link);
+}
+
+static void
+arena_chunk_dirty_remove(extent_node_t *node)
+{
+
+   qr_remove(node, cd_link);
+   qr_remove(&node->runs_dirty, rd_link);
+}
+
+void
+arena_chunk_dirty_maybe_insert(arena_t *arena, extent_node_t *node, bool dirty)
+{
+
+   arena_chunk_dirty_node_init(node);
+   if (dirty) {
+       arena_chunk_dirty_insert(&arena->runs_dirty,
+           &arena->chunks_dirty, node);
+       arena->ndirty += arena_chunk_dirty_npages(node);
+   }
+}
+
+void
+arena_chunk_dirty_maybe_remove(arena_t *arena, extent_node_t *node, bool dirty)
+{
+
+   if (dirty) {
+       arena_chunk_dirty_remove(node);
+       assert(arena->ndirty >= arena_chunk_dirty_npages(node));
+       arena->ndirty -= arena_chunk_dirty_npages(node);
+   }
+}
+
 JEMALLOC_INLINE_C void *
 arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
 {
@@ -243,7 +303,7 @@ arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
 
    arena_avail_remove(arena, chunk, run_ind, total_pages);
    if (flag_dirty != 0)
-       arena_dirty_remove(arena, chunk, run_ind, total_pages);
+       arena_run_dirty_remove(arena, chunk, run_ind, total_pages);
    arena_cactive_update(arena, need_pages, 0);
    arena->nactive += need_pages;
 
@@ -256,7 +316,7 @@ arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
        arena_mapbits_unallocated_set(chunk,
            run_ind+total_pages-1, (rem_pages << LG_PAGE),
            flag_dirty);
-       arena_dirty_insert(arena, chunk, run_ind+need_pages,
+       arena_run_dirty_insert(arena, chunk, run_ind+need_pages,
            rem_pages);
    } else {
        arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
@@ -405,9 +465,10 @@ arena_chunk_alloc_internal(arena_t *arena, bool *zero)
    chunk = (arena_chunk_t *)chunk_alloc_arena(chunk_alloc, chunk_dalloc,
        arena->ind, NULL, chunksize, chunksize, zero);
    if (chunk != NULL) {
-       chunk->node.arena = arena;
-       chunk->node.addr = chunk;
-       chunk->node.size = 0; /* Indicates this is an arena chunk. */
+       extent_node_arena_set(&chunk->node, arena);
+       extent_node_addr_set(&chunk->node, chunk);
+       extent_node_size_set(&chunk->node, chunksize);
+       extent_node_achunk_set(&chunk->node, true);
        if (chunk_register(chunk, &chunk->node)) {
            chunk_dalloc((void *)chunk, chunksize, arena->ind);
            chunk = NULL;
@@ -516,7 +577,7 @@ arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk)
 
        arena->spare = chunk;
        if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
-           arena_dirty_remove(arena, spare, map_bias,
+           arena_run_dirty_remove(arena, spare, map_bias,
                chunk_npages-map_bias);
        }
        chunk_dalloc = arena->chunk_dalloc;
@@ -899,18 +960,29 @@ static size_t
 arena_dirty_count(arena_t *arena)
 {
    size_t ndirty = 0;
-   arena_chunk_map_misc_t *miscelm;
-   arena_chunk_t *chunk;
-   size_t pageind, npages;
+   arena_chunk_map_misc_t *runselm;
+   extent_node_t *chunkselm;
 
-   ql_foreach(miscelm, &arena->runs_dirty, dr_link) {
-       chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
-       pageind = arena_miscelm_to_pageind(miscelm);
-       assert(arena_mapbits_allocated_get(chunk, pageind) == 0);
-       assert(arena_mapbits_large_get(chunk, pageind) == 0);
-       assert(arena_mapbits_dirty_get(chunk, pageind) != 0);
-       npages = arena_mapbits_unallocated_size_get(chunk, pageind) >>
-           LG_PAGE;
+   for (runselm = qr_next(&arena->runs_dirty, rd_link),
+       chunkselm = qr_next(&arena->chunks_dirty, cd_link);
+       runselm != &arena->runs_dirty; runselm = qr_next(runselm,
+       rd_link)) {
+       size_t npages;
+
+       if (runselm == &chunkselm->runs_dirty) {
+           npages = extent_node_size_get(chunkselm) >> LG_PAGE;
+           chunkselm = qr_next(chunkselm, cd_link);
+       } else {
+           arena_chunk_t *chunk = (arena_chunk_t
+               *)CHUNK_ADDR2BASE(runselm);
+           size_t pageind = arena_miscelm_to_pageind(runselm);
+           assert(arena_mapbits_allocated_get(chunk, pageind) ==
+               0);
+           assert(arena_mapbits_large_get(chunk, pageind) == 0);
+           assert(arena_mapbits_dirty_get(chunk, pageind) != 0);
+           npages = arena_mapbits_unallocated_size_get(chunk,
+               pageind) >> LG_PAGE;
+       }
        ndirty += npages;
    }
 
@@ -939,41 +1011,94 @@ arena_compute_npurge(arena_t *arena, bool all)
 
 static size_t
 arena_stash_dirty(arena_t *arena, bool all, size_t npurge,
-    arena_chunk_miscelms_t *miscelms)
+    arena_chunk_map_misc_t *purge_runs_sentinel,
+    extent_node_t *purge_chunks_sentinel)
 {
-   arena_chunk_map_misc_t *miscelm;
+   arena_chunk_map_misc_t *runselm, *runselm_next;
+   extent_node_t *chunkselm;
    size_t nstashed = 0;
 
-   /* Add at least npurge pages to purge_list. */
-   for (miscelm = ql_first(&arena->runs_dirty); miscelm != NULL;
-       miscelm = ql_first(&arena->runs_dirty)) {
-       arena_chunk_t *chunk =
-           (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
-       size_t pageind = arena_miscelm_to_pageind(miscelm);
-       size_t run_size = arena_mapbits_unallocated_size_get(chunk,
-           pageind);
-       size_t npages = run_size >> LG_PAGE;
-       arena_run_t *run = &miscelm->run;
-
-       assert(pageind + npages <= chunk_npages);
-       assert(arena_mapbits_dirty_get(chunk, pageind) ==
-           arena_mapbits_dirty_get(chunk, pageind+npages-1));
-
-       /*
-        * If purging the spare chunk's run, make it available prior to
-        * allocation.
-        */
-       if (chunk == arena->spare)
-           arena_chunk_alloc(arena);
-
-       /* Temporarily allocate the free dirty run. */
-       arena_run_split_large(arena, run, run_size, false);
-       /* Append to purge_list for later processing. */
-       ql_elm_new(miscelm, dr_link);
-       ql_tail_insert(miscelms, miscelm, dr_link);
+   /* Stash at least npurge pages. */
+   for (runselm = qr_next(&arena->runs_dirty, rd_link),
+       chunkselm = qr_next(&arena->chunks_dirty, cd_link);
+       runselm != &arena->runs_dirty; runselm = runselm_next) {
+       size_t npages;
+       runselm_next = qr_next(runselm, rd_link);
+
+       if (runselm == &chunkselm->runs_dirty) {
+           extent_node_t *chunkselm_next, *tnode;
+           void *addr;
+           size_t size;
+           bool zeroed, zero;
+           UNUSED void *chunk;
+
+           chunkselm_next = qr_next(chunkselm, cd_link);
+           /*
+            * Cache contents of chunkselm prior to it being
+            * destroyed as a side effect of allocating the chunk.
+            */
+           addr = extent_node_addr_get(chunkselm);
+           size = extent_node_size_get(chunkselm);
+           zeroed = extent_node_zeroed_get(chunkselm);
+           /* Allocate. */
+           zero = false;
+           chunk = arena->chunk_alloc(addr, size, chunksize, &zero,
+               arena->ind);
+           assert(chunk == addr);
+           /*
+            * Create a temporary node to link into the ring of
+            * stashed allocations.
+            */
+           tnode = arena_node_alloc(arena);
+           /*
+            * OOM shouldn't be possible because chunk allocation
+            * just cached a node.
+            */
+           assert(tnode != NULL);
+           extent_node_arena_set(tnode, arena);
+           extent_node_addr_set(tnode, addr);
+           extent_node_size_set(tnode, size);
+           extent_node_zeroed_set(tnode, zeroed);
+           arena_chunk_dirty_node_init(tnode);
+           /* Stash. */
+           arena_chunk_dirty_insert(purge_runs_sentinel,
+               purge_chunks_sentinel, tnode);
+           npages = size >> LG_PAGE;
+           chunkselm = chunkselm_next;
+       } else {
+           arena_chunk_t *chunk =
+               (arena_chunk_t *)CHUNK_ADDR2BASE(runselm);
+           size_t pageind = arena_miscelm_to_pageind(runselm);
+           arena_run_t *run = &runselm->run;
+           size_t run_size =
+               arena_mapbits_unallocated_size_get(chunk, pageind);
+
+           npages = run_size >> LG_PAGE;
+
+           assert(pageind + npages <= chunk_npages);
+           assert(arena_mapbits_dirty_get(chunk, pageind) ==
+               arena_mapbits_dirty_get(chunk, pageind+npages-1));
+
+           /*
+            * If purging the spare chunk's run, make it available
+            * prior to allocation.
+            */
+           if (chunk == arena->spare)
+               arena_chunk_alloc(arena);
+
+           /* Temporarily allocate the free dirty run. */
+           arena_run_split_large(arena, run, run_size, false);
+           /* Append to purge_runs for later processing. */
+           if (false)
+               qr_new(runselm, rd_link); /* Redundant. */
+           else {
+               assert(qr_next(runselm, rd_link) == runselm);
+               assert(qr_prev(runselm, rd_link) == runselm);
+           }
+           qr_meld(purge_runs_sentinel, runselm, rd_link);
+       }
 
        nstashed += npages;
 
        if (!all && nstashed >= npurge)
            break;
    }
@@ -982,52 +1107,66 @@ arena_stash_dirty(arena_t *arena, bool all, size_t npurge,
 }
 
 static size_t
-arena_purge_stashed(arena_t *arena, arena_chunk_miscelms_t *miscelms)
+arena_purge_stashed(arena_t *arena, arena_chunk_map_misc_t *purge_runs_sentinel,
+    extent_node_t *purge_chunks_sentinel)
 {
    size_t npurged, nmadvise;
-   arena_chunk_map_misc_t *miscelm;
+   arena_chunk_map_misc_t *runselm;
+   extent_node_t *chunkselm;
 
    if (config_stats)
        nmadvise = 0;
    npurged = 0;
 
    malloc_mutex_unlock(&arena->lock);
+   for (runselm = qr_next(purge_runs_sentinel, rd_link),
+       chunkselm = qr_next(purge_chunks_sentinel, cd_link);
+       runselm != purge_runs_sentinel; runselm = qr_next(runselm,
+       rd_link)) {
+       size_t npages;
 
-   ql_foreach(miscelm, miscelms, dr_link) {
-       arena_chunk_t *chunk;
-       size_t pageind, run_size, npages, flag_unzeroed, i;
-       bool unzeroed;
+       if (runselm == &chunkselm->runs_dirty) {
+           size_t size = extent_node_size_get(chunkselm);
 
-       chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
-       pageind = arena_miscelm_to_pageind(miscelm);
-       run_size = arena_mapbits_large_size_get(chunk, pageind);
-       npages = run_size >> LG_PAGE;
+           pages_purge(extent_node_addr_get(chunkselm), size);
+           npages = size >> LG_PAGE;
+           chunkselm = qr_next(chunkselm, cd_link);
+       } else {
+           arena_chunk_t *chunk;
+           size_t pageind, run_size, flag_unzeroed, i;
+           bool unzeroed;
 
-       assert(pageind + npages <= chunk_npages);
-       unzeroed = pages_purge((void *)((uintptr_t)chunk + (pageind <<
-           LG_PAGE)), run_size);
-       flag_unzeroed = unzeroed ? CHUNK_MAP_UNZEROED : 0;
+           chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(runselm);
+           pageind = arena_miscelm_to_pageind(runselm);
+           run_size = arena_mapbits_large_size_get(chunk, pageind);
+           npages = run_size >> LG_PAGE;
 
-       /*
-        * Set the unzeroed flag for all pages, now that pages_purge()
-        * has returned whether the pages were zeroed as a side effect
-        * of purging. This chunk map modification is safe even though
-        * the arena mutex isn't currently owned by this thread,
-        * because the run is marked as allocated, thus protecting it
-        * from being modified by any other thread. As long as these
-        * writes don't perturb the first and last elements'
-        * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
-        */
-       for (i = 0; i < npages; i++) {
-           arena_mapbits_unzeroed_set(chunk, pageind+i,
-               flag_unzeroed);
+           assert(pageind + npages <= chunk_npages);
+           unzeroed = pages_purge((void *)((uintptr_t)chunk +
+               (pageind << LG_PAGE)), run_size);
+           flag_unzeroed = unzeroed ? CHUNK_MAP_UNZEROED : 0;
+
+           /*
+            * Set the unzeroed flag for all pages, now that
+            * pages_purge() has returned whether the pages were
+            * zeroed as a side effect of purging. This chunk map
+            * modification is safe even though the arena mutex
+            * isn't currently owned by this thread, because the run
+            * is marked as allocated, thus protecting it from being
+            * modified by any other thread. As long as these
+            * writes don't perturb the first and last elements'
+            * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
+            */
+           for (i = 0; i < npages; i++) {
+               arena_mapbits_unzeroed_set(chunk, pageind+i,
+                   flag_unzeroed);
+           }
        }
 
        npurged += npages;
        if (config_stats)
            nmadvise++;
    }
 
    malloc_mutex_lock(&arena->lock);
 
    if (config_stats) {
@@ -1039,16 +1178,31 @@ arena_purge_stashed(arena_t *arena, arena_chunk_miscelms_t *miscelms)
 }
 
 static void
-arena_unstash_purged(arena_t *arena, arena_chunk_miscelms_t *miscelms)
+arena_unstash_purged(arena_t *arena,
+    arena_chunk_map_misc_t *purge_runs_sentinel,
+    extent_node_t *purge_chunks_sentinel)
 {
-   arena_chunk_map_misc_t *miscelm;
+   arena_chunk_map_misc_t *runselm, *runselm_next;
+   extent_node_t *chunkselm;
 
    /* Deallocate runs. */
-   for (miscelm = ql_first(miscelms); miscelm != NULL;
-       miscelm = ql_first(miscelms)) {
-       arena_run_t *run = &miscelm->run;
-       ql_remove(miscelms, miscelm, dr_link);
-       arena_run_dalloc(arena, run, false, true);
+   for (runselm = qr_next(purge_runs_sentinel, rd_link),
+       chunkselm = qr_next(purge_chunks_sentinel, cd_link);
+       runselm != purge_runs_sentinel; runselm = runselm_next) {
+       runselm_next = qr_next(runselm, rd_link);
+       if (runselm == &chunkselm->runs_dirty) {
+           extent_node_t *chunkselm_next = qr_next(chunkselm,
+               cd_link);
+           arena_chunk_dirty_remove(chunkselm);
+           chunk_unmap(arena, extent_node_addr_get(chunkselm),
+               extent_node_size_get(chunkselm));
+           arena_node_dalloc(arena, chunkselm);
+           chunkselm = chunkselm_next;
+       } else {
+           arena_run_t *run = &runselm->run;
+           qr_remove(runselm, rd_link);
+           arena_run_dalloc(arena, run, false, true);
+       }
    }
 }
 
@@ -1056,7 +1210,8 @@ void
 arena_purge(arena_t *arena, bool all)
 {
    size_t npurge, npurgeable, npurged;
-   arena_chunk_miscelms_t purge_list;
+   arena_chunk_map_misc_t purge_runs_sentinel;
+   extent_node_t purge_chunks_sentinel;
 
    /*
     * Calls to arena_dirty_count() are disabled even for debug builds
@@ -1072,12 +1227,17 @@ arena_purge(arena_t *arena, bool all)
        arena->stats.npurge++;
 
    npurge = arena_compute_npurge(arena, all);
-   ql_new(&purge_list);
-   npurgeable = arena_stash_dirty(arena, all, npurge, &purge_list);
+   qr_new(&purge_runs_sentinel, rd_link);
+   arena_chunk_dirty_node_init(&purge_chunks_sentinel);
+
+   npurgeable = arena_stash_dirty(arena, all, npurge, &purge_runs_sentinel,
+       &purge_chunks_sentinel);
    assert(npurgeable >= npurge);
-   npurged = arena_purge_stashed(arena, &purge_list);
+   npurged = arena_purge_stashed(arena, &purge_runs_sentinel,
+       &purge_chunks_sentinel);
    assert(npurged == npurgeable);
-   arena_unstash_purged(arena, &purge_list);
+   arena_unstash_purged(arena, &purge_runs_sentinel,
+       &purge_chunks_sentinel);
 }
 
 void
@@ -1115,9 +1275,12 @@ arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
            run_ind+run_pages+nrun_pages-1) == flag_dirty);
        arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages);
 
-       /* If the successor is dirty, remove it from runs_dirty. */
+       /*
+        * If the successor is dirty, remove it from the set of dirty
+        * pages.
+        */
        if (flag_dirty != 0) {
-           arena_dirty_remove(arena, chunk, run_ind+run_pages,
+           arena_run_dirty_remove(arena, chunk, run_ind+run_pages,
                nrun_pages);
        }
 
@@ -1148,9 +1311,14 @@ arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
        assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
        arena_avail_remove(arena, chunk, run_ind, prun_pages);
 
-       /* If the predecessor is dirty, remove it from runs_dirty. */
-       if (flag_dirty != 0)
-           arena_dirty_remove(arena, chunk, run_ind, prun_pages);
+       /*
+        * If the predecessor is dirty, remove it from the set of dirty
+        * pages.
+        */
+       if (flag_dirty != 0) {
+           arena_run_dirty_remove(arena, chunk, run_ind,
+               prun_pages);
+       }
 
        size += prun_size;
        run_pages += prun_pages;
@@ -1224,7 +1392,7 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
    arena_avail_insert(arena, chunk, run_ind, run_pages);
 
    if (dirty)
-       arena_dirty_insert(arena, chunk, run_ind, run_pages);
+       arena_run_dirty_insert(arena, chunk, run_ind, run_pages);
 
    /* Deallocate chunk if it is now completely unused. */
    if (size == arena_maxrun) {
@@ -1843,7 +2011,8 @@ arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
    if (run == bin->runcur)
        bin->runcur = NULL;
    else {
-       index_t binind = arena_bin_index(chunk->node.arena, bin);
+       index_t binind = arena_bin_index(extent_node_arena_get(
+           &chunk->node), bin);
        arena_bin_info_t *bin_info = &arena_bin_info[binind];
 
        if (bin_info->nregs != 1) {
@@ -2184,7 +2353,7 @@ arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
        arena_t *arena;
 
        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-       arena = chunk->node.arena;
+       arena = extent_node_arena_get(&chunk->node);
 
        if (usize < oldsize) {
            /* Fill before shrinking in order avoid a race. */
@@ -2422,20 +2591,6 @@ arena_new(unsigned ind)
    arena->nthreads = 0;
    if (malloc_mutex_init(&arena->lock))
        return (NULL);
-   arena->chunk_alloc = chunk_alloc_default;
-   arena->chunk_dalloc = chunk_dalloc_default;
-   ql_new(&arena->huge);
-   if (malloc_mutex_init(&arena->huge_mtx))
-       return (NULL);
-   extent_tree_szad_new(&arena->chunks_szad_mmap);
-   extent_tree_ad_new(&arena->chunks_ad_mmap);
-   extent_tree_szad_new(&arena->chunks_szad_dss);
-   extent_tree_ad_new(&arena->chunks_ad_dss);
-   ql_new(&arena->node_cache);
-   if (malloc_mutex_init(&arena->chunks_mtx))
-       return (NULL);
-   if (malloc_mutex_init(&arena->node_cache_mtx))
-       return (NULL);
 
    if (config_stats) {
        memset(&arena->stats, 0, sizeof(arena_stats_t));
@@ -2463,7 +2618,27 @@ arena_new(unsigned ind)
    arena->ndirty = 0;
 
    arena_avail_tree_new(&arena->runs_avail);
-   ql_new(&arena->runs_dirty);
+   qr_new(&arena->runs_dirty, rd_link);
+   qr_new(&arena->chunks_dirty, cd_link);
+
+   ql_new(&arena->huge);
+   if (malloc_mutex_init(&arena->huge_mtx))
+       return (NULL);
+
+   extent_tree_szad_new(&arena->chunks_szad_dirty);
+   extent_tree_ad_new(&arena->chunks_ad_dirty);
+   extent_tree_szad_new(&arena->chunks_szad_mmap);
+   extent_tree_ad_new(&arena->chunks_ad_mmap);
+   extent_tree_szad_new(&arena->chunks_szad_dss);
+   extent_tree_ad_new(&arena->chunks_ad_dss);
+   if (malloc_mutex_init(&arena->chunks_mtx))
+       return (NULL);
+   ql_new(&arena->node_cache);
+   if (malloc_mutex_init(&arena->node_cache_mtx))
+       return (NULL);
+
+   arena->chunk_alloc = chunk_alloc_default;
+   arena->chunk_dalloc = chunk_dalloc_default;
 
    /* Initialize bins. */
    for (i = 0; i < NBINS; i++) {

src/base.c (16)

@@ -60,8 +60,8 @@ base_chunk_alloc(size_t minsize)
        if (config_stats)
            base_allocated += nsize;
    }
-   node->addr = addr;
-   node->size = csize;
+   extent_node_addr_set(node, addr);
+   extent_node_size_set(node, csize);
    return (node);
 }
 
@@ -84,8 +84,8 @@ base_alloc(size_t size)
     */
    csize = CACHELINE_CEILING(size);
 
-   key.addr = NULL;
-   key.size = csize;
+   extent_node_addr_set(&key, NULL);
+   extent_node_size_set(&key, csize);
    malloc_mutex_lock(&base_mtx);
    node = extent_tree_szad_nsearch(&base_avail_szad, &key);
    if (node != NULL) {
@@ -100,10 +100,10 @@ base_alloc(size_t size)
        goto label_return;
    }
 
-   ret = node->addr;
-   if (node->size > csize) {
-       node->addr = (void *)((uintptr_t)ret + csize);
-       node->size -= csize;
+   ret = extent_node_addr_get(node);
+   if (extent_node_size_get(node) > csize) {
+       extent_node_addr_set(node, (void *)((uintptr_t)ret + csize));
+       extent_node_size_set(node, extent_node_size_get(node) - csize);
        extent_tree_szad_insert(&base_avail_szad, node);
    } else
        base_node_dalloc(node);
146
src/chunk.c
146
src/chunk.c
@ -24,12 +24,13 @@ bool
|
|||||||
chunk_register(const void *chunk, const extent_node_t *node)
|
chunk_register(const void *chunk, const extent_node_t *node)
|
||||||
{
|
{
|
||||||
|
|
||||||
assert(node->addr == chunk);
|
assert(extent_node_addr_get(node) == chunk);
|
||||||
|
|
||||||
if (rtree_set(&chunks_rtree, (uintptr_t)chunk, node))
|
if (rtree_set(&chunks_rtree, (uintptr_t)chunk, node))
|
||||||
return (true);
|
return (true);
|
||||||
if (config_prof && opt_prof) {
|
if (config_prof && opt_prof) {
|
||||||
size_t nadd = (node->size == 0) ? 1 : node->size / chunksize;
|
size_t size = extent_node_size_get(node);
|
||||||
|
size_t nadd = (size == 0) ? 1 : size / chunksize;
|
||||||
size_t cur = atomic_add_z(&curchunks, nadd);
|
size_t cur = atomic_add_z(&curchunks, nadd);
|
||||||
size_t high = atomic_read_z(&highchunks);
|
size_t high = atomic_read_z(&highchunks);
|
||||||
while (cur > high && atomic_cas_z(&highchunks, high, cur)) {
|
while (cur > high && atomic_cas_z(&highchunks, high, cur)) {
|
||||||
@ -54,7 +55,8 @@ chunk_deregister(const void *chunk, const extent_node_t *node)
|
|||||||
err = rtree_set(&chunks_rtree, (uintptr_t)chunk, NULL);
|
err = rtree_set(&chunks_rtree, (uintptr_t)chunk, NULL);
|
||||||
assert(!err);
|
assert(!err);
|
||||||
if (config_prof && opt_prof) {
|
if (config_prof && opt_prof) {
|
||||||
size_t nsub = (node->size == 0) ? 1 : node->size / chunksize;
|
size_t size = extent_node_size_get(node);
|
||||||
|
size_t nsub = (size == 0) ? 1 : size / chunksize;
|
||||||
assert(atomic_read_z(&curchunks) >= nsub);
|
assert(atomic_read_z(&curchunks) >= nsub);
|
||||||
atomic_sub_z(&curchunks, nsub);
|
atomic_sub_z(&curchunks, nsub);
|
||||||
}
|
}
|
||||||
@ -62,8 +64,8 @@ chunk_deregister(const void *chunk, const extent_node_t *node)
|
|||||||
|
|
||||||
static void *
|
static void *
|
||||||
chunk_recycle(arena_t *arena, extent_tree_t *chunks_szad,
|
chunk_recycle(arena_t *arena, extent_tree_t *chunks_szad,
|
||||||
extent_tree_t *chunks_ad, void *new_addr, size_t size, size_t alignment,
|
extent_tree_t *chunks_ad, bool dirty, void *new_addr, size_t size,
|
||||||
bool *zero)
|
size_t alignment, bool *zero)
|
||||||
{
|
{
|
||||||
void *ret;
|
void *ret;
|
||||||
extent_node_t *node;
|
extent_node_t *node;
|
||||||
@ -77,32 +79,35 @@ chunk_recycle(arena_t *arena, extent_tree_t *chunks_szad,
|
|||||||
/* Beware size_t wrap-around. */
|
/* Beware size_t wrap-around. */
|
||||||
if (alloc_size < size)
|
if (alloc_size < size)
|
||||||
return (NULL);
|
return (NULL);
|
||||||
key.addr = new_addr;
|
extent_node_addr_set(&key, new_addr);
|
||||||
key.size = alloc_size;
|
extent_node_size_set(&key, alloc_size);
|
||||||
malloc_mutex_lock(&arena->chunks_mtx);
|
malloc_mutex_lock(&arena->chunks_mtx);
|
||||||
node = (new_addr != NULL) ? extent_tree_ad_search(chunks_ad, &key) :
|
node = (new_addr != NULL) ? extent_tree_ad_search(chunks_ad, &key) :
|
||||||
extent_tree_szad_nsearch(chunks_szad, &key);
|
extent_tree_szad_nsearch(chunks_szad, &key);
|
||||||
if (node == NULL || (new_addr != NULL && node->size < size)) {
|
if (node == NULL || (new_addr != NULL && extent_node_size_get(node) <
|
||||||
|
size)) {
|
||||||
malloc_mutex_unlock(&arena->chunks_mtx);
|
malloc_mutex_unlock(&arena->chunks_mtx);
|
||||||
return (NULL);
|
return (NULL);
|
||||||
}
|
}
|
||||||
leadsize = ALIGNMENT_CEILING((uintptr_t)node->addr, alignment) -
|
leadsize = ALIGNMENT_CEILING((uintptr_t)extent_node_addr_get(node),
|
||||||
(uintptr_t)node->addr;
|
alignment) - (uintptr_t)extent_node_addr_get(node);
|
||||||
assert(new_addr == NULL || leadsize == 0);
|
assert(new_addr == NULL || leadsize == 0);
|
||||||
assert(node->size >= leadsize + size);
|
assert(extent_node_size_get(node) >= leadsize + size);
|
||||||
trailsize = node->size - leadsize - size;
|
trailsize = extent_node_size_get(node) - leadsize - size;
|
||||||
ret = (void *)((uintptr_t)node->addr + leadsize);
|
ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize);
|
||||||
zeroed = node->zeroed;
|
zeroed = extent_node_zeroed_get(node);
|
||||||
if (zeroed)
|
if (zeroed)
|
||||||
*zero = true;
|
*zero = true;
|
||||||
/* Remove node from the tree. */
|
/* Remove node from the tree. */
|
||||||
extent_tree_szad_remove(chunks_szad, node);
|
extent_tree_szad_remove(chunks_szad, node);
|
||||||
extent_tree_ad_remove(chunks_ad, node);
|
extent_tree_ad_remove(chunks_ad, node);
|
||||||
|
arena_chunk_dirty_maybe_remove(arena, node, dirty);
|
||||||
if (leadsize != 0) {
|
if (leadsize != 0) {
|
||||||
/* Insert the leading space as a smaller chunk. */
|
/* Insert the leading space as a smaller chunk. */
|
||||||
node->size = leadsize;
|
extent_node_size_set(node, leadsize);
|
||||||
extent_tree_szad_insert(chunks_szad, node);
|
extent_tree_szad_insert(chunks_szad, node);
|
||||||
extent_tree_ad_insert(chunks_ad, node);
|
extent_tree_ad_insert(chunks_ad, node);
|
||||||
|
arena_chunk_dirty_maybe_insert(arena, node, dirty);
|
||||||
node = NULL;
|
node = NULL;
|
||||||
}
|
}
|
||||||
if (trailsize != 0) {
|
if (trailsize != 0) {
|
||||||
@ -111,15 +116,17 @@ chunk_recycle(arena_t *arena, extent_tree_t *chunks_szad,
|
|||||||
node = arena_node_alloc(arena);
|
node = arena_node_alloc(arena);
|
||||||
if (node == NULL) {
|
if (node == NULL) {
|
||||||
malloc_mutex_unlock(&arena->chunks_mtx);
|
malloc_mutex_unlock(&arena->chunks_mtx);
|
||||||
chunk_unmap(arena, ret, size);
|
chunk_record(arena, chunks_szad, chunks_ad,
|
||||||
|
dirty, ret, size);
|
||||||
return (NULL);
|
return (NULL);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
node->addr = (void *)((uintptr_t)(ret) + size);
|
extent_node_addr_set(node, (void *)((uintptr_t)(ret) + size));
|
||||||
node->size = trailsize;
|
extent_node_size_set(node, trailsize);
|
||||||
node->zeroed = zeroed;
|
extent_node_zeroed_set(node, zeroed);
|
||||||
extent_tree_szad_insert(chunks_szad, node);
|
extent_tree_szad_insert(chunks_szad, node);
|
||||||
extent_tree_ad_insert(chunks_ad, node);
|
extent_tree_ad_insert(chunks_ad, node);
|
||||||
|
arena_chunk_dirty_maybe_insert(arena, node, dirty);
|
||||||
node = NULL;
|
node = NULL;
|
||||||
}
|
}
|
||||||
malloc_mutex_unlock(&arena->chunks_mtx);
|
malloc_mutex_unlock(&arena->chunks_mtx);
|
||||||
@ -148,7 +155,8 @@ chunk_alloc_core_dss(arena_t *arena, void *new_addr, size_t size,
|
|||||||
void *ret;
|
void *ret;
|
||||||
|
|
||||||
if ((ret = chunk_recycle(arena, &arena->chunks_szad_dss,
|
if ((ret = chunk_recycle(arena, &arena->chunks_szad_dss,
|
||||||
&arena->chunks_ad_dss, new_addr, size, alignment, zero)) != NULL)
|
&arena->chunks_ad_dss, false, new_addr, size, alignment, zero)) !=
|
||||||
|
NULL)
|
||||||
return (ret);
|
return (ret);
|
||||||
ret = chunk_alloc_dss(arena, new_addr, size, alignment, zero);
|
ret = chunk_alloc_dss(arena, new_addr, size, alignment, zero);
|
||||||
return (ret);
|
return (ret);
|
||||||
@ -171,6 +179,11 @@ chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment,
|
|||||||
assert(alignment != 0);
|
assert(alignment != 0);
|
||||||
assert((alignment & chunksize_mask) == 0);
|
assert((alignment & chunksize_mask) == 0);
|
||||||
|
|
||||||
|
/* dirty. */
|
||||||
|
if ((ret = chunk_recycle(arena, &arena->chunks_szad_dirty,
|
||||||
|
&arena->chunks_ad_dirty, true, new_addr, size, alignment, zero)) !=
|
||||||
|
NULL)
|
||||||
|
return (ret);
|
||||||
/* "primary" dss. */
|
/* "primary" dss. */
|
||||||
if (have_dss && dss_prec == dss_prec_primary && (ret =
|
if (have_dss && dss_prec == dss_prec_primary && (ret =
|
||||||
chunk_alloc_core_dss(arena, new_addr, size, alignment, zero)) !=
|
chunk_alloc_core_dss(arena, new_addr, size, alignment, zero)) !=
|
@@ -178,8 +191,8 @@ chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment,
 		return (ret);
 	/* mmap. */
 	if (!config_munmap && (ret = chunk_recycle(arena,
-	    &arena->chunks_szad_mmap, &arena->chunks_ad_mmap, new_addr, size,
-	    alignment, zero)) != NULL)
+	    &arena->chunks_szad_mmap, &arena->chunks_ad_mmap, false, new_addr,
+	    size, alignment, zero)) != NULL)
 		return (ret);
 	/*
 	 * Requesting an address is not implemented for chunk_alloc_mmap(), so
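Taken together, the two hunks above give chunk_alloc_core a fixed source order: the per-arena dirty cache first, then dss (when it has primary precedence), then retained mmap'ed chunks, and finally a fresh mapping. The ordering policy on its own is easy to mirror with a few hypothetical stub sources; this sketch is not jemalloc's API, just the decision shape:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Stub sources; a real allocator would consult its trees/dss here. */
static void *try_dirty(size_t size)      { (void)size; return (NULL); }
static void *try_dss(size_t size)        { (void)size; return (NULL); }
static void *try_mmap_cache(size_t size) { (void)size; return (NULL); }
static void *fresh_map(size_t size)      { return (malloc(size)); }

static void *
alloc_chunk_sketch(size_t size)
{
	void *ret;

	if ((ret = try_dirty(size)) != NULL)		/* reuse dirty chunks first */
		return (ret);
	if ((ret = try_dss(size)) != NULL)		/* then sbrk-backed memory */
		return (ret);
	if ((ret = try_mmap_cache(size)) != NULL)	/* then retained mappings */
		return (ret);
	return (fresh_map(size));			/* finally, map new pages */
}

int
main(void)
{
	void *p = alloc_chunk_sketch(1 << 20);

	printf("got %p\n", p);
	free(p);
	return (0);
}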
@@ -263,54 +276,62 @@ chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
 	    arena->dss_prec));
 }
 
-static void
+void
 chunk_record(arena_t *arena, extent_tree_t *chunks_szad,
-    extent_tree_t *chunks_ad, void *chunk, size_t size)
+    extent_tree_t *chunks_ad, bool dirty, void *chunk, size_t size)
 {
 	bool unzeroed;
-	extent_node_t *node, *prev, key;
+	extent_node_t *node, *prev;
+	extent_node_t key;
 
-	unzeroed = pages_purge(chunk, size);
+	unzeroed = dirty ? true : pages_purge(chunk, size);
 	JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
 
 	malloc_mutex_lock(&arena->chunks_mtx);
-	key.addr = (void *)((uintptr_t)chunk + size);
+	extent_node_addr_set(&key, (void *)((uintptr_t)chunk + size));
 	node = extent_tree_ad_nsearch(chunks_ad, &key);
 	/* Try to coalesce forward. */
-	if (node != NULL && node->addr == key.addr) {
+	if (node != NULL && extent_node_addr_get(node) ==
+	    extent_node_addr_get(&key)) {
 		/*
 		 * Coalesce chunk with the following address range. This does
 		 * not change the position within chunks_ad, so only
 		 * remove/insert from/into chunks_szad.
 		 */
 		extent_tree_szad_remove(chunks_szad, node);
-		node->addr = chunk;
-		node->size += size;
-		node->zeroed = (node->zeroed && !unzeroed);
+		arena_chunk_dirty_maybe_remove(arena, node, dirty);
+		extent_node_addr_set(node, chunk);
+		extent_node_size_set(node, extent_node_size_get(node) + size);
+		extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
+		    !unzeroed);
 		extent_tree_szad_insert(chunks_szad, node);
+		arena_chunk_dirty_maybe_insert(arena, node, dirty);
 	} else {
 		/* Coalescing forward failed, so insert a new node. */
 		node = arena_node_alloc(arena);
 		if (node == NULL) {
 			/*
 			 * Node allocation failed, which is an exceedingly
-			 * unlikely failure. Leak chunk; its pages have
-			 * already been purged, so this is only a virtual
-			 * memory leak.
+			 * unlikely failure. Leak chunk after making sure its
+			 * pages have already been purged, so that this is only
+			 * a virtual memory leak.
 			 */
+			if (dirty)
+				pages_purge(chunk, size);
 			goto label_return;
 		}
-		node->addr = chunk;
-		node->size = size;
-		node->zeroed = !unzeroed;
+		extent_node_addr_set(node, chunk);
+		extent_node_size_set(node, size);
+		extent_node_zeroed_set(node, !unzeroed);
 		extent_tree_ad_insert(chunks_ad, node);
 		extent_tree_szad_insert(chunks_szad, node);
+		arena_chunk_dirty_maybe_insert(arena, node, dirty);
 	}
 
 	/* Try to coalesce backward. */
 	prev = extent_tree_ad_prev(chunks_ad, node);
-	if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
-	    chunk) {
+	if (prev != NULL && (void *)((uintptr_t)extent_node_addr_get(prev) +
+	    extent_node_size_get(prev)) == chunk) {
 		/*
 		 * Coalesce chunk with the previous address range. This does
 		 * not change the position within chunks_ad, so only
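chunk_record's forward coalescing looks up the cached range that starts exactly at chunk+size and, if one exists, extends it backward to cover the freed chunk, pulling it out of the size/address tree (and now the dirty set) before its key fields change. The merge test itself, stripped of the trees and with hypothetical names, reduces to this sketch:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
	void	*addr;
	size_t	 size;
	bool	 zeroed;
} free_range_t;

/* If *next immediately follows [chunk, chunk+size), extend it to cover chunk. */
static bool
coalesce_forward(free_range_t *next, void *chunk, size_t size)
{
	if (next == NULL || (void *)((uintptr_t)chunk + size) != next->addr)
		return (false);
	next->addr = chunk;
	next->size += size;
	next->zeroed = false;	/* conservatively treat freed memory as dirty */
	return (true);
}

int
main(void)
{
	static char mem[8192];
	free_range_t cached = { mem + 4096, 4096, true };

	if (coalesce_forward(&cached, mem, 4096))
		printf("merged range: %p, %zu bytes\n", cached.addr, cached.size);
	return (0);
}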
@@ -318,12 +339,16 @@ chunk_record(arena_t *arena, extent_tree_t *chunks_szad,
 		 */
 		extent_tree_szad_remove(chunks_szad, prev);
 		extent_tree_ad_remove(chunks_ad, prev);
+		arena_chunk_dirty_maybe_remove(arena, prev, dirty);
 		extent_tree_szad_remove(chunks_szad, node);
-		node->addr = prev->addr;
-		node->size += prev->size;
-		node->zeroed = (node->zeroed && prev->zeroed);
+		arena_chunk_dirty_maybe_remove(arena, node, dirty);
+		extent_node_addr_set(node, extent_node_addr_get(prev));
+		extent_node_size_set(node, extent_node_size_get(node) +
+		    extent_node_size_get(prev));
+		extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
+		    extent_node_zeroed_get(prev));
 		extent_tree_szad_insert(chunks_szad, node);
+		arena_chunk_dirty_maybe_insert(arena, node, dirty);
 
 		arena_node_dalloc(arena, prev);
 	}
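Both coalescing branches bracket their tree updates with arena_chunk_dirty_maybe_remove()/arena_chunk_dirty_maybe_insert(), so a node is never reachable through the dirty bookkeeping while its key fields are being rewritten, and clean (dss/mmap) nodes are never tracked at all. The conditional-tracking idea, reduced to a hypothetical sentinel-based ring rather than jemalloc's actual structures, looks like this:

#include <stdbool.h>
#include <stdio.h>

typedef struct dnode dnode_t;
struct dnode {
	dnode_t	*prev, *next;
	bool	 on_list;
};

typedef struct {
	dnode_t	head;	/* sentinel for an LRU-style ring */
} dirty_list_t;

static void
dirty_list_init(dirty_list_t *l)
{
	l->head.prev = l->head.next = &l->head;
}

/* Only track the node when it belongs to the dirty set. */
static void
dirty_maybe_insert(dirty_list_t *l, dnode_t *n, bool dirty)
{
	if (!dirty)
		return;
	n->prev = l->head.prev;
	n->next = &l->head;
	n->prev->next = n;
	n->next->prev = n;
	n->on_list = true;
}

static void
dirty_maybe_remove(dirty_list_t *l, dnode_t *n, bool dirty)
{
	(void)l;
	if (!dirty || !n->on_list)
		return;
	n->prev->next = n->next;
	n->next->prev = n->prev;
	n->on_list = false;
}

int
main(void)
{
	dirty_list_t dirty;
	dnode_t a = {0};

	dirty_list_init(&dirty);
	dirty_maybe_insert(&dirty, &a, true);	/* tracked while cached dirty */
	dirty_maybe_remove(&dirty, &a, true);	/* untracked before key change */
	dirty_maybe_insert(&dirty, &a, false);	/* clean trees: no-op */
	printf("tracked: %d\n", a.on_list);
	return (0);
}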
@@ -332,6 +357,28 @@ label_return:
 	malloc_mutex_unlock(&arena->chunks_mtx);
 }
 
+static void
+chunk_cache(arena_t *arena, void *chunk, size_t size)
+{
+
+	assert(chunk != NULL);
+	assert(CHUNK_ADDR2BASE(chunk) == chunk);
+	assert(size != 0);
+	assert((size & chunksize_mask) == 0);
+
+	chunk_record(arena, &arena->chunks_szad_dirty, &arena->chunks_ad_dirty,
+	    true, chunk, size);
+}
+
+/* Default arena chunk deallocation routine in the absence of user override. */
+bool
+chunk_dalloc_default(void *chunk, size_t size, unsigned arena_ind)
+{
+
+	chunk_cache(chunk_arena_get(arena_ind), chunk, size);
+	return (false);
+}
+
 void
 chunk_unmap(arena_t *arena, void *chunk, size_t size)
 {
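With chunk_cache() in place, the default deallocation hook no longer unmaps or purges anything eagerly; it just records the chunk in the arena's dirty trees and lets the purging machinery decide later. A minimal sketch of that routing, with hypothetical types and a stubbed-out record step standing in for the real tree insertion:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct {
	size_t	cached_bytes;	/* stand-in for the dirty szad/ad trees */
} sketch_arena_t;

/* Assumed helper: record [chunk, chunk+size) as unused dirty memory. */
static void
record_dirty(sketch_arena_t *arena, void *chunk, size_t size)
{
	(void)chunk;
	arena->cached_bytes += size;
}

/* Mirrors the chunk_cache()/chunk_dalloc_default() pairing added above. */
static bool
dalloc_chunk_sketch(sketch_arena_t *arena, void *chunk, size_t size)
{
	record_dirty(arena, chunk, size);	/* defer purge/unmap to later */
	return (false);				/* false: deallocation handled */
}

int
main(void)
{
	static char chunk[1 << 16];
	sketch_arena_t arena = { 0 };

	dalloc_chunk_sketch(&arena, chunk, sizeof(chunk));
	printf("dirty bytes cached: %zu\n", arena.cached_bytes);
	return (0);
}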
@@ -343,22 +390,13 @@ chunk_unmap(arena_t *arena, void *chunk, size_t size)
 
 	if (have_dss && chunk_in_dss(chunk)) {
 		chunk_record(arena, &arena->chunks_szad_dss,
-		    &arena->chunks_ad_dss, chunk, size);
+		    &arena->chunks_ad_dss, false, chunk, size);
 	} else if (chunk_dalloc_mmap(chunk, size)) {
 		chunk_record(arena, &arena->chunks_szad_mmap,
-		    &arena->chunks_ad_mmap, chunk, size);
+		    &arena->chunks_ad_mmap, false, chunk, size);
 	}
 }
 
-/* Default arena chunk deallocation routine in the absence of user override. */
-bool
-chunk_dalloc_default(void *chunk, size_t size, unsigned arena_ind)
-{
-
-	chunk_unmap(chunk_arena_get(arena_ind), chunk, size);
-	return (false);
-}
-
 static rtree_node_elm_t *
 chunks_rtree_node_alloc(size_t nelms)
 {
src/chunk_dss.c
@@ -133,8 +133,12 @@ chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
 				/* Success. */
 				dss_max = dss_next;
 				malloc_mutex_unlock(&dss_mtx);
-				if (cpad_size != 0)
-					chunk_unmap(arena, cpad, cpad_size);
+				if (cpad_size != 0) {
+					chunk_record(arena,
+					    &arena->chunks_szad_dss,
+					    &arena->chunks_ad_dss, false, cpad,
+					    cpad_size);
+				}
 				if (*zero) {
 					JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
 					    ret, size);
12 src/extent.c
@@ -7,13 +7,13 @@ JEMALLOC_INLINE_C int
 extent_szad_comp(extent_node_t *a, extent_node_t *b)
 {
 	int ret;
-	size_t a_size = a->size;
-	size_t b_size = b->size;
+	size_t a_size = extent_node_size_get(a);
+	size_t b_size = extent_node_size_get(b);
 
 	ret = (a_size > b_size) - (a_size < b_size);
 	if (ret == 0) {
-		uintptr_t a_addr = (uintptr_t)a->addr;
-		uintptr_t b_addr = (uintptr_t)b->addr;
+		uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
+		uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);
 
 		ret = (a_addr > b_addr) - (a_addr < b_addr);
 	}
@@ -28,8 +28,8 @@ rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, szad_link,
 JEMALLOC_INLINE_C int
 extent_ad_comp(extent_node_t *a, extent_node_t *b)
 {
-	uintptr_t a_addr = (uintptr_t)a->addr;
-	uintptr_t b_addr = (uintptr_t)b->addr;
+	uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
+	uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);
 
 	return ((a_addr > b_addr) - (a_addr < b_addr));
 }
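The comparator changes are representative of the wider refactor: extent_node_t fields are no longer read or written directly outside the accessors, which keeps later layout changes (the arena pointer, the achunk flag, the dirty linkage) local to one struct. The pattern, applied to a hypothetical two-field node rather than jemalloc's real type:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
	void	*en_addr;
	size_t	 en_size;
} node_t;

/* Accessors in the style of extent_node_{addr,size}_{get,set}(). */
static void  *node_addr_get(const node_t *n)	{ return (n->en_addr); }
static size_t node_size_get(const node_t *n)	{ return (n->en_size); }
static void   node_addr_set(node_t *n, void *a)	{ n->en_addr = a; }
static void   node_size_set(node_t *n, size_t s){ n->en_size = s; }

/* Size-major, address-minor ordering, as in a szad tree comparator. */
static int
node_szad_comp(const node_t *a, const node_t *b)
{
	size_t a_size = node_size_get(a), b_size = node_size_get(b);
	int ret = (a_size > b_size) - (a_size < b_size);

	if (ret == 0) {
		uintptr_t a_addr = (uintptr_t)node_addr_get(a);
		uintptr_t b_addr = (uintptr_t)node_addr_get(b);

		ret = (a_addr > b_addr) - (a_addr < b_addr);
	}
	return (ret);
}

int
main(void)
{
	node_t a, b;

	node_addr_set(&a, (void *)0x1000); node_size_set(&a, 4096);
	node_addr_set(&b, (void *)0x2000); node_size_set(&b, 4096);
	printf("cmp=%d\n", node_szad_comp(&a, &b));	/* equal size: by address */
	return (0);
}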
61 src/huge.c
@@ -9,7 +9,7 @@ huge_node_get(const void *ptr)
 	extent_node_t *node;
 
 	node = chunk_lookup(ptr);
-	assert(node->size != 0);
+	assert(!extent_node_achunk_get(node));
 
 	return (node);
 }
@@ -18,8 +18,8 @@ static bool
 huge_node_set(const void *ptr, extent_node_t *node)
 {
 
-	assert(node->addr == ptr);
-	assert(node->size != 0);
+	assert(extent_node_addr_get(node) == ptr);
+	assert(!extent_node_achunk_get(node));
 	return (chunk_register(ptr, node));
 }
 
@@ -73,10 +73,11 @@ huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
 		return (NULL);
 	}
 
-	node->addr = ret;
-	node->size = usize;
-	node->zeroed = is_zeroed;
-	node->arena = arena;
+	extent_node_arena_set(node, arena);
+	extent_node_addr_set(node, ret);
+	extent_node_size_set(node, usize);
+	extent_node_achunk_set(node, false);
+	extent_node_zeroed_set(node, is_zeroed);
 
 	if (huge_node_set(ret, node)) {
 		arena_chunk_dalloc_huge(arena, ret, usize);
@@ -152,13 +153,13 @@ huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize,
 		zeroed = true;
 
 	node = huge_node_get(ptr);
-	arena = node->arena;
+	arena = extent_node_arena_get(node);
 	malloc_mutex_lock(&arena->huge_mtx);
 	/* Update the size of the huge allocation. */
-	assert(node->size != usize);
-	node->size = usize;
-	/* Clear node->zeroed if zeroing failed above. */
-	node->zeroed = (node->zeroed && zeroed);
+	assert(extent_node_size_get(node) != usize);
+	extent_node_size_set(node, usize);
+	/* Clear node's zeroed field if zeroing failed above. */
+	extent_node_zeroed_set(node, extent_node_zeroed_get(node) && zeroed);
 	malloc_mutex_unlock(&arena->huge_mtx);
 
 	arena_chunk_ralloc_huge_similar(arena, ptr, oldsize, usize);
@@ -195,12 +196,12 @@ huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
 	}
 
 	node = huge_node_get(ptr);
-	arena = node->arena;
+	arena = extent_node_arena_get(node);
 	malloc_mutex_lock(&arena->huge_mtx);
 	/* Update the size of the huge allocation. */
-	node->size = usize;
-	/* Clear node->zeroed if zeroing failed above. */
-	node->zeroed = (node->zeroed && zeroed);
+	extent_node_size_set(node, usize);
+	/* Clear node's zeroed field if zeroing failed above. */
+	extent_node_zeroed_set(node, extent_node_zeroed_get(node) && zeroed);
 	malloc_mutex_unlock(&arena->huge_mtx);
 
 	/* Zap the excess chunks. */
@@ -221,9 +222,9 @@ huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t size, bool zero) {
 	}
 
 	node = huge_node_get(ptr);
-	arena = node->arena;
+	arena = extent_node_arena_get(node);
 	malloc_mutex_lock(&arena->huge_mtx);
-	is_zeroed_subchunk = node->zeroed;
+	is_zeroed_subchunk = extent_node_zeroed_get(node);
 	malloc_mutex_unlock(&arena->huge_mtx);
 
 	/*
@@ -238,7 +239,7 @@ huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t size, bool zero) {
 
 	malloc_mutex_lock(&arena->huge_mtx);
 	/* Update the size of the huge allocation. */
-	node->size = usize;
+	extent_node_size_set(node, usize);
 	malloc_mutex_unlock(&arena->huge_mtx);
 
 	if (zero || (config_fill && unlikely(opt_zero))) {
@@ -358,14 +359,16 @@ huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
 	arena_t *arena;
 
 	node = huge_node_get(ptr);
-	arena = node->arena;
+	arena = extent_node_arena_get(node);
 	huge_node_unset(ptr, node);
 	malloc_mutex_lock(&arena->huge_mtx);
 	ql_remove(&arena->huge, node, ql_link);
 	malloc_mutex_unlock(&arena->huge_mtx);
 
-	huge_dalloc_junk(node->addr, node->size);
-	arena_chunk_dalloc_huge(node->arena, node->addr, node->size);
+	huge_dalloc_junk(extent_node_addr_get(node),
+	    extent_node_size_get(node));
+	arena_chunk_dalloc_huge(extent_node_arena_get(node),
+	    extent_node_addr_get(node), extent_node_size_get(node));
 	idalloctm(tsd, node, tcache, true);
 }
 
@@ -373,7 +376,7 @@ arena_t *
 huge_aalloc(const void *ptr)
 {
 
-	return (huge_node_get(ptr)->arena);
+	return (extent_node_arena_get(huge_node_get(ptr)));
 }
 
 size_t
@@ -384,9 +387,9 @@ huge_salloc(const void *ptr)
 	arena_t *arena;
 
 	node = huge_node_get(ptr);
-	arena = node->arena;
+	arena = extent_node_arena_get(node);
 	malloc_mutex_lock(&arena->huge_mtx);
-	size = node->size;
+	size = extent_node_size_get(node);
 	malloc_mutex_unlock(&arena->huge_mtx);
 
 	return (size);
@@ -400,9 +403,9 @@ huge_prof_tctx_get(const void *ptr)
 	arena_t *arena;
 
 	node = huge_node_get(ptr);
-	arena = node->arena;
+	arena = extent_node_arena_get(node);
 	malloc_mutex_lock(&arena->huge_mtx);
-	tctx = node->prof_tctx;
+	tctx = extent_node_prof_tctx_get(node);
 	malloc_mutex_unlock(&arena->huge_mtx);
 
 	return (tctx);
@@ -415,8 +418,8 @@ huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
 	arena_t *arena;
 
 	node = huge_node_get(ptr);
-	arena = node->arena;
+	arena = extent_node_arena_get(node);
 	malloc_mutex_lock(&arena->huge_mtx);
-	node->prof_tctx = tctx;
+	extent_node_prof_tctx_set(node, tctx);
 	malloc_mutex_unlock(&arena->huge_mtx);
 }
src/tcache.c
@@ -103,7 +103,7 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
 		/* Lock the arena bin associated with the first object. */
 		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
 		    tbin->avail[0]);
-		arena_t *bin_arena = chunk->node.arena;
+		arena_t *bin_arena = extent_node_arena_get(&chunk->node);
 		arena_bin_t *bin = &bin_arena->bins[binind];
 
 		if (config_prof && bin_arena == arena) {
@@ -125,7 +125,7 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
 			ptr = tbin->avail[i];
 			assert(ptr != NULL);
 			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-			if (chunk->node.arena == bin_arena) {
+			if (extent_node_arena_get(&chunk->node) == bin_arena) {
 				size_t pageind = ((uintptr_t)ptr -
 				    (uintptr_t)chunk) >> LG_PAGE;
 				arena_chunk_map_bits_t *bitselm =
@@ -183,7 +183,7 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, index_t binind,
 		/* Lock the arena associated with the first object. */
 		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
 		    tbin->avail[0]);
-		arena_t *locked_arena = chunk->node.arena;
+		arena_t *locked_arena = extent_node_arena_get(&chunk->node);
 		UNUSED bool idump;
 
 		if (config_prof)
@@ -209,7 +209,8 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, index_t binind,
 			ptr = tbin->avail[i];
 			assert(ptr != NULL);
 			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-			if (chunk->node.arena == locked_arena) {
+			if (extent_node_arena_get(&chunk->node) ==
+			    locked_arena) {
 				arena_dalloc_large_junked_locked(locked_arena,
 				    chunk, ptr);
 			} else {
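The tcache flush paths recover the owning arena straight from an object pointer: mask the pointer down to its chunk base and read the arena out of the extent node embedded at the start of the chunk header, now via extent_node_arena_get(). A rough sketch of that idea with hypothetical types and an assumed 2 MiB chunk size:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define SKETCH_CHUNK_SIZE ((uintptr_t)1 << 21)	/* assumed 2 MiB chunks */

typedef struct { int id; } sk_arena_t;
typedef struct { sk_arena_t *arena; } sk_node_t;
typedef struct { sk_node_t node; } sk_chunk_t;	/* node is the first member */

/* Mask an interior pointer down to its chunk base, like CHUNK_ADDR2BASE(). */
static sk_chunk_t *
chunk_of(const void *ptr)
{
	return ((sk_chunk_t *)((uintptr_t)ptr & ~(SKETCH_CHUNK_SIZE - 1)));
}

/* Read the owning arena out of the chunk header's embedded node. */
static sk_arena_t *
arena_of(const void *ptr)
{
	return (chunk_of(ptr)->node.arena);
}

int
main(void)
{
	sk_arena_t arena = { 42 };
	sk_chunk_t *chunk = aligned_alloc(SKETCH_CHUNK_SIZE, SKETCH_CHUNK_SIZE);

	if (chunk == NULL)
		return (1);
	chunk->node.arena = &arena;
	printf("owner arena id: %d\n", arena_of((char *)chunk + 12345)->id);
	free(chunk);
	return (0);
}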