Add the "arenas.purge" mallctl.

Jason Evans 2010-09-30 16:55:08 -07:00
parent 075e77cad4
commit 6005f0710c
4 changed files with 68 additions and 12 deletions

View File

@@ -38,7 +38,7 @@
 .\" @(#)malloc.3 8.1 (Berkeley) 6/4/93
 .\" $FreeBSD: head/lib/libc/stdlib/malloc.3 182225 2008-08-27 02:00:53Z jasone $
 .\"
-.Dd September 17, 2010
+.Dd September 30, 2010
 .Dt JEMALLOC 3
 .Os
 .Sh NAME
@@ -1185,6 +1185,12 @@ Total number of large size classes.
 Maximum size supported by this large size class.
 .Ed
 .\"-----------------------------------------------------------------------------
+.It Sy "arenas.purge (unsigned) -w"
+.Bd -ragged -offset indent -compact
+Purge unused dirty pages for the specified arena, or for all arenas if none is
+specified.
+.Ed
+.\"-----------------------------------------------------------------------------
 @roff_prof@.It Sy "prof.active (bool) rw"
 @roff_prof@.Bd -ragged -offset indent -compact
 @roff_prof@Control whether sampling is currently active.
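
The new control is write-only: writing an arena index purges that arena, and
issuing the write with newp == NULL purges every arena.  A minimal caller might
look like the following sketch; it is not part of this change, and it assumes
the unprefixed mallctl() name is exposed (e.g. via JEMALLOC_MANGLE):

#define JEMALLOC_MANGLE
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	unsigned arena = 0;

	/* Purge unused dirty pages belonging to arena 0. */
	if (mallctl("arenas.purge", NULL, NULL, &arena, sizeof(arena)) != 0)
		fprintf(stderr, "arenas.purge failed for arena %u\n", arena);

	/* Omitting the write value (newp == NULL) purges all arenas. */
	if (mallctl("arenas.purge", NULL, NULL, NULL, 0) != 0)
		fprintf(stderr, "arenas.purge failed for all arenas\n");

	return (0);
}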

View File

@@ -418,6 +418,10 @@ extern size_t sspace_max;
 #define nlclasses (chunk_npages - arena_chunk_header_npages)
 
+void arena_purge_all(arena_t *arena);
+#ifdef JEMALLOC_PROF
+void arena_prof_accum(arena_t *arena, uint64_t accumbytes);
+#endif
 #ifdef JEMALLOC_TCACHE
 void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin,
     size_t binind
@@ -426,9 +430,6 @@ void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin,
 #  endif
     );
 #endif
-#ifdef JEMALLOC_PROF
-void arena_prof_accum(arena_t *arena, uint64_t accumbytes);
-#endif
 void *arena_malloc_small(arena_t *arena, size_t size, bool zero);
 void *arena_malloc_large(arena_t *arena, size_t size, bool zero);
 void *arena_malloc(size_t size, bool zero);

View File

@@ -165,7 +165,7 @@ static arena_chunk_t *arena_chunk_alloc(arena_t *arena);
 static void arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
 static arena_run_t *arena_run_alloc(arena_t *arena, size_t size, bool large,
     bool zero);
-static void arena_purge(arena_t *arena);
+static void arena_purge(arena_t *arena, bool all);
 static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty);
 static void arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk,
     arena_run_t *run, size_t oldsize, size_t newsize);
@@ -585,7 +585,7 @@ arena_maybe_purge(arena_t *arena)
 	    (arena->ndirty - arena->npurgatory) > chunk_npages &&
 	    (arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty -
 	    arena->npurgatory))
-		arena_purge(arena);
+		arena_purge(arena, false);
 }
 
 static inline void
@@ -758,7 +758,7 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
 }
 
 static void
-arena_purge(arena_t *arena)
+arena_purge(arena_t *arena, bool all)
 {
 	arena_chunk_t *chunk;
 	size_t npurgatory;
@@ -772,8 +772,8 @@ arena_purge(arena_t *arena)
 	assert(ndirty == arena->ndirty);
 #endif
 	assert(arena->ndirty > arena->npurgatory);
-	assert(arena->ndirty > chunk_npages);
-	assert((arena->nactive >> opt_lg_dirty_mult) < arena->ndirty);
+	assert(arena->ndirty > chunk_npages || all);
+	assert((arena->nactive >> opt_lg_dirty_mult) < arena->ndirty || all);
 
 #ifdef JEMALLOC_STATS
 	arena->stats.npurge++;
@@ -784,8 +784,9 @@ arena_purge(arena_t *arena)
 	 * purge, and add the result to arena->npurgatory.  This will keep
 	 * multiple threads from racing to reduce ndirty below the threshold.
 	 */
-	npurgatory = (arena->ndirty - arena->npurgatory) - (arena->nactive >>
-	    opt_lg_dirty_mult);
+	npurgatory = arena->ndirty - arena->npurgatory;
+	if (all == false)
+		npurgatory -= arena->nactive >> opt_lg_dirty_mult;
 	arena->npurgatory += npurgatory;
 
 	while (npurgatory > 0) {
@@ -841,6 +842,15 @@ arena_purge(arena_t *arena)
 	}
 }
 
+void
+arena_purge_all(arena_t *arena)
+{
+
+	malloc_mutex_lock(&arena->lock);
+	arena_purge(arena, true);
+	malloc_mutex_unlock(&arena->lock);
+}
+
 static void
 arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
 {
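
To make the new "all" flag concrete: a threshold-driven purge (all == false)
only reduces ndirty to the nactive >> opt_lg_dirty_mult floor, while
arena_purge_all() claims every dirty page.  A self-contained arithmetic sketch
with illustrative values (e.g. opt_lg_dirty_mult = 5, i.e. keep up to
nactive/32 dirty pages):

#include <stdio.h>

int
main(void)
{
	size_t nactive = 8192;		/* Pages backing active runs. */
	size_t ndirty = 1000;		/* Unused dirty pages. */
	size_t npurgatory = 0;		/* Dirty pages claimed by purgers. */
	unsigned lg_dirty_mult = 5;	/* Illustrative opt_lg_dirty_mult. */

	/* all == false: purge down to the nactive >> lg_dirty_mult floor. */
	size_t partial = (ndirty - npurgatory) - (nactive >> lg_dirty_mult);

	/* all == true (arena_purge_all()): claim every dirty page. */
	size_t full = ndirty - npurgatory;

	/* Prints: threshold purge claims 744 pages; full purge claims 1000. */
	printf("threshold purge claims %zu pages; full purge claims %zu\n",
	    partial, full);
	return (0);
}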

View File

@@ -126,6 +126,7 @@ CTL_PROTO(arenas_nbins)
 CTL_PROTO(arenas_nhbins)
 #endif
 CTL_PROTO(arenas_nlruns)
+CTL_PROTO(arenas_purge)
 #ifdef JEMALLOC_PROF
 CTL_PROTO(prof_active)
 CTL_PROTO(prof_dump)
@@ -326,7 +327,8 @@ static const ctl_node_t arenas_node[] = {
 #endif
 	{NAME("bin"),		CHILD(arenas_bin)},
 	{NAME("nlruns"),	CTL(arenas_nlruns)},
-	{NAME("lrun"),		CHILD(arenas_lrun)}
+	{NAME("lrun"),		CHILD(arenas_lrun)},
+	{NAME("purge"),		CTL(arenas_purge)}
 };
 
 #ifdef JEMALLOC_PROF
@@ -1293,6 +1295,43 @@ CTL_RO_GEN(arenas_nhbins, nhbins, unsigned)
 #endif
 CTL_RO_GEN(arenas_nlruns, nlclasses, size_t)
 
+static int
+arenas_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+    void *newp, size_t newlen)
+{
+	int ret;
+	unsigned arena;
+
+	WRITEONLY();
+	arena = UINT_MAX;
+	WRITE(arena, unsigned);
+	if (newp != NULL && arena >= narenas) {
+		ret = EFAULT;
+		goto RETURN;
+	} else {
+		arena_t *tarenas[narenas];
+
+		malloc_mutex_lock(&arenas_lock);
+		memcpy(tarenas, arenas, sizeof(arena_t *) * narenas);
+		malloc_mutex_unlock(&arenas_lock);
+
+		if (arena == UINT_MAX) {
+			for (unsigned i = 0; i < narenas; i++) {
+				if (tarenas[i] != NULL)
+					arena_purge_all(tarenas[i]);
+			}
+		} else {
+			assert(arena < narenas);
+			if (tarenas[arena] != NULL)
+				arena_purge_all(tarenas[arena]);
+		}
+	}
+
+	ret = 0;
+RETURN:
+	return (ret);
+}
+
 /******************************************************************************/
 #ifdef JEMALLOC_PROF
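
Because arenas_purge_ctl() treats UINT_MAX as the all-arenas sentinel and
validates any written index against narenas, a caller that purges frequently
can resolve the name to a MIB once and reuse it.  A sketch under the same
assumption as above (unprefixed mallctlnametomib()/mallctlbymib() entry
points); purge_arena() is a hypothetical helper, not part of this change:

#define JEMALLOC_MANGLE
#include <stddef.h>
#include <jemalloc/jemalloc.h>

/* Purge one arena by index; returns 0 or an errno value. */
int
purge_arena(unsigned arena)
{
	static size_t mib[2];
	static size_t miblen = 0;

	if (miblen == 0) {
		size_t len = sizeof(mib) / sizeof(mib[0]);
		int err;

		/* Resolve "arenas.purge" to a MIB once; reuse it after. */
		err = mallctlnametomib("arenas.purge", mib, &len);
		if (err != 0)
			return (err);
		miblen = len;
	}
	return (mallctlbymib(mib, miblen, NULL, NULL, &arena, sizeof(arena)));
}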