Remove all vestiges of chunks.

Remove mallctls:
- opt.lg_chunk
- stats.cactive

This resolves #464.
Jason Evans 2016-10-12 11:49:19 -07:00
parent 63b5657aa5
commit 9acd5cf178
23 changed files with 26 additions and 270 deletions
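Note for consumers of the removed stats.cactive mallctl: the closest remaining summary statistic is stats.active. A minimal migration sketch, assuming an unprefixed mallctl symbol (adjust for je_-prefixed builds); this is an illustration, not part of the commit:

#include <stdint.h>
#include <jemalloc/jemalloc.h>

static size_t
read_active_bytes(void)
{
	uint64_t epoch = 1;
	size_t active, sz;

	/* Advance the epoch so that cached statistics are refreshed. */
	mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));

	sz = sizeof(active);
	if (mallctl("stats.active", &active, &sz, NULL, 0) != 0)
		return (0);
	return (active);
}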

View File

@@ -91,10 +91,10 @@ any of the following arguments (not a definitive list) to 'configure':
 --with-malloc-conf=<malloc_conf>
     Embed <malloc_conf> as a run-time options string that is processed prior to
     the malloc_conf global variable, the /etc/malloc.conf symlink, and the
-    MALLOC_CONF environment variable.  For example, to change the default chunk
-    size to 256 KiB:
-      --with-malloc-conf=lg_chunk:18
+    MALLOC_CONF environment variable.  For example, to change the default decay
+    time to 30 seconds:
+      --with-malloc-conf=decay_time:30

 --disable-cc-silence
     Disable code that silences non-useful compiler warnings.  This is mainly
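The malloc_conf global variable mentioned in the hunk above can carry the same option string from application code; a minimal sketch (the option values are illustrative, using decay_time and narenas, which remain valid after this commit):

#include <stdlib.h>

/* Option string read by jemalloc during initialization. */
const char *malloc_conf = "decay_time:30,narenas:4";

int
main(void)
{
	void *p = malloc(1);

	free(p);
	return (0);
}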

View File

@@ -83,7 +83,6 @@ C_SRCS := $(srcroot)src/jemalloc.c \
 	$(srcroot)src/atomic.c \
 	$(srcroot)src/base.c \
 	$(srcroot)src/bitmap.c \
-	$(srcroot)src/chunk.c \
 	$(srcroot)src/ckh.c \
 	$(srcroot)src/ctl.c \
 	$(srcroot)src/extent.c \
@@ -144,7 +143,6 @@ TESTS_UNIT := \
 	$(srcroot)test/unit/junk.c \
 	$(srcroot)test/unit/junk_alloc.c \
 	$(srcroot)test/unit/junk_free.c \
-	$(srcroot)test/unit/lg_chunk.c \
 	$(srcroot)test/unit/mallctl.c \
 	$(srcroot)test/unit/math.c \
 	$(srcroot)test/unit/mq.c \

View File

@@ -900,19 +900,6 @@ for (i = 0; i < nbins; i++) {
         </para></listitem>
       </varlistentry>

-      <varlistentry id="opt.lg_chunk">
-        <term>
-          <mallctl>opt.lg_chunk</mallctl>
-          (<type>size_t</type>)
-          <literal>r-</literal>
-        </term>
-        <listitem><para>Virtual memory chunk size (log base 2).  If a chunk
-        size outside the supported size range is specified, the size is
-        silently clipped to the minimum/maximum supported size.  The default
-        chunk size is 2 MiB (2^21).
-        </para></listitem>
-      </varlistentry>
-
       <varlistentry id="opt.narenas">
         <term>
           <mallctl>opt.narenas</mallctl>
@@ -1949,24 +1936,6 @@ struct extent_hooks_s {
         option for additional information.</para></listitem>
       </varlistentry>

-      <varlistentry id="stats.cactive">
-        <term>
-          <mallctl>stats.cactive</mallctl>
-          (<type>size_t *</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>Pointer to a counter that contains an approximate count
-        of the current number of bytes in active pages.  The estimate may be
-        high, but never low, because each arena rounds up when computing its
-        contribution to the counter.  Note that the <link
-        linkend="epoch"><mallctl>epoch</mallctl></link> mallctl has no bearing
-        on this counter.  Furthermore, counter consistency is maintained via
-        atomic operations, so it is necessary to use an atomic operation in
-        order to guarantee a consistent read when dereferencing the pointer.
-        </para></listitem>
-      </varlistentry>
-
       <varlistentry id="stats.allocated">
         <term>
           <mallctl>stats.allocated</mallctl>
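For reference, the deleted stats.cactive entry describes a read pattern like the following sketch. It only applies to builds before this commit, assumes an unprefixed mallctl, and substitutes a GCC/Clang atomic builtin for jemalloc's internal atomic_read_z():

#include <stddef.h>
#include <jemalloc/jemalloc.h>

static size_t
read_cactive_pre_removal(void)
{
	size_t *cactive;
	size_t sz = sizeof(cactive);

	if (mallctl("stats.cactive", &cactive, &sz, NULL, 0) != 0)
		return (0);
	/* The counter is live, so dereference it with an atomic load. */
	return (__atomic_load_n(cactive, __ATOMIC_RELAXED));
}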

View File

@@ -1,36 +0,0 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-/*
- * Size and alignment of memory chunks that are allocated by the OS's virtual
- * memory system.
- */
-#define	LG_CHUNK_DEFAULT	21
-
-/* Return the smallest chunk multiple that is >= s. */
-#define	CHUNK_CEILING(s)						\
-	(((s) + chunksize_mask) & ~chunksize_mask)
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-extern size_t	opt_lg_chunk;
-extern const char	*opt_dss;
-
-extern size_t	chunksize;
-extern size_t	chunksize_mask; /* (chunksize - 1). */
-extern size_t	chunk_npages;
-
-bool	chunk_boot(void);
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
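A standalone sketch of the rounding the deleted CHUNK_CEILING() macro performed, with the mask passed explicitly and the 2 MiB figure taken from LG_CHUNK_DEFAULT above:

#include <stdio.h>
#include <stddef.h>

/* Smallest multiple of (mask + 1) that is >= s; mask must be 2^n - 1. */
#define CHUNK_CEILING(s, mask)	(((s) + (mask)) & ~(mask))

int
main(void)
{
	size_t chunksize_mask = (1UL << 21) - 1;	/* 2 MiB chunks. */

	/* Both 1 byte and exactly 2 MiB round to a single 2 MiB chunk. */
	printf("%zu\n", CHUNK_CEILING((size_t)1, chunksize_mask));
	printf("%zu\n", CHUNK_CEILING((size_t)1 << 21, chunksize_mask));
	return (0);
}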

View File

@@ -21,6 +21,8 @@ extern const char *dss_prec_names[];
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS

+extern const char	*opt_dss;
+
 dss_prec_t	extent_dss_prec_get(tsdn_t *tsdn);
 bool	extent_dss_prec_set(tsdn_t *tsdn, dss_prec_t dss_prec);
 void	*extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr,

View File

@@ -363,7 +363,6 @@ typedef unsigned szind_t;
 #include "jemalloc/internal/base.h"
 #include "jemalloc/internal/rtree.h"
 #include "jemalloc/internal/pages.h"
-#include "jemalloc/internal/chunk.h"
 #include "jemalloc/internal/large.h"
 #include "jemalloc/internal/tcache.h"
 #include "jemalloc/internal/hash.h"
@@ -397,7 +396,6 @@ typedef unsigned szind_t;
 #include "jemalloc/internal/base.h"
 #include "jemalloc/internal/rtree.h"
 #include "jemalloc/internal/pages.h"
-#include "jemalloc/internal/chunk.h"
 #include "jemalloc/internal/large.h"
 #include "jemalloc/internal/tcache.h"
 #include "jemalloc/internal/hash.h"
@@ -483,7 +481,6 @@ void	jemalloc_postfork_child(void);
 #include "jemalloc/internal/base.h"
 #include "jemalloc/internal/rtree.h"
 #include "jemalloc/internal/pages.h"
-#include "jemalloc/internal/chunk.h"
 #include "jemalloc/internal/large.h"
 #include "jemalloc/internal/tcache.h"
 #include "jemalloc/internal/hash.h"
@@ -512,7 +509,6 @@ void	jemalloc_postfork_child(void);
 #include "jemalloc/internal/extent.h"
 #include "jemalloc/internal/base.h"
 #include "jemalloc/internal/pages.h"
-#include "jemalloc/internal/chunk.h"
 #include "jemalloc/internal/large.h"

 #ifndef JEMALLOC_ENABLE_INLINE

View File

@@ -110,10 +110,6 @@ bootstrap_free
 bootstrap_malloc
 bt_init
 buferror
-chunk_boot
-chunk_npages
-chunksize
-chunksize_mask
 ckh_count
 ckh_delete
 ckh_insert
@@ -306,7 +302,6 @@ opt_dss
 opt_junk
 opt_junk_alloc
 opt_junk_free
-opt_lg_chunk
 opt_lg_prof_interval
 opt_lg_prof_sample
 opt_lg_tcache_max
@@ -430,10 +425,6 @@ size2index
 size2index_compute
 size2index_lookup
 size2index_tab
-stats_cactive
-stats_cactive_add
-stats_cactive_get
-stats_cactive_sub
 stats_print
 tcache_alloc_easy
 tcache_alloc_large

View File

@@ -118,8 +118,6 @@ struct arena_stats_s {

 extern bool	opt_stats_print;

-extern size_t	stats_cactive;
-
 void	stats_print(void (*write)(void *, const char *), void *cbopaque,
     const char *opts);

@@ -127,44 +125,5 @@ void	stats_print(void (*write)(void *, const char *), void *cbopaque,
 /******************************************************************************/
 #ifdef JEMALLOC_H_INLINES

-#ifndef JEMALLOC_ENABLE_INLINE
-size_t	stats_cactive_get(void);
-void	stats_cactive_add(size_t size);
-void	stats_cactive_sub(size_t size);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_STATS_C_))
-JEMALLOC_INLINE size_t
-stats_cactive_get(void)
-{
-
-	return (atomic_read_z(&stats_cactive));
-}
-
-JEMALLOC_INLINE void
-stats_cactive_add(size_t size)
-{
-	UNUSED size_t cactive;
-
-	assert(size > 0);
-	assert((size & chunksize_mask) == 0);
-
-	cactive = atomic_add_z(&stats_cactive, size);
-	assert(cactive - size < cactive);
-}
-
-JEMALLOC_INLINE void
-stats_cactive_sub(size_t size)
-{
-	UNUSED size_t cactive;
-
-	assert(size > 0);
-	assert((size & chunksize_mask) == 0);
-
-	cactive = atomic_sub_z(&stats_cactive, size);
-	assert(cactive + size > cactive);
-}
-#endif
-
 #endif /* JEMALLOC_H_INLINES */
 /******************************************************************************/

View File

@@ -40,7 +40,6 @@
     <ClInclude Include="..\..\..\..\include\jemalloc\internal\atomic.h" />
     <ClInclude Include="..\..\..\..\include\jemalloc\internal\base.h" />
     <ClInclude Include="..\..\..\..\include\jemalloc\internal\bitmap.h" />
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\chunk.h" />
     <ClInclude Include="..\..\..\..\include\jemalloc\internal\ckh.h" />
     <ClInclude Include="..\..\..\..\include\jemalloc\internal\ctl.h" />
     <ClInclude Include="..\..\..\..\include\jemalloc\internal\extent.h" />
@@ -93,7 +92,6 @@
     <ClCompile Include="..\..\..\..\src\atomic.c" />
     <ClCompile Include="..\..\..\..\src\base.c" />
     <ClCompile Include="..\..\..\..\src\bitmap.c" />
-    <ClCompile Include="..\..\..\..\src\chunk.c" />
     <ClCompile Include="..\..\..\..\src\ckh.c" />
     <ClCompile Include="..\..\..\..\src\ctl.c" />
     <ClCompile Include="..\..\..\..\src\extent.c" />

View File

@@ -59,9 +59,6 @@
     <ClInclude Include="..\..\..\..\include\jemalloc\internal\bitmap.h">
       <Filter>Header Files\internal</Filter>
     </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\chunk.h">
-      <Filter>Header Files\internal</Filter>
-    </ClInclude>
     <ClInclude Include="..\..\..\..\include\jemalloc\internal\ckh.h">
       <Filter>Header Files\internal</Filter>
     </ClInclude>
@@ -190,9 +187,6 @@
     <ClCompile Include="..\..\..\..\src\bitmap.c">
       <Filter>Source Files</Filter>
     </ClCompile>
-    <ClCompile Include="..\..\..\..\src\chunk.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
     <ClCompile Include="..\..\..\..\src\ckh.c">
       <Filter>Source Files</Filter>
     </ClCompile>

View File

@@ -229,13 +229,6 @@ static void
 arena_nactive_add(arena_t *arena, size_t add_pages)
 {

-	if (config_stats) {
-		size_t cactive_add = CHUNK_CEILING((arena->nactive +
-		    add_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
-		    LG_PAGE);
-		if (cactive_add != 0)
-			stats_cactive_add(cactive_add);
-	}
 	arena->nactive += add_pages;
 }

@@ -244,12 +237,6 @@ arena_nactive_sub(arena_t *arena, size_t sub_pages)
 {

 	assert(arena->nactive >= sub_pages);
-	if (config_stats) {
-		size_t cactive_sub = CHUNK_CEILING(arena->nactive << LG_PAGE) -
-		    CHUNK_CEILING((arena->nactive - sub_pages) << LG_PAGE);
-		if (cactive_sub != 0)
-			stats_cactive_sub(cactive_sub);
-	}
 	arena->nactive -= sub_pages;
 }
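A worked illustration of the accounting being deleted here: stats_cactive only changed when the chunk-rounded active size crossed a chunk boundary, so most page-granular updates contributed zero. The page and chunk shifts below are assumptions (4 KiB pages, the old 2 MiB default chunk):

#include <stdio.h>
#include <stddef.h>

#define LG_PAGE			12			/* assumed 4 KiB pages */
#define CHUNKSIZE_MASK		((1UL << 21) - 1)	/* old 2 MiB default */
#define CHUNK_CEILING(s)	(((s) + CHUNKSIZE_MASK) & ~CHUNKSIZE_MASK)

int
main(void)
{
	size_t nactive = 10;	/* pages active before the update */
	size_t add_pages = 5;
	size_t cactive_add = CHUNK_CEILING((nactive + add_pages) << LG_PAGE) -
	    CHUNK_CEILING(nactive << LG_PAGE);

	/* 15 pages and 10 pages both round up to one chunk, so the delta is 0. */
	printf("cactive_add = %zu\n", cactive_add);
	return (0);
}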

View File

@@ -41,7 +41,7 @@ static extent_t *
 base_extent_alloc(tsdn_t *tsdn, size_t minsize)
 {
 	extent_t *extent;
-	size_t csize, nsize;
+	size_t esize, nsize;
 	void *addr;

 	malloc_mutex_assert_owner(tsdn, &base_mtx);
@@ -49,7 +49,7 @@ base_extent_alloc(tsdn_t *tsdn, size_t minsize)
 	extent = base_extent_try_alloc(tsdn);
 	/* Allocate enough space to also carve an extent out if necessary. */
 	nsize = (extent == NULL) ? CACHELINE_CEILING(sizeof(extent_t)) : 0;
-	csize = CHUNK_CEILING(minsize + nsize);
+	esize = PAGE_CEILING(minsize + nsize);
 	/*
 	 * Directly call extent_alloc_mmap() because it's critical to allocate
 	 * untouched demand-zeroed virtual memory.
@@ -57,24 +57,24 @@ base_extent_alloc(tsdn_t *tsdn, size_t minsize)
 	{
 		bool zero = true;
 		bool commit = true;
-		addr = extent_alloc_mmap(NULL, csize, PAGE, &zero, &commit);
+		addr = extent_alloc_mmap(NULL, esize, PAGE, &zero, &commit);
 	}
 	if (addr == NULL) {
 		if (extent != NULL)
 			base_extent_dalloc(tsdn, extent);
 		return (NULL);
 	}
-	base_mapped += csize;
+	base_mapped += esize;
 	if (extent == NULL) {
 		extent = (extent_t *)addr;
 		addr = (void *)((uintptr_t)addr + nsize);
-		csize -= nsize;
+		esize -= nsize;
 		if (config_stats) {
 			base_allocated += nsize;
 			base_resident += PAGE_CEILING(nsize);
 		}
 	}
-	extent_init(extent, NULL, addr, csize, 0, true, true, true, false);
+	extent_init(extent, NULL, addr, esize, 0, true, true, true, false);
 	return (extent);
 }

View File

@@ -1,51 +0,0 @@
-#define	JEMALLOC_CHUNK_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-
-/******************************************************************************/
-/* Data. */
-
-const char	*opt_dss = DSS_DEFAULT;
-size_t		opt_lg_chunk = 0;
-
-/* Various chunk-related settings. */
-size_t		chunksize;
-size_t		chunksize_mask; /* (chunksize - 1). */
-size_t		chunk_npages;
-
-/******************************************************************************/
-
-bool
-chunk_boot(void)
-{
-#ifdef _WIN32
-	SYSTEM_INFO info;
-	GetSystemInfo(&info);
-
-	/*
-	 * Verify actual page size is equal to or an integral multiple of
-	 * configured page size.
-	 */
-	if (info.dwPageSize & ((1U << LG_PAGE) - 1))
-		return (true);
-
-	/*
-	 * Configure chunksize (if not set) to match granularity (usually 64K),
-	 * so pages_map will always take fast path.
-	 */
-	if (!opt_lg_chunk) {
-		opt_lg_chunk = ffs_u((unsigned)info.dwAllocationGranularity)
-		    - 1;
-	}
-#else
-	if (!opt_lg_chunk)
-		opt_lg_chunk = LG_CHUNK_DEFAULT;
-#endif
-
-	/* Set variables according to the value of opt_lg_chunk. */
-	chunksize = (ZU(1) << opt_lg_chunk);
-	assert(chunksize >= PAGE);
-	chunksize_mask = chunksize - 1;
-	chunk_npages = (chunksize >> LG_PAGE);
-
-	return (false);
-}

View File

@@ -88,7 +88,6 @@ CTL_PROTO(config_utrace)
 CTL_PROTO(config_xmalloc)
 CTL_PROTO(opt_abort)
 CTL_PROTO(opt_dss)
-CTL_PROTO(opt_lg_chunk)
 CTL_PROTO(opt_narenas)
 CTL_PROTO(opt_decay_time)
 CTL_PROTO(opt_stats_print)
@@ -177,7 +176,6 @@ CTL_PROTO(stats_arenas_i_nmadvise)
 CTL_PROTO(stats_arenas_i_purged)
 CTL_PROTO(stats_arenas_i_metadata)
 INDEX_PROTO(stats_arenas_i)
-CTL_PROTO(stats_cactive)
 CTL_PROTO(stats_allocated)
 CTL_PROTO(stats_active)
 CTL_PROTO(stats_metadata)
@@ -244,7 +242,6 @@ static const ctl_named_node_t config_node[] = {
 static const ctl_named_node_t opt_node[] = {
 	{NAME("abort"),		CTL(opt_abort)},
 	{NAME("dss"),		CTL(opt_dss)},
-	{NAME("lg_chunk"),	CTL(opt_lg_chunk)},
 	{NAME("narenas"),	CTL(opt_narenas)},
 	{NAME("decay_time"),	CTL(opt_decay_time)},
 	{NAME("stats_print"),	CTL(opt_stats_print)},
@@ -410,7 +407,6 @@ static const ctl_indexed_node_t stats_arenas_node[] = {
 };

 static const ctl_named_node_t stats_node[] = {
-	{NAME("cactive"),	CTL(stats_cactive)},
 	{NAME("allocated"),	CTL(stats_allocated)},
 	{NAME("active"),	CTL(stats_active)},
 	{NAME("metadata"),	CTL(stats_metadata)},
@@ -1136,7 +1132,6 @@ CTL_RO_CONFIG_GEN(config_xmalloc, bool)

 CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
 CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
-CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
 CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned)
 CTL_RO_NL_GEN(opt_decay_time, opt_decay_time, ssize_t)
 CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
@@ -1888,7 +1883,6 @@ CTL_RO_NL_CGEN(config_prof, lg_prof_sample, lg_prof_sample, size_t)

 /******************************************************************************/

-CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *)
 CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t)
 CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t)
 CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats.metadata, size_t)

View File

@@ -3,6 +3,8 @@

 /******************************************************************************/
 /* Data. */

+const char	*opt_dss = DSS_DEFAULT;
+
 const char	*dss_prec_names[] = {
 	"disabled",
 	"primary",

View File

@@ -1024,8 +1024,6 @@ malloc_conf_init(void)
 			}

 			CONF_HANDLE_BOOL(opt_abort, "abort", true)
-			CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE,
-			    (sizeof(size_t) << 3) - 1, true)
 			if (strncmp("dss", k, klen) == 0) {
 				int i;
 				bool match = false;
@@ -1176,8 +1174,6 @@ malloc_init_hard_a0_locked()
 	pages_boot();
 	if (base_boot())
 		return (true);
-	if (chunk_boot())
-		return (true);
 	if (extent_boot())
 		return (true);
 	if (ctl_boot())

View File

@@ -30,8 +30,6 @@

 bool	opt_stats_print = false;

-size_t	stats_cactive = 0;
-
 /******************************************************************************/
 /* Function prototypes for non-inline static functions. */

@@ -416,7 +414,6 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 		malloc_cprintf(write_cb, cbopaque,
 		    "Run-time option settings:\n");
 		OPT_WRITE_BOOL(abort)
-		OPT_WRITE_SIZE_T(lg_chunk)
 		OPT_WRITE_CHAR_P(dss)
 		OPT_WRITE_UNSIGNED(narenas)
 		OPT_WRITE_CHAR_P(purge)
@@ -486,16 +483,11 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 				    "Average profile dump interval: N/A\n");
 			}
 		}
-		CTL_GET("opt.lg_chunk", &sv, size_t);
-		malloc_cprintf(write_cb, cbopaque,
-		    "Chunk size: %zu (2^%zu)\n", (ZU(1) << sv), sv);
 	}

 	if (config_stats) {
-		size_t *cactive;
 		size_t allocated, active, metadata, resident, mapped, retained;

-		CTL_GET("stats.cactive", &cactive, size_t *);
 		CTL_GET("stats.allocated", &allocated, size_t);
 		CTL_GET("stats.active", &active, size_t);
 		CTL_GET("stats.metadata", &metadata, size_t);
@@ -506,9 +498,6 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 		    "Allocated: %zu, active: %zu, metadata: %zu,"
 		    " resident: %zu, mapped: %zu, retained: %zu\n",
 		    allocated, active, metadata, resident, mapped, retained);
-		malloc_cprintf(write_cb, cbopaque,
-		    "Current active ceiling: %zu\n",
-		    atomic_read_z(cactive));

 		if (merged) {
 			unsigned narenas;

View File

@@ -140,7 +140,7 @@ TEST_BEGIN(test_junk_large)
 {

 	test_skip_if(!config_fill);
-	test_junk(SMALL_MAXCLASS+1, chunksize*2);
+	test_junk(SMALL_MAXCLASS+1, (1U << (LG_LARGE_MINCLASS+1)));
 }
 TEST_END
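The same substitution of chunksize with (1U << LG_LARGE_MINCLASS) recurs in the prof_gdump, stats, and zero tests below; the constant is the smallest size class served by the large-allocation path, so it fills the role chunksize used to play in these tests. A hypothetical standalone check in the style of the test harness (test_large_minclass_alloc and the nallocx() assertion are illustrative, not part of this commit):

#include "test/jemalloc_test.h"

TEST_BEGIN(test_large_minclass_alloc)
{
	size_t sz = (1U << LG_LARGE_MINCLASS);
	void *p;

	p = mallocx(sz, 0);
	assert_ptr_not_null(p, "Unexpected mallocx() failure");
	assert_zu_ge(nallocx(sz, 0), sz,
	    "Usable size should cover the requested size");
	dallocx(p, 0);
}
TEST_END

int
main(void)
{

	return (test(
	    test_large_minclass_alloc));
}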

View File

@@ -1,26 +0,0 @@
-#include "test/jemalloc_test.h"
-
-/*
- * Make sure that opt.lg_chunk clamping is sufficient.  In practice, this test
- * program will fail a debug assertion during initialization and abort (rather
- * than the test soft-failing) if clamping is insufficient.
- */
-const char *malloc_conf = "lg_chunk:0";
-
-TEST_BEGIN(test_lg_chunk_clamp)
-{
-	void *p;
-
-	p = mallocx(1, 0);
-	assert_ptr_not_null(p, "Unexpected mallocx() failure");
-	dallocx(p, 0);
-}
-TEST_END
-
-int
-main(void)
-{
-
-	return (test(
-	    test_lg_chunk_clamp));
-}

View File

@@ -160,7 +160,6 @@ TEST_BEGIN(test_mallctl_opt)
 } while (0)

 	TEST_MALLCTL_OPT(bool, abort, always);
-	TEST_MALLCTL_OPT(size_t, lg_chunk, always);
 	TEST_MALLCTL_OPT(const char *, dss, always);
 	TEST_MALLCTL_OPT(unsigned, narenas, always);
 	TEST_MALLCTL_OPT(ssize_t, decay_time, always);

View File

@@ -34,12 +34,12 @@ TEST_BEGIN(test_gdump)
 	prof_dump_open = prof_dump_open_intercept;

 	did_prof_dump_open = false;
-	p = mallocx(chunksize, 0);
+	p = mallocx((1U << LG_LARGE_MINCLASS), 0);
 	assert_ptr_not_null(p, "Unexpected mallocx() failure");
 	assert_true(did_prof_dump_open, "Expected a profile dump");

 	did_prof_dump_open = false;
-	q = mallocx(chunksize, 0);
+	q = mallocx((1U << LG_LARGE_MINCLASS), 0);
 	assert_ptr_not_null(q, "Unexpected mallocx() failure");
 	assert_true(did_prof_dump_open, "Expected a profile dump");

@@ -50,7 +50,7 @@ TEST_BEGIN(test_gdump)
 	    "Unexpected mallctl failure while disabling prof.gdump");
 	assert(gdump_old);
 	did_prof_dump_open = false;
-	r = mallocx(chunksize, 0);
+	r = mallocx((1U << LG_LARGE_MINCLASS), 0);
 	assert_ptr_not_null(q, "Unexpected mallocx() failure");
 	assert_false(did_prof_dump_open, "Unexpected profile dump");

@@ -61,7 +61,7 @@ TEST_BEGIN(test_gdump)
 	    "Unexpected mallctl failure while enabling prof.gdump");
 	assert(!gdump_old);
 	did_prof_dump_open = false;
-	s = mallocx(chunksize, 0);
+	s = mallocx((1U << LG_LARGE_MINCLASS), 0);
 	assert_ptr_not_null(q, "Unexpected mallocx() failure");
 	assert_true(did_prof_dump_open, "Expected a profile dump");

View File

@@ -2,14 +2,9 @@

 TEST_BEGIN(test_stats_summary)
 {
-	size_t *cactive;
 	size_t sz, allocated, active, resident, mapped;
 	int expected = config_stats ? 0 : ENOENT;

-	sz = sizeof(cactive);
-	assert_d_eq(mallctl("stats.cactive", &cactive, &sz, NULL, 0), expected,
-	    "Unexpected mallctl() result");
-
 	sz = sizeof(size_t);
 	assert_d_eq(mallctl("stats.allocated", &allocated, &sz, NULL, 0),
 	    expected, "Unexpected mallctl() result");
@@ -21,8 +16,6 @@ TEST_BEGIN(test_stats_summary)
 	    "Unexpected mallctl() result");

 	if (config_stats) {
-		assert_zu_le(active, *cactive,
-		    "active should be no larger than cactive");
 		assert_zu_le(allocated, active,
 		    "allocated should be no larger than active");
 		assert_zu_lt(active, resident,
@@ -88,12 +81,14 @@ TEST_BEGIN(test_stats_arenas_summary)
 	little = mallocx(SMALL_MAXCLASS, 0);
 	assert_ptr_not_null(little, "Unexpected mallocx() failure");
-	large = mallocx(chunksize, 0);
+	large = mallocx((1U << LG_LARGE_MINCLASS), 0);
 	assert_ptr_not_null(large, "Unexpected mallocx() failure");

 	dallocx(little, 0);
 	dallocx(large, 0);

+	assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
+	    config_tcache ? 0 : ENOENT, "Unexpected mallctl() result");
 	assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
 	    "Unexpected mallctl() failure");
@@ -197,7 +192,7 @@ TEST_BEGIN(test_stats_arenas_large)
 	assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
 	    0, "Unexpected mallctl() failure");

-	p = mallocx(chunksize, 0);
+	p = mallocx((1U << LG_LARGE_MINCLASS), 0);
 	assert_ptr_not_null(p, "Unexpected mallocx() failure");

 	assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
@@ -336,7 +331,7 @@ TEST_BEGIN(test_stats_arenas_lextents)
 		assert_u64_ge(nmalloc, ndalloc,
 		    "nmalloc should be at least as large as ndalloc");
 		assert_u64_gt(curlextents, 0,
-		    "At least one chunk should be currently allocated");
+		    "At least one extent should be currently allocated");
 	}

 	dallocx(p, 0);

View File

@@ -57,7 +57,7 @@ TEST_BEGIN(test_zero_large)
 {

 	test_skip_if(!config_fill);
-	test_zero(SMALL_MAXCLASS+1, chunksize*2);
+	test_zero(SMALL_MAXCLASS+1, (1U << (LG_LARGE_MINCLASS+1)));
 }
 TEST_END