diff --git a/INSTALL b/INSTALL
index 00c428b1..a31871b0 100644
--- a/INSTALL
+++ b/INSTALL
@@ -91,10 +91,10 @@ any of the following arguments (not a definitive list) to 'configure':
 --with-malloc-conf=<malloc_conf>
     Embed <malloc_conf> as a run-time options string that is processed prior
     to the malloc_conf global variable, the /etc/malloc.conf symlink, and the
-    MALLOC_CONF environment variable.  For example, to change the default chunk
-    size to 256 KiB:
+    MALLOC_CONF environment variable.  For example, to change the default decay
+    time to 30 seconds:
 
-      --with-malloc-conf=lg_chunk:18
+      --with-malloc-conf=decay_time:30
 
 --disable-cc-silence
     Disable code that silences non-useful compiler warnings.  This is mainly
diff --git a/Makefile.in b/Makefile.in
index 5feb71d1..f6f06211 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -83,7 +83,6 @@ C_SRCS := $(srcroot)src/jemalloc.c \
 	$(srcroot)src/atomic.c \
 	$(srcroot)src/base.c \
 	$(srcroot)src/bitmap.c \
-	$(srcroot)src/chunk.c \
 	$(srcroot)src/ckh.c \
 	$(srcroot)src/ctl.c \
 	$(srcroot)src/extent.c \
@@ -144,7 +143,6 @@ TESTS_UNIT := \
 	$(srcroot)test/unit/junk.c \
 	$(srcroot)test/unit/junk_alloc.c \
 	$(srcroot)test/unit/junk_free.c \
-	$(srcroot)test/unit/lg_chunk.c \
 	$(srcroot)test/unit/mallctl.c \
 	$(srcroot)test/unit/math.c \
 	$(srcroot)test/unit/mq.c \
diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in
index f5a72473..5ba44d23 100644
--- a/doc/jemalloc.xml.in
+++ b/doc/jemalloc.xml.in
@@ -900,19 +900,6 @@ for (i = 0; i < nbins; i++) {
         </para></listitem>
       </varlistentry>
 
-      <varlistentry id="opt.lg_chunk">
-        <term>
-          <mallctl>opt.lg_chunk</mallctl>
-          (<type>size_t</type>)
-          <literal>r-</literal>
-        </term>
-        <listitem><para>Virtual memory chunk size (log base 2).  If a chunk
-        size outside the supported size range is specified, the size is
-        silently clipped to the minimum/maximum supported size.  The default
-        chunk size is 2 MiB (2^21).</para></listitem>
-      </varlistentry>
-
       <varlistentry id="opt.narenas">
         <term>
           <mallctl>opt.narenas</mallctl>
@@ -1949,24 +1936,6 @@ struct extent_hooks_s {
         option for additional information.</para></listitem>
       </varlistentry>
 
-      <varlistentry id="stats.cactive">
-        <term>
-          <mallctl>stats.cactive</mallctl>
-          (<type>size_t *</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>Pointer to a counter that contains an approximate count
-        of the current number of bytes in active pages.  The estimate may be
-        high, but never low, because each arena rounds up when computing its
-        contribution to the counter.  Note that the epoch mallctl has no bearing
-        on this counter.  Furthermore, counter consistency is maintained via
-        atomic operations, so it is necessary to use an atomic operation in
-        order to guarantee a consistent read when dereferencing the pointer.
-        </para></listitem>
-      </varlistentry>
-
       <varlistentry id="stats.allocated">
         <term>
           <mallctl>stats.allocated</mallctl>
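A minimal sketch, not part of the patch: the documentation hunks above remove the only two mallctls dropped by this change, opt.lg_chunk and stats.cactive. A consumer that previously dereferenced the stats.cactive pointer can read the surviving summary statistics instead. This assumes an unprefixed build with --enable-stats; error handling is minimal, and the mallctl names used here ("epoch", "stats.allocated", "stats.active", "stats.resident") all appear elsewhere in this diff or in the existing public API.

#include <stdio.h>
#include <stdint.h>
#include <jemalloc/jemalloc.h>

static void
print_summary_stats(void)
{
	uint64_t epoch = 1;
	size_t sz = sizeof(size_t);
	size_t allocated, active, resident;

	/* Refresh the statistics snapshot before reading it. */
	mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));

	if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0 &&
	    mallctl("stats.active", &active, &sz, NULL, 0) == 0 &&
	    mallctl("stats.resident", &resident, &sz, NULL, 0) == 0) {
		printf("allocated: %zu, active: %zu, resident: %zu\n",
		    allocated, active, resident);
	}
}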
diff --git a/include/jemalloc/internal/chunk.h b/include/jemalloc/internal/chunk.h
deleted file mode 100644
index 7a5ebbca..00000000
--- a/include/jemalloc/internal/chunk.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-/*
- * Size and alignment of memory chunks that are allocated by the OS's virtual
- * memory system.
- */
-#define	LG_CHUNK_DEFAULT	21
-
-/* Return the smallest chunk multiple that is >= s. */
-#define	CHUNK_CEILING(s)						\
-	(((s) + chunksize_mask) & ~chunksize_mask)
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-extern size_t	opt_lg_chunk;
-extern const char	*opt_dss;
-
-extern size_t	chunksize;
-extern size_t	chunksize_mask; /* (chunksize - 1). */
-extern size_t	chunk_npages;
-
-bool	chunk_boot(void);
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
diff --git a/include/jemalloc/internal/extent_dss.h b/include/jemalloc/internal/extent_dss.h
index 43573775..0aabc2ec 100644
--- a/include/jemalloc/internal/extent_dss.h
+++ b/include/jemalloc/internal/extent_dss.h
@@ -21,6 +21,8 @@ extern const char *dss_prec_names[];
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
+extern const char	*opt_dss;
+
 dss_prec_t	extent_dss_prec_get(tsdn_t *tsdn);
 bool	extent_dss_prec_set(tsdn_t *tsdn, dss_prec_t dss_prec);
 void	*extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr,
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
index ba8a9296..b69ddb18 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -363,7 +363,6 @@ typedef unsigned szind_t;
 #include "jemalloc/internal/base.h"
 #include "jemalloc/internal/rtree.h"
 #include "jemalloc/internal/pages.h"
-#include "jemalloc/internal/chunk.h"
 #include "jemalloc/internal/large.h"
 #include "jemalloc/internal/tcache.h"
 #include "jemalloc/internal/hash.h"
@@ -397,7 +396,6 @@ typedef unsigned szind_t;
 #include "jemalloc/internal/base.h"
 #include "jemalloc/internal/rtree.h"
 #include "jemalloc/internal/pages.h"
-#include "jemalloc/internal/chunk.h"
 #include "jemalloc/internal/large.h"
 #include "jemalloc/internal/tcache.h"
 #include "jemalloc/internal/hash.h"
@@ -483,7 +481,6 @@ void	jemalloc_postfork_child(void);
 #include "jemalloc/internal/base.h"
 #include "jemalloc/internal/rtree.h"
 #include "jemalloc/internal/pages.h"
-#include "jemalloc/internal/chunk.h"
 #include "jemalloc/internal/large.h"
 #include "jemalloc/internal/tcache.h"
 #include "jemalloc/internal/hash.h"
@@ -512,7 +509,6 @@ void	jemalloc_postfork_child(void);
 #include "jemalloc/internal/extent.h"
 #include "jemalloc/internal/base.h"
 #include "jemalloc/internal/pages.h"
-#include "jemalloc/internal/chunk.h"
 #include "jemalloc/internal/large.h"
 
 #ifndef JEMALLOC_ENABLE_INLINE
diff --git a/include/jemalloc/internal/private_symbols.txt b/include/jemalloc/internal/private_symbols.txt
index e52e7fed..d1f39cff 100644
--- a/include/jemalloc/internal/private_symbols.txt
+++ b/include/jemalloc/internal/private_symbols.txt
@@ -110,10 +110,6 @@ bootstrap_free
 bootstrap_malloc
 bt_init
 buferror
-chunk_boot
-chunk_npages
-chunksize
-chunksize_mask
 ckh_count
 ckh_delete
 ckh_insert
@@ -306,7 +302,6 @@ opt_dss
 opt_junk
 opt_junk_alloc
 opt_junk_free
-opt_lg_chunk
 opt_lg_prof_interval
 opt_lg_prof_sample
 opt_lg_tcache_max
@@ -430,10 +425,6 @@ size2index
 size2index_compute
 size2index_lookup
 size2index_tab
-stats_cactive
-stats_cactive_add
-stats_cactive_get
-stats_cactive_sub
 stats_print
 tcache_alloc_easy
 tcache_alloc_large
diff --git a/include/jemalloc/internal/stats.h b/include/jemalloc/internal/stats.h
index da019605..52279f56 100644
--- a/include/jemalloc/internal/stats.h
+++ b/include/jemalloc/internal/stats.h
@@ -118,8 +118,6 @@ struct arena_stats_s {
 
 extern bool	opt_stats_print;
 
-extern size_t	stats_cactive;
-
 void	stats_print(void (*write)(void *, const char *), void *cbopaque,
     const char *opts);
 
@@ -127,44 +125,5 @@ void	stats_print(void (*write)(void *, const char *), void *cbopaque,
 /******************************************************************************/
 #ifdef JEMALLOC_H_INLINES
 
-#ifndef JEMALLOC_ENABLE_INLINE
-size_t	stats_cactive_get(void);
-void	stats_cactive_add(size_t size);
-void	stats_cactive_sub(size_t size);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_STATS_C_))
-JEMALLOC_INLINE size_t
-stats_cactive_get(void)
-{
-
-	return (atomic_read_z(&stats_cactive));
-}
-
-JEMALLOC_INLINE void
-stats_cactive_add(size_t size)
-{
-	UNUSED size_t cactive;
-
-	assert(size > 0);
-	assert((size & chunksize_mask) == 0);
-
-	cactive = atomic_add_z(&stats_cactive, size);
-	assert(cactive - size < cactive);
-}
-
-JEMALLOC_INLINE void
-stats_cactive_sub(size_t size)
-{
-	UNUSED size_t cactive;
-
-	assert(size > 0);
-	assert((size & chunksize_mask) == 0);
-
-	cactive = atomic_sub_z(&stats_cactive, size);
-	assert(cactive + size > cactive);
-}
-#endif
-
 #endif /* JEMALLOC_H_INLINES */
 /******************************************************************************/
diff --git a/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj b/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj
index da75a968..e5ecb351 100644
--- a/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj
+++ b/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj
@@ -40,7 +40,6 @@
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\chunk.h" />
@@ -93,7 +92,6 @@
-    <ClCompile Include="..\..\..\..\src\chunk.c" />
@@ -395,4 +393,4 @@
-</Project>
\ No newline at end of file
+</Project>
diff --git a/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters b/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters
index 57395e70..74b45112 100644
--- a/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters
+++ b/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters
@@ -59,9 +59,6 @@
       <Filter>Header Files\internal</Filter>
     </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\chunk.h">
-      <Filter>Header Files\internal</Filter>
-    </ClInclude>
@@ -190,9 +187,6 @@
       <Filter>Source Files</Filter>
     </ClCompile>
-    <ClCompile Include="..\..\..\..\src\chunk.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
@@ -257,4 +251,4 @@
       <Filter>Source Files</Filter>
     </ClCompile>
   </ItemGroup>
-</Project>
\ No newline at end of file
+</Project>
diff --git a/src/arena.c b/src/arena.c
index 3de02373..2b8aead7 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -229,13 +229,6 @@ static void
 arena_nactive_add(arena_t *arena, size_t add_pages)
 {
 
-	if (config_stats) {
-		size_t cactive_add = CHUNK_CEILING((arena->nactive +
-		    add_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
-		    LG_PAGE);
-		if (cactive_add != 0)
-			stats_cactive_add(cactive_add);
-	}
 	arena->nactive += add_pages;
 }
 
@@ -244,12 +237,6 @@ arena_nactive_sub(arena_t *arena, size_t sub_pages)
 {
 
 	assert(arena->nactive >= sub_pages);
-	if (config_stats) {
-		size_t cactive_sub = CHUNK_CEILING(arena->nactive << LG_PAGE) -
-		    CHUNK_CEILING((arena->nactive - sub_pages) << LG_PAGE);
-		if (cactive_sub != 0)
-			stats_cactive_sub(cactive_sub);
-	}
 	arena->nactive -= sub_pages;
 }
 
diff --git a/src/base.c b/src/base.c
index 667786e1..9c3f36cd 100644
--- a/src/base.c
+++ b/src/base.c
@@ -41,7 +41,7 @@ static extent_t *
 base_extent_alloc(tsdn_t *tsdn, size_t minsize)
 {
 	extent_t *extent;
-	size_t csize, nsize;
+	size_t esize, nsize;
 	void *addr;
 
 	malloc_mutex_assert_owner(tsdn, &base_mtx);
@@ -49,7 +49,7 @@ base_extent_alloc(tsdn_t *tsdn, size_t minsize)
 	extent = base_extent_try_alloc(tsdn);
 	/* Allocate enough space to also carve an extent out if necessary. */
 	nsize = (extent == NULL) ? CACHELINE_CEILING(sizeof(extent_t)) : 0;
-	csize = CHUNK_CEILING(minsize + nsize);
+	esize = PAGE_CEILING(minsize + nsize);
 	/*
 	 * Directly call extent_alloc_mmap() because it's critical to allocate
 	 * untouched demand-zeroed virtual memory.
@@ -57,24 +57,24 @@
 	{
 		bool zero = true;
 		bool commit = true;
-		addr = extent_alloc_mmap(NULL, csize, PAGE, &zero, &commit);
+		addr = extent_alloc_mmap(NULL, esize, PAGE, &zero, &commit);
 	}
 	if (addr == NULL) {
 		if (extent != NULL)
 			base_extent_dalloc(tsdn, extent);
 		return (NULL);
 	}
-	base_mapped += csize;
+	base_mapped += esize;
 	if (extent == NULL) {
 		extent = (extent_t *)addr;
 		addr = (void *)((uintptr_t)addr + nsize);
-		csize -= nsize;
+		esize -= nsize;
 		if (config_stats) {
 			base_allocated += nsize;
 			base_resident += PAGE_CEILING(nsize);
 		}
 	}
-	extent_init(extent, NULL, addr, csize, 0, true, true, true, false);
+	extent_init(extent, NULL, addr, esize, 0, true, true, true, false);
 
 	return (extent);
 }
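A rough illustration of the rounding change in base_extent_alloc() above: base allocations are now rounded up to a page boundary rather than to a chunk boundary. CHUNK_CEILING() is taken from the deleted chunk.h earlier in this diff; the PAGE constants below are assumptions for demonstration only (jemalloc derives them from LG_PAGE at configure time), so this is a sketch, not the project's headers.

#include <stdio.h>
#include <stddef.h>

#define PAGE		((size_t)4096)		/* assumed 4 KiB page */
#define PAGE_MASK	(PAGE - 1)
#define PAGE_CEILING(s)	(((s) + PAGE_MASK) & ~PAGE_MASK)

#define CHUNKSIZE	((size_t)1 << 21)	/* old LG_CHUNK_DEFAULT == 21 */
#define CHUNKSIZE_MASK	(CHUNKSIZE - 1)
#define CHUNK_CEILING(s) (((s) + CHUNKSIZE_MASK) & ~CHUNKSIZE_MASK)

int
main(void)
{
	size_t minsize = 80 * 1024;	/* e.g. an 80 KiB base allocation */

	/* Old rounding: every base extent consumed at least one 2 MiB chunk. */
	printf("CHUNK_CEILING(%zu) = %zu\n", minsize, CHUNK_CEILING(minsize));
	/* New rounding: page granularity, so 80 KiB stays 80 KiB. */
	printf("PAGE_CEILING(%zu)  = %zu\n", minsize, PAGE_CEILING(minsize));
	return (0);
}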
diff --git a/src/chunk.c b/src/chunk.c
deleted file mode 100644
index d750f715..00000000
--- a/src/chunk.c
+++ /dev/null
@@ -1,51 +0,0 @@
-#define	JEMALLOC_CHUNK_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-
-/******************************************************************************/
-/* Data. */
-
-const char	*opt_dss = DSS_DEFAULT;
-size_t	opt_lg_chunk = 0;
-
-/* Various chunk-related settings. */
-size_t	chunksize;
-size_t	chunksize_mask; /* (chunksize - 1). */
-size_t	chunk_npages;
-
-/******************************************************************************/
-
-bool
-chunk_boot(void)
-{
-#ifdef _WIN32
-	SYSTEM_INFO info;
-	GetSystemInfo(&info);
-
-	/*
-	 * Verify actual page size is equal to or an integral multiple of
-	 * configured page size.
-	 */
-	if (info.dwPageSize & ((1U << LG_PAGE) - 1))
-		return (true);
-
-	/*
-	 * Configure chunksize (if not set) to match granularity (usually 64K),
-	 * so pages_map will always take fast path.
-	 */
-	if (!opt_lg_chunk) {
-		opt_lg_chunk = ffs_u((unsigned)info.dwAllocationGranularity) -
-		    1;
-	}
-#else
-	if (!opt_lg_chunk)
-		opt_lg_chunk = LG_CHUNK_DEFAULT;
-#endif
-
-	/* Set variables according to the value of opt_lg_chunk. */
-	chunksize = (ZU(1) << opt_lg_chunk);
-	assert(chunksize >= PAGE);
-	chunksize_mask = chunksize - 1;
-	chunk_npages = (chunksize >> LG_PAGE);
-
-	return (false);
-}
diff --git a/src/ctl.c b/src/ctl.c
index b00991a6..b4e2208c 100644
--- a/src/ctl.c
+++ b/src/ctl.c
@@ -88,7 +88,6 @@ CTL_PROTO(config_utrace)
 CTL_PROTO(config_xmalloc)
 CTL_PROTO(opt_abort)
 CTL_PROTO(opt_dss)
-CTL_PROTO(opt_lg_chunk)
 CTL_PROTO(opt_narenas)
 CTL_PROTO(opt_decay_time)
 CTL_PROTO(opt_stats_print)
@@ -177,7 +176,6 @@ CTL_PROTO(stats_arenas_i_nmadvise)
 CTL_PROTO(stats_arenas_i_purged)
 CTL_PROTO(stats_arenas_i_metadata)
 INDEX_PROTO(stats_arenas_i)
-CTL_PROTO(stats_cactive)
 CTL_PROTO(stats_allocated)
 CTL_PROTO(stats_active)
 CTL_PROTO(stats_metadata)
@@ -244,7 +242,6 @@ static const ctl_named_node_t config_node[] = {
 static const ctl_named_node_t opt_node[] = {
 	{NAME("abort"),		CTL(opt_abort)},
 	{NAME("dss"),		CTL(opt_dss)},
-	{NAME("lg_chunk"),	CTL(opt_lg_chunk)},
 	{NAME("narenas"),	CTL(opt_narenas)},
 	{NAME("decay_time"),	CTL(opt_decay_time)},
 	{NAME("stats_print"),	CTL(opt_stats_print)},
@@ -410,7 +407,6 @@ static const ctl_indexed_node_t stats_arenas_node[] = {
 };
 
 static const ctl_named_node_t stats_node[] = {
-	{NAME("cactive"),	CTL(stats_cactive)},
 	{NAME("allocated"),	CTL(stats_allocated)},
 	{NAME("active"),	CTL(stats_active)},
 	{NAME("metadata"),	CTL(stats_metadata)},
@@ -1136,7 +1132,6 @@ CTL_RO_CONFIG_GEN(config_xmalloc, bool)
 
 CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
 CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
-CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
 CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned)
 CTL_RO_NL_GEN(opt_decay_time, opt_decay_time, ssize_t)
 CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
@@ -1888,7 +1883,6 @@ CTL_RO_NL_CGEN(config_prof, lg_prof_sample, lg_prof_sample, size_t)
 
 /******************************************************************************/
 
-CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *)
 CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t)
 CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t)
 CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats.metadata, size_t)
diff --git a/src/extent_dss.c b/src/extent_dss.c
index 9c5cd25a..e0e6635d 100644
--- a/src/extent_dss.c
+++ b/src/extent_dss.c
@@ -3,6 +3,8 @@
 /******************************************************************************/
 /* Data. */
 
+const char	*opt_dss = DSS_DEFAULT;
+
 const char	*dss_prec_names[] = {
 	"disabled",
 	"primary",
diff --git a/src/jemalloc.c b/src/jemalloc.c
index 580b23f9..95cd0545 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -1024,8 +1024,6 @@ malloc_conf_init(void)
 			}
 
 			CONF_HANDLE_BOOL(opt_abort, "abort", true)
-			CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE,
-			    (sizeof(size_t) << 3) - 1, true)
 			if (strncmp("dss", k, klen) == 0) {
 				int i;
 				bool match = false;
@@ -1176,8 +1174,6 @@ malloc_init_hard_a0_locked()
 	pages_boot();
 	if (base_boot())
 		return (true);
-	if (chunk_boot())
-		return (true);
 	if (extent_boot())
 		return (true);
 	if (ctl_boot())
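A hedged sketch, not part of the patch: with the CONF_HANDLE_SIZE_T(opt_lg_chunk, ...) case removed from malloc_conf_init() above, an embedded or application-supplied options string should name only surviving options; a leftover "lg_chunk:..." pair should now be reported as an invalid option at startup rather than applied. The options used below (decay_time, narenas) are ones this patch leaves in place, and the example assumes an unprefixed build (otherwise the symbol carries the configured prefix).

#include <jemalloc/jemalloc.h>

/*
 * Processed during initialization after any --with-malloc-conf string and
 * before the /etc/malloc.conf symlink and the MALLOC_CONF environment
 * variable (see the INSTALL hunk at the top of this diff).
 */
const char *malloc_conf = "decay_time:30,narenas:4";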
diff --git a/src/stats.c b/src/stats.c
index 185ccac6..ca716d5e 100644
--- a/src/stats.c
+++ b/src/stats.c
@@ -30,8 +30,6 @@
 
 bool	opt_stats_print = false;
 
-size_t	stats_cactive = 0;
-
 /******************************************************************************/
 /* Function prototypes for non-inline static functions. */
 
@@ -416,7 +414,6 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 		malloc_cprintf(write_cb, cbopaque,
 		    "Run-time option settings:\n");
 		OPT_WRITE_BOOL(abort)
-		OPT_WRITE_SIZE_T(lg_chunk)
 		OPT_WRITE_CHAR_P(dss)
 		OPT_WRITE_UNSIGNED(narenas)
 		OPT_WRITE_CHAR_P(purge)
@@ -486,16 +483,11 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 			    "Average profile dump interval: N/A\n");
 			}
 		}
-		CTL_GET("opt.lg_chunk", &sv, size_t);
-		malloc_cprintf(write_cb, cbopaque,
-		    "Chunk size: %zu (2^%zu)\n", (ZU(1) << sv), sv);
 	}
 	if (config_stats) {
-		size_t *cactive;
 		size_t allocated, active, metadata, resident, mapped, retained;
 
-		CTL_GET("stats.cactive", &cactive, size_t *);
 		CTL_GET("stats.allocated", &allocated, size_t);
 		CTL_GET("stats.active", &active, size_t);
 		CTL_GET("stats.metadata", &metadata, size_t);
@@ -506,9 +498,6 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 		    "Allocated: %zu, active: %zu, metadata: %zu,"
 		    " resident: %zu, mapped: %zu, retained: %zu\n",
 		    allocated, active, metadata, resident, mapped, retained);
-		malloc_cprintf(write_cb, cbopaque,
-		    "Current active ceiling: %zu\n",
-		    atomic_read_z(cactive));
 
 		if (merged) {
 			unsigned narenas;
diff --git a/test/unit/junk.c b/test/unit/junk.c
index dea0f615..fe453b6c 100644
--- a/test/unit/junk.c
+++ b/test/unit/junk.c
@@ -140,7 +140,7 @@ TEST_BEGIN(test_junk_large)
 {
 
 	test_skip_if(!config_fill);
-	test_junk(SMALL_MAXCLASS+1, chunksize*2);
+	test_junk(SMALL_MAXCLASS+1, (1U << (LG_LARGE_MINCLASS+1)));
 }
 TEST_END
 
diff --git a/test/unit/lg_chunk.c b/test/unit/lg_chunk.c
deleted file mode 100644
index 7e5df381..00000000
--- a/test/unit/lg_chunk.c
+++ /dev/null
@@ -1,26 +0,0 @@
-#include "test/jemalloc_test.h"
-
-/*
- * Make sure that opt.lg_chunk clamping is sufficient.  In practice, this test
- * program will fail a debug assertion during initialization and abort (rather
- * than the test soft-failing) if clamping is insufficient.
- */
-const char *malloc_conf = "lg_chunk:0";
-
-TEST_BEGIN(test_lg_chunk_clamp)
-{
-	void *p;
-
-	p = mallocx(1, 0);
-	assert_ptr_not_null(p, "Unexpected mallocx() failure");
-	dallocx(p, 0);
-}
-TEST_END
-
-int
-main(void)
-{
-
-	return (test(
-	    test_lg_chunk_clamp));
-}
diff --git a/test/unit/mallctl.c b/test/unit/mallctl.c
index ee57dd5c..0e979a11 100644
--- a/test/unit/mallctl.c
+++ b/test/unit/mallctl.c
@@ -160,7 +160,6 @@ TEST_BEGIN(test_mallctl_opt)
 } while (0)
 
 	TEST_MALLCTL_OPT(bool, abort, always);
-	TEST_MALLCTL_OPT(size_t, lg_chunk, always);
 	TEST_MALLCTL_OPT(const char *, dss, always);
 	TEST_MALLCTL_OPT(unsigned, narenas, always);
 	TEST_MALLCTL_OPT(ssize_t, decay_time, always);
diff --git a/test/unit/prof_gdump.c b/test/unit/prof_gdump.c
index a0e6ee92..ca93f300 100644
--- a/test/unit/prof_gdump.c
+++ b/test/unit/prof_gdump.c
@@ -34,12 +34,12 @@ TEST_BEGIN(test_gdump)
 	prof_dump_open = prof_dump_open_intercept;
 
 	did_prof_dump_open = false;
-	p = mallocx(chunksize, 0);
+	p = mallocx((1U << LG_LARGE_MINCLASS), 0);
 	assert_ptr_not_null(p, "Unexpected mallocx() failure");
 	assert_true(did_prof_dump_open, "Expected a profile dump");
 
 	did_prof_dump_open = false;
-	q = mallocx(chunksize, 0);
+	q = mallocx((1U << LG_LARGE_MINCLASS), 0);
 	assert_ptr_not_null(q, "Unexpected mallocx() failure");
 	assert_true(did_prof_dump_open, "Expected a profile dump");
 
@@ -50,7 +50,7 @@ TEST_BEGIN(test_gdump)
 	    "Unexpected mallctl failure while disabling prof.gdump");
 	assert(gdump_old);
 	did_prof_dump_open = false;
-	r = mallocx(chunksize, 0);
+	r = mallocx((1U << LG_LARGE_MINCLASS), 0);
 	assert_ptr_not_null(q, "Unexpected mallocx() failure");
 	assert_false(did_prof_dump_open, "Unexpected profile dump");
 
@@ -61,7 +61,7 @@ TEST_BEGIN(test_gdump)
 	    "Unexpected mallctl failure while enabling prof.gdump");
 	assert(!gdump_old);
 	did_prof_dump_open = false;
-	s = mallocx(chunksize, 0);
+	s = mallocx((1U << LG_LARGE_MINCLASS), 0);
 	assert_ptr_not_null(q, "Unexpected mallocx() failure");
 	assert_true(did_prof_dump_open, "Expected a profile dump");
 
diff --git a/test/unit/stats.c b/test/unit/stats.c
index 9fa9cead..ed0d3fe9 100644
--- a/test/unit/stats.c
+++ b/test/unit/stats.c
@@ -2,14 +2,9 @@
 
 TEST_BEGIN(test_stats_summary)
 {
-	size_t *cactive;
 	size_t sz, allocated, active, resident, mapped;
 	int expected = config_stats ? 0 : ENOENT;
 
-	sz = sizeof(cactive);
-	assert_d_eq(mallctl("stats.cactive", &cactive, &sz, NULL, 0), expected,
-	    "Unexpected mallctl() result");
-
 	sz = sizeof(size_t);
 	assert_d_eq(mallctl("stats.allocated", &allocated, &sz, NULL, 0),
 	    expected, "Unexpected mallctl() result");
@@ -21,8 +16,6 @@ TEST_BEGIN(test_stats_summary)
 	    "Unexpected mallctl() result");
 
 	if (config_stats) {
-		assert_zu_le(active, *cactive,
-		    "active should be no larger than cactive");
 		assert_zu_le(allocated, active,
 		    "allocated should be no larger than active");
 		assert_zu_lt(active, resident,
@@ -88,12 +81,14 @@ TEST_BEGIN(test_stats_arenas_summary)
 
 	little = mallocx(SMALL_MAXCLASS, 0);
 	assert_ptr_not_null(little, "Unexpected mallocx() failure");
-	large = mallocx(chunksize, 0);
+	large = mallocx((1U << LG_LARGE_MINCLASS), 0);
 	assert_ptr_not_null(large, "Unexpected mallocx() failure");
 
 	dallocx(little, 0);
 	dallocx(large, 0);
 
+	assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
+	    config_tcache ? 0 : ENOENT, "Unexpected mallctl() result");
 	assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
 	    "Unexpected mallctl() failure");
 
@@ -197,7 +192,7 @@ TEST_BEGIN(test_stats_arenas_large)
 	assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
 	    0, "Unexpected mallctl() failure");
 
-	p = mallocx(chunksize, 0);
+	p = mallocx((1U << LG_LARGE_MINCLASS), 0);
 	assert_ptr_not_null(p, "Unexpected mallocx() failure");
 
 	assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
@@ -336,7 +331,7 @@ TEST_BEGIN(test_stats_arenas_lextents)
 		assert_u64_ge(nmalloc, ndalloc,
 		    "nmalloc should be at least as large as ndalloc");
 		assert_u64_gt(curlextents, 0,
-		    "At least one chunk should be currently allocated");
+		    "At least one extent should be currently allocated");
 	}
 
 	dallocx(p, 0);
diff --git a/test/unit/zero.c b/test/unit/zero.c
index 3c35f4bd..c025c831 100644
--- a/test/unit/zero.c
+++ b/test/unit/zero.c
@@ -57,7 +57,7 @@ TEST_BEGIN(test_zero_large)
 {
 
 	test_skip_if(!config_fill);
-	test_zero(SMALL_MAXCLASS+1, chunksize*2);
+	test_zero(SMALL_MAXCLASS+1, (1U << (LG_LARGE_MINCLASS+1)));
 }
 TEST_END
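A minimal sketch mirroring the updated test_stats_arenas_summary above: before sampling arena statistics, flush the calling thread's tcache (the mallctl returns ENOENT when tcache support is compiled out, which the test now tolerates), purge the arena, and advance the epoch so subsequent stats reads see the result. All mallctl names appear in this diff or in the existing API; an unprefixed build is assumed.

#include <stdint.h>
#include <jemalloc/jemalloc.h>

static void
settle_arena_stats(void)
{
	uint64_t epoch = 1;

	/* Return cached objects to the arena so they can be purged. */
	mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);
	/* Purge arena 0's dirty pages. */
	mallctl("arena.0.purge", NULL, NULL, NULL, 0);
	/* Refresh the snapshot used by subsequent stats.* mallctls. */
	mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));
}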