diff --git a/Makefile.in b/Makefile.in index 2e9bbbc2..f90e2a4f 100644 --- a/Makefile.in +++ b/Makefile.in @@ -88,7 +88,7 @@ C_SRCS := $(srcroot)src/jemalloc.c \ $(srcroot)src/ctl.c \ $(srcroot)src/extent.c \ $(srcroot)src/hash.c \ - $(srcroot)src/huge.c \ + $(srcroot)src/large.c \ $(srcroot)src/mb.c \ $(srcroot)src/mutex.c \ $(srcroot)src/nstime.c \ diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in index 923097d4..7613c24c 100644 --- a/doc/jemalloc.xml.in +++ b/doc/jemalloc.xml.in @@ -1872,22 +1872,22 @@ typedef struct { Number of bytes per slab. - + - arenas.nhchunks + arenas.nlextents (unsigned) r- - Total number of huge size classes. + Total number of large size classes. - + - arenas.hchunk.<i>.size + arenas.lextent.<i>.size (size_t) r- - Maximum size supported by this huge size + Maximum size supported by this large size class. @@ -2361,50 +2361,6 @@ typedef struct { - - - stats.arenas.<i>.huge.allocated - (size_t) - r- - [] - - Number of bytes currently allocated by huge objects. - - - - - - stats.arenas.<i>.huge.nmalloc - (uint64_t) - r- - [] - - Cumulative number of huge allocation requests served - directly by the arena. - - - - - stats.arenas.<i>.huge.ndalloc - (uint64_t) - r- - [] - - Cumulative number of huge deallocation requests served - directly by the arena. - - - - - stats.arenas.<i>.huge.nrequests - (uint64_t) - r- - [] - - Cumulative number of huge allocation requests. - - - stats.arenas.<i>.bins.<j>.nmalloc @@ -2500,9 +2456,9 @@ typedef struct { Current number of slabs. - + - stats.arenas.<i>.hchunks.<j>.nmalloc + stats.arenas.<i>.lextents.<j>.nmalloc (uint64_t) r- [] @@ -2511,9 +2467,9 @@ typedef struct { class served directly by the arena. - + - stats.arenas.<i>.hchunks.<j>.ndalloc + stats.arenas.<i>.lextents.<j>.ndalloc (uint64_t) r- [] @@ -2522,9 +2478,9 @@ typedef struct { size class served directly by the arena. - + - stats.arenas.<i>.hchunks.<j>.nrequests + stats.arenas.<i>.lextents.<j>.nrequests (uint64_t) r- [] @@ -2533,14 +2489,14 @@ typedef struct { class. - + - stats.arenas.<i>.hchunks.<j>.curhchunks + stats.arenas.<i>.lextents.<j>.curlextents (size_t) r- [] - Current number of huge allocations for this size class. + Current number of large allocations for this size class. diff --git a/include/jemalloc/internal/arena.h b/include/jemalloc/internal/arena.h index d66548f2..56f78571 100644 --- a/include/jemalloc/internal/arena.h +++ b/include/jemalloc/internal/arena.h @@ -229,10 +229,10 @@ struct arena_s { */ size_t decay_backlog[SMOOTHSTEP_NSTEPS]; - /* Extant huge allocations. */ - ql_head(extent_t) huge; - /* Synchronizes all huge allocation/update/deallocation. */ - malloc_mutex_t huge_mtx; + /* Extant large allocations. */ + ql_head(extent_t) large; + /* Synchronizes all large allocation/update/deallocation. */ + malloc_mutex_t large_mtx; /* * Heaps of chunks that were previously allocated. 
These are used when @@ -287,13 +287,13 @@ void arena_chunk_cache_maybe_insert(arena_t *arena, extent_t *extent, bool cache); void arena_chunk_cache_maybe_remove(arena_t *arena, extent_t *extent, bool cache); -extent_t *arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, +extent_t *arena_chunk_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool *zero); -void arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, extent_t *extent, +void arena_chunk_dalloc_large(tsdn_t *tsdn, arena_t *arena, extent_t *extent, bool locked); -void arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, +void arena_chunk_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t oldsize); -void arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, +void arena_chunk_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t oldsize); ssize_t arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena); bool arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena, @@ -341,7 +341,7 @@ void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time, size_t *nactive, size_t *ndirty, arena_stats_t *astats, - malloc_bin_stats_t *bstats, malloc_huge_stats_t *hstats); + malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats); unsigned arena_nthreads_get(arena_t *arena, bool internal); void arena_nthreads_inc(arena_t *arena, bool internal); void arena_nthreads_dec(arena_t *arena, bool internal); @@ -470,7 +470,7 @@ arena_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent, const void *ptr) assert(ptr != NULL); if (unlikely(!extent_slab_get(extent))) - return (huge_prof_tctx_get(tsdn, extent)); + return (large_prof_tctx_get(tsdn, extent)); return ((prof_tctx_t *)(uintptr_t)1U); } @@ -483,7 +483,7 @@ arena_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, const void *ptr, assert(ptr != NULL); if (unlikely(!extent_slab_get(extent))) - huge_prof_tctx_set(tsdn, extent, tctx); + large_prof_tctx_set(tsdn, extent, tctx); } JEMALLOC_INLINE void @@ -495,7 +495,7 @@ arena_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent, const void *ptr, assert(ptr != NULL); assert(!extent_slab_get(extent)); - huge_prof_tctx_reset(tsdn, extent); + large_prof_tctx_reset(tsdn, extent); } JEMALLOC_ALWAYS_INLINE void @@ -535,7 +535,7 @@ arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero, tcache, size, ind, zero, slow_path)); } if (likely(size <= tcache_maxclass)) { - return (tcache_alloc_huge(tsdn_tsd(tsdn), arena, + return (tcache_alloc_large(tsdn_tsd(tsdn), arena, tcache, size, ind, zero, slow_path)); } /* (size > tcache_maxclass) case falls through. 
*/ @@ -563,7 +563,7 @@ arena_salloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr) if (likely(extent_slab_get(extent))) ret = index2size(extent_slab_data_get_const(extent)->binind); else - ret = huge_salloc(tsdn, extent); + ret = large_salloc(tsdn, extent); return (ret); } @@ -594,11 +594,11 @@ arena_dalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, tcache_t *tcache, arena_dalloc_promoted(tsdn, extent, ptr, tcache, slow_path); } else { - tcache_dalloc_huge(tsdn_tsd(tsdn), tcache, + tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, usize, slow_path); } } else - huge_dalloc(tsdn, extent); + large_dalloc(tsdn, extent); } } @@ -627,11 +627,11 @@ arena_sdalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t size, arena_dalloc_promoted(tsdn, extent, ptr, tcache, slow_path); } else { - tcache_dalloc_huge(tsdn_tsd(tsdn), tcache, ptr, + tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, size, slow_path); } } else - huge_dalloc(tsdn, extent); + large_dalloc(tsdn, extent); } } # endif /* JEMALLOC_ARENA_INLINE_B */ diff --git a/include/jemalloc/internal/ctl.h b/include/jemalloc/internal/ctl.h index 00deeb8a..3fbac205 100644 --- a/include/jemalloc/internal/ctl.h +++ b/include/jemalloc/internal/ctl.h @@ -51,7 +51,7 @@ struct ctl_arena_stats_s { uint64_t nrequests_small; malloc_bin_stats_t bstats[NBINS]; - malloc_huge_stats_t hstats[NSIZES - NBINS]; + malloc_large_stats_t lstats[NSIZES - NBINS]; }; struct ctl_stats_s { diff --git a/include/jemalloc/internal/extent.h b/include/jemalloc/internal/extent.h index bfe61811..cf717d9e 100644 --- a/include/jemalloc/internal/extent.h +++ b/include/jemalloc/internal/extent.h @@ -51,7 +51,7 @@ struct extent_s { /* Small region slab metadata. */ arena_slab_data_t e_slab_data; - /* Profile counters, used for huge objects. */ + /* Profile counters, used for large objects. */ union { void *e_prof_tctx_pun; prof_tctx_t *e_prof_tctx; @@ -67,7 +67,7 @@ struct extent_s { /* Linkage for per size class address-ordered heaps. */ phn(extent_t) ph_link; - /* Linkage for arena's huge and extent_cache lists. */ + /* Linkage for arena's large and extent_cache lists. 
*/ ql_elm(extent_t) ql_link; }; }; diff --git a/include/jemalloc/internal/huge.h b/include/jemalloc/internal/huge.h deleted file mode 100644 index 836f1b50..00000000 --- a/include/jemalloc/internal/huge.h +++ /dev/null @@ -1,37 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -void *huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero); -void *huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, - size_t alignment, bool zero); -bool huge_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min, - size_t usize_max, bool zero); -void *huge_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, - size_t usize, size_t alignment, bool zero, tcache_t *tcache); -#ifdef JEMALLOC_JET -typedef void (huge_dalloc_junk_t)(void *, size_t); -extern huge_dalloc_junk_t *huge_dalloc_junk; -#else -void huge_dalloc_junk(void *ptr, size_t usize); -#endif -void huge_dalloc_junked_locked(tsdn_t *tsdn, extent_t *extent); -void huge_dalloc(tsdn_t *tsdn, extent_t *extent); -size_t huge_salloc(tsdn_t *tsdn, const extent_t *extent); -prof_tctx_t *huge_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent); -void huge_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx); -void huge_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in index f4d26beb..58a18ae5 100644 --- a/include/jemalloc/internal/jemalloc_internal.h.in +++ b/include/jemalloc/internal/jemalloc_internal.h.in @@ -362,7 +362,7 @@ typedef unsigned szind_t; #include "jemalloc/internal/rtree.h" #include "jemalloc/internal/pages.h" #include "jemalloc/internal/chunk.h" -#include "jemalloc/internal/huge.h" +#include "jemalloc/internal/large.h" #include "jemalloc/internal/tcache.h" #include "jemalloc/internal/hash.h" #include "jemalloc/internal/prof.h" @@ -396,7 +396,7 @@ typedef unsigned szind_t; #include "jemalloc/internal/rtree.h" #include "jemalloc/internal/pages.h" #include "jemalloc/internal/chunk.h" -#include "jemalloc/internal/huge.h" +#include "jemalloc/internal/large.h" #include "jemalloc/internal/tcache.h" #include "jemalloc/internal/hash.h" #include "jemalloc/internal/prof.h" @@ -486,7 +486,7 @@ void jemalloc_postfork_child(void); #include "jemalloc/internal/rtree.h" #include "jemalloc/internal/pages.h" #include "jemalloc/internal/chunk.h" -#include "jemalloc/internal/huge.h" +#include "jemalloc/internal/large.h" #include "jemalloc/internal/tcache.h" #include "jemalloc/internal/hash.h" #include "jemalloc/internal/prof.h" @@ -515,7 +515,7 @@ void jemalloc_postfork_child(void); #include "jemalloc/internal/rtree.h" #include "jemalloc/internal/pages.h" #include "jemalloc/internal/chunk.h" -#include "jemalloc/internal/huge.h" +#include "jemalloc/internal/large.h" #ifndef JEMALLOC_ENABLE_INLINE pszind_t psz2ind(size_t psz); @@ -547,7 +547,7 @@ JEMALLOC_INLINE pszind_t psz2ind(size_t psz) { - if (unlikely(psz > 
HUGE_MAXCLASS)) + if (unlikely(psz > LARGE_MAXCLASS)) return (NPSIZES); { pszind_t x = lg_floor((psz<<1)-1); @@ -608,7 +608,7 @@ JEMALLOC_INLINE size_t psz2u(size_t psz) { - if (unlikely(psz > HUGE_MAXCLASS)) + if (unlikely(psz > LARGE_MAXCLASS)) return (0); { size_t x = lg_floor((psz<<1)-1); @@ -625,7 +625,7 @@ JEMALLOC_INLINE szind_t size2index_compute(size_t size) { - if (unlikely(size > HUGE_MAXCLASS)) + if (unlikely(size > LARGE_MAXCLASS)) return (NSIZES); #if (NTBINS != 0) if (size <= (ZU(1) << LG_TINY_MAXCLASS)) { @@ -721,7 +721,7 @@ JEMALLOC_ALWAYS_INLINE size_t s2u_compute(size_t size) { - if (unlikely(size > HUGE_MAXCLASS)) + if (unlikely(size > LARGE_MAXCLASS)) return (0); #if (NTBINS > 0) if (size <= (ZU(1) << LG_TINY_MAXCLASS)) { @@ -797,9 +797,9 @@ sa2u(size_t size, size_t alignment) return (usize); } - /* Huge size class. Beware of overflow. */ + /* Large size class. Beware of overflow. */ - if (unlikely(alignment > HUGE_MAXCLASS)) + if (unlikely(alignment > LARGE_MAXCLASS)) return (0); /* Make sure result is a large size class. */ @@ -814,7 +814,7 @@ sa2u(size_t size, size_t alignment) } /* - * Calculate the multi-page mapping that huge_palloc() would need in + * Calculate the multi-page mapping that large_palloc() would need in * order to guarantee the alignment. */ if (usize + large_pad + PAGE_CEILING(alignment) < usize) { @@ -1113,7 +1113,7 @@ iralloct_realign(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize, size_t usize, copysize; usize = sa2u(size + extra, alignment); - if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) + if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) return (NULL); p = ipalloct(tsdn, usize, alignment, zero, tcache, arena); if (p == NULL) { @@ -1121,7 +1121,7 @@ iralloct_realign(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize, return (NULL); /* Try again, without extra this time. 
*/ usize = sa2u(size, alignment); - if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) + if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) return (NULL); p = ipalloct(tsdn, usize, alignment, zero, tcache, arena); if (p == NULL) diff --git a/include/jemalloc/internal/large.h b/include/jemalloc/internal/large.h new file mode 100644 index 00000000..afaa6c3c --- /dev/null +++ b/include/jemalloc/internal/large.h @@ -0,0 +1,37 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +void *large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero); +void *large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, + size_t alignment, bool zero); +bool large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min, + size_t usize_max, bool zero); +void *large_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, + size_t usize, size_t alignment, bool zero, tcache_t *tcache); +#ifdef JEMALLOC_JET +typedef void (large_dalloc_junk_t)(void *, size_t); +extern large_dalloc_junk_t *large_dalloc_junk; +#else +void large_dalloc_junk(void *ptr, size_t usize); +#endif +void large_dalloc_junked_locked(tsdn_t *tsdn, extent_t *extent); +void large_dalloc(tsdn_t *tsdn, extent_t *extent); +size_t large_salloc(tsdn_t *tsdn, const extent_t *extent); +prof_tctx_t *large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent); +void large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx); +void large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/include/jemalloc/internal/private_symbols.txt b/include/jemalloc/internal/private_symbols.txt index b8ed4341..cab0fc54 100644 --- a/include/jemalloc/internal/private_symbols.txt +++ b/include/jemalloc/internal/private_symbols.txt @@ -9,14 +9,14 @@ arena_boot arena_choose arena_choose_hard arena_choose_impl -arena_chunk_alloc_huge +arena_chunk_alloc_large arena_chunk_cache_alloc arena_chunk_cache_dalloc arena_chunk_cache_maybe_insert arena_chunk_cache_maybe_remove -arena_chunk_dalloc_huge -arena_chunk_ralloc_huge_expand -arena_chunk_ralloc_huge_shrink +arena_chunk_dalloc_large +arena_chunk_ralloc_large_expand +arena_chunk_ralloc_large_shrink arena_cleanup arena_dalloc arena_dalloc_bin_junked_locked @@ -222,17 +222,6 @@ hash_rotl_64 hash_x64_128 hash_x86_128 hash_x86_32 -huge_dalloc -huge_dalloc_junk -huge_dalloc_junked_locked -huge_malloc -huge_palloc -huge_prof_tctx_get -huge_prof_tctx_reset -huge_prof_tctx_set -huge_ralloc -huge_ralloc_no_move -huge_salloc iaalloc ialloc iallocztm @@ -258,6 +247,17 @@ ixalloc jemalloc_postfork_child jemalloc_postfork_parent jemalloc_prefork +large_dalloc +large_dalloc_junk +large_dalloc_junked_locked +large_malloc +large_palloc +large_prof_tctx_get +large_prof_tctx_reset +large_prof_tctx_set +large_ralloc +large_ralloc_no_move +large_salloc lg_floor lg_prof_sample malloc_cprintf @@ -438,17 +438,17 @@ stats_cactive_get stats_cactive_sub stats_print tcache_alloc_easy -tcache_alloc_huge +tcache_alloc_large 
tcache_alloc_small tcache_alloc_small_hard tcache_arena_reassociate -tcache_bin_flush_huge +tcache_bin_flush_large tcache_bin_flush_small tcache_bin_info tcache_boot tcache_cleanup tcache_create -tcache_dalloc_huge +tcache_dalloc_large tcache_dalloc_small tcache_enabled_cleanup tcache_enabled_get diff --git a/include/jemalloc/internal/size_classes.sh b/include/jemalloc/internal/size_classes.sh index b73064d1..38fe4902 100755 --- a/include/jemalloc/internal/size_classes.sh +++ b/include/jemalloc/internal/size_classes.sh @@ -237,7 +237,7 @@ size_classes() { fi fi # Final written value is correct: - huge_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))" + large_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))" index=$((${index} + 1)) ndelta=$((${ndelta} + 1)) done @@ -257,7 +257,7 @@ size_classes() { # - lookup_maxclass # - small_maxclass # - lg_large_minclass - # - huge_maxclass + # - large_maxclass } cat <tbins[binind]; tbin_info = &tcache_bin_info[binind]; if (unlikely(tbin->ncached == tbin_info->ncached_max)) { - tcache_bin_flush_huge(tsd, tbin, binind, + tcache_bin_flush_large(tsd, tbin, binind, (tbin_info->ncached_max >> 1), tcache); } assert(tbin->ncached < tbin_info->ncached_max); diff --git a/include/jemalloc/internal/witness.h b/include/jemalloc/internal/witness.h index f15665bc..8c56c21a 100644 --- a/include/jemalloc/internal/witness.h +++ b/include/jemalloc/internal/witness.h @@ -32,7 +32,7 @@ typedef int witness_comp_t (const witness_t *, void *, const witness_t *, #define WITNESS_RANK_LEAF 0xffffffffU #define WITNESS_RANK_ARENA_BIN WITNESS_RANK_LEAF -#define WITNESS_RANK_ARENA_HUGE WITNESS_RANK_LEAF +#define WITNESS_RANK_ARENA_LARGE WITNESS_RANK_LEAF #define WITNESS_RANK_DSS WITNESS_RANK_LEAF #define WITNESS_RANK_PROF_ACTIVE WITNESS_RANK_LEAF #define WITNESS_RANK_PROF_DUMP_SEQ WITNESS_RANK_LEAF diff --git a/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj b/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj index 537cb6ab..91c949aa 100644 --- a/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj +++ b/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj @@ -47,11 +47,11 @@ - + @@ -98,8 +98,8 @@ - + diff --git a/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters b/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters index d2b5595f..09d4cb20 100644 --- a/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters +++ b/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters @@ -80,9 +80,6 @@ Header Files\internal - - Header Files\internal - Header Files\internal @@ -95,6 +92,9 @@ Header Files\internal + + Header Files\internal + Header Files\internal @@ -205,10 +205,10 @@ Source Files - + Source Files - + Source Files diff --git a/src/arena.c b/src/arena.c index ffde2e31..8194ced7 100644 --- a/src/arena.c +++ b/src/arena.c @@ -256,71 +256,71 @@ arena_nactive_sub(arena_t *arena, size_t sub_pages) } static void -arena_huge_malloc_stats_update(arena_t *arena, size_t usize) +arena_large_malloc_stats_update(arena_t *arena, size_t usize) { szind_t index = size2index(usize); szind_t hindex = (index >= NBINS) ? 
index - NBINS : 0; cassert(config_stats); - arena->stats.nmalloc_huge++; - arena->stats.allocated_huge += usize; - arena->stats.hstats[hindex].nmalloc++; - arena->stats.hstats[hindex].nrequests++; - arena->stats.hstats[hindex].curhchunks++; + arena->stats.nmalloc_large++; + arena->stats.allocated_large += usize; + arena->stats.lstats[hindex].nmalloc++; + arena->stats.lstats[hindex].nrequests++; + arena->stats.lstats[hindex].curlextents++; } static void -arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize) +arena_large_malloc_stats_update_undo(arena_t *arena, size_t usize) { szind_t index = size2index(usize); szind_t hindex = (index >= NBINS) ? index - NBINS : 0; cassert(config_stats); - arena->stats.nmalloc_huge--; - arena->stats.allocated_huge -= usize; - arena->stats.hstats[hindex].nmalloc--; - arena->stats.hstats[hindex].nrequests--; - arena->stats.hstats[hindex].curhchunks--; + arena->stats.nmalloc_large--; + arena->stats.allocated_large -= usize; + arena->stats.lstats[hindex].nmalloc--; + arena->stats.lstats[hindex].nrequests--; + arena->stats.lstats[hindex].curlextents--; } static void -arena_huge_dalloc_stats_update(arena_t *arena, size_t usize) +arena_large_dalloc_stats_update(arena_t *arena, size_t usize) { szind_t index = size2index(usize); szind_t hindex = (index >= NBINS) ? index - NBINS : 0; cassert(config_stats); - arena->stats.ndalloc_huge++; - arena->stats.allocated_huge -= usize; - arena->stats.hstats[hindex].ndalloc++; - arena->stats.hstats[hindex].curhchunks--; + arena->stats.ndalloc_large++; + arena->stats.allocated_large -= usize; + arena->stats.lstats[hindex].ndalloc++; + arena->stats.lstats[hindex].curlextents--; } static void -arena_huge_reset_stats_cancel(arena_t *arena, size_t usize) +arena_large_reset_stats_cancel(arena_t *arena, size_t usize) { szind_t index = size2index(usize); szind_t hindex = (index >= NBINS) ? index - NBINS : 0; cassert(config_stats); - arena->stats.ndalloc_huge++; - arena->stats.hstats[hindex].ndalloc--; + arena->stats.ndalloc_large++; + arena->stats.lstats[hindex].ndalloc--; } static void -arena_huge_ralloc_stats_update(arena_t *arena, size_t oldusize, size_t usize) +arena_large_ralloc_stats_update(arena_t *arena, size_t oldusize, size_t usize) { - arena_huge_dalloc_stats_update(arena, oldusize); - arena_huge_malloc_stats_update(arena, usize); + arena_large_dalloc_stats_update(arena, oldusize); + arena_large_malloc_stats_update(arena, usize); } static extent_t * -arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena, +arena_chunk_alloc_large_hard(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, bool *zero) { extent_t *extent; @@ -332,7 +332,7 @@ arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena, /* Revert optimistic stats updates. */ malloc_mutex_lock(tsdn, &arena->lock); if (config_stats) { - arena_huge_malloc_stats_update_undo(arena, usize); + arena_large_malloc_stats_update_undo(arena, usize); arena->stats.mapped -= usize; } arena_nactive_sub(arena, (usize + large_pad) >> LG_PAGE); @@ -343,7 +343,7 @@ arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena, } extent_t * -arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize, +arena_chunk_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool *zero) { extent_t *extent; @@ -353,7 +353,7 @@ arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize, /* Optimistically update stats. 
*/ if (config_stats) { - arena_huge_malloc_stats_update(arena, usize); + arena_large_malloc_stats_update(arena, usize); arena->stats.mapped += usize; } arena_nactive_add(arena, (usize + large_pad) >> LG_PAGE); @@ -362,7 +362,7 @@ arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize, usize, large_pad, alignment, zero, false); malloc_mutex_unlock(tsdn, &arena->lock); if (extent == NULL) { - extent = arena_chunk_alloc_huge_hard(tsdn, arena, &chunk_hooks, + extent = arena_chunk_alloc_large_hard(tsdn, arena, &chunk_hooks, usize, alignment, zero); } @@ -370,7 +370,7 @@ arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize, } void -arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, extent_t *extent, +arena_chunk_dalloc_large(tsdn_t *tsdn, arena_t *arena, extent_t *extent, bool locked) { chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; @@ -378,7 +378,8 @@ arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, extent_t *extent, if (!locked) malloc_mutex_lock(tsdn, &arena->lock); if (config_stats) { - arena_huge_dalloc_stats_update(arena, extent_usize_get(extent)); + arena_large_dalloc_stats_update(arena, + extent_usize_get(extent)); arena->stats.mapped -= extent_size_get(extent); } arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE); @@ -389,7 +390,7 @@ arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, extent_t *extent, } void -arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent, +arena_chunk_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t oldusize) { size_t usize = extent_usize_get(extent); @@ -397,7 +398,7 @@ arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent, malloc_mutex_lock(tsdn, &arena->lock); if (config_stats) { - arena_huge_ralloc_stats_update(arena, oldusize, usize); + arena_large_ralloc_stats_update(arena, oldusize, usize); arena->stats.mapped -= udiff; } arena_nactive_sub(arena, udiff >> LG_PAGE); @@ -405,7 +406,7 @@ arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent, } void -arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent, +arena_chunk_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t oldusize) { size_t usize = extent_usize_get(extent); @@ -413,7 +414,7 @@ arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent, malloc_mutex_lock(tsdn, &arena->lock); if (config_stats) { - arena_huge_ralloc_stats_update(arena, oldusize, usize); + arena_large_ralloc_stats_update(arena, oldusize, usize); arena->stats.mapped += udiff; } arena_nactive_add(arena, udiff >> LG_PAGE); @@ -891,26 +892,26 @@ arena_reset(tsd_t *tsd, arena_t *arena) * stats refreshes would impose an inconvenient burden. */ - /* Huge allocations. */ - malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx); - for (extent = ql_last(&arena->huge, ql_link); extent != NULL; extent = - ql_last(&arena->huge, ql_link)) { + /* Large allocations. */ + malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx); + for (extent = ql_last(&arena->large, ql_link); extent != NULL; extent = + ql_last(&arena->large, ql_link)) { void *ptr = extent_base_get(extent); size_t usize; - malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx); if (config_stats || (config_prof && opt_prof)) usize = isalloc(tsd_tsdn(tsd), extent, ptr); - /* Remove huge allocation from prof sample set. */ + /* Remove large allocation from prof sample set. 
*/ if (config_prof && opt_prof) prof_free(tsd, extent, ptr, usize); - huge_dalloc(tsd_tsdn(tsd), extent); - malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx); + large_dalloc(tsd_tsdn(tsd), extent); + malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx); /* Cancel out unwanted effects on stats. */ if (config_stats) - arena_huge_reset_stats_cancel(arena, usize); + arena_large_reset_stats_cancel(arena, usize); } - malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx); malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock); @@ -1283,7 +1284,7 @@ arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, if (likely(size <= SMALL_MAXCLASS)) return (arena_malloc_small(tsdn, arena, ind, zero)); - return (huge_malloc(tsdn, arena, index2size(ind), zero)); + return (large_malloc(tsdn, arena, index2size(ind), zero)); } void * @@ -1299,9 +1300,9 @@ arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, tcache, true); } else { if (likely(alignment <= CACHELINE)) - ret = huge_malloc(tsdn, arena, usize, zero); + ret = large_malloc(tsdn, arena, usize, zero); else - ret = huge_palloc(tsdn, arena, usize, alignment, zero); + ret = large_palloc(tsdn, arena, usize, alignment, zero); } return (ret); } @@ -1360,10 +1361,10 @@ arena_dalloc_promoted(tsdn_t *tsdn, extent_t *extent, void *ptr, usize = arena_prof_demote(tsdn, extent, ptr); if (usize <= tcache_maxclass) { - tcache_dalloc_huge(tsdn_tsd(tsdn), tcache, ptr, usize, + tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, usize, slow_path); } else - huge_dalloc(tsdn, extent); + large_dalloc(tsdn, extent); } static void @@ -1493,9 +1494,9 @@ arena_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize, size_t usize_min, usize_max; /* Calls with non-zero extra had to clamp extra. 
*/ - assert(extra == 0 || size + extra <= HUGE_MAXCLASS); + assert(extra == 0 || size + extra <= LARGE_MAXCLASS); - if (unlikely(size > HUGE_MAXCLASS)) + if (unlikely(size > LARGE_MAXCLASS)) return (true); usize_min = s2u(size); @@ -1515,7 +1516,7 @@ arena_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize, arena_decay_tick(tsdn, extent_arena_get(extent)); return (false); } else if (oldsize >= LARGE_MINCLASS && usize_max >= LARGE_MINCLASS) { - return (huge_ralloc_no_move(tsdn, extent, usize_min, usize_max, + return (large_ralloc_no_move(tsdn, extent, usize_min, usize_max, zero)); } @@ -1531,7 +1532,7 @@ arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize, return (arena_malloc(tsdn, arena, usize, size2index(usize), zero, tcache, true)); usize = sa2u(usize, alignment); - if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) + if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) return (NULL); return (ipalloct(tsdn, usize, alignment, zero, tcache, arena)); } @@ -1544,7 +1545,7 @@ arena_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr, size_t usize, copysize; usize = s2u(size); - if (unlikely(usize == 0 || size > HUGE_MAXCLASS)) + if (unlikely(usize == 0 || size > LARGE_MAXCLASS)) return (NULL); if (likely(usize <= SMALL_MAXCLASS)) { @@ -1555,8 +1556,8 @@ arena_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr, } if (oldsize >= LARGE_MINCLASS && usize >= LARGE_MINCLASS) { - return (huge_ralloc(tsdn, arena, extent, usize, alignment, zero, - tcache)); + return (large_ralloc(tsdn, arena, extent, usize, alignment, + zero, tcache)); } /* @@ -1670,7 +1671,7 @@ void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time, size_t *nactive, size_t *ndirty, arena_stats_t *astats, - malloc_bin_stats_t *bstats, malloc_huge_stats_t *hstats) + malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats) { unsigned i; @@ -1687,16 +1688,16 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, astats->purged += arena->stats.purged; astats->metadata_mapped += arena->stats.metadata_mapped; astats->metadata_allocated += arena_metadata_allocated_get(arena); - astats->allocated_huge += arena->stats.allocated_huge; - astats->nmalloc_huge += arena->stats.nmalloc_huge; - astats->ndalloc_huge += arena->stats.ndalloc_huge; - astats->nrequests_huge += arena->stats.nrequests_huge; + astats->allocated_large += arena->stats.allocated_large; + astats->nmalloc_large += arena->stats.nmalloc_large; + astats->ndalloc_large += arena->stats.ndalloc_large; + astats->nrequests_large += arena->stats.nrequests_large; for (i = 0; i < NSIZES - NBINS; i++) { - hstats[i].nmalloc += arena->stats.hstats[i].nmalloc; - hstats[i].ndalloc += arena->stats.hstats[i].ndalloc; - hstats[i].nrequests += arena->stats.hstats[i].nrequests; - hstats[i].curhchunks += arena->stats.hstats[i].curhchunks; + lstats[i].nmalloc += arena->stats.lstats[i].nmalloc; + lstats[i].ndalloc += arena->stats.lstats[i].ndalloc; + lstats[i].nrequests += arena->stats.lstats[i].nrequests; + lstats[i].curlextents += arena->stats.lstats[i].curlextents; } malloc_mutex_unlock(tsdn, &arena->lock); @@ -1786,9 +1787,9 @@ arena_new(tsdn_t *tsdn, unsigned ind) if (opt_purge == purge_mode_decay) arena_decay_init(arena, arena_decay_time_default_get()); - ql_new(&arena->huge); - if (malloc_mutex_init(&arena->huge_mtx, "arena_huge", - WITNESS_RANK_ARENA_HUGE)) + ql_new(&arena->large); + if (malloc_mutex_init(&arena->large_mtx, 
"arena_large", + WITNESS_RANK_ARENA_LARGE)) return (NULL); for (i = 0; i < NPSIZES; i++) { @@ -1859,7 +1860,7 @@ arena_prefork3(tsdn_t *tsdn, arena_t *arena) for (i = 0; i < NBINS; i++) malloc_mutex_prefork(tsdn, &arena->bins[i].lock); - malloc_mutex_prefork(tsdn, &arena->huge_mtx); + malloc_mutex_prefork(tsdn, &arena->large_mtx); } void @@ -1867,7 +1868,7 @@ arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) { unsigned i; - malloc_mutex_postfork_parent(tsdn, &arena->huge_mtx); + malloc_mutex_postfork_parent(tsdn, &arena->large_mtx); for (i = 0; i < NBINS; i++) malloc_mutex_postfork_parent(tsdn, &arena->bins[i].lock); malloc_mutex_postfork_parent(tsdn, &arena->extent_cache_mtx); @@ -1880,7 +1881,7 @@ arena_postfork_child(tsdn_t *tsdn, arena_t *arena) { unsigned i; - malloc_mutex_postfork_child(tsdn, &arena->huge_mtx); + malloc_mutex_postfork_child(tsdn, &arena->large_mtx); for (i = 0; i < NBINS; i++) malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock); malloc_mutex_postfork_child(tsdn, &arena->extent_cache_mtx); diff --git a/src/chunk_dss.c b/src/chunk_dss.c index f890a5cd..f8c968b3 100644 --- a/src/chunk_dss.c +++ b/src/chunk_dss.c @@ -78,7 +78,7 @@ chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, /* * sbrk() uses a signed increment argument, so take care not to - * interpret a huge allocation request as a negative increment. + * interpret a large allocation request as a negative increment. */ if ((intptr_t)size < 0) return (NULL); diff --git a/src/ckh.c b/src/ckh.c index 5ec0f60a..90a81155 100644 --- a/src/ckh.c +++ b/src/ckh.c @@ -267,7 +267,7 @@ ckh_grow(tsdn_t *tsdn, ckh_t *ckh) lg_curcells++; usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); - if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) { + if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { ret = true; goto label_return; } @@ -315,7 +315,7 @@ ckh_shrink(tsdn_t *tsdn, ckh_t *ckh) lg_prevbuckets = ckh->lg_curbuckets; lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1; usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); - if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) + if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) return; tab = (ckhc_t *)ipallocztm(tsdn, usize, CACHELINE, true, NULL, true, arena_ichoose(tsdn, NULL)); @@ -390,7 +390,7 @@ ckh_new(tsdn_t *tsdn, ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh->keycomp = keycomp; usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE); - if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) { + if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { ret = true; goto label_return; } diff --git a/src/ctl.c b/src/ctl.c index 34c7e1bd..85ca2e86 100644 --- a/src/ctl.c +++ b/src/ctl.c @@ -126,8 +126,8 @@ CTL_PROTO(arenas_bin_i_size) CTL_PROTO(arenas_bin_i_nregs) CTL_PROTO(arenas_bin_i_slab_size) INDEX_PROTO(arenas_bin_i) -CTL_PROTO(arenas_hchunk_i_size) -INDEX_PROTO(arenas_hchunk_i) +CTL_PROTO(arenas_lextent_i_size) +INDEX_PROTO(arenas_lextent_i) CTL_PROTO(arenas_narenas) CTL_PROTO(arenas_initialized) CTL_PROTO(arenas_lg_dirty_mult) @@ -137,7 +137,7 @@ CTL_PROTO(arenas_page) CTL_PROTO(arenas_tcache_max) CTL_PROTO(arenas_nbins) CTL_PROTO(arenas_nhbins) -CTL_PROTO(arenas_nhchunks) +CTL_PROTO(arenas_nlextents) CTL_PROTO(arenas_extend) CTL_PROTO(prof_thread_active_init) CTL_PROTO(prof_active) @@ -150,10 +150,10 @@ CTL_PROTO(stats_arenas_i_small_allocated) CTL_PROTO(stats_arenas_i_small_nmalloc) CTL_PROTO(stats_arenas_i_small_ndalloc) CTL_PROTO(stats_arenas_i_small_nrequests) -CTL_PROTO(stats_arenas_i_huge_allocated) 
-CTL_PROTO(stats_arenas_i_huge_nmalloc) -CTL_PROTO(stats_arenas_i_huge_ndalloc) -CTL_PROTO(stats_arenas_i_huge_nrequests) +CTL_PROTO(stats_arenas_i_large_allocated) +CTL_PROTO(stats_arenas_i_large_nmalloc) +CTL_PROTO(stats_arenas_i_large_ndalloc) +CTL_PROTO(stats_arenas_i_large_nrequests) CTL_PROTO(stats_arenas_i_bins_j_nmalloc) CTL_PROTO(stats_arenas_i_bins_j_ndalloc) CTL_PROTO(stats_arenas_i_bins_j_nrequests) @@ -164,11 +164,11 @@ CTL_PROTO(stats_arenas_i_bins_j_nslabs) CTL_PROTO(stats_arenas_i_bins_j_nreslabs) CTL_PROTO(stats_arenas_i_bins_j_curslabs) INDEX_PROTO(stats_arenas_i_bins_j) -CTL_PROTO(stats_arenas_i_hchunks_j_nmalloc) -CTL_PROTO(stats_arenas_i_hchunks_j_ndalloc) -CTL_PROTO(stats_arenas_i_hchunks_j_nrequests) -CTL_PROTO(stats_arenas_i_hchunks_j_curhchunks) -INDEX_PROTO(stats_arenas_i_hchunks_j) +CTL_PROTO(stats_arenas_i_lextents_j_nmalloc) +CTL_PROTO(stats_arenas_i_lextents_j_ndalloc) +CTL_PROTO(stats_arenas_i_lextents_j_nrequests) +CTL_PROTO(stats_arenas_i_lextents_j_curlextents) +INDEX_PROTO(stats_arenas_i_lextents_j) CTL_PROTO(stats_arenas_i_nthreads) CTL_PROTO(stats_arenas_i_dss) CTL_PROTO(stats_arenas_i_lg_dirty_mult) @@ -310,15 +310,15 @@ static const ctl_indexed_node_t arenas_bin_node[] = { {INDEX(arenas_bin_i)} }; -static const ctl_named_node_t arenas_hchunk_i_node[] = { - {NAME("size"), CTL(arenas_hchunk_i_size)} +static const ctl_named_node_t arenas_lextent_i_node[] = { + {NAME("size"), CTL(arenas_lextent_i_size)} }; -static const ctl_named_node_t super_arenas_hchunk_i_node[] = { - {NAME(""), CHILD(named, arenas_hchunk_i)} +static const ctl_named_node_t super_arenas_lextent_i_node[] = { + {NAME(""), CHILD(named, arenas_lextent_i)} }; -static const ctl_indexed_node_t arenas_hchunk_node[] = { - {INDEX(arenas_hchunk_i)} +static const ctl_indexed_node_t arenas_lextent_node[] = { + {INDEX(arenas_lextent_i)} }; static const ctl_named_node_t arenas_node[] = { @@ -332,8 +332,8 @@ static const ctl_named_node_t arenas_node[] = { {NAME("nbins"), CTL(arenas_nbins)}, {NAME("nhbins"), CTL(arenas_nhbins)}, {NAME("bin"), CHILD(indexed, arenas_bin)}, - {NAME("nhchunks"), CTL(arenas_nhchunks)}, - {NAME("hchunk"), CHILD(indexed, arenas_hchunk)}, + {NAME("nlextents"), CTL(arenas_nlextents)}, + {NAME("lextent"), CHILD(indexed, arenas_lextent)}, {NAME("extend"), CTL(arenas_extend)} }; @@ -359,11 +359,11 @@ static const ctl_named_node_t stats_arenas_i_small_node[] = { {NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)} }; -static const ctl_named_node_t stats_arenas_i_huge_node[] = { - {NAME("allocated"), CTL(stats_arenas_i_huge_allocated)}, - {NAME("nmalloc"), CTL(stats_arenas_i_huge_nmalloc)}, - {NAME("ndalloc"), CTL(stats_arenas_i_huge_ndalloc)}, - {NAME("nrequests"), CTL(stats_arenas_i_huge_nrequests)} +static const ctl_named_node_t stats_arenas_i_large_node[] = { + {NAME("allocated"), CTL(stats_arenas_i_large_allocated)}, + {NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)}, + {NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)}, + {NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)} }; static const ctl_named_node_t stats_arenas_i_bins_j_node[] = { @@ -385,18 +385,18 @@ static const ctl_indexed_node_t stats_arenas_i_bins_node[] = { {INDEX(stats_arenas_i_bins_j)} }; -static const ctl_named_node_t stats_arenas_i_hchunks_j_node[] = { - {NAME("nmalloc"), CTL(stats_arenas_i_hchunks_j_nmalloc)}, - {NAME("ndalloc"), CTL(stats_arenas_i_hchunks_j_ndalloc)}, - {NAME("nrequests"), CTL(stats_arenas_i_hchunks_j_nrequests)}, - {NAME("curhchunks"), 
CTL(stats_arenas_i_hchunks_j_curhchunks)} +static const ctl_named_node_t stats_arenas_i_lextents_j_node[] = { + {NAME("nmalloc"), CTL(stats_arenas_i_lextents_j_nmalloc)}, + {NAME("ndalloc"), CTL(stats_arenas_i_lextents_j_ndalloc)}, + {NAME("nrequests"), CTL(stats_arenas_i_lextents_j_nrequests)}, + {NAME("curlextents"), CTL(stats_arenas_i_lextents_j_curlextents)} }; -static const ctl_named_node_t super_stats_arenas_i_hchunks_j_node[] = { - {NAME(""), CHILD(named, stats_arenas_i_hchunks_j)} +static const ctl_named_node_t super_stats_arenas_i_lextents_j_node[] = { + {NAME(""), CHILD(named, stats_arenas_i_lextents_j)} }; -static const ctl_indexed_node_t stats_arenas_i_hchunks_node[] = { - {INDEX(stats_arenas_i_hchunks_j)} +static const ctl_indexed_node_t stats_arenas_i_lextents_node[] = { + {INDEX(stats_arenas_i_lextents_j)} }; static const ctl_named_node_t stats_arenas_i_node[] = { @@ -413,9 +413,9 @@ static const ctl_named_node_t stats_arenas_i_node[] = { {NAME("purged"), CTL(stats_arenas_i_purged)}, {NAME("metadata"), CHILD(named, stats_arenas_i_metadata)}, {NAME("small"), CHILD(named, stats_arenas_i_small)}, - {NAME("huge"), CHILD(named, stats_arenas_i_huge)}, + {NAME("large"), CHILD(named, stats_arenas_i_large)}, {NAME("bins"), CHILD(indexed, stats_arenas_i_bins)}, - {NAME("hchunks"), CHILD(indexed, stats_arenas_i_hchunks)} + {NAME("lextents"), CHILD(indexed, stats_arenas_i_lextents)} }; static const ctl_named_node_t super_stats_arenas_i_node[] = { {NAME(""), CHILD(named, stats_arenas_i)} @@ -476,8 +476,8 @@ ctl_arena_clear(ctl_arena_stats_t *astats) astats->ndalloc_small = 0; astats->nrequests_small = 0; memset(astats->bstats, 0, NBINS * sizeof(malloc_bin_stats_t)); - memset(astats->hstats, 0, (NSIZES - NBINS) * - sizeof(malloc_huge_stats_t)); + memset(astats->lstats, 0, (NSIZES - NBINS) * + sizeof(malloc_large_stats_t)); } } @@ -490,7 +490,7 @@ ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_stats_t *cstats, arena_t *arena) arena_stats_merge(tsdn, arena, &cstats->nthreads, &cstats->dss, &cstats->lg_dirty_mult, &cstats->decay_time, &cstats->pactive, &cstats->pdirty, &cstats->astats, - cstats->bstats, cstats->hstats); + cstats->bstats, cstats->lstats); for (i = 0; i < NBINS; i++) { cstats->allocated_small += cstats->bstats[i].curregs * @@ -532,10 +532,12 @@ ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats) sstats->ndalloc_small += astats->ndalloc_small; sstats->nrequests_small += astats->nrequests_small; - sstats->astats.allocated_huge += astats->astats.allocated_huge; - sstats->astats.nmalloc_huge += astats->astats.nmalloc_huge; - sstats->astats.ndalloc_huge += astats->astats.ndalloc_huge; - sstats->astats.nrequests_huge += astats->astats.nrequests_huge; + sstats->astats.allocated_large += + astats->astats.allocated_large; + sstats->astats.nmalloc_large += astats->astats.nmalloc_large; + sstats->astats.ndalloc_large += astats->astats.ndalloc_large; + sstats->astats.nrequests_large += + astats->astats.nrequests_large; for (i = 0; i < NBINS; i++) { sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc; @@ -556,12 +558,12 @@ ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats) } for (i = 0; i < NSIZES - NBINS; i++) { - sstats->hstats[i].nmalloc += astats->hstats[i].nmalloc; - sstats->hstats[i].ndalloc += astats->hstats[i].ndalloc; - sstats->hstats[i].nrequests += - astats->hstats[i].nrequests; - sstats->hstats[i].curhchunks += - astats->hstats[i].curhchunks; + sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc; + 
sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc; + sstats->lstats[i].nrequests += + astats->lstats[i].nrequests; + sstats->lstats[i].curlextents += + astats->lstats[i].curlextents; } } } @@ -643,7 +645,7 @@ ctl_refresh(tsdn_t *tsdn) &base_mapped); ctl_stats.allocated = ctl_stats.arenas[ctl_stats.narenas].allocated_small + - ctl_stats.arenas[ctl_stats.narenas].astats.allocated_huge; + ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large; ctl_stats.active = (ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE); ctl_stats.metadata = base_allocated + @@ -1812,15 +1814,15 @@ arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) return (super_arenas_bin_i_node); } -CTL_RO_NL_GEN(arenas_nhchunks, NSIZES - NBINS, unsigned) -CTL_RO_NL_GEN(arenas_hchunk_i_size, index2size(NBINS+(szind_t)mib[2]), size_t) +CTL_RO_NL_GEN(arenas_nlextents, NSIZES - NBINS, unsigned) +CTL_RO_NL_GEN(arenas_lextent_i_size, index2size(NBINS+(szind_t)mib[2]), size_t) static const ctl_named_node_t * -arenas_hchunk_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) +arenas_lextent_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { if (i > NSIZES - NBINS) return (NULL); - return (super_arenas_hchunk_i_node); + return (super_arenas_lextent_i_node); } static int @@ -2012,14 +2014,14 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc, ctl_stats.arenas[mib[2]].ndalloc_small, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests, ctl_stats.arenas[mib[2]].nrequests_small, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_huge_allocated, - ctl_stats.arenas[mib[2]].astats.allocated_huge, size_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nmalloc, - ctl_stats.arenas[mib[2]].astats.nmalloc_huge, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_huge_ndalloc, - ctl_stats.arenas[mib[2]].astats.ndalloc_huge, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nrequests, - ctl_stats.arenas[mib[2]].astats.nmalloc_huge, uint64_t) /* Intentional. */ +CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated, + ctl_stats.arenas[mib[2]].astats.allocated_large, size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc, + ctl_stats.arenas[mib[2]].astats.nmalloc_large, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc, + ctl_stats.arenas[mib[2]].astats.ndalloc_large, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests, + ctl_stats.arenas[mib[2]].astats.nmalloc_large, uint64_t) /* Intentional. 
*/ CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc, ctl_stats.arenas[mib[2]].bstats[mib[4]].nmalloc, uint64_t) @@ -2050,23 +2052,23 @@ stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, return (super_stats_arenas_i_bins_j_node); } -CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_nmalloc, - ctl_stats.arenas[mib[2]].hstats[mib[4]].nmalloc, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_ndalloc, - ctl_stats.arenas[mib[2]].hstats[mib[4]].ndalloc, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_nrequests, - ctl_stats.arenas[mib[2]].hstats[mib[4]].nrequests, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_curhchunks, - ctl_stats.arenas[mib[2]].hstats[mib[4]].curhchunks, size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nmalloc, + ctl_stats.arenas[mib[2]].lstats[mib[4]].nmalloc, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_ndalloc, + ctl_stats.arenas[mib[2]].lstats[mib[4]].ndalloc, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nrequests, + ctl_stats.arenas[mib[2]].lstats[mib[4]].nrequests, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_curlextents, + ctl_stats.arenas[mib[2]].lstats[mib[4]].curlextents, size_t) static const ctl_named_node_t * -stats_arenas_i_hchunks_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, +stats_arenas_i_lextents_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t j) { if (j > NSIZES - NBINS) return (NULL); - return (super_stats_arenas_i_hchunks_j_node); + return (super_stats_arenas_i_lextents_j_node); } static const ctl_named_node_t * diff --git a/src/extent.c b/src/extent.c index 757a6e21..2f929a83 100644 --- a/src/extent.c +++ b/src/extent.c @@ -40,7 +40,7 @@ extent_size_quantize_floor(size_t size) pszind_t pind; assert(size > 0); - assert(size - large_pad <= HUGE_MAXCLASS); + assert(size - large_pad <= LARGE_MAXCLASS); assert((size & PAGE_MASK) == 0); assert(size != 0); @@ -77,7 +77,7 @@ extent_size_quantize_ceil(size_t size) size_t ret; assert(size > 0); - assert(size - large_pad <= HUGE_MAXCLASS); + assert(size - large_pad <= LARGE_MAXCLASS); assert((size & PAGE_MASK) == 0); ret = extent_size_quantize_floor(size); diff --git a/src/jemalloc.c b/src/jemalloc.c index 429667f6..85a592e9 100644 --- a/src/jemalloc.c +++ b/src/jemalloc.c @@ -1457,7 +1457,7 @@ ialloc_body(size_t size, bool zero, tsdn_t **tsdn, size_t *usize, if (config_stats || (config_prof && opt_prof)) { *usize = index2size(ind); - assert(*usize > 0 && *usize <= HUGE_MAXCLASS); + assert(*usize > 0 && *usize <= LARGE_MAXCLASS); } if (config_prof && opt_prof) @@ -1589,7 +1589,7 @@ imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment) } usize = sa2u(size, alignment); - if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) { + if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { result = NULL; goto label_oom; } @@ -1663,7 +1663,7 @@ je_calloc(size_t num, size_t size) if (num == 0 || size == 0) num_size = 1; else - num_size = HUGE_MAXCLASS + 1; /* Trigger OOM. */ + num_size = LARGE_MAXCLASS + 1; /* Trigger OOM. */ /* * Try to avoid division here. We know that it isn't possible to * overflow during multiplication if neither operand uses any of the @@ -1671,7 +1671,7 @@ je_calloc(size_t num, size_t size) */ } else if (unlikely(((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2))) && (num_size / size != num))) - num_size = HUGE_MAXCLASS + 1; /* size_t overflow. */ + num_size = LARGE_MAXCLASS + 1; /* size_t overflow. 
*/ if (likely(!malloc_slow)) { ret = ialloc_body(num_size, true, &tsdn, &usize, false); @@ -1819,7 +1819,7 @@ je_realloc(void *ptr, size_t size) old_usize = isalloc(tsd_tsdn(tsd), extent, ptr); if (config_prof && opt_prof) { usize = s2u(size); - ret = unlikely(usize == 0 || usize > HUGE_MAXCLASS) ? + ret = unlikely(usize == 0 || usize > LARGE_MAXCLASS) ? NULL : irealloc_prof(tsd, extent, ptr, old_usize, usize); } else { @@ -1956,7 +1956,7 @@ imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize, *alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags); *usize = sa2u(size, *alignment); } - if (unlikely(*usize == 0 || *usize > HUGE_MAXCLASS)) + if (unlikely(*usize == 0 || *usize > LARGE_MAXCLASS)) return (true); *zero = MALLOCX_ZERO_GET(flags); if ((flags & MALLOCX_TCACHE_MASK) != 0) { @@ -2084,7 +2084,7 @@ imallocx_body(size_t size, int flags, tsdn_t **tsdn, size_t *usize, return (NULL); if (config_stats || (config_prof && opt_prof)) { *usize = index2size(ind); - assert(*usize > 0 && *usize <= HUGE_MAXCLASS); + assert(*usize > 0 && *usize <= LARGE_MAXCLASS); } if (config_prof && opt_prof) { @@ -2233,7 +2233,7 @@ je_rallocx(void *ptr, size_t size, int flags) if (config_prof && opt_prof) { usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment); - if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) + if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) goto label_oom; p = irallocx_prof(tsd, extent, ptr, old_usize, size, alignment, &usize, zero, tcache, arena); @@ -2314,17 +2314,17 @@ ixallocx_prof(tsd_t *tsd, extent_t *extent, void *ptr, size_t old_usize, */ if (alignment == 0) { usize_max = s2u(size+extra); - assert(usize_max > 0 && usize_max <= HUGE_MAXCLASS); + assert(usize_max > 0 && usize_max <= LARGE_MAXCLASS); } else { usize_max = sa2u(size+extra, alignment); - if (unlikely(usize_max == 0 || usize_max > HUGE_MAXCLASS)) { + if (unlikely(usize_max == 0 || usize_max > LARGE_MAXCLASS)) { /* * usize_max is out of range, and chances are that * allocation will fail, but use the maximum possible * value and carry on with prof_alloc_prep(), just in * case allocation succeeds. */ - usize_max = HUGE_MAXCLASS; + usize_max = LARGE_MAXCLASS; } } tctx = prof_alloc_prep(tsd, usize_max, prof_active, false); @@ -2368,18 +2368,18 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) /* * The API explicitly absolves itself of protecting against (size + * extra) numerical overflow, but we may need to clamp extra to avoid - * exceeding HUGE_MAXCLASS. + * exceeding LARGE_MAXCLASS. * * Ordinarily, size limit checking is handled deeper down, but here we * have to check as part of (size + extra) clamping, since we need the * clamped value in the above helper functions. 
*/ - if (unlikely(size > HUGE_MAXCLASS)) { + if (unlikely(size > LARGE_MAXCLASS)) { usize = old_usize; goto label_not_resized; } - if (unlikely(HUGE_MAXCLASS - size < extra)) - extra = HUGE_MAXCLASS - size; + if (unlikely(LARGE_MAXCLASS - size < extra)) + extra = LARGE_MAXCLASS - size; if (config_prof && opt_prof) { usize = ixallocx_prof(tsd, extent, ptr, old_usize, size, extra, @@ -2512,7 +2512,7 @@ je_nallocx(size_t size, int flags) witness_assert_lockless(tsdn); usize = inallocx(tsdn, size, flags); - if (unlikely(usize > HUGE_MAXCLASS)) + if (unlikely(usize > LARGE_MAXCLASS)) return (0); witness_assert_lockless(tsdn); diff --git a/src/huge.c b/src/large.c similarity index 66% rename from src/huge.c rename to src/large.c index 8aa3dfd2..43bfb284 100644 --- a/src/huge.c +++ b/src/large.c @@ -1,19 +1,19 @@ -#define JEMALLOC_HUGE_C_ +#define JEMALLOC_LARGE_C_ #include "jemalloc/internal/jemalloc_internal.h" /******************************************************************************/ void * -huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero) +large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero) { assert(usize == s2u(usize)); - return (huge_palloc(tsdn, arena, usize, CACHELINE, zero)); + return (large_palloc(tsdn, arena, usize, CACHELINE, zero)); } void * -huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, +large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool zero) { size_t ausize; @@ -24,7 +24,7 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, assert(!tsdn_null(tsdn) || arena != NULL); ausize = sa2u(usize, alignment); - if (unlikely(ausize == 0 || ausize > HUGE_MAXCLASS)) + if (unlikely(ausize == 0 || ausize > LARGE_MAXCLASS)) return (NULL); /* @@ -34,15 +34,15 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, is_zeroed = zero; if (likely(!tsdn_null(tsdn))) arena = arena_choose(tsdn_tsd(tsdn), arena); - if (unlikely(arena == NULL) || (extent = arena_chunk_alloc_huge(tsdn, + if (unlikely(arena == NULL) || (extent = arena_chunk_alloc_large(tsdn, arena, usize, alignment, &is_zeroed)) == NULL) return (NULL); - /* Insert extent into huge. */ - malloc_mutex_lock(tsdn, &arena->huge_mtx); + /* Insert extent into large. 
*/ + malloc_mutex_lock(tsdn, &arena->large_mtx); ql_elm_new(extent, ql_link); - ql_tail_insert(&arena->huge, extent, ql_link); - malloc_mutex_unlock(tsdn, &arena->huge_mtx); + ql_tail_insert(&arena->large, extent, ql_link); + malloc_mutex_unlock(tsdn, &arena->large_mtx); if (config_prof && arena_prof_accum(tsdn, arena, usize)) prof_idump(tsdn); @@ -61,23 +61,23 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, } #ifdef JEMALLOC_JET -#undef huge_dalloc_junk -#define huge_dalloc_junk JEMALLOC_N(n_huge_dalloc_junk) +#undef large_dalloc_junk +#define large_dalloc_junk JEMALLOC_N(n_large_dalloc_junk) #endif void -huge_dalloc_junk(void *ptr, size_t usize) +large_dalloc_junk(void *ptr, size_t usize) { memset(ptr, JEMALLOC_FREE_JUNK, usize); } #ifdef JEMALLOC_JET -#undef huge_dalloc_junk -#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk) -huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(n_huge_dalloc_junk); +#undef large_dalloc_junk +#define large_dalloc_junk JEMALLOC_N(large_dalloc_junk) +large_dalloc_junk_t *large_dalloc_junk = JEMALLOC_N(n_large_dalloc_junk); #endif static void -huge_dalloc_maybe_junk(tsdn_t *tsdn, void *ptr, size_t usize) +large_dalloc_maybe_junk(tsdn_t *tsdn, void *ptr, size_t usize) { if (config_fill && have_dss && unlikely(opt_junk_free)) { @@ -86,13 +86,13 @@ huge_dalloc_maybe_junk(tsdn_t *tsdn, void *ptr, size_t usize) * unmapped. */ if (!config_munmap || (have_dss && chunk_in_dss(tsdn, ptr))) - huge_dalloc_junk(ptr, usize); + large_dalloc_junk(ptr, usize); memset(ptr, JEMALLOC_FREE_JUNK, usize); } } static bool -huge_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) +large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) { arena_t *arena = extent_arena_get(extent); size_t oldusize = extent_usize_get(extent); @@ -109,20 +109,20 @@ huge_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) return (true); if (config_fill && unlikely(opt_junk_free)) { - huge_dalloc_maybe_junk(tsdn, extent_addr_get(trail), + large_dalloc_maybe_junk(tsdn, extent_addr_get(trail), extent_usize_get(trail)); } arena_chunk_cache_dalloc(tsdn, arena, &chunk_hooks, trail); } - arena_chunk_ralloc_huge_shrink(tsdn, arena, extent, oldusize); + arena_chunk_ralloc_large_shrink(tsdn, arena, extent, oldusize); return (false); } static bool -huge_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize, +large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize, bool zero) { arena_t *arena = extent_arena_get(extent); @@ -173,34 +173,35 @@ huge_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize, JEMALLOC_ALLOC_JUNK, usize - oldusize); } - arena_chunk_ralloc_huge_expand(tsdn, arena, extent, oldusize); + arena_chunk_ralloc_large_expand(tsdn, arena, extent, oldusize); return (false); } bool -huge_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min, +large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min, size_t usize_max, bool zero) { assert(s2u(extent_usize_get(extent)) == extent_usize_get(extent)); /* The following should have been caught by callers. */ - assert(usize_min > 0 && usize_max <= HUGE_MAXCLASS); - /* Both allocation sizes must be huge to avoid a move. */ + assert(usize_min > 0 && usize_max <= LARGE_MAXCLASS); + /* Both allocation sizes must be large to avoid a move. */ assert(extent_usize_get(extent) >= LARGE_MINCLASS && usize_max >= LARGE_MINCLASS); if (usize_max > extent_usize_get(extent)) { /* Attempt to expand the allocation in-place. 
*/ - if (!huge_ralloc_no_move_expand(tsdn, extent, usize_max, + if (!large_ralloc_no_move_expand(tsdn, extent, usize_max, zero)) { arena_decay_tick(tsdn, extent_arena_get(extent)); return (false); } /* Try again, this time with usize_min. */ if (usize_min < usize_max && usize_min > - extent_usize_get(extent) && huge_ralloc_no_move_expand(tsdn, - extent, usize_min, zero)) { + extent_usize_get(extent) && + large_ralloc_no_move_expand(tsdn, extent, usize_min, + zero)) { arena_decay_tick(tsdn, extent_arena_get(extent)); return (false); } @@ -218,7 +219,7 @@ huge_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min, /* Attempt to shrink the allocation in-place. */ if (extent_usize_get(extent) > usize_max) { - if (!huge_ralloc_no_move_shrink(tsdn, extent, usize_max)) { + if (!large_ralloc_no_move_shrink(tsdn, extent, usize_max)) { arena_decay_tick(tsdn, extent_arena_get(extent)); return (false); } @@ -227,30 +228,30 @@ huge_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min, } static void * -huge_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize, +large_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool zero) { if (alignment <= CACHELINE) - return (huge_malloc(tsdn, arena, usize, zero)); - return (huge_palloc(tsdn, arena, usize, alignment, zero)); + return (large_malloc(tsdn, arena, usize, zero)); + return (large_palloc(tsdn, arena, usize, alignment, zero)); } void * -huge_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize, +large_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize, size_t alignment, bool zero, tcache_t *tcache) { void *ret; size_t copysize; /* The following should have been caught by callers. */ - assert(usize > 0 && usize <= HUGE_MAXCLASS); - /* Both allocation sizes must be huge to avoid a move. */ + assert(usize > 0 && usize <= LARGE_MAXCLASS); + /* Both allocation sizes must be large to avoid a move. */ assert(extent_usize_get(extent) >= LARGE_MINCLASS && usize >= LARGE_MINCLASS); /* Try to avoid moving the allocation. */ - if (!huge_ralloc_no_move(tsdn, extent, usize, usize, zero)) + if (!large_ralloc_no_move(tsdn, extent, usize, usize, zero)) return (extent_addr_get(extent)); /* @@ -258,7 +259,7 @@ huge_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize, * different size class. In that case, fall back to allocating new * space and copying. 
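large_ralloc_no_move() above is a small decision ladder: try to expand in place to usize_max, fall back to expanding to usize_min, keep the extent if its current usize already lands in [usize_min, usize_max], and only then try to shrink in place; returning true tells the caller to allocate new space and copy. A control-flow sketch with hypothetical stubs for the in-place primitives, which (as in jemalloc) return true on failure:

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical stub: pretend in-place expansion always fails. */
static bool
try_expand(size_t *cur_usize, size_t target)
{

	(void)cur_usize;
	(void)target;
	return (true);
}

/* Hypothetical stub: in-place shrink always succeeds. */
static bool
try_shrink(size_t *cur_usize, size_t target)
{

	*cur_usize = target;
	return (false);
}

/* Returns true iff the caller must allocate-and-copy. */
static bool
ralloc_no_move_sketch(size_t *cur_usize, size_t usize_min, size_t usize_max)
{

	if (usize_max > *cur_usize) {
		/* Prefer the full requested expansion... */
		if (!try_expand(cur_usize, usize_max))
			return (false);
		/* ...then settle for the minimum acceptable size. */
		if (usize_min < usize_max && usize_min > *cur_usize &&
		    !try_expand(cur_usize, usize_min))
			return (false);
	}
	/* Already within [usize_min, usize_max]: nothing to do. */
	if (*cur_usize >= usize_min && *cur_usize <= usize_max)
		return (false);
	/* Shrink in place if the extent is too big. */
	if (*cur_usize > usize_max && !try_shrink(cur_usize, usize_max))
		return (false);
	return (true);
}

int
main(void)
{
	size_t usize = 65536;

	/* With expansion stubbed to fail, growing forces a move. */
	return (ralloc_no_move_sketch(&usize, 131072, 131072) ? 0 : 1);
}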
*/ - ret = huge_ralloc_move_helper(tsdn, arena, usize, alignment, zero); + ret = large_ralloc_move_helper(tsdn, arena, usize, alignment, zero); if (ret == NULL) return (NULL); @@ -271,82 +272,82 @@ huge_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize, } static void -huge_dalloc_impl(tsdn_t *tsdn, extent_t *extent, bool junked_locked) +large_dalloc_impl(tsdn_t *tsdn, extent_t *extent, bool junked_locked) { arena_t *arena; arena = extent_arena_get(extent); if (!junked_locked) - malloc_mutex_lock(tsdn, &arena->huge_mtx); - ql_remove(&arena->huge, extent, ql_link); + malloc_mutex_lock(tsdn, &arena->large_mtx); + ql_remove(&arena->large, extent, ql_link); if (!junked_locked) { - malloc_mutex_unlock(tsdn, &arena->huge_mtx); + malloc_mutex_unlock(tsdn, &arena->large_mtx); - huge_dalloc_maybe_junk(tsdn, extent_addr_get(extent), + large_dalloc_maybe_junk(tsdn, extent_addr_get(extent), extent_usize_get(extent)); } - arena_chunk_dalloc_huge(tsdn, arena, extent, junked_locked); + arena_chunk_dalloc_large(tsdn, arena, extent, junked_locked); if (!junked_locked) arena_decay_tick(tsdn, arena); } void -huge_dalloc_junked_locked(tsdn_t *tsdn, extent_t *extent) +large_dalloc_junked_locked(tsdn_t *tsdn, extent_t *extent) { - huge_dalloc_impl(tsdn, extent, true); + large_dalloc_impl(tsdn, extent, true); } void -huge_dalloc(tsdn_t *tsdn, extent_t *extent) +large_dalloc(tsdn_t *tsdn, extent_t *extent) { - huge_dalloc_impl(tsdn, extent, false); + large_dalloc_impl(tsdn, extent, false); } size_t -huge_salloc(tsdn_t *tsdn, const extent_t *extent) +large_salloc(tsdn_t *tsdn, const extent_t *extent) { size_t usize; arena_t *arena; arena = extent_arena_get(extent); - malloc_mutex_lock(tsdn, &arena->huge_mtx); + malloc_mutex_lock(tsdn, &arena->large_mtx); usize = extent_usize_get(extent); - malloc_mutex_unlock(tsdn, &arena->huge_mtx); + malloc_mutex_unlock(tsdn, &arena->large_mtx); return (usize); } prof_tctx_t * -huge_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent) +large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent) { prof_tctx_t *tctx; arena_t *arena; arena = extent_arena_get(extent); - malloc_mutex_lock(tsdn, &arena->huge_mtx); + malloc_mutex_lock(tsdn, &arena->large_mtx); tctx = extent_prof_tctx_get(extent); - malloc_mutex_unlock(tsdn, &arena->huge_mtx); + malloc_mutex_unlock(tsdn, &arena->large_mtx); return (tctx); } void -huge_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx) +large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx) { arena_t *arena; arena = extent_arena_get(extent); - malloc_mutex_lock(tsdn, &arena->huge_mtx); + malloc_mutex_lock(tsdn, &arena->large_mtx); extent_prof_tctx_set(extent, tctx); - malloc_mutex_unlock(tsdn, &arena->huge_mtx); + malloc_mutex_unlock(tsdn, &arena->large_mtx); } void -huge_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent) +large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent) { - huge_prof_tctx_set(tsdn, extent, (prof_tctx_t *)(uintptr_t)1U); + large_prof_tctx_set(tsdn, extent, (prof_tctx_t *)(uintptr_t)1U); } diff --git a/src/stats.c b/src/stats.c index 599e377d..493e409a 100644 --- a/src/stats.c +++ b/src/stats.c @@ -37,10 +37,10 @@ size_t stats_cactive = 0; static void stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque, unsigned i); -static void stats_arena_hchunks_print( +static void stats_arena_lextents_print( void (*write_cb)(void *, const char *), void *cbopaque, unsigned i); static void stats_arena_print(void (*write_cb)(void *, const char *), - void *cbopaque, unsigned i, bool 
bins, bool huge); + void *cbopaque, unsigned i, bool bins, bool large); /******************************************************************************/ @@ -157,34 +157,34 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque, } static void -stats_arena_hchunks_print(void (*write_cb)(void *, const char *), +stats_arena_lextents_print(void (*write_cb)(void *, const char *), void *cbopaque, unsigned i) { - unsigned nbins, nhchunks, j; + unsigned nbins, nlextents, j; bool in_gap; malloc_cprintf(write_cb, cbopaque, - "huge: size ind allocated nmalloc ndalloc" - " nrequests curhchunks\n"); + "large: size ind allocated nmalloc ndalloc" + " nrequests curlextents\n"); CTL_GET("arenas.nbins", &nbins, unsigned); - CTL_GET("arenas.nhchunks", &nhchunks, unsigned); - for (j = 0, in_gap = false; j < nhchunks; j++) { + CTL_GET("arenas.nlextents", &nlextents, unsigned); + for (j = 0, in_gap = false; j < nlextents; j++) { uint64_t nmalloc, ndalloc, nrequests; - size_t hchunk_size, curhchunks; + size_t lextent_size, curlextents; - CTL_M2_M4_GET("stats.arenas.0.hchunks.0.nmalloc", i, j, + CTL_M2_M4_GET("stats.arenas.0.lextents.0.nmalloc", i, j, &nmalloc, uint64_t); - CTL_M2_M4_GET("stats.arenas.0.hchunks.0.ndalloc", i, j, + CTL_M2_M4_GET("stats.arenas.0.lextents.0.ndalloc", i, j, &ndalloc, uint64_t); - CTL_M2_M4_GET("stats.arenas.0.hchunks.0.nrequests", i, j, + CTL_M2_M4_GET("stats.arenas.0.lextents.0.nrequests", i, j, &nrequests, uint64_t); if (nrequests == 0) in_gap = true; else { - CTL_M2_GET("arenas.hchunk.0.size", j, &hchunk_size, + CTL_M2_GET("arenas.lextent.0.size", j, &lextent_size, size_t); - CTL_M2_M4_GET("stats.arenas.0.hchunks.0.curhchunks", i, - j, &curhchunks, size_t); + CTL_M2_M4_GET("stats.arenas.0.lextents.0.curlextents", + i, j, &curlextents, size_t); if (in_gap) { malloc_cprintf(write_cb, cbopaque, " ---\n"); @@ -193,9 +193,9 @@ stats_arena_hchunks_print(void (*write_cb)(void *, const char *), malloc_cprintf(write_cb, cbopaque, "%20zu %3u %12zu %12"FMTu64" %12"FMTu64 " %12"FMTu64" %12zu\n", - hchunk_size, nbins + j, - curhchunks * hchunk_size, nmalloc, ndalloc, - nrequests, curhchunks); + lextent_size, nbins + j, + curlextents * lextent_size, nmalloc, ndalloc, + nrequests, curlextents); } } if (in_gap) { @@ -206,7 +206,7 @@ stats_arena_hchunks_print(void (*write_cb)(void *, const char *), static void stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque, - unsigned i, bool bins, bool huge) + unsigned i, bool bins, bool large) { unsigned nthreads; const char *dss; @@ -216,8 +216,8 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque, uint64_t npurge, nmadvise, purged; size_t small_allocated; uint64_t small_nmalloc, small_ndalloc, small_nrequests; - size_t huge_allocated; - uint64_t huge_nmalloc, huge_ndalloc, huge_nrequests; + size_t large_allocated; + uint64_t large_nmalloc, large_ndalloc, large_nrequests; CTL_GET("arenas.page", &page, size_t); @@ -268,20 +268,21 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque, "small: %12zu %12"FMTu64" %12"FMTu64 " %12"FMTu64"\n", small_allocated, small_nmalloc, small_ndalloc, small_nrequests); - CTL_M2_GET("stats.arenas.0.huge.allocated", i, &huge_allocated, size_t); - CTL_M2_GET("stats.arenas.0.huge.nmalloc", i, &huge_nmalloc, uint64_t); - CTL_M2_GET("stats.arenas.0.huge.ndalloc", i, &huge_ndalloc, uint64_t); - CTL_M2_GET("stats.arenas.0.huge.nrequests", i, &huge_nrequests, + CTL_M2_GET("stats.arenas.0.large.allocated", i, &large_allocated, + size_t); + 
CTL_M2_GET("stats.arenas.0.large.nmalloc", i, &large_nmalloc, uint64_t); + CTL_M2_GET("stats.arenas.0.large.ndalloc", i, &large_ndalloc, uint64_t); + CTL_M2_GET("stats.arenas.0.large.nrequests", i, &large_nrequests, uint64_t); malloc_cprintf(write_cb, cbopaque, - "huge: %12zu %12"FMTu64" %12"FMTu64 + "large: %12zu %12"FMTu64" %12"FMTu64 " %12"FMTu64"\n", - huge_allocated, huge_nmalloc, huge_ndalloc, huge_nrequests); + large_allocated, large_nmalloc, large_ndalloc, large_nrequests); malloc_cprintf(write_cb, cbopaque, "total: %12zu %12"FMTu64" %12"FMTu64 " %12"FMTu64"\n", - small_allocated + huge_allocated, small_nmalloc + huge_nmalloc, - small_ndalloc + huge_ndalloc, small_nrequests + huge_nrequests); + small_allocated + large_allocated, small_nmalloc + large_nmalloc, + small_ndalloc + large_ndalloc, small_nrequests + large_nrequests); malloc_cprintf(write_cb, cbopaque, "active: %12zu\n", pactive * page); CTL_M2_GET("stats.arenas.0.mapped", i, &mapped, size_t); @@ -300,8 +301,8 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque, if (bins) stats_arena_bins_print(write_cb, cbopaque, i); - if (huge) - stats_arena_hchunks_print(write_cb, cbopaque, i); + if (large) + stats_arena_lextents_print(write_cb, cbopaque, i); } void @@ -315,7 +316,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque, bool merged = true; bool unmerged = true; bool bins = true; - bool huge = true; + bool large = true; /* * Refresh stats, in case mallctl() was called by the application. @@ -356,7 +357,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque, bins = false; break; case 'l': - huge = false; + large = false; break; default:; } @@ -568,7 +569,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque, malloc_cprintf(write_cb, cbopaque, "\nMerged arenas stats:\n"); stats_arena_print(write_cb, cbopaque, - narenas, bins, huge); + narenas, bins, large); } } } @@ -594,7 +595,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque, cbopaque, "\narenas[%u]:\n", i); stats_arena_print(write_cb, - cbopaque, i, bins, huge); + cbopaque, i, bins, large); } } } diff --git a/src/tcache.c b/src/tcache.c index 02015227..69444fac 100644 --- a/src/tcache.c +++ b/src/tcache.c @@ -46,7 +46,7 @@ tcache_event_hard(tsd_t *tsd, tcache_t *tcache) tbin->ncached - tbin->low_water + (tbin->low_water >> 2)); } else { - tcache_bin_flush_huge(tsd, tbin, binind, tbin->ncached + tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached - tbin->low_water + (tbin->low_water >> 2), tcache); } /* @@ -164,7 +164,7 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin, } void -tcache_bin_flush_huge(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind, +tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind, unsigned rem, tcache_t *tcache) { arena_t *arena; @@ -194,9 +194,9 @@ tcache_bin_flush_huge(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind, } if (config_stats) { merged_stats = true; - arena->stats.nrequests_huge += + arena->stats.nrequests_large += tbin->tstats.nrequests; - arena->stats.hstats[binind - NBINS].nrequests += + arena->stats.lstats[binind - NBINS].nrequests += tbin->tstats.nrequests; tbin->tstats.nrequests = 0; } @@ -207,7 +207,7 @@ tcache_bin_flush_huge(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind, assert(ptr != NULL); extent = iealloc(tsd_tsdn(tsd), ptr); if (extent_arena_get(extent) == locked_arena) { - huge_dalloc_junked_locked(tsd_tsdn(tsd), + large_dalloc_junked_locked(tsd_tsdn(tsd), extent); } else { /* @@ -232,8 
+232,8 @@ tcache_bin_flush_huge(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind, * arena, so the stats didn't get merged. Manually do so now. */ malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock); - arena->stats.nrequests_huge += tbin->tstats.nrequests; - arena->stats.hstats[binind - NBINS].nrequests += + arena->stats.nrequests_large += tbin->tstats.nrequests; + arena->stats.lstats[binind - NBINS].nrequests += tbin->tstats.nrequests; tbin->tstats.nrequests = 0; malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock); @@ -371,12 +371,12 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache) for (; i < nhbins; i++) { tcache_bin_t *tbin = &tcache->tbins[i]; - tcache_bin_flush_huge(tsd, tbin, i, 0, tcache); + tcache_bin_flush_large(tsd, tbin, i, 0, tcache); if (config_stats && tbin->tstats.nrequests != 0) { malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock); - arena->stats.nrequests_huge += tbin->tstats.nrequests; - arena->stats.hstats[i - NBINS].nrequests += + arena->stats.nrequests_large += tbin->tstats.nrequests; + arena->stats.lstats[i - NBINS].nrequests += tbin->tstats.nrequests; malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock); } @@ -431,10 +431,10 @@ tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) } for (; i < nhbins; i++) { - malloc_huge_stats_t *hstats = &arena->stats.hstats[i - NBINS]; + malloc_large_stats_t *lstats = &arena->stats.lstats[i - NBINS]; tcache_bin_t *tbin = &tcache->tbins[i]; - arena->stats.nrequests_huge += tbin->tstats.nrequests; - hstats->nrequests += tbin->tstats.nrequests; + arena->stats.nrequests_large += tbin->tstats.nrequests; + lstats->nrequests += tbin->tstats.nrequests; tbin->tstats.nrequests = 0; } } @@ -537,7 +537,7 @@ tcache_boot(tsdn_t *tsdn) stack_nelms += tcache_bin_info[i].ncached_max; } for (; i < nhbins; i++) { - tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_HUGE; + tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE; stack_nelms += tcache_bin_info[i].ncached_max; } diff --git a/test/integration/chunk.c b/test/integration/chunk.c index 3aad7a8a..ca87e80f 100644 --- a/test/integration/chunk.c +++ b/test/integration/chunk.c @@ -120,7 +120,7 @@ chunk_merge(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b, TEST_BEGIN(test_chunk) { void *p; - size_t old_size, new_size, huge0, huge1, huge2, sz; + size_t old_size, new_size, large0, large1, large2, sz; unsigned arena_ind; int flags; size_t hooks_mib[3], purge_mib[3]; @@ -162,14 +162,14 @@ TEST_BEGIN(test_chunk) assert_ptr_ne(old_hooks.split, chunk_split, "Unexpected split error"); assert_ptr_ne(old_hooks.merge, chunk_merge, "Unexpected merge error"); - /* Get huge size classes. */ + /* Get large size classes. */ sz = sizeof(size_t); - assert_d_eq(mallctl("arenas.hchunk.0.size", &huge0, &sz, NULL, 0), 0, - "Unexpected arenas.hchunk.0.size failure"); - assert_d_eq(mallctl("arenas.hchunk.1.size", &huge1, &sz, NULL, 0), 0, - "Unexpected arenas.hchunk.1.size failure"); - assert_d_eq(mallctl("arenas.hchunk.2.size", &huge2, &sz, NULL, 0), 0, - "Unexpected arenas.hchunk.2.size failure"); + assert_d_eq(mallctl("arenas.lextent.0.size", &large0, &sz, NULL, 0), 0, + "Unexpected arenas.lextent.0.size failure"); + assert_d_eq(mallctl("arenas.lextent.1.size", &large1, &sz, NULL, 0), 0, + "Unexpected arenas.lextent.1.size failure"); + assert_d_eq(mallctl("arenas.lextent.2.size", &large2, &sz, NULL, 0), 0, + "Unexpected arenas.lextent.2.size failure"); /* Test dalloc/decommit/purge cascade. 
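A detail worth noting in the tcache hunks above: small and large size classes share a single bin index space, so a large class with bin index binind keeps its per-class stats at lstats[binind - NBINS]. A sketch of that mapping with hypothetical class counts:

#include <assert.h>

#define NBINS_SKETCH	39	/* hypothetical small-class count */
#define NSIZES_SKETCH	235	/* hypothetical total class count */

static unsigned
lstats_index(unsigned binind)
{

	assert(binind >= NBINS_SKETCH && binind < NSIZES_SKETCH);
	return (binind - NBINS_SKETCH);
}

int
main(void)
{

	/* The first large class sits right after the last small bin. */
	return (lstats_index(NBINS_SKETCH) == 0 ? 0 : 1);
}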
*/ purge_miblen = sizeof(purge_mib)/sizeof(size_t); @@ -178,13 +178,13 @@ TEST_BEGIN(test_chunk) purge_mib[1] = (size_t)arena_ind; do_dalloc = false; do_decommit = false; - p = mallocx(huge0 * 2, flags); + p = mallocx(large0 * 2, flags); assert_ptr_not_null(p, "Unexpected mallocx() error"); did_dalloc = false; did_decommit = false; did_purge = false; did_split = false; - xallocx_success_a = (xallocx(p, huge0, 0, flags) == huge0); + xallocx_success_a = (xallocx(p, large0, 0, flags) == large0); assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0), 0, "Unexpected arena.%u.purge error", arena_ind); if (xallocx_success_a) { @@ -199,18 +199,18 @@ TEST_BEGIN(test_chunk) /* Test decommit/commit and observe split/merge. */ do_dalloc = false; do_decommit = true; - p = mallocx(huge0 * 2, flags); + p = mallocx(large0 * 2, flags); assert_ptr_not_null(p, "Unexpected mallocx() error"); did_decommit = false; did_commit = false; did_split = false; did_merge = false; - xallocx_success_b = (xallocx(p, huge0, 0, flags) == huge0); + xallocx_success_b = (xallocx(p, large0, 0, flags) == large0); assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0), 0, "Unexpected arena.%u.purge error", arena_ind); if (xallocx_success_b) assert_true(did_split, "Expected split"); - xallocx_success_c = (xallocx(p, huge0 * 2, 0, flags) == huge0 * 2); + xallocx_success_c = (xallocx(p, large0 * 2, 0, flags) == large0 * 2); assert_b_eq(did_decommit, did_commit, "Expected decommit/commit match"); if (xallocx_success_b && xallocx_success_c) assert_true(did_merge, "Expected merge"); @@ -218,7 +218,7 @@ TEST_BEGIN(test_chunk) do_dalloc = true; do_decommit = false; - /* Make sure non-huge allocation succeeds. */ + /* Make sure non-large allocation succeeds. */ p = mallocx(42, flags); assert_ptr_not_null(p, "Unexpected mallocx() error"); dallocx(p, flags); diff --git a/test/integration/mallocx.c b/test/integration/mallocx.c index 55e1a090..9d623eb7 100644 --- a/test/integration/mallocx.c +++ b/test/integration/mallocx.c @@ -18,10 +18,10 @@ get_nsizes_impl(const char *cmd) } static unsigned -get_nhuge(void) +get_nlarge(void) { - return (get_nsizes_impl("arenas.nhchunks")); + return (get_nsizes_impl("arenas.nlextents")); } static size_t @@ -44,20 +44,20 @@ get_size_impl(const char *cmd, size_t ind) } static size_t -get_huge_size(size_t ind) +get_large_size(size_t ind) { - return (get_size_impl("arenas.hchunk.0.size", ind)); + return (get_size_impl("arenas.lextent.0.size", ind)); } TEST_BEGIN(test_overflow) { - size_t hugemax; + size_t largemax; - hugemax = get_huge_size(get_nhuge()-1); + largemax = get_large_size(get_nlarge()-1); - assert_ptr_null(mallocx(hugemax+1, 0), - "Expected OOM for mallocx(size=%#zx, 0)", hugemax+1); + assert_ptr_null(mallocx(largemax+1, 0), + "Expected OOM for mallocx(size=%#zx, 0)", largemax+1); assert_ptr_null(mallocx(ZU(PTRDIFF_MAX)+1, 0), "Expected OOM for mallocx(size=%#zx, 0)", ZU(PTRDIFF_MAX)+1); @@ -73,7 +73,7 @@ TEST_END TEST_BEGIN(test_oom) { - size_t hugemax; + size_t largemax; bool oom; void *ptrs[3]; unsigned i; @@ -82,16 +82,16 @@ TEST_BEGIN(test_oom) * It should be impossible to allocate three objects that each consume * nearly half the virtual address space. 
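The test helpers above show the renamed mallctl namespace in use: arenas.nlextents reports the number of large size classes and arenas.lextent.<i>.size the size of each. A standalone sketch that queries the largest class the same way the tests do, assuming an unprefixed jemalloc build:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	unsigned nlextents;
	size_t sz, miblen, largemax, mib[4];

	sz = sizeof(unsigned);
	if (mallctl("arenas.nlextents", &nlextents, &sz, NULL, 0) != 0)
		return (1);

	miblen = sizeof(mib) / sizeof(size_t);
	if (mallctlnametomib("arenas.lextent.0.size", mib, &miblen) != 0)
		return (1);
	mib[2] = nlextents - 1;	/* Index of the largest class. */

	sz = sizeof(size_t);
	if (mallctlbymib(mib, miblen, &largemax, &sz, NULL, 0) != 0)
		return (1);
	printf("%u large classes, max %zu bytes\n", nlextents, largemax);
	return (0);
}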
*/ - hugemax = get_huge_size(get_nhuge()-1); + largemax = get_large_size(get_nlarge()-1); oom = false; for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) { - ptrs[i] = mallocx(hugemax, 0); + ptrs[i] = mallocx(largemax, 0); if (ptrs[i] == NULL) oom = true; } assert_true(oom, "Expected OOM during series of calls to mallocx(size=%zu, 0)", - hugemax); + largemax); for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) { if (ptrs[i] != NULL) dallocx(ptrs[i], 0); diff --git a/test/integration/overflow.c b/test/integration/overflow.c index 303d9b2d..8dea1c95 100644 --- a/test/integration/overflow.c +++ b/test/integration/overflow.c @@ -2,19 +2,19 @@ TEST_BEGIN(test_overflow) { - unsigned nhchunks; + unsigned nlextents; size_t mib[4]; size_t sz, miblen, max_size_class; void *p; sz = sizeof(unsigned); - assert_d_eq(mallctl("arenas.nhchunks", &nhchunks, &sz, NULL, 0), 0, + assert_d_eq(mallctl("arenas.nlextents", &nlextents, &sz, NULL, 0), 0, "Unexpected mallctl() error"); miblen = sizeof(mib) / sizeof(size_t); - assert_d_eq(mallctlnametomib("arenas.hchunk.0.size", mib, &miblen), 0, + assert_d_eq(mallctlnametomib("arenas.lextent.0.size", mib, &miblen), 0, "Unexpected mallctlnametomib() error"); - mib[2] = nhchunks - 1; + mib[2] = nlextents - 1; sz = sizeof(size_t); assert_d_eq(mallctlbymib(mib, miblen, &max_size_class, &sz, NULL, 0), 0, diff --git a/test/integration/rallocx.c b/test/integration/rallocx.c index 66ad8660..6278a490 100644 --- a/test/integration/rallocx.c +++ b/test/integration/rallocx.c @@ -14,10 +14,10 @@ get_nsizes_impl(const char *cmd) } static unsigned -get_nhuge(void) +get_nlarge(void) { - return (get_nsizes_impl("arenas.nhchunks")); + return (get_nsizes_impl("arenas.nlextents")); } static size_t @@ -40,10 +40,10 @@ get_size_impl(const char *cmd, size_t ind) } static size_t -get_huge_size(size_t ind) +get_large_size(size_t ind) { - return (get_size_impl("arenas.hchunk.0.size", ind)); + return (get_size_impl("arenas.lextent.0.size", ind)); } TEST_BEGIN(test_grow_and_shrink) @@ -221,16 +221,16 @@ TEST_END TEST_BEGIN(test_overflow) { - size_t hugemax; + size_t largemax; void *p; - hugemax = get_huge_size(get_nhuge()-1); + largemax = get_large_size(get_nlarge()-1); p = mallocx(1, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); - assert_ptr_null(rallocx(p, hugemax+1, 0), - "Expected OOM for rallocx(p, size=%#zx, 0)", hugemax+1); + assert_ptr_null(rallocx(p, largemax+1, 0), + "Expected OOM for rallocx(p, size=%#zx, 0)", largemax+1); assert_ptr_null(rallocx(p, ZU(PTRDIFF_MAX)+1, 0), "Expected OOM for rallocx(p, size=%#zx, 0)", ZU(PTRDIFF_MAX)+1); diff --git a/test/integration/xallocx.c b/test/integration/xallocx.c index 7af1b194..4ff099f8 100644 --- a/test/integration/xallocx.c +++ b/test/integration/xallocx.c @@ -92,10 +92,10 @@ get_nsmall(void) } static unsigned -get_nhuge(void) +get_nlarge(void) { - return (get_nsizes_impl("arenas.nhchunks")); + return (get_nsizes_impl("arenas.nlextents")); } static size_t @@ -125,20 +125,20 @@ get_small_size(size_t ind) } static size_t -get_huge_size(size_t ind) +get_large_size(size_t ind) { - return (get_size_impl("arenas.hchunk.0.size", ind)); + return (get_size_impl("arenas.lextent.0.size", ind)); } TEST_BEGIN(test_size) { - size_t small0, hugemax; + size_t small0, largemax; void *p; /* Get size classes. 
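The rallocx overflow test above pins down the public contract after the rename: a resize request past the largest large class, or past PTRDIFF_MAX, returns NULL and leaves the original allocation intact. A usage sketch leaning on that guarantee:

#include <stddef.h>
#include <jemalloc/jemalloc.h>

static void *
grow_or_keep(void *p, size_t new_size)
{
	void *q = rallocx(p, new_size, 0);

	/* On overflow or OOM, rallocx() returns NULL and p stays valid. */
	return (q != NULL ? q : p);
}

int
main(void)
{
	void *p = mallocx(1, 0);

	p = grow_or_keep(p, 4096);
	dallocx(p, 0);
	return (0);
}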
*/ small0 = get_small_size(0); - hugemax = get_huge_size(get_nhuge()-1); + largemax = get_large_size(get_nlarge()-1); p = mallocx(small0, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); @@ -148,13 +148,13 @@ TEST_BEGIN(test_size) "Unexpected xallocx() behavior"); /* Test largest supported size. */ - assert_zu_le(xallocx(p, hugemax, 0, 0), hugemax, + assert_zu_le(xallocx(p, largemax, 0, 0), largemax, "Unexpected xallocx() behavior"); /* Test size overflow. */ - assert_zu_le(xallocx(p, hugemax+1, 0, 0), hugemax, + assert_zu_le(xallocx(p, largemax+1, 0, 0), largemax, "Unexpected xallocx() behavior"); - assert_zu_le(xallocx(p, SIZE_T_MAX, 0, 0), hugemax, + assert_zu_le(xallocx(p, SIZE_T_MAX, 0, 0), largemax, "Unexpected xallocx() behavior"); dallocx(p, 0); @@ -163,30 +163,30 @@ TEST_END TEST_BEGIN(test_size_extra_overflow) { - size_t small0, hugemax; + size_t small0, largemax; void *p; /* Get size classes. */ small0 = get_small_size(0); - hugemax = get_huge_size(get_nhuge()-1); + largemax = get_large_size(get_nlarge()-1); p = mallocx(small0, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); /* Test overflows that can be resolved by clamping extra. */ - assert_zu_le(xallocx(p, hugemax-1, 2, 0), hugemax, + assert_zu_le(xallocx(p, largemax-1, 2, 0), largemax, "Unexpected xallocx() behavior"); - assert_zu_le(xallocx(p, hugemax, 1, 0), hugemax, + assert_zu_le(xallocx(p, largemax, 1, 0), largemax, "Unexpected xallocx() behavior"); - /* Test overflow such that hugemax-size underflows. */ - assert_zu_le(xallocx(p, hugemax+1, 2, 0), hugemax, + /* Test overflow such that largemax-size underflows. */ + assert_zu_le(xallocx(p, largemax+1, 2, 0), largemax, "Unexpected xallocx() behavior"); - assert_zu_le(xallocx(p, hugemax+2, 3, 0), hugemax, + assert_zu_le(xallocx(p, largemax+2, 3, 0), largemax, "Unexpected xallocx() behavior"); - assert_zu_le(xallocx(p, SIZE_T_MAX-2, 2, 0), hugemax, + assert_zu_le(xallocx(p, SIZE_T_MAX-2, 2, 0), largemax, "Unexpected xallocx() behavior"); - assert_zu_le(xallocx(p, SIZE_T_MAX-1, 1, 0), hugemax, + assert_zu_le(xallocx(p, SIZE_T_MAX-1, 1, 0), largemax, "Unexpected xallocx() behavior"); dallocx(p, 0); @@ -195,13 +195,13 @@ TEST_END TEST_BEGIN(test_extra_small) { - size_t small0, small1, hugemax; + size_t small0, small1, largemax; void *p; /* Get size classes. */ small0 = get_small_size(0); small1 = get_small_size(1); - hugemax = get_huge_size(get_nhuge()-1); + largemax = get_large_size(get_nlarge()-1); p = mallocx(small0, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); @@ -216,7 +216,7 @@ TEST_BEGIN(test_extra_small) "Unexpected xallocx() behavior"); /* Test size+extra overflow. */ - assert_zu_eq(xallocx(p, small0, hugemax - small0 + 1, 0), small0, + assert_zu_eq(xallocx(p, small0, largemax - small0 + 1, 0), small0, "Unexpected xallocx() behavior"); assert_zu_eq(xallocx(p, small0, SIZE_T_MAX - small0, 0), small0, "Unexpected xallocx() behavior"); @@ -225,66 +225,66 @@ TEST_BEGIN(test_extra_small) } TEST_END -TEST_BEGIN(test_extra_huge) +TEST_BEGIN(test_extra_large) { int flags = MALLOCX_ARENA(arena_ind()); - size_t smallmax, huge1, huge2, huge3, hugemax; + size_t smallmax, large1, large2, large3, largemax; void *p; /* Get size classes. 
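test_size and test_size_extra_overflow above rely on xallocx()'s contract: it resizes only in place and returns the resulting usable size, so the return value compares directly against the request and can never exceed largemax no matter how big the request is. A sketch:

#include <stdbool.h>
#include <stddef.h>
#include <jemalloc/jemalloc.h>

static bool
grew_in_place(void *p, size_t want)
{

	/* xallocx() never moves p; it reports the resulting usize. */
	return (xallocx(p, want, 0, 0) >= want);
}

int
main(void)
{
	void *p = mallocx(4096, 0);
	bool grown = grew_in_place(p, 8192);	/* May legitimately fail. */

	(void)grown;
	dallocx(p, 0);
	return (0);
}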
*/ smallmax = get_small_size(get_nsmall()-1); - huge1 = get_huge_size(1); - huge2 = get_huge_size(2); - huge3 = get_huge_size(3); - hugemax = get_huge_size(get_nhuge()-1); + large1 = get_large_size(1); + large2 = get_large_size(2); + large3 = get_large_size(3); + largemax = get_large_size(get_nlarge()-1); - p = mallocx(huge3, flags); + p = mallocx(large3, flags); assert_ptr_not_null(p, "Unexpected mallocx() error"); - assert_zu_eq(xallocx(p, huge3, 0, flags), huge3, + assert_zu_eq(xallocx(p, large3, 0, flags), large3, "Unexpected xallocx() behavior"); /* Test size decrease with zero extra. */ - assert_zu_ge(xallocx(p, huge1, 0, flags), huge1, + assert_zu_ge(xallocx(p, large1, 0, flags), large1, "Unexpected xallocx() behavior"); - assert_zu_ge(xallocx(p, smallmax, 0, flags), huge1, + assert_zu_ge(xallocx(p, smallmax, 0, flags), large1, "Unexpected xallocx() behavior"); - assert_zu_eq(xallocx(p, huge3, 0, flags), huge3, + assert_zu_eq(xallocx(p, large3, 0, flags), large3, "Unexpected xallocx() behavior"); /* Test size decrease with non-zero extra. */ - assert_zu_eq(xallocx(p, huge1, huge3 - huge1, flags), huge3, + assert_zu_eq(xallocx(p, large1, large3 - large1, flags), large3, "Unexpected xallocx() behavior"); - assert_zu_eq(xallocx(p, huge2, huge3 - huge2, flags), huge3, + assert_zu_eq(xallocx(p, large2, large3 - large2, flags), large3, "Unexpected xallocx() behavior"); - assert_zu_eq(xallocx(p, huge1, huge2 - huge1, flags), huge2, + assert_zu_eq(xallocx(p, large1, large2 - large1, flags), large2, "Unexpected xallocx() behavior"); - assert_zu_ge(xallocx(p, smallmax, huge1 - smallmax, flags), huge1, + assert_zu_ge(xallocx(p, smallmax, large1 - smallmax, flags), large1, "Unexpected xallocx() behavior"); - assert_zu_ge(xallocx(p, huge1, 0, flags), huge1, + assert_zu_ge(xallocx(p, large1, 0, flags), large1, "Unexpected xallocx() behavior"); /* Test size increase with zero extra. */ - assert_zu_le(xallocx(p, huge3, 0, flags), huge3, + assert_zu_le(xallocx(p, large3, 0, flags), large3, "Unexpected xallocx() behavior"); - assert_zu_le(xallocx(p, hugemax+1, 0, flags), huge3, + assert_zu_le(xallocx(p, largemax+1, 0, flags), large3, "Unexpected xallocx() behavior"); - assert_zu_ge(xallocx(p, huge1, 0, flags), huge1, + assert_zu_ge(xallocx(p, large1, 0, flags), large1, "Unexpected xallocx() behavior"); /* Test size increase with non-zero extra. */ - assert_zu_le(xallocx(p, huge1, SIZE_T_MAX - huge1, flags), hugemax, + assert_zu_le(xallocx(p, large1, SIZE_T_MAX - large1, flags), largemax, "Unexpected xallocx() behavior"); - assert_zu_ge(xallocx(p, huge1, 0, flags), huge1, + assert_zu_ge(xallocx(p, large1, 0, flags), large1, "Unexpected xallocx() behavior"); /* Test size increase with non-zero extra. */ - assert_zu_le(xallocx(p, huge1, huge3 - huge1, flags), huge3, + assert_zu_le(xallocx(p, large1, large3 - large1, flags), large3, "Unexpected xallocx() behavior"); - assert_zu_eq(xallocx(p, huge3, 0, flags), huge3, + assert_zu_eq(xallocx(p, large3, 0, flags), large3, "Unexpected xallocx() behavior"); /* Test size+extra overflow. */ - assert_zu_le(xallocx(p, huge3, hugemax - huge3 + 1, flags), hugemax, + assert_zu_le(xallocx(p, large3, largemax - large3 + 1, flags), largemax, "Unexpected xallocx() behavior"); dallocx(p, flags); @@ -374,15 +374,15 @@ test_zero(size_t szmin, size_t szmax) dallocx(p, flags); } -TEST_BEGIN(test_zero_huge) +TEST_BEGIN(test_zero_large) { - size_t huge0, huge1; + size_t large0, large1; /* Get size classes. 
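test_extra_large above also encodes an invariant of the small/large split: an allocation that is large stays large when resized in place, so a shrink request below the large range cannot take effect without moving. A sketch of that floor, with a hypothetical LARGE_MINCLASS_SKETCH value:

#include <stddef.h>

#define LARGE_MINCLASS_SKETCH	((size_t)16384)	/* hypothetical */

static size_t
inplace_shrink_floor(size_t requested_usize)
{

	/* An in-place shrink of a large extent stays in the large range. */
	return (requested_usize < LARGE_MINCLASS_SKETCH ?
	    LARGE_MINCLASS_SKETCH : requested_usize);
}

int
main(void)
{

	return (inplace_shrink_floor(8) == LARGE_MINCLASS_SKETCH ? 0 : 1);
}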
*/ - huge0 = get_huge_size(0); - huge1 = get_huge_size(1); + large0 = get_large_size(0); + large1 = get_large_size(1); - test_zero(huge1, huge0 * 2); + test_zero(large1, large0 * 2); } TEST_END @@ -397,6 +397,6 @@ main(void) test_size, test_size_extra_overflow, test_extra_small, - test_extra_huge, - test_zero_huge)); + test_extra_large, + test_zero_large)); } diff --git a/test/unit/arena_reset.c b/test/unit/arena_reset.c index 546d3cc8..a9476b89 100644 --- a/test/unit/arena_reset.c +++ b/test/unit/arena_reset.c @@ -25,10 +25,10 @@ get_nsmall(void) } static unsigned -get_nhuge(void) +get_nlarge(void) { - return (get_nsizes_impl("arenas.nhchunks")); + return (get_nsizes_impl("arenas.nlextents")); } static size_t @@ -58,10 +58,10 @@ get_small_size(size_t ind) } static size_t -get_huge_size(size_t ind) +get_large_size(size_t ind) { - return (get_size_impl("arenas.hchunk.0.size", ind)); + return (get_size_impl("arenas.lextent.0.size", ind)); } /* Like ivsalloc(), but safe to call on discarded allocations. */ @@ -81,8 +81,8 @@ vsalloc(tsdn_t *tsdn, const void *ptr) TEST_BEGIN(test_arena_reset) { -#define NHUGE 32 - unsigned arena_ind, nsmall, nhuge, nptrs, i; +#define NLARGE 32 + unsigned arena_ind, nsmall, nlarge, nptrs, i; size_t sz, miblen; void **ptrs; int flags; @@ -96,8 +96,8 @@ TEST_BEGIN(test_arena_reset) flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE; nsmall = get_nsmall(); - nhuge = get_nhuge() > NHUGE ? NHUGE : get_nhuge(); - nptrs = nsmall + nhuge; + nlarge = get_nlarge() > NLARGE ? NLARGE : get_nlarge(); + nptrs = nsmall + nlarge; ptrs = (void **)malloc(nptrs * sizeof(void *)); assert_ptr_not_null(ptrs, "Unexpected malloc() failure"); @@ -108,8 +108,8 @@ TEST_BEGIN(test_arena_reset) assert_ptr_not_null(ptrs[i], "Unexpected mallocx(%zu, %#x) failure", sz, flags); } - for (i = 0; i < nhuge; i++) { - sz = get_huge_size(i); + for (i = 0; i < nlarge; i++) { + sz = get_large_size(i); ptrs[nsmall + i] = mallocx(sz, flags); assert_ptr_not_null(ptrs[i], "Unexpected mallocx(%zu, %#x) failure", sz, flags); diff --git a/test/unit/decay.c b/test/unit/decay.c index 786cc934..592935d3 100644 --- a/test/unit/decay.c +++ b/test/unit/decay.c @@ -22,7 +22,7 @@ TEST_BEGIN(test_decay_ticks) { ticker_t *decay_ticker; unsigned tick0, tick1; - size_t sz, huge0; + size_t sz, large0; void *p; test_skip_if(opt_purge != purge_mode_decay); @@ -32,18 +32,18 @@ TEST_BEGIN(test_decay_ticks) "Unexpected failure getting decay ticker"); sz = sizeof(size_t); - assert_d_eq(mallctl("arenas.hchunk.0.size", &huge0, &sz, NULL, 0), 0, + assert_d_eq(mallctl("arenas.lextent.0.size", &large0, &sz, NULL, 0), 0, "Unexpected mallctl failure"); /* - * Test the standard APIs using a huge size class, since we can't + * Test the standard APIs using a large size class, since we can't * control tcache interactions for small size classes (except by * completely disabling tcache for the entire test program). */ /* malloc(). */ tick0 = ticker_read(decay_ticker); - p = malloc(huge0); + p = malloc(large0); assert_ptr_not_null(p, "Unexpected malloc() failure"); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during malloc()"); @@ -55,7 +55,7 @@ TEST_BEGIN(test_decay_ticks) /* calloc(). 
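The decay.c tests around this point assert that every allocator entry point advances a per-arena decay ticker (the arena_decay_tick() calls in the large.c hunks above). A sketch of the ticker idea, a countdown that fires every nticks events; this is an illustration, not jemalloc's ticker_t API:

#include <stdbool.h>
#include <stdint.h>

typedef struct {
	int32_t	tick;
	int32_t	nticks;
} ticker_sketch_t;

static bool
ticker_sketch_tick(ticker_sketch_t *t)
{

	if (--t->tick == 0) {
		t->tick = t->nticks;	/* Rearm; the caller then purges. */
		return (true);
	}
	return (false);
}

int
main(void)
{
	ticker_sketch_t t = {3, 3};
	int fires = 0, i;

	for (i = 0; i < 9; i++) {
		if (ticker_sketch_tick(&t))
			fires++;	/* Fires on every third event. */
	}
	return (fires == 3 ? 0 : 1);
}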
*/ tick0 = ticker_read(decay_ticker); - p = calloc(1, huge0); + p = calloc(1, large0); assert_ptr_not_null(p, "Unexpected calloc() failure"); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during calloc()"); @@ -63,7 +63,7 @@ TEST_BEGIN(test_decay_ticks) /* posix_memalign(). */ tick0 = ticker_read(decay_ticker); - assert_d_eq(posix_memalign(&p, sizeof(size_t), huge0), 0, + assert_d_eq(posix_memalign(&p, sizeof(size_t), large0), 0, "Unexpected posix_memalign() failure"); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, @@ -72,7 +72,7 @@ TEST_BEGIN(test_decay_ticks) /* aligned_alloc(). */ tick0 = ticker_read(decay_ticker); - p = aligned_alloc(sizeof(size_t), huge0); + p = aligned_alloc(sizeof(size_t), large0); assert_ptr_not_null(p, "Unexpected aligned_alloc() failure"); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, @@ -82,13 +82,13 @@ TEST_BEGIN(test_decay_ticks) /* realloc(). */ /* Allocate. */ tick0 = ticker_read(decay_ticker); - p = realloc(NULL, huge0); + p = realloc(NULL, large0); assert_ptr_not_null(p, "Unexpected realloc() failure"); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()"); /* Reallocate. */ tick0 = ticker_read(decay_ticker); - p = realloc(p, huge0); + p = realloc(p, large0); assert_ptr_not_null(p, "Unexpected realloc() failure"); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()"); @@ -99,13 +99,13 @@ TEST_BEGIN(test_decay_ticks) assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()"); /* - * Test the *allocx() APIs using huge and small size classes, with + * Test the *allocx() APIs using large and small size classes, with * tcache explicitly disabled. */ { unsigned i; size_t allocx_sizes[2]; - allocx_sizes[0] = huge0; + allocx_sizes[0] = large0; allocx_sizes[1] = 1; for (i = 0; i < sizeof(allocx_sizes) / sizeof(size_t); i++) { @@ -154,13 +154,13 @@ TEST_BEGIN(test_decay_ticks) } /* - * Test tcache fill/flush interactions for huge and small size classes, + * Test tcache fill/flush interactions for large and small size classes, * using an explicit tcache. */ if (config_tcache) { unsigned tcache_ind, i; size_t tcache_sizes[2]; - tcache_sizes[0] = huge0; + tcache_sizes[0] = large0; tcache_sizes[1] = 1; sz = sizeof(unsigned); @@ -201,14 +201,14 @@ TEST_BEGIN(test_decay_ticker) uint64_t epoch; uint64_t npurge0 = 0; uint64_t npurge1 = 0; - size_t sz, huge; + size_t sz, large; unsigned i, nupdates0; nstime_t time, decay_time, deadline; test_skip_if(opt_purge != purge_mode_decay); /* - * Allocate a bunch of huge objects, pause the clock, deallocate the + * Allocate a bunch of large objects, pause the clock, deallocate the * objects, restore the clock, then [md]allocx() in a tight loop to * verify the ticker triggers purging. */ @@ -219,10 +219,10 @@ TEST_BEGIN(test_decay_ticker) sz = sizeof(size_t); assert_d_eq(mallctl("arenas.tcache_max", &tcache_max, &sz, NULL, 0), 0, "Unexpected mallctl failure"); - huge = nallocx(tcache_max + 1, flags); + large = nallocx(tcache_max + 1, flags); } else { sz = sizeof(size_t); - assert_d_eq(mallctl("arenas.hchunk.0.size", &huge, &sz, NULL, + assert_d_eq(mallctl("arenas.lextent.0.size", &large, &sz, NULL, 0), 0, "Unexpected mallctl failure"); } @@ -235,7 +235,7 @@ TEST_BEGIN(test_decay_ticker) config_stats ? 
0 : ENOENT, "Unexpected mallctl result"); for (i = 0; i < NPS; i++) { - ps[i] = mallocx(huge, flags); + ps[i] = mallocx(large, flags); assert_ptr_not_null(ps[i], "Unexpected mallocx() failure"); } @@ -293,13 +293,13 @@ TEST_BEGIN(test_decay_nonmonotonic) uint64_t epoch; uint64_t npurge0 = 0; uint64_t npurge1 = 0; - size_t sz, huge0; + size_t sz, large0; unsigned i, nupdates0; test_skip_if(opt_purge != purge_mode_decay); sz = sizeof(size_t); - assert_d_eq(mallctl("arenas.hchunk.0.size", &huge0, &sz, NULL, 0), 0, + assert_d_eq(mallctl("arenas.lextent.0.size", &large0, &sz, NULL, 0), 0, "Unexpected mallctl failure"); assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, @@ -319,7 +319,7 @@ TEST_BEGIN(test_decay_nonmonotonic) nstime_update = nstime_update_mock; for (i = 0; i < NPS; i++) { - ps[i] = mallocx(huge0, flags); + ps[i] = mallocx(large0, flags); assert_ptr_not_null(ps[i], "Unexpected mallocx() failure"); } diff --git a/test/unit/extent_quantize.c b/test/unit/extent_quantize.c index a165aece..d8928da0 100644 --- a/test/unit/extent_quantize.c +++ b/test/unit/extent_quantize.c @@ -35,16 +35,16 @@ TEST_BEGIN(test_small_extent_size) } TEST_END -TEST_BEGIN(test_huge_extent_size) +TEST_BEGIN(test_large_extent_size) { bool cache_oblivious; - unsigned nhchunks, i; + unsigned nlextents, i; size_t sz, extent_size_prev, ceil_prev; size_t mib[4]; size_t miblen = sizeof(mib) / sizeof(size_t); /* - * Iterate over all huge size classes, get their extent sizes, and + * Iterate over all large size classes, get their extent sizes, and * verify that the quantized size is the same as the extent size. */ @@ -53,12 +53,12 @@ TEST_BEGIN(test_huge_extent_size) NULL, 0), 0, "Unexpected mallctl failure"); sz = sizeof(unsigned); - assert_d_eq(mallctl("arenas.nhchunks", &nhchunks, &sz, NULL, 0), 0, + assert_d_eq(mallctl("arenas.nlextents", &nlextents, &sz, NULL, 0), 0, "Unexpected mallctl failure"); - assert_d_eq(mallctlnametomib("arenas.hchunk.0.size", mib, &miblen), 0, + assert_d_eq(mallctlnametomib("arenas.lextent.0.size", mib, &miblen), 0, "Unexpected mallctlnametomib failure"); - for (i = 0; i < nhchunks; i++) { + for (i = 0; i < nlextents; i++) { size_t lextent_size, extent_size, floor, ceil; mib[2] = i; @@ -91,7 +91,7 @@ TEST_BEGIN(test_huge_extent_size) ceil_prev, extent_size); } } - if (i + 1 < nhchunks) { + if (i + 1 < nlextents) { extent_size_prev = floor; ceil_prev = extent_size_quantize_ceil(extent_size + PAGE); @@ -141,6 +141,6 @@ main(void) return (test( test_small_extent_size, - test_huge_extent_size, + test_large_extent_size, test_monotonic)); } diff --git a/test/unit/junk.c b/test/unit/junk.c index cdf8fb3c..7a923509 100644 --- a/test/unit/junk.c +++ b/test/unit/junk.c @@ -9,7 +9,7 @@ const char *malloc_conf = #endif static arena_dalloc_junk_small_t *arena_dalloc_junk_small_orig; -static huge_dalloc_junk_t *huge_dalloc_junk_orig; +static large_dalloc_junk_t *large_dalloc_junk_orig; static void *watch_for_junking; static bool saw_junking; @@ -37,10 +37,10 @@ arena_dalloc_junk_small_intercept(void *ptr, const arena_bin_info_t *bin_info) } static void -huge_dalloc_junk_intercept(void *ptr, size_t usize) +large_dalloc_junk_intercept(void *ptr, size_t usize) { - huge_dalloc_junk_orig(ptr, usize); + large_dalloc_junk_orig(ptr, usize); /* * The conditions under which junk filling actually occurs are nuanced * enough that it doesn't make sense to duplicate the decision logic in @@ -59,8 +59,8 @@ test_junk(size_t sz_min, size_t sz_max) if (opt_junk_free) { arena_dalloc_junk_small_orig = 
arena_dalloc_junk_small; arena_dalloc_junk_small = arena_dalloc_junk_small_intercept; - huge_dalloc_junk_orig = huge_dalloc_junk; - huge_dalloc_junk = huge_dalloc_junk_intercept; + large_dalloc_junk_orig = large_dalloc_junk; + large_dalloc_junk = large_dalloc_junk_intercept; } sz_prev = 0; @@ -110,7 +110,7 @@ test_junk(size_t sz_min, size_t sz_max) if (opt_junk_free) { arena_dalloc_junk_small = arena_dalloc_junk_small_orig; - huge_dalloc_junk = huge_dalloc_junk_orig; + large_dalloc_junk = large_dalloc_junk_orig; } } @@ -122,7 +122,7 @@ TEST_BEGIN(test_junk_small) } TEST_END -TEST_BEGIN(test_junk_huge) +TEST_BEGIN(test_junk_large) { test_skip_if(!config_fill); @@ -136,5 +136,5 @@ main(void) return (test( test_junk_small, - test_junk_huge)); + test_junk_large)); } diff --git a/test/unit/mallctl.c b/test/unit/mallctl.c index 872aeaa0..8eb5a60c 100644 --- a/test/unit/mallctl.c +++ b/test/unit/mallctl.c @@ -596,7 +596,7 @@ TEST_BEGIN(test_arenas_constants) TEST_ARENAS_CONSTANT(size_t, quantum, QUANTUM); TEST_ARENAS_CONSTANT(size_t, page, PAGE); TEST_ARENAS_CONSTANT(unsigned, nbins, NBINS); - TEST_ARENAS_CONSTANT(unsigned, nhchunks, NSIZES - NBINS); + TEST_ARENAS_CONSTANT(unsigned, nlextents, NSIZES - NBINS); #undef TEST_ARENAS_CONSTANT } @@ -622,13 +622,13 @@ TEST_BEGIN(test_arenas_bin_constants) } TEST_END -TEST_BEGIN(test_arenas_hchunk_constants) +TEST_BEGIN(test_arenas_lextent_constants) { #define TEST_ARENAS_HCHUNK_CONSTANT(t, name, expected) do { \ t name; \ size_t sz = sizeof(t); \ - assert_d_eq(mallctl("arenas.hchunk.0."#name, &name, &sz, NULL, \ + assert_d_eq(mallctl("arenas.lextent.0."#name, &name, &sz, NULL, \ 0), 0, "Unexpected mallctl() failure"); \ assert_zu_eq(name, expected, "Incorrect "#name" size"); \ } while (0) @@ -704,7 +704,7 @@ main(void) test_arenas_decay_time, test_arenas_constants, test_arenas_bin_constants, - test_arenas_hchunk_constants, + test_arenas_lextent_constants, test_arenas_extend, test_stats_arenas)); } diff --git a/test/unit/size_classes.c b/test/unit/size_classes.c index 4e1e0ce4..f5a5873d 100644 --- a/test/unit/size_classes.c +++ b/test/unit/size_classes.c @@ -3,18 +3,18 @@ static size_t get_max_size_class(void) { - unsigned nhchunks; + unsigned nlextents; size_t mib[4]; size_t sz, miblen, max_size_class; sz = sizeof(unsigned); - assert_d_eq(mallctl("arenas.nhchunks", &nhchunks, &sz, NULL, 0), 0, + assert_d_eq(mallctl("arenas.nlextents", &nlextents, &sz, NULL, 0), 0, "Unexpected mallctl() error"); miblen = sizeof(mib) / sizeof(size_t); - assert_d_eq(mallctlnametomib("arenas.hchunk.0.size", mib, &miblen), 0, + assert_d_eq(mallctlnametomib("arenas.lextent.0.size", mib, &miblen), 0, "Unexpected mallctlnametomib() error"); - mib[2] = nhchunks - 1; + mib[2] = nlextents - 1; sz = sizeof(size_t); assert_d_eq(mallctlbymib(mib, miblen, &max_size_class, &sz, NULL, 0), 0, diff --git a/test/unit/stats.c b/test/unit/stats.c index f524c005..9fa9cead 100644 --- a/test/unit/stats.c +++ b/test/unit/stats.c @@ -33,7 +33,7 @@ TEST_BEGIN(test_stats_summary) } TEST_END -TEST_BEGIN(test_stats_huge) +TEST_BEGIN(test_stats_large) { void *p; uint64_t epoch; @@ -49,14 +49,14 @@ TEST_BEGIN(test_stats_huge) "Unexpected mallctl() failure"); sz = sizeof(size_t); - assert_d_eq(mallctl("stats.arenas.0.huge.allocated", &allocated, &sz, + assert_d_eq(mallctl("stats.arenas.0.large.allocated", &allocated, &sz, NULL, 0), expected, "Unexpected mallctl() result"); sz = sizeof(uint64_t); - assert_d_eq(mallctl("stats.arenas.0.huge.nmalloc", &nmalloc, &sz, NULL, + 
assert_d_eq(mallctl("stats.arenas.0.large.nmalloc", &nmalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.huge.ndalloc", &ndalloc, &sz, NULL, + assert_d_eq(mallctl("stats.arenas.0.large.ndalloc", &ndalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.huge.nrequests", &nrequests, &sz, + assert_d_eq(mallctl("stats.arenas.0.large.nrequests", &nrequests, &sz, NULL, 0), expected, "Unexpected mallctl() result"); if (config_stats) { @@ -75,7 +75,7 @@ TEST_END TEST_BEGIN(test_stats_arenas_summary) { unsigned arena; - void *little, *huge; + void *little, *large; uint64_t epoch; size_t sz; int expected = config_stats ? 0 : ENOENT; @@ -88,11 +88,11 @@ TEST_BEGIN(test_stats_arenas_summary) little = mallocx(SMALL_MAXCLASS, 0); assert_ptr_not_null(little, "Unexpected mallocx() failure"); - huge = mallocx(chunksize, 0); - assert_ptr_not_null(huge, "Unexpected mallocx() failure"); + large = mallocx(chunksize, 0); + assert_ptr_not_null(large, "Unexpected mallocx() failure"); dallocx(little, 0); - dallocx(huge, 0); + dallocx(large, 0); assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, "Unexpected mallctl() failure"); @@ -185,7 +185,7 @@ TEST_BEGIN(test_stats_arenas_small) } TEST_END -TEST_BEGIN(test_stats_arenas_huge) +TEST_BEGIN(test_stats_arenas_large) { unsigned arena; void *p; @@ -204,12 +204,12 @@ TEST_BEGIN(test_stats_arenas_huge) "Unexpected mallctl() failure"); sz = sizeof(size_t); - assert_d_eq(mallctl("stats.arenas.0.huge.allocated", &allocated, &sz, + assert_d_eq(mallctl("stats.arenas.0.large.allocated", &allocated, &sz, NULL, 0), expected, "Unexpected mallctl() result"); sz = sizeof(uint64_t); - assert_d_eq(mallctl("stats.arenas.0.huge.nmalloc", &nmalloc, &sz, + assert_d_eq(mallctl("stats.arenas.0.large.nmalloc", &nmalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.huge.ndalloc", &ndalloc, &sz, + assert_d_eq(mallctl("stats.arenas.0.large.ndalloc", &ndalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); if (config_stats) { @@ -299,12 +299,12 @@ TEST_BEGIN(test_stats_arenas_bins) } TEST_END -TEST_BEGIN(test_stats_arenas_hchunks) +TEST_BEGIN(test_stats_arenas_lextents) { unsigned arena; void *p; uint64_t epoch, nmalloc, ndalloc; - size_t curhchunks, sz, hsize; + size_t curlextents, sz, hsize; int expected = config_stats ? 
0 : ENOENT; arena = 0; @@ -312,7 +312,7 @@ TEST_BEGIN(test_stats_arenas_hchunks) 0, "Unexpected mallctl() failure"); sz = sizeof(size_t); - assert_d_eq(mallctl("arenas.hchunk.0.size", &hsize, &sz, NULL, 0), 0, + assert_d_eq(mallctl("arenas.lextent.0.size", &hsize, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); p = mallocx(hsize, 0); @@ -322,20 +322,20 @@ TEST_BEGIN(test_stats_arenas_hchunks) "Unexpected mallctl() failure"); sz = sizeof(uint64_t); - assert_d_eq(mallctl("stats.arenas.0.hchunks.0.nmalloc", &nmalloc, &sz, + assert_d_eq(mallctl("stats.arenas.0.lextents.0.nmalloc", &nmalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.hchunks.0.ndalloc", &ndalloc, &sz, + assert_d_eq(mallctl("stats.arenas.0.lextents.0.ndalloc", &ndalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); sz = sizeof(size_t); - assert_d_eq(mallctl("stats.arenas.0.hchunks.0.curhchunks", &curhchunks, - &sz, NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.lextents.0.curlextents", + &curlextents, &sz, NULL, 0), expected, "Unexpected mallctl() result"); if (config_stats) { assert_u64_gt(nmalloc, 0, "nmalloc should be greater than zero"); assert_u64_ge(nmalloc, ndalloc, "nmalloc should be at least as large as ndalloc"); - assert_u64_gt(curhchunks, 0, + assert_u64_gt(curlextents, 0, "At least one chunk should be currently allocated"); } @@ -349,10 +349,10 @@ main(void) return (test( test_stats_summary, - test_stats_huge, + test_stats_large, test_stats_arenas_summary, test_stats_arenas_small, - test_stats_arenas_huge, + test_stats_arenas_large, test_stats_arenas_bins, - test_stats_arenas_hchunks)); + test_stats_arenas_lextents)); } diff --git a/test/unit/zero.c b/test/unit/zero.c index 2da288ac..3c35f4bd 100644 --- a/test/unit/zero.c +++ b/test/unit/zero.c @@ -53,7 +53,7 @@ TEST_BEGIN(test_zero_small) } TEST_END -TEST_BEGIN(test_zero_huge) +TEST_BEGIN(test_zero_large) { test_skip_if(!config_fill); @@ -67,5 +67,5 @@ main(void) return (test( test_zero_small, - test_zero_huge)); + test_zero_large)); }
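One user-visible consequence of the rename, from the stats.c hunks earlier in the diff: the malloc_stats_print() 'l' option now suppresses the per-size-class large tables (formerly the hchunks tables). A usage sketch, assuming an unprefixed build in which 1 MiB falls in a large size class:

#include <jemalloc/jemalloc.h>

int
main(void)
{
	void *p = mallocx(1 << 20, 0);	/* A large allocation. */

	/* Print merged stats, omitting the per-class large table. */
	malloc_stats_print(NULL, NULL, "l");
	dallocx(p, 0);
	return (0);
}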