Rename huge to large.
include/jemalloc/internal/arena.h

@@ -229,10 +229,10 @@ struct arena_s {
 	 */
 	size_t			decay_backlog[SMOOTHSTEP_NSTEPS];
 
-	/* Extant huge allocations. */
-	ql_head(extent_t)	huge;
-	/* Synchronizes all huge allocation/update/deallocation. */
-	malloc_mutex_t		huge_mtx;
+	/* Extant large allocations. */
+	ql_head(extent_t)	large;
+	/* Synchronizes all large allocation/update/deallocation. */
+	malloc_mutex_t		large_mtx;
 
 	/*
 	 * Heaps of chunks that were previously allocated.  These are used when
@@ -287,13 +287,13 @@ void arena_chunk_cache_maybe_insert(arena_t *arena, extent_t *extent,
     bool cache);
 void	arena_chunk_cache_maybe_remove(arena_t *arena, extent_t *extent,
     bool cache);
-extent_t	*arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena,
+extent_t	*arena_chunk_alloc_large(tsdn_t *tsdn, arena_t *arena,
     size_t usize, size_t alignment, bool *zero);
-void	arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
+void	arena_chunk_dalloc_large(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
     bool locked);
-void	arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena,
+void	arena_chunk_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena,
     extent_t *extent, size_t oldsize);
-void	arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena,
+void	arena_chunk_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena,
     extent_t *extent, size_t oldsize);
 ssize_t	arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena);
 bool	arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena,
@@ -341,7 +341,7 @@ void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena,
 void	arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
     const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
     size_t *nactive, size_t *ndirty, arena_stats_t *astats,
-    malloc_bin_stats_t *bstats, malloc_huge_stats_t *hstats);
+    malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats);
 unsigned	arena_nthreads_get(arena_t *arena, bool internal);
 void	arena_nthreads_inc(arena_t *arena, bool internal);
 void	arena_nthreads_dec(arena_t *arena, bool internal);
@@ -470,7 +470,7 @@ arena_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent, const void *ptr)
 	assert(ptr != NULL);
 
 	if (unlikely(!extent_slab_get(extent)))
-		return (huge_prof_tctx_get(tsdn, extent));
+		return (large_prof_tctx_get(tsdn, extent));
 	return ((prof_tctx_t *)(uintptr_t)1U);
 }
 
@@ -483,7 +483,7 @@ arena_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, const void *ptr,
 	assert(ptr != NULL);
 
 	if (unlikely(!extent_slab_get(extent)))
-		huge_prof_tctx_set(tsdn, extent, tctx);
+		large_prof_tctx_set(tsdn, extent, tctx);
 }
 
 JEMALLOC_INLINE void
@@ -495,7 +495,7 @@ arena_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent, const void *ptr,
 	assert(ptr != NULL);
 	assert(!extent_slab_get(extent));
 
-	huge_prof_tctx_reset(tsdn, extent);
+	large_prof_tctx_reset(tsdn, extent);
 }
 
 JEMALLOC_ALWAYS_INLINE void
@@ -535,7 +535,7 @@ arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
 			    tcache, size, ind, zero, slow_path));
 		}
 		if (likely(size <= tcache_maxclass)) {
-			return (tcache_alloc_huge(tsdn_tsd(tsdn), arena,
+			return (tcache_alloc_large(tsdn_tsd(tsdn), arena,
 			    tcache, size, ind, zero, slow_path));
 		}
 		/* (size > tcache_maxclass) case falls through. */
@@ -563,7 +563,7 @@ arena_salloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr)
 	if (likely(extent_slab_get(extent)))
 		ret = index2size(extent_slab_data_get_const(extent)->binind);
 	else
-		ret = huge_salloc(tsdn, extent);
+		ret = large_salloc(tsdn, extent);
 
 	return (ret);
 }
@@ -594,11 +594,11 @@ arena_dalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, tcache_t *tcache,
 				arena_dalloc_promoted(tsdn, extent, ptr,
 				    tcache, slow_path);
 			} else {
-				tcache_dalloc_huge(tsdn_tsd(tsdn), tcache,
+				tcache_dalloc_large(tsdn_tsd(tsdn), tcache,
 				    ptr, usize, slow_path);
 			}
 		} else
-			huge_dalloc(tsdn, extent);
+			large_dalloc(tsdn, extent);
 	}
 }
 
@@ -627,11 +627,11 @@ arena_sdalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t size,
 				arena_dalloc_promoted(tsdn, extent, ptr,
 				    tcache, slow_path);
 			} else {
-				tcache_dalloc_huge(tsdn_tsd(tsdn), tcache, ptr,
+				tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
 				    size, slow_path);
 			}
 		} else
-			huge_dalloc(tsdn, extent);
+			large_dalloc(tsdn, extent);
 	}
 }
 # endif /* JEMALLOC_ARENA_INLINE_B */
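The arena.h changes are mechanical, but they trace a single dispatch rule: extent_slab_get() picks between the small (slab) path and the renamed large_* path. Below is a minimal, compilable sketch of that dispatch; the types and the bin_size table are hypothetical stand-ins, not jemalloc's real metadata.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for jemalloc's extent metadata; the real code
 * reads these fields through extent_slab_get() and friends. */
typedef struct {
	bool		slab;		/* backs a slab of small regions? */
	unsigned	binind;		/* small bin index when slab */
	size_t		large_usize;	/* allocation size otherwise */
} extent_sketch_t;

/* A toy index2size table for three small bins. */
static const size_t bin_size[] = {8, 16, 32};

/* Shape of arena_salloc() after the rename: slab extents answer from bin
 * metadata, all other extents go through the (renamed) large path. */
static size_t
salloc_sketch(const extent_sketch_t *extent)
{
	if (extent->slab)
		return (bin_size[extent->binind]);	/* small path */
	return (extent->large_usize);	/* stands in for large_salloc() */
}

int
main(void)
{
	extent_sketch_t small = {true, 1, 0};
	extent_sketch_t large = {false, 0, 1 << 20};

	printf("small: %zu bytes, large: %zu bytes\n",
	    salloc_sketch(&small), salloc_sketch(&large));
	return (0);
}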
include/jemalloc/internal/ctl.h

@@ -51,7 +51,7 @@ struct ctl_arena_stats_s {
 	uint64_t		nrequests_small;
 
 	malloc_bin_stats_t	bstats[NBINS];
-	malloc_huge_stats_t	hstats[NSIZES - NBINS];
+	malloc_large_stats_t	lstats[NSIZES - NBINS];
 };
 
 struct ctl_stats_s {
include/jemalloc/internal/extent.h

@@ -51,7 +51,7 @@ struct extent_s {
 			/* Small region slab metadata. */
 			arena_slab_data_t	e_slab_data;
 
-			/* Profile counters, used for huge objects. */
+			/* Profile counters, used for large objects. */
 			union {
 				void		*e_prof_tctx_pun;
 				prof_tctx_t	*e_prof_tctx;
@@ -67,7 +67,7 @@ struct extent_s {
 		/* Linkage for per size class address-ordered heaps. */
 		phn(extent_t)		ph_link;
 
-		/* Linkage for arena's huge and extent_cache lists. */
+		/* Linkage for arena's large and extent_cache lists. */
 		ql_elm(extent_t)	ql_link;
 	};
 };
include/jemalloc/internal/huge.h (deleted, 37 lines)

@@ -1,37 +0,0 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-void	*huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero);
-void	*huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
-    size_t alignment, bool zero);
-bool	huge_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
-    size_t usize_max, bool zero);
-void	*huge_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
-    size_t usize, size_t alignment, bool zero, tcache_t *tcache);
-#ifdef JEMALLOC_JET
-typedef void (huge_dalloc_junk_t)(void *, size_t);
-extern huge_dalloc_junk_t *huge_dalloc_junk;
-#else
-void	huge_dalloc_junk(void *ptr, size_t usize);
-#endif
-void	huge_dalloc_junked_locked(tsdn_t *tsdn, extent_t *extent);
-void	huge_dalloc(tsdn_t *tsdn, extent_t *extent);
-size_t	huge_salloc(tsdn_t *tsdn, const extent_t *extent);
-prof_tctx_t	*huge_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent);
-void	huge_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx);
-void	huge_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent);
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
include/jemalloc/internal/jemalloc_internal.h.in

@@ -362,7 +362,7 @@ typedef unsigned szind_t;
 #include "jemalloc/internal/rtree.h"
 #include "jemalloc/internal/pages.h"
 #include "jemalloc/internal/chunk.h"
-#include "jemalloc/internal/huge.h"
+#include "jemalloc/internal/large.h"
 #include "jemalloc/internal/tcache.h"
 #include "jemalloc/internal/hash.h"
 #include "jemalloc/internal/prof.h"
@@ -396,7 +396,7 @@ typedef unsigned szind_t;
 #include "jemalloc/internal/rtree.h"
 #include "jemalloc/internal/pages.h"
 #include "jemalloc/internal/chunk.h"
-#include "jemalloc/internal/huge.h"
+#include "jemalloc/internal/large.h"
 #include "jemalloc/internal/tcache.h"
 #include "jemalloc/internal/hash.h"
 #include "jemalloc/internal/prof.h"
@@ -486,7 +486,7 @@ void jemalloc_postfork_child(void);
 #include "jemalloc/internal/rtree.h"
 #include "jemalloc/internal/pages.h"
 #include "jemalloc/internal/chunk.h"
-#include "jemalloc/internal/huge.h"
+#include "jemalloc/internal/large.h"
 #include "jemalloc/internal/tcache.h"
 #include "jemalloc/internal/hash.h"
 #include "jemalloc/internal/prof.h"
@@ -515,7 +515,7 @@ void jemalloc_postfork_child(void);
 #include "jemalloc/internal/rtree.h"
 #include "jemalloc/internal/pages.h"
 #include "jemalloc/internal/chunk.h"
-#include "jemalloc/internal/huge.h"
+#include "jemalloc/internal/large.h"
 
 #ifndef JEMALLOC_ENABLE_INLINE
 pszind_t	psz2ind(size_t psz);
@@ -547,7 +547,7 @@ JEMALLOC_INLINE pszind_t
 psz2ind(size_t psz)
 {
 
-	if (unlikely(psz > HUGE_MAXCLASS))
+	if (unlikely(psz > LARGE_MAXCLASS))
 		return (NPSIZES);
 	{
 		pszind_t x = lg_floor((psz<<1)-1);
@@ -608,7 +608,7 @@ JEMALLOC_INLINE size_t
 psz2u(size_t psz)
 {
 
-	if (unlikely(psz > HUGE_MAXCLASS))
+	if (unlikely(psz > LARGE_MAXCLASS))
 		return (0);
 	{
 		size_t x = lg_floor((psz<<1)-1);
@@ -625,7 +625,7 @@ JEMALLOC_INLINE szind_t
 size2index_compute(size_t size)
 {
 
-	if (unlikely(size > HUGE_MAXCLASS))
+	if (unlikely(size > LARGE_MAXCLASS))
 		return (NSIZES);
 #if (NTBINS != 0)
 	if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
@@ -721,7 +721,7 @@ JEMALLOC_ALWAYS_INLINE size_t
 s2u_compute(size_t size)
 {
 
-	if (unlikely(size > HUGE_MAXCLASS))
+	if (unlikely(size > LARGE_MAXCLASS))
 		return (0);
#if (NTBINS > 0)
 	if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
@@ -797,9 +797,9 @@ sa2u(size_t size, size_t alignment)
 		return (usize);
 	}
 
-	/* Huge size class.  Beware of overflow. */
+	/* Large size class.  Beware of overflow. */
 
-	if (unlikely(alignment > HUGE_MAXCLASS))
+	if (unlikely(alignment > LARGE_MAXCLASS))
 		return (0);
 
 	/* Make sure result is a large size class. */
@@ -814,7 +814,7 @@ sa2u(size_t size, size_t alignment)
 	}
 
 	/*
-	 * Calculate the multi-page mapping that huge_palloc() would need in
+	 * Calculate the multi-page mapping that large_palloc() would need in
 	 * order to guarantee the alignment.
 	 */
 	if (usize + large_pad + PAGE_CEILING(alignment) < usize) {
@@ -1113,7 +1113,7 @@ iralloct_realign(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize,
 	size_t usize, copysize;
 
 	usize = sa2u(size + extra, alignment);
-	if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
+	if (unlikely(usize == 0 || usize > LARGE_MAXCLASS))
 		return (NULL);
 	p = ipalloct(tsdn, usize, alignment, zero, tcache, arena);
 	if (p == NULL) {
@@ -1121,7 +1121,7 @@ iralloct_realign(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize,
 			return (NULL);
 		/* Try again, without extra this time. */
 		usize = sa2u(size, alignment);
-		if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
+		if (unlikely(usize == 0 || usize > LARGE_MAXCLASS))
 			return (NULL);
 		p = ipalloct(tsdn, usize, alignment, zero, tcache, arena);
 		if (p == NULL)
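All of the HUGE_MAXCLASS -> LARGE_MAXCLASS guards above protect the same arithmetic: lg_floor((size << 1) - 1) computes lg_ceil(size), and the shift would overflow if size could exceed the maximum class. A compilable sketch of the rounding these functions perform, assuming LG_QUANTUM of 4 and ignoring the tiny-class special cases:

#include <stddef.h>
#include <stdio.h>

#define LG_SIZE_CLASS_GROUP	2	/* four classes per doubling */
#define LG_QUANTUM		4	/* assumed 16-byte quantum */

/* Portable lg_floor for nonzero x; jemalloc uses compiler intrinsics. */
static size_t
lg_floor(size_t x)
{
	size_t lg = 0;

	while (x >>= 1)
		lg++;
	return (lg);
}

/* Sketch of the s2u_compute()/psz2u() arithmetic: round size up to its
 * class, where classes within one power-of-two group are spaced
 * 2^(lg_ceil(size) - LG_SIZE_CLASS_GROUP - 1) apart.  The LARGE_MAXCLASS
 * guards in the hunks above exist so (size << 1) cannot overflow. */
static size_t
round_to_class(size_t size)
{
	size_t x = lg_floor((size << 1) - 1);	/* lg_ceil(size) */
	size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1) ?
	    LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
	size_t delta_mask = ((size_t)1 << lg_delta) - 1;

	return ((size + delta_mask) & ~delta_mask);
}

int
main(void)
{
	/* 112, 5120, 114688 under the assumptions above. */
	printf("%zu %zu %zu\n", round_to_class(100), round_to_class(5000),
	    round_to_class(100000));
	return (0);
}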
include/jemalloc/internal/large.h (new file, 37 lines)

@@ -0,0 +1,37 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+void	*large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero);
+void	*large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
+    size_t alignment, bool zero);
+bool	large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
+    size_t usize_max, bool zero);
+void	*large_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
+    size_t usize, size_t alignment, bool zero, tcache_t *tcache);
+#ifdef JEMALLOC_JET
+typedef void (large_dalloc_junk_t)(void *, size_t);
+extern large_dalloc_junk_t *large_dalloc_junk;
+#else
+void	large_dalloc_junk(void *ptr, size_t usize);
+#endif
+void	large_dalloc_junked_locked(tsdn_t *tsdn, extent_t *extent);
+void	large_dalloc(tsdn_t *tsdn, extent_t *extent);
+size_t	large_salloc(tsdn_t *tsdn, const extent_t *extent);
+prof_tctx_t	*large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent);
+void	large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx);
+void	large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent);
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
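large.h carries over huge.h's JEMALLOC_JET idiom: in test builds the junk-fill routine is an overridable function pointer rather than a plain function, so tests can intercept it. A self-contained sketch of the idiom with hypothetical names (0x5a is jemalloc's fill byte for freed memory):

#include <stdio.h>
#include <string.h>

/* Overridable hook, as in the JEMALLOC_JET branch of large.h. */
typedef void (dalloc_junk_sketch_t)(void *, size_t);

static void
dalloc_junk_default(void *ptr, size_t usize)
{
	memset(ptr, 0x5a, usize);	/* 0x5a marks freed memory */
}

/* Tests reassign this pointer to observe or suppress junk filling. */
static dalloc_junk_sketch_t *dalloc_junk_sketch = dalloc_junk_default;

int
main(void)
{
	char buf[8] = {0};

	dalloc_junk_sketch(buf, sizeof(buf));
	printf("buf[0] == 0x%02x\n", (unsigned char)buf[0]);
	return (0);
}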
include/jemalloc/internal/private_symbols.txt

@@ -9,14 +9,14 @@ arena_boot
 arena_choose
 arena_choose_hard
 arena_choose_impl
-arena_chunk_alloc_huge
+arena_chunk_alloc_large
 arena_chunk_cache_alloc
 arena_chunk_cache_dalloc
 arena_chunk_cache_maybe_insert
 arena_chunk_cache_maybe_remove
-arena_chunk_dalloc_huge
-arena_chunk_ralloc_huge_expand
-arena_chunk_ralloc_huge_shrink
+arena_chunk_dalloc_large
+arena_chunk_ralloc_large_expand
+arena_chunk_ralloc_large_shrink
 arena_cleanup
 arena_dalloc
 arena_dalloc_bin_junked_locked
@@ -222,17 +222,6 @@ hash_rotl_64
 hash_x64_128
 hash_x86_128
 hash_x86_32
-huge_dalloc
-huge_dalloc_junk
-huge_dalloc_junked_locked
-huge_malloc
-huge_palloc
-huge_prof_tctx_get
-huge_prof_tctx_reset
-huge_prof_tctx_set
-huge_ralloc
-huge_ralloc_no_move
-huge_salloc
 iaalloc
 ialloc
 iallocztm
@@ -258,6 +247,17 @@ ixalloc
 jemalloc_postfork_child
 jemalloc_postfork_parent
 jemalloc_prefork
+large_dalloc
+large_dalloc_junk
+large_dalloc_junked_locked
+large_malloc
+large_palloc
+large_prof_tctx_get
+large_prof_tctx_reset
+large_prof_tctx_set
+large_ralloc
+large_ralloc_no_move
+large_salloc
 lg_floor
 lg_prof_sample
 malloc_cprintf
@@ -438,17 +438,17 @@ stats_cactive_get
 stats_cactive_sub
 stats_print
 tcache_alloc_easy
-tcache_alloc_huge
+tcache_alloc_large
 tcache_alloc_small
 tcache_alloc_small_hard
 tcache_arena_reassociate
-tcache_bin_flush_huge
+tcache_bin_flush_large
 tcache_bin_flush_small
 tcache_bin_info
 tcache_boot
 tcache_cleanup
 tcache_create
-tcache_dalloc_huge
+tcache_dalloc_large
 tcache_dalloc_small
 tcache_enabled_cleanup
 tcache_enabled_get
include/jemalloc/internal/size_classes.sh

@@ -237,7 +237,7 @@ size_classes() {
       fi
     fi
     # Final written value is correct:
-    huge_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))"
+    large_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))"
     index=$((${index} + 1))
     ndelta=$((${ndelta} + 1))
   done
@@ -257,7 +257,7 @@ size_classes() {
   # - lookup_maxclass
   # - small_maxclass
   # - lg_large_minclass
-  # - huge_maxclass
+  # - large_maxclass
 }
 
 cat <<EOF
@@ -290,7 +290,7 @@ cat <<EOF
  * LOOKUP_MAXCLASS: Maximum size class included in lookup table.
  * SMALL_MAXCLASS: Maximum small size class.
  * LG_LARGE_MINCLASS: Lg of minimum large size class.
- * HUGE_MAXCLASS: Maximum (huge) size class.
+ * LARGE_MAXCLASS: Maximum (large) size class.
  */
 
 #define	LG_SIZE_CLASS_GROUP	${lg_g}
@@ -315,7 +315,7 @@ for lg_z in ${lg_zarr} ; do
   echo "#define	LOOKUP_MAXCLASS		${lookup_maxclass}"
   echo "#define	SMALL_MAXCLASS		${small_maxclass}"
   echo "#define	LG_LARGE_MINCLASS	${lg_large_minclass}"
-  echo "#define	HUGE_MAXCLASS		${huge_maxclass}"
+  echo "#define	LARGE_MAXCLASS		${large_maxclass}"
   echo "#endif"
   echo
 done
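The maxclass assignment above is the script's general size-class formula evaluated at the final loop iteration: a group base of 2^lg_grp plus ndelta steps of 2^lg_delta. A small C illustration with example group values (assuming LG_SIZE_CLASS_GROUP of 2):

#include <stddef.h>
#include <stdio.h>

/* The value size_classes.sh writes for every class, including
 * LARGE_MAXCLASS: base 2^lg_grp plus ndelta steps of 2^lg_delta. */
static size_t
class_size(unsigned lg_grp, unsigned ndelta, unsigned lg_delta)
{
	return (((size_t)1 << lg_grp) + ((size_t)ndelta << lg_delta));
}

int
main(void)
{
	unsigned ndelta;

	/* The 2^13 group with lg_delta = 11: 10240 12288 14336 16384. */
	for (ndelta = 1; ndelta <= 4; ndelta++)
		printf("%zu\n", class_size(13, ndelta, 11));
	return (0);
}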
include/jemalloc/internal/stats.h

@@ -3,7 +3,7 @@
 
 typedef struct tcache_bin_stats_s tcache_bin_stats_t;
 typedef struct malloc_bin_stats_s malloc_bin_stats_t;
-typedef struct malloc_huge_stats_s malloc_huge_stats_t;
+typedef struct malloc_large_stats_s malloc_large_stats_t;
 typedef struct arena_stats_s arena_stats_t;
 typedef struct chunk_stats_s chunk_stats_t;
 
@@ -61,7 +61,7 @@ struct malloc_bin_stats_s {
 	size_t		curslabs;
 };
 
-struct malloc_huge_stats_s {
+struct malloc_large_stats_s {
 	/*
 	 * Total number of allocation/deallocation requests served directly by
 	 * the arena.
@@ -77,7 +77,7 @@ struct malloc_huge_stats_s {
 	uint64_t	nrequests;
 
 	/* Current number of (multi-)chunk allocations of this size class. */
-	size_t		curhchunks;
+	size_t		curlextents;
 };
 
 struct arena_stats_s {
@@ -108,13 +108,13 @@ struct arena_stats_s {
 	size_t		metadata_mapped;
 	size_t		metadata_allocated; /* Protected via atomic_*_z(). */
 
-	size_t		allocated_huge;
-	uint64_t	nmalloc_huge;
-	uint64_t	ndalloc_huge;
-	uint64_t	nrequests_huge;
+	size_t		allocated_large;
+	uint64_t	nmalloc_large;
+	uint64_t	ndalloc_large;
+	uint64_t	nrequests_large;
 
-	/* One element for each huge size class. */
-	malloc_huge_stats_t	hstats[NSIZES - NBINS];
+	/* One element for each large size class. */
+	malloc_large_stats_t	lstats[NSIZES - NBINS];
 };
 
 #endif /* JEMALLOC_H_STRUCTS */
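The lstats arrays in ctl.h and stats.h keep one slot per large size class; since large classes occupy global size-class indices [NBINS, NSIZES), a class's slot is szind - NBINS. A toy sketch with hypothetical NBINS/NSIZES values (jemalloc derives the real ones per platform):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define NBINS	4	/* hypothetical toy values */
#define NSIZES	7

typedef struct {
	uint64_t	nmalloc;
	size_t		curlextents;	/* renamed from curhchunks */
} large_stats_sketch_t;

/* One element per large class, mirroring lstats[NSIZES - NBINS]. */
static large_stats_sketch_t lstats[NSIZES - NBINS];

/* Index a large class's stats slot by subtracting NBINS. */
static void
record_large_malloc(unsigned szind)
{
	lstats[szind - NBINS].nmalloc++;
	lstats[szind - NBINS].curlextents++;
}

int
main(void)
{
	record_large_malloc(NBINS);		/* smallest large class */
	record_large_malloc(NSIZES - 1);	/* largest class */
	printf("%llu %zu\n", (unsigned long long)lstats[0].nmalloc,
	    lstats[NSIZES - NBINS - 1].curlextents);
	return (0);
}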
include/jemalloc/internal/tcache.h

@@ -30,8 +30,8 @@ typedef struct tcaches_s tcaches_t;
  */
 #define	TCACHE_NSLOTS_SMALL_MAX		200
 
-/* Number of cache slots for huge size classes. */
-#define	TCACHE_NSLOTS_HUGE		20
+/* Number of cache slots for large size classes. */
+#define	TCACHE_NSLOTS_LARGE		20
 
 /* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */
 #define	LG_TCACHE_MAXCLASS_DEFAULT	15
@@ -113,7 +113,7 @@ extern tcache_bin_info_t *tcache_bin_info;
 
 /*
  * Number of tcache bins.  There are NBINS small-object bins, plus 0 or more
- * huge-object bins.
+ * large-object bins.
  */
 extern unsigned	nhbins;
 
@@ -136,7 +136,7 @@ void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
     tcache_bin_t *tbin, szind_t binind, bool *tcache_success);
 void	tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
     szind_t binind, unsigned rem);
-void	tcache_bin_flush_huge(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
+void	tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
     unsigned rem, tcache_t *tcache);
 void	tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache,
     arena_t *oldarena, arena_t *newarena);
@@ -163,11 +163,11 @@ void tcache_enabled_set(bool enabled);
 void	*tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success);
 void	*tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
     size_t size, szind_t ind, bool zero, bool slow_path);
-void	*tcache_alloc_huge(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
+void	*tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
     size_t size, szind_t ind, bool zero, bool slow_path);
 void	tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr,
     szind_t binind, bool slow_path);
-void	tcache_dalloc_huge(tsd_t *tsd, tcache_t *tcache, void *ptr,
+void	tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr,
     size_t size, bool slow_path);
 tcache_t	*tcaches_get(tsd_t *tsd, unsigned ind);
 #endif
@@ -336,7 +336,7 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
 }
 
 JEMALLOC_ALWAYS_INLINE void *
-tcache_alloc_huge(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
+tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
     szind_t binind, bool zero, bool slow_path)
 {
 	void *ret;
@@ -349,14 +349,14 @@ tcache_alloc_huge(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
 	assert(tcache_success == (ret != NULL));
 	if (unlikely(!tcache_success)) {
 		/*
-		 * Only allocate one huge object at a time, because it's quite
+		 * Only allocate one large object at a time, because it's quite
 		 * expensive to create one and not use it.
 		 */
 		arena = arena_choose(tsd, arena);
 		if (unlikely(arena == NULL))
 			return (NULL);
 
-		ret = huge_malloc(tsd_tsdn(tsd), arena, s2u(size), zero);
+		ret = large_malloc(tsd_tsdn(tsd), arena, s2u(size), zero);
 		if (ret == NULL)
 			return (NULL);
 	} else {
@@ -416,7 +416,7 @@ tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
 }
 
 JEMALLOC_ALWAYS_INLINE void
-tcache_dalloc_huge(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size,
+tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size,
     bool slow_path)
 {
 	szind_t binind;
@@ -429,12 +429,12 @@ tcache_dalloc_huge(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size,
 	binind = size2index(size);
 
 	if (slow_path && config_fill && unlikely(opt_junk_free))
-		huge_dalloc_junk(ptr, size);
+		large_dalloc_junk(ptr, size);
 
 	tbin = &tcache->tbins[binind];
 	tbin_info = &tcache_bin_info[binind];
 	if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
-		tcache_bin_flush_huge(tsd, tbin, binind,
+		tcache_bin_flush_large(tsd, tbin, binind,
 		    (tbin_info->ncached_max >> 1), tcache);
 	}
 	assert(tbin->ncached < tbin_info->ncached_max);
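tcache_dalloc_large() caches freed large objects in a per-size-class bin and calls tcache_bin_flush_large() for half the bin when it fills. A rough standalone sketch of that fill-and-flush behavior; the eviction order and the direct free() calls are simplifications of what jemalloc actually does:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* A miniature of one cache bin: a LIFO of cached pointers that flushes
 * half its slots when full, echoing tcache_dalloc_large() ->
 * tcache_bin_flush_large().  Evicted pointers go straight to free();
 * jemalloc returns them to their owning arenas instead. */
#define NCACHED_MAX	8

typedef struct {
	void		*slots[NCACHED_MAX];
	unsigned	ncached;
} tbin_sketch_t;

static void
flush_half(tbin_sketch_t *tbin)
{
	unsigned rem = NCACHED_MAX >> 1;	/* entries to keep */
	unsigned i;

	/* Evict the oldest half; keep the recently cached half. */
	for (i = 0; i < tbin->ncached - rem; i++)
		free(tbin->slots[i]);
	memmove(tbin->slots, tbin->slots + (tbin->ncached - rem),
	    rem * sizeof(void *));
	tbin->ncached = rem;
}

static void
dalloc_cached(tbin_sketch_t *tbin, void *ptr)
{
	if (tbin->ncached == NCACHED_MAX)
		flush_half(tbin);
	tbin->slots[tbin->ncached++] = ptr;
}

int
main(void)
{
	tbin_sketch_t tbin = {{0}, 0};
	unsigned i;

	for (i = 0; i < 10; i++)
		dalloc_cached(&tbin, malloc(4096));
	printf("ncached = %u\n", tbin.ncached);	/* 6: flushed once at 8 */

	while (tbin.ncached > 0)
		free(tbin.slots[--tbin.ncached]);
	return (0);
}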
include/jemalloc/internal/witness.h

@@ -32,7 +32,7 @@ typedef int witness_comp_t (const witness_t *, void *, const witness_t *,
 
 #define	WITNESS_RANK_LEAF		0xffffffffU
 #define	WITNESS_RANK_ARENA_BIN		WITNESS_RANK_LEAF
-#define	WITNESS_RANK_ARENA_HUGE		WITNESS_RANK_LEAF
+#define	WITNESS_RANK_ARENA_LARGE	WITNESS_RANK_LEAF
 #define	WITNESS_RANK_DSS		WITNESS_RANK_LEAF
 #define	WITNESS_RANK_PROF_ACTIVE	WITNESS_RANK_LEAF
 #define	WITNESS_RANK_PROF_DUMP_SEQ	WITNESS_RANK_LEAF