Add configure options.

Add:
  --with-lg-page
  --with-lg-page-sizes
  --with-lg-size-class-group
  --with-lg-quantum

Get rid of STATIC_PAGE_SHIFT, in favor of directly setting LG_PAGE.

Fix various edge conditions exposed by the configure options.
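As a rough usage sketch (the option values below are illustrative assumptions, not defaults established by this commit), the new flags would be passed to configure along these lines:

    # Hypothetical invocation: 4 KiB runtime pages (lg 12), size classes
    # generated for 4 KiB and 64 KiB page sizes, 4 size classes per size
    # doubling (lg 2), and a 16-byte minimum alignment (lg 4).
    ./configure \
        --with-lg-page=12 \
        --with-lg-page-sizes=12,16 \
        --with-lg-size-class-group=2 \
        --with-lg-quantum=4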
@@ -362,8 +362,8 @@ void *arena_malloc_small(arena_t *arena, size_t size, bool zero);
 void *arena_malloc_large(arena_t *arena, size_t size, bool zero);
 void *arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero);
 void arena_prof_promoted(const void *ptr, size_t size);
-void arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
-    arena_chunk_map_bits_t *bitselm);
+void arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk,
+    void *ptr, arena_chunk_map_bits_t *bitselm);
 void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
     size_t pageind, arena_chunk_map_bits_t *bitselm);
 void arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
@@ -371,8 +371,10 @@ void arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
 #ifdef JEMALLOC_JET
 typedef void (arena_dalloc_junk_large_t)(void *, size_t);
 extern arena_dalloc_junk_large_t *arena_dalloc_junk_large;
+#else
+void arena_dalloc_junk_large(void *ptr, size_t usize);
 #endif
-void arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk,
+void arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk,
     void *ptr);
 void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr);
 #ifdef JEMALLOC_JET
@@ -9,19 +9,20 @@
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS

-void *huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero);
+void *huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
+    bool try_tcache);
 void *huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
-    bool zero);
+    bool zero, bool try_tcache);
 bool huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
     size_t extra, bool zero);
 void *huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
     size_t size, size_t extra, size_t alignment, bool zero,
-    bool try_tcache_dalloc);
+    bool try_tcache_alloc, bool try_tcache_dalloc);
 #ifdef JEMALLOC_JET
 typedef void (huge_dalloc_junk_t)(void *, size_t);
 extern huge_dalloc_junk_t *huge_dalloc_junk;
 #endif
-void huge_dalloc(tsd_t *tsd, void *ptr);
+void huge_dalloc(tsd_t *tsd, void *ptr, bool try_tcache);
 size_t huge_salloc(const void *ptr);
 prof_tctx_t *huge_prof_tctx_get(const void *ptr);
 void huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx);
@@ -185,7 +185,7 @@ typedef unsigned index_t;
 #define TINY_MIN (1U << LG_TINY_MIN)

 /*
- * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size
+ * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
  * classes).
  */
 #ifndef LG_QUANTUM
@@ -235,7 +235,8 @@ typedef unsigned index_t;
 # define LG_QUANTUM 4
 # endif
 # ifndef LG_QUANTUM
-# error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS"
+# error "Unknown minimum alignment for architecture; specify via "
+    "--with-lg-quantum"
 # endif
 #endif

@@ -275,12 +276,11 @@ typedef unsigned index_t;
 #define CACHELINE_CEILING(s) \
 	(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)

-/* Page size. STATIC_PAGE_SHIFT is determined by the configure script. */
+/* Page size. LG_PAGE is determined by the configure script. */
 #ifdef PAGE_MASK
 # undef PAGE_MASK
 #endif
-#define LG_PAGE STATIC_PAGE_SHIFT
-#define PAGE ((size_t)(1U << STATIC_PAGE_SHIFT))
+#define PAGE ((size_t)(1U << LG_PAGE))
 #define PAGE_MASK ((size_t)(PAGE - 1))

 /* Return the smallest pagesize multiple that is >= s. */
@@ -809,7 +809,7 @@ imalloct(tsd_t *tsd, size_t size, bool try_tcache, arena_t *arena)
 	if (size <= arena_maxclass)
 		return (arena_malloc(tsd, arena, size, false, try_tcache));
 	else
-		return (huge_malloc(tsd, arena, size, false));
+		return (huge_malloc(tsd, arena, size, false, try_tcache));
 }

 JEMALLOC_ALWAYS_INLINE void *
@@ -826,7 +826,7 @@ icalloct(tsd_t *tsd, size_t size, bool try_tcache, arena_t *arena)
 	if (size <= arena_maxclass)
 		return (arena_malloc(tsd, arena, size, true, try_tcache));
 	else
-		return (huge_malloc(tsd, arena, size, true));
+		return (huge_malloc(tsd, arena, size, true, try_tcache));
 }

 JEMALLOC_ALWAYS_INLINE void *
@@ -854,9 +854,11 @@ ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero, bool try_tcache,
 				return (NULL);
 			ret = arena_palloc(arena, usize, alignment, zero);
 		} else if (alignment <= chunksize)
-			ret = huge_malloc(tsd, arena, usize, zero);
-		else
-			ret = huge_palloc(tsd, arena, usize, alignment, zero);
+			ret = huge_malloc(tsd, arena, usize, zero, try_tcache);
+		else {
+			ret = huge_palloc(tsd, arena, usize, alignment, zero,
+			    try_tcache);
+		}
 	}

 	assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
@@ -938,7 +940,7 @@ idalloct(tsd_t *tsd, void *ptr, bool try_tcache)
 	if (chunk != ptr)
 		arena_dalloc(tsd, chunk, ptr, try_tcache);
 	else
-		huge_dalloc(tsd, ptr);
+		huge_dalloc(tsd, ptr, try_tcache);
 }

 JEMALLOC_ALWAYS_INLINE void
@@ -952,7 +954,7 @@ isdalloct(tsd_t *tsd, void *ptr, size_t size, bool try_tcache)
 	if (chunk != ptr)
 		arena_sdalloc(tsd, chunk, ptr, size, try_tcache);
 	else
-		huge_dalloc(tsd, ptr);
+		huge_dalloc(tsd, ptr, try_tcache);
 }

 JEMALLOC_ALWAYS_INLINE void
@@ -1042,7 +1044,7 @@ iralloct(tsd_t *tsd, void *ptr, size_t size, size_t alignment, bool zero,
 		    alignment, zero, try_tcache_alloc, try_tcache_dalloc));
 	} else {
 		return (huge_ralloc(tsd, arena, ptr, oldsize, size, 0,
-		    alignment, zero, try_tcache_dalloc));
+		    alignment, zero, try_tcache_alloc, try_tcache_dalloc));
 	}
 }

@@ -144,8 +144,14 @@
 /* Support lazy locking (avoid locking unless a second thread is launched). */
 #undef JEMALLOC_LAZY_LOCK

-/* One page is 2^STATIC_PAGE_SHIFT bytes. */
-#undef STATIC_PAGE_SHIFT
+/*
+ * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
+ * classes).
+ */
+#undef LG_QUANTUM
+
+/* One page is 2^LG_PAGE bytes. */
+#undef LG_PAGE

 /*
  * If defined, use munmap() to unmap freed chunks, rather than storing them for
@@ -16,11 +16,11 @@ arena_chunk_dalloc_huge
 arena_cleanup
 arena_dalloc
 arena_dalloc_bin
-arena_dalloc_bin_locked
+arena_dalloc_bin_junked_locked
 arena_dalloc_junk_large
 arena_dalloc_junk_small
 arena_dalloc_large
-arena_dalloc_large_locked
+arena_dalloc_large_junked_locked
 arena_dalloc_small
 arena_dss_prec_get
 arena_dss_prec_set
@@ -1,4 +1,6 @@
 #!/bin/sh
+#
+# Usage: size_classes.sh <lg_parr> <lg_g>

 # The following limits are chosen such that they cover all supported platforms.

@@ -15,10 +17,10 @@ lg_tmin=3
 lg_kmax=12

 # Page sizes.
-lg_parr="12 13 16"
+lg_parr=`echo $1 | tr ',' ' '`

 # Size class group size (number of size classes for each size doubling).
-lg_g=2
+lg_g=$2

 pow2() {
   e=$1
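With this change size_classes.sh takes its page-size list and group size from the command line instead of hard-coding them. A hedged example invocation, simply reusing the previously hard-coded values, might look like:

    # Hypothetical invocation mirroring the old built-in defaults:
    # $1 = comma-separated lg page sizes, $2 = lg size class group size.
    sh size_classes.sh "12,13,16" 2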
@@ -159,7 +161,11 @@ size_classes() {
         nbins=$((${index} + 1))
         # Final written value is correct:
         small_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))"
-        lg_large_minclass=$((${lg_grp} + 1))
+        if [ ${lg_g} -gt 0 ] ; then
+          lg_large_minclass=$((${lg_grp} + 1))
+        else
+          lg_large_minclass=$((${lg_grp} + 2))
+        fi
       fi
       index=$((${index} + 1))
       ndelta=$((${ndelta} + 1))
@@ -112,7 +112,7 @@ void tcache_arena_associate(tcache_t *tcache, arena_t *arena);
 void tcache_arena_reassociate(tcache_t *tcache, arena_t *arena);
 void tcache_arena_dissociate(tcache_t *tcache);
 tcache_t *tcache_get_hard(tsd_t *tsd);
-tcache_t *tcache_create(arena_t *arena);
+tcache_t *tcache_create(tsd_t *tsd, arena_t *arena);
 void tcache_cleanup(tsd_t *tsd);
 void tcache_enabled_cleanup(tsd_t *tsd);
 void tcache_stats_merge(tcache_t *tcache, arena_t *arena);
@@ -363,7 +363,7 @@ tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size)
 	binind = size2index(size);

 	if (config_fill && unlikely(opt_junk))
-		memset(ptr, 0x5a, size);
+		arena_dalloc_junk_large(ptr, size);

 	tbin = &tcache->tbins[binind];
 	tbin_info = &tcache_bin_info[binind];
