Add {,r,s,d}allocm().

Add allocm(), rallocm(), sallocm(), and dallocm(), which are a
functional superset of malloc(), calloc(), posix_memalign(),
malloc_usable_size(), and free().
Jason Evans 2010-09-17 15:46:18 -07:00
parent 4cc6a60a4f
commit 8e3c3c61b5
17 changed files with 892 additions and 124 deletions
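As a rough orientation before the diff, here is a minimal sketch of how the new experimental calls cover the standard API described in the commit message. It follows the JEMALLOC_P() mangling convention used by the bundled tests; the sizes (4096, 8192, 64) are arbitrary illustration values.

#include <stdlib.h>
#define JEMALLOC_MANGLE
#include "jemalloc/jemalloc.h"

int
main(void)
{
	void *p;
	size_t rsize;

	/* malloc(4096) analogue. */
	if (JEMALLOC_P(allocm)(&p, &rsize, 4096, 0) != ALLOCM_SUCCESS)
		return (1);

	/* malloc_usable_size(p) analogue. */
	JEMALLOC_P(sallocm)(p, &rsize, 0);

	/* realloc(p, 8192) analogue. */
	if (JEMALLOC_P(rallocm)(&p, &rsize, 8192, 0, 0) != ALLOCM_SUCCESS)
		return (1);

	/* free(p) analogue. */
	JEMALLOC_P(dallocm)(p, 0);

	/* calloc(1, 4096) and posix_memalign(&p, 64, 4096) roll into flags. */
	if (JEMALLOC_P(allocm)(&p, NULL, 4096,
	    ALLOCM_ALIGN(64) | ALLOCM_ZERO) != ALLOCM_SUCCESS)
		return (1);
	JEMALLOC_P(dallocm)(p, 0);

	return (0);
}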

View File

@ -58,7 +58,8 @@ DSOS := @objroot@lib/libjemalloc@install_suffix@.$(SO).$(REV) \
@objroot@lib/libjemalloc@install_suffix@.$(SO) \ @objroot@lib/libjemalloc@install_suffix@.$(SO) \
@objroot@lib/libjemalloc@install_suffix@_pic.a @objroot@lib/libjemalloc@install_suffix@_pic.a
MAN3 := @objroot@doc/jemalloc@install_suffix@.3 MAN3 := @objroot@doc/jemalloc@install_suffix@.3
CTESTS := @srcroot@test/posix_memalign.c @srcroot@test/thread_arena.c CTESTS := @srcroot@test/allocm.c @srcroot@test/posix_memalign.c \
@srcroot@test/rallocm.c @srcroot@test/thread_arena.c
.PHONY: all dist install check clean distclean relclean .PHONY: all dist install check clean distclean relclean

View File

@ -38,7 +38,7 @@
.\" @(#)malloc.3 8.1 (Berkeley) 6/4/93 .\" @(#)malloc.3 8.1 (Berkeley) 6/4/93
.\" $FreeBSD: head/lib/libc/stdlib/malloc.3 182225 2008-08-27 02:00:53Z jasone $ .\" $FreeBSD: head/lib/libc/stdlib/malloc.3 182225 2008-08-27 02:00:53Z jasone $
.\" .\"
.Dd September 11, 2010 .Dd September 17, 2010
.Dt JEMALLOC 3 .Dt JEMALLOC 3
.Os .Os
.Sh NAME .Sh NAME
@ -51,13 +51,18 @@
.Nm @jemalloc_prefix@malloc_stats_print , .Nm @jemalloc_prefix@malloc_stats_print ,
.Nm @jemalloc_prefix@mallctl , .Nm @jemalloc_prefix@mallctl ,
.Nm @jemalloc_prefix@mallctlnametomib , .Nm @jemalloc_prefix@mallctlnametomib ,
.Nm @jemalloc_prefix@mallctlbymib .Nm @jemalloc_prefix@mallctlbymib ,
.Nm @jemalloc_prefix@allocm ,
.Nm @jemalloc_prefix@rallocm ,
.Nm @jemalloc_prefix@sallocm ,
.Nm @jemalloc_prefix@dallocm
.Nd general purpose memory allocation functions .Nd general purpose memory allocation functions
.Sh LIBRARY .Sh LIBRARY
.Sy libjemalloc@install_suffix@ .Sy libjemalloc@install_suffix@
.Sh SYNOPSIS .Sh SYNOPSIS
.In stdlib.h .In stdlib.h
.In jemalloc/jemalloc@install_suffix@.h .In jemalloc/jemalloc@install_suffix@.h
.Ss Standard API
.Ft void * .Ft void *
.Fn @jemalloc_prefix@malloc "size_t size" .Fn @jemalloc_prefix@malloc "size_t size"
.Ft void * .Ft void *
@ -68,6 +73,7 @@
.Fn @jemalloc_prefix@realloc "void *ptr" "size_t size" .Fn @jemalloc_prefix@realloc "void *ptr" "size_t size"
.Ft void .Ft void
.Fn @jemalloc_prefix@free "void *ptr" .Fn @jemalloc_prefix@free "void *ptr"
.Ss Non-standard API
.Ft size_t .Ft size_t
.Fn @jemalloc_prefix@malloc_usable_size "const void *ptr" .Fn @jemalloc_prefix@malloc_usable_size "const void *ptr"
.Ft void .Ft void
@ -82,7 +88,17 @@
.Va @jemalloc_prefix@malloc_options ; .Va @jemalloc_prefix@malloc_options ;
.Ft void .Ft void
.Fn \*(lp*@jemalloc_prefix@malloc_message\*(rp "void *cbopaque" "const char *s" .Fn \*(lp*@jemalloc_prefix@malloc_message\*(rp "void *cbopaque" "const char *s"
.Ss Experimental API
.Ft int
.Fn @jemalloc_prefix@allocm "void **ptr" "size_t *rsize" "size_t size" "int flags"
.Ft int
.Fn @jemalloc_prefix@rallocm "void **ptr" "size_t *rsize" "size_t size" "size_t extra" "int flags"
.Ft int
.Fn @jemalloc_prefix@sallocm "const void *ptr" "size_t *rsize" "int flags"
.Ft int
.Fn @jemalloc_prefix@dallocm "void *ptr" "int flags"
.Sh DESCRIPTION .Sh DESCRIPTION
.Ss Standard API
The The
.Fn @jemalloc_prefix@malloc .Fn @jemalloc_prefix@malloc
function allocates function allocates
@ -158,7 +174,7 @@ If
is is
.Dv NULL , .Dv NULL ,
no action occurs. no action occurs.
.Pp .Ss Non-standard API
The The
.Fn @jemalloc_prefix@malloc_usable_size .Fn @jemalloc_prefix@malloc_usable_size
function returns the usable size of the allocation pointed to by function returns the usable size of the allocation pointed to by
@ -289,6 +305,102 @@ for (i = 0; i < nbins; i++) {
/* Do something with bin_size... */ /* Do something with bin_size... */
} }
.Ed .Ed
.Ss Experimental API
The experimental API is subject to change or removal without regard for
backward compatibility.
.Pp
The
.Fn @jemalloc_prefix@allocm ,
.Fn @jemalloc_prefix@rallocm ,
.Fn @jemalloc_prefix@sallocm ,
and
.Fn @jemalloc_prefix@dallocm
functions all have a
.Fa flags
argument that can be used to specify options.
The functions only check the options that are contextually relevant.
Use bitwise or (|) operations to specify one or more of the following:
.Bl -tag -width ".Dv ALLOCM_LG_ALIGN(la)"
.It ALLOCM_LG_ALIGN(la)
Align the memory allocation to start at an address that is a multiple of
(1 <<
.Fa la ) .
This macro does not validate that
.Fa la
is within the valid range.
.It ALLOCM_ALIGN(a)
Align the memory allocation to start at an address that is a multiple of
.Fa a ,
where
.Fa a
is a power of two.
This macro does not validate that
.Fa a
is a power of 2.
.It ALLOCM_ZERO
Initialize newly allocated memory to contain zero bytes.
In the growing reallocation case, the real size prior to reallocation defines
the boundary between untouched bytes and those that are initialized to contain
zero bytes.
If this option is absent, newly allocated memory is uninitialized.
.It ALLOCM_NO_MOVE
For reallocation, fail rather than moving the object.
This constraint can apply to both growth and shrinkage.
.El
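As an illustration of flag composition (a sketch, not part of the manual text): the call below requests a zero-filled allocation aligned to a 4096-byte boundary; ALLOCM_ALIGN(4096) simply encodes lg(4096) == 12 in the low bits of flags.

#include <stdlib.h>
#include <stdio.h>
#define JEMALLOC_MANGLE
#include "jemalloc/jemalloc.h"

int
main(void)
{
	void *p;
	size_t rsize;

	/* Zero-filled, 4096-byte-aligned allocation of at least 4096 bytes. */
	if (JEMALLOC_P(allocm)(&p, &rsize, 4096,
	    ALLOCM_ALIGN(4096) | ALLOCM_ZERO) != ALLOCM_SUCCESS) {
		fprintf(stderr, "allocm() failed\n");
		return (1);
	}
	JEMALLOC_P(dallocm)(p, 0);
	return (0);
}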
.Pp
The
.Fn @jemalloc_prefix@allocm
function allocates at least
.Fa size
bytes of memory, sets
.Fa *ptr
to the base address of the allocation, and sets
.Fa *rsize
to the real size of the allocation if
.Fa rsize
is not
.Dv NULL .
.Pp
The
.Fn @jemalloc_prefix@rallocm
function resizes the allocation at
.Fa *ptr
to be at least
.Fa size
bytes, sets
.Fa *ptr
to the base address of the allocation if it moved, and sets
.Fa *rsize
to the real size of the allocation if
.Fa rsize
is not
.Dv NULL .
If
.Fa extra
is non-zero, an attempt is made to resize the allocation to be at least
.Fa ( size
+
.Fa extra )
bytes, though an inability to allocate the extra byte(s) will not by itself
result in failure.
Behavior is undefined if
.Fa ( size
+
.Fa extra
>
.Dv SIZE_T_MAX ) .
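A sketch of how size and extra interact (illustrative sizes only): the request below must deliver at least size bytes, opportunistically takes up to extra more, and reports what it actually got via rsize.

#include <stdlib.h>
#include <stdio.h>
#define JEMALLOC_MANGLE
#include "jemalloc/jemalloc.h"

int
main(void)
{
	void *p;
	size_t rsize;

	if (JEMALLOC_P(allocm)(&p, &rsize, 1000, 0) != ALLOCM_SUCCESS)
		return (1);

	/*
	 * Grow to at least 2000 bytes; take up to 2000 more if they can be
	 * had cheaply.  Failure to obtain the extra bytes is not an error.
	 */
	if (JEMALLOC_P(rallocm)(&p, &rsize, 2000, 2000, 0) != ALLOCM_SUCCESS)
		return (1);
	fprintf(stderr, "real size after grow: %zu\n", rsize);

	JEMALLOC_P(dallocm)(p, 0);
	return (0);
}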
.Pp
The
.Fn @jemalloc_prefix@sallocm
function sets
.Fa *rsize
to the real size of the allocation.
.Pp
The
.Fn @jemalloc_prefix@dallocm
function causes the memory referenced by
.Fa ptr
to be made available for future allocations.
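The real-size query parallels malloc_usable_size(). A short sketch (illustrative sizes) that sizes a buffer by what was actually handed out rather than what was asked for:

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#define JEMALLOC_MANGLE
#include "jemalloc/jemalloc.h"

int
main(void)
{
	void *p;
	size_t rsize;

	if (JEMALLOC_P(allocm)(&p, NULL, 100, 0) != ALLOCM_SUCCESS)
		return (1);

	/* The allocation is at least 100 bytes; use all of what it really is. */
	JEMALLOC_P(sallocm)(p, &rsize, 0);
	memset(p, 0, rsize);
	fprintf(stderr, "asked for 100, got %zu\n", rsize);

	JEMALLOC_P(dallocm)(p, 0);
	return (0);
}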
.Sh TUNING .Sh TUNING
Once, when the first call is made to one of these memory allocation Once, when the first call is made to one of these memory allocation
routines, various flags will be set or reset, which affects the routines, various flags will be set or reset, which affects the
@ -646,11 +758,10 @@ LsR
^^R ^^R
^^R ^^R
LsR LsR
^^R
^^R
^^R. ^^R.
Category;Subcategory;Size Category;Subcategory;Size
Small;Tiny;8 @roff_tiny@Small;Tiny;8
@roff_no_tiny@Small;Tiny;[disabled]
;Quantum-spaced;16 ;Quantum-spaced;16
;;32 ;;32
;;48 ;;48
@ -681,7 +792,7 @@ Allocations are packed tightly together, which can be an issue for
multi-threaded applications. multi-threaded applications.
If you need to assure that allocations do not suffer from cacheline sharing, If you need to assure that allocations do not suffer from cacheline sharing,
round your allocation requests up to the nearest multiple of the cacheline round your allocation requests up to the nearest multiple of the cacheline
size. size, or specify cacheline alignment when allocating.
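For the second option, a sketch using the new API (assuming 64-byte cache lines, which is an illustration-only assumption, not something the manual guarantees):

#include <stdlib.h>
#define JEMALLOC_MANGLE
#include "jemalloc/jemalloc.h"

#define ASSUMED_CACHELINE 64	/* assumption for illustration only */

int
main(void)
{
	void *counters;

	/* Each per-thread counter block starts on its own (assumed) cache line. */
	if (JEMALLOC_P(allocm)(&counters, NULL, 4 * ASSUMED_CACHELINE,
	    ALLOCM_ALIGN(ASSUMED_CACHELINE) | ALLOCM_ZERO) != ALLOCM_SUCCESS)
		return (1);
	JEMALLOC_P(dallocm)(counters, 0);
	return (0);
}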
.Sh MALLCTL NAMESPACE .Sh MALLCTL NAMESPACE
The following names are defined in the namespace accessible via the The following names are defined in the namespace accessible via the
.Fn @jemalloc_prefix@mallctl* .Fn @jemalloc_prefix@mallctl*
@ -1412,6 +1523,7 @@ is likely to result in a crash or deadlock.
All messages are prefixed by All messages are prefixed by
.Dq <jemalloc>: . .Dq <jemalloc>: .
.Sh RETURN VALUES .Sh RETURN VALUES
.Ss Standard API
The The
.Fn @jemalloc_prefix@malloc .Fn @jemalloc_prefix@malloc
and and
@ -1460,7 +1572,7 @@ when an error occurs.
The The
.Fn @jemalloc_prefix@free .Fn @jemalloc_prefix@free
function returns no value. function returns no value.
.Pp .Ss Non-standard API
The The
.Fn @jemalloc_prefix@malloc_usable_size .Fn @jemalloc_prefix@malloc_usable_size
function returns the usable size of the allocation pointed to by function returns the usable size of the allocation pointed to by
@ -1502,6 +1614,47 @@ An interface with side effects failed in some way not directly related to
.Fn @jemalloc_prefix@mallctl* .Fn @jemalloc_prefix@mallctl*
read/write processing. read/write processing.
.El .El
.Ss Experimental API
The
.Fn @jemalloc_prefix@allocm ,
.Fn @jemalloc_prefix@rallocm ,
.Fn @jemalloc_prefix@sallocm ,
and
.Fn @jemalloc_prefix@dallocm
functions return
.Dv ALLOCM_SUCCESS
on success; otherwise they return an error value.
The
.Fn @jemalloc_prefix@allocm
and
.Fn @jemalloc_prefix@rallocm
functions will fail if:
.Bl -tag -width ".Dv ALLOCM_ERR_OOM"
.It ALLOCM_ERR_OOM
Out of memory.
Insufficient contiguous memory was available to service the allocation request.
The
.Fn @jemalloc_prefix@allocm
function additionally sets
.Fa *ptr
to
.Dv NULL ,
whereas the
.Fn @jemalloc_prefix@rallocm
function leaves
.Fa *ptr
unmodified.
.El
.Pp
The
.Fn @jemalloc_prefix@rallocm
function will also fail if:
.Bl -tag -width ".Dv ALLOCM_ERR_NOT_MOVED"
.It ALLOCM_ERR_NOT_MOVED
.Dv ALLOCM_NO_MOVE
was specified, but the reallocation request could not be serviced without
moving the object.
.El
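A sketch of handling both error codes (the growth size is arbitrary): ALLOCM_ERR_NOT_MOVED is an expected outcome when ALLOCM_NO_MOVE is combined with a request that cannot be serviced in place, and is typically handled by retrying without the flag.

#include <stdlib.h>
#include <stdio.h>
#define JEMALLOC_MANGLE
#include "jemalloc/jemalloc.h"

int
main(void)
{
	void *p;
	size_t rsize;
	int r;

	r = JEMALLOC_P(allocm)(&p, &rsize, 4096, 0);
	if (r == ALLOCM_ERR_OOM) {
		fprintf(stderr, "out of memory (*ptr was set to NULL)\n");
		return (1);
	}

	/* Try to grow in place; fall back to a moving reallocation. */
	r = JEMALLOC_P(rallocm)(&p, &rsize, 65536, 0, ALLOCM_NO_MOVE);
	if (r == ALLOCM_ERR_NOT_MOVED)
		r = JEMALLOC_P(rallocm)(&p, &rsize, 65536, 0, 0);
	if (r != ALLOCM_SUCCESS)
		return (1);

	JEMALLOC_P(dallocm)(p, 0);
	return (0);
}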
.Sh ENVIRONMENT .Sh ENVIRONMENT
The following environment variables affect the execution of the allocation The following environment variables affect the execution of the allocation
functions: functions:

View File

@ -432,8 +432,8 @@ void arena_prof_accum(arena_t *arena, uint64_t accumbytes);
void *arena_malloc_small(arena_t *arena, size_t size, bool zero); void *arena_malloc_small(arena_t *arena, size_t size, bool zero);
void *arena_malloc_large(arena_t *arena, size_t size, bool zero); void *arena_malloc_large(arena_t *arena, size_t size, bool zero);
void *arena_malloc(size_t size, bool zero); void *arena_malloc(size_t size, bool zero);
void *arena_palloc(arena_t *arena, size_t alignment, size_t size, void *arena_palloc(arena_t *arena, size_t size, size_t alloc_size,
size_t alloc_size); size_t alignment, bool zero);
size_t arena_salloc(const void *ptr); size_t arena_salloc(const void *ptr);
#ifdef JEMALLOC_PROF #ifdef JEMALLOC_PROF
void arena_prof_promoted(const void *ptr, size_t size); void arena_prof_promoted(const void *ptr, size_t size);
@ -449,7 +449,10 @@ void arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
arena_stats_t *astats, malloc_bin_stats_t *bstats, arena_stats_t *astats, malloc_bin_stats_t *bstats,
malloc_large_stats_t *lstats); malloc_large_stats_t *lstats);
#endif #endif
void *arena_ralloc(void *ptr, size_t size, size_t oldsize); void *arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
size_t extra, bool zero);
void *arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
size_t alignment, bool zero);
bool arena_new(arena_t *arena, unsigned ind); bool arena_new(arena_t *arena, unsigned ind);
bool arena_boot(void); bool arena_boot(void);

View File

@ -20,8 +20,11 @@ extern size_t huge_allocated;
extern malloc_mutex_t huge_mtx; extern malloc_mutex_t huge_mtx;
void *huge_malloc(size_t size, bool zero); void *huge_malloc(size_t size, bool zero);
void *huge_palloc(size_t alignment, size_t size); void *huge_palloc(size_t size, size_t alignment, bool zero);
void *huge_ralloc(void *ptr, size_t size, size_t oldsize); void *huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
size_t extra);
void *huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
size_t alignment, bool zero);
void huge_dalloc(void *ptr); void huge_dalloc(void *ptr);
size_t huge_salloc(const void *ptr); size_t huge_salloc(const void *ptr);
#ifdef JEMALLOC_PROF #ifdef JEMALLOC_PROF

View File

@ -84,6 +84,8 @@ extern void (*JEMALLOC_P(malloc_message))(void *wcbopaque, const char *s);
/******************************************************************************/ /******************************************************************************/
#define JEMALLOC_H_TYPES #define JEMALLOC_H_TYPES
#define ALLOCM_LG_ALIGN_MASK ((int)0x3f)
#define ZU(z) ((size_t)z) #define ZU(z) ((size_t)z)
#ifndef __DECONST #ifndef __DECONST
@ -391,12 +393,13 @@ choose_arena(void)
#ifndef JEMALLOC_ENABLE_INLINE #ifndef JEMALLOC_ENABLE_INLINE
void *imalloc(size_t size); void *imalloc(size_t size);
void *icalloc(size_t size); void *icalloc(size_t size);
void *ipalloc(size_t alignment, size_t size); void *ipalloc(size_t size, size_t alignment, bool zero);
size_t isalloc(const void *ptr); size_t isalloc(const void *ptr);
# ifdef JEMALLOC_IVSALLOC # ifdef JEMALLOC_IVSALLOC
size_t ivsalloc(const void *ptr); size_t ivsalloc(const void *ptr);
# endif # endif
void *iralloc(void *ptr, size_t size); void *iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
bool zero, bool no_move);
void idalloc(void *ptr); void idalloc(void *ptr);
#endif #endif
@ -424,7 +427,7 @@ icalloc(size_t size)
} }
JEMALLOC_INLINE void * JEMALLOC_INLINE void *
ipalloc(size_t alignment, size_t size) ipalloc(size_t size, size_t alignment, bool zero)
{ {
void *ret; void *ret;
size_t ceil_size; size_t ceil_size;
@ -459,7 +462,7 @@ ipalloc(size_t alignment, size_t size)
if (ceil_size <= PAGE_SIZE || (alignment <= PAGE_SIZE if (ceil_size <= PAGE_SIZE || (alignment <= PAGE_SIZE
&& ceil_size <= arena_maxclass)) && ceil_size <= arena_maxclass))
ret = arena_malloc(ceil_size, false); ret = arena_malloc(ceil_size, zero);
else { else {
size_t run_size; size_t run_size;
@ -506,12 +509,12 @@ ipalloc(size_t alignment, size_t size)
} }
if (run_size <= arena_maxclass) { if (run_size <= arena_maxclass) {
ret = arena_palloc(choose_arena(), alignment, ceil_size, ret = arena_palloc(choose_arena(), ceil_size, run_size,
run_size); alignment, zero);
} else if (alignment <= chunksize) } else if (alignment <= chunksize)
ret = huge_malloc(ceil_size, false); ret = huge_malloc(ceil_size, zero);
else else
ret = huge_palloc(alignment, ceil_size); ret = huge_palloc(ceil_size, alignment, zero);
} }
assert(((uintptr_t)ret & (alignment - 1)) == 0); assert(((uintptr_t)ret & (alignment - 1)) == 0);
@ -556,8 +559,10 @@ ivsalloc(const void *ptr)
#endif #endif
JEMALLOC_INLINE void * JEMALLOC_INLINE void *
iralloc(void *ptr, size_t size) iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
bool no_move)
{ {
void *ret;
size_t oldsize; size_t oldsize;
assert(ptr != NULL); assert(ptr != NULL);
@ -565,10 +570,53 @@ iralloc(void *ptr, size_t size)
oldsize = isalloc(ptr); oldsize = isalloc(ptr);
if (size <= arena_maxclass) if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
return (arena_ralloc(ptr, size, oldsize)); != 0) {
else size_t copysize;
return (huge_ralloc(ptr, size, oldsize));
/*
* Existing object alignment is inadequate; allocate new space
* and copy.
*/
if (no_move)
return (NULL);
ret = ipalloc(size + extra, alignment, zero);
if (ret == NULL) {
if (extra == 0)
return (NULL);
/* Try again, without extra this time. */
ret = ipalloc(size, alignment, zero);
if (ret == NULL)
return (NULL);
}
/*
* Copy at most size bytes (not size+extra), since the caller
* has no expectation that the extra bytes will be reliably
* preserved.
*/
copysize = (size < oldsize) ? size : oldsize;
memcpy(ret, ptr, copysize);
idalloc(ptr);
return (ret);
}
if (no_move) {
if (size <= arena_maxclass) {
return (arena_ralloc_no_move(ptr, oldsize, size,
extra, zero));
} else {
return (huge_ralloc_no_move(ptr, oldsize, size,
extra));
}
} else {
if (size + extra <= arena_maxclass) {
return (arena_ralloc(ptr, oldsize, size, extra,
alignment, zero));
} else {
return (huge_ralloc(ptr, oldsize, size, extra,
alignment, zero));
}
}
} }
JEMALLOC_INLINE void JEMALLOC_INLINE void

View File

@ -4,6 +4,8 @@
extern "C" { extern "C" {
#endif #endif
#include <strings.h>
#define JEMALLOC_VERSION "@jemalloc_version@" #define JEMALLOC_VERSION "@jemalloc_version@"
#define JEMALLOC_VERSION_MAJOR @jemalloc_version_major@ #define JEMALLOC_VERSION_MAJOR @jemalloc_version_major@
#define JEMALLOC_VERSION_MINOR @jemalloc_version_minor@ #define JEMALLOC_VERSION_MINOR @jemalloc_version_minor@
@ -16,6 +18,19 @@ extern "C" {
# define JEMALLOC_P(s) s # define JEMALLOC_P(s) s
#endif #endif
#define ALLOCM_LG_ALIGN ((int)0x3f)
#if LG_SIZEOF_PTR == 2
#define ALLOCM_ALIGN(a) (ffs(a)-1)
#else
#define ALLOCM_ALIGN(a) ((a < (size_t)MAX_INT) ? ffs(a)-1 : ffs(a>>32)+31)
#endif
#define ALLOCM_ZERO ((int)0x40)
#define ALLOCM_NO_MOVE ((int)0x80)
#define ALLOCM_SUCCESS 0
#define ALLOCM_ERR_OOM 1
#define ALLOCM_ERR_NOT_MOVED 2
extern const char *JEMALLOC_P(malloc_options); extern const char *JEMALLOC_P(malloc_options);
extern void (*JEMALLOC_P(malloc_message))(void *, const char *); extern void (*JEMALLOC_P(malloc_message))(void *, const char *);
@ -36,6 +51,12 @@ int JEMALLOC_P(mallctlnametomib)(const char *name, size_t *mibp,
int JEMALLOC_P(mallctlbymib)(const size_t *mib, size_t miblen, void *oldp, int JEMALLOC_P(mallctlbymib)(const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen); size_t *oldlenp, void *newp, size_t newlen);
int JEMALLOC_P(allocm)(void **ptr, size_t *rsize, size_t size, int flags);
int JEMALLOC_P(rallocm)(void **ptr, size_t *rsize, size_t size,
size_t extra, int flags);
int JEMALLOC_P(sallocm)(const void *ptr, size_t *rsize, int flags);
int JEMALLOC_P(dallocm)(void *ptr, int flags);
#ifdef __cplusplus #ifdef __cplusplus
}; };
#endif #endif

View File

@ -177,10 +177,11 @@ static size_t arena_bin_run_size_calc(arena_bin_t *bin, size_t min_run_size);
static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
arena_run_t *run, arena_bin_t *bin); arena_run_t *run, arena_bin_t *bin);
static void arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, static void arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk,
void *ptr, size_t size, size_t oldsize); void *ptr, size_t oldsize, size_t size);
static bool arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, static bool arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk,
void *ptr, size_t size, size_t oldsize); void *ptr, size_t oldsize, size_t size, size_t extra, bool zero);
static bool arena_ralloc_large(void *ptr, size_t size, size_t oldsize); static bool arena_ralloc_large(void *ptr, size_t oldsize, size_t size,
size_t extra, bool zero);
static bool small_size2bin_init(void); static bool small_size2bin_init(void);
#ifdef JEMALLOC_DEBUG #ifdef JEMALLOC_DEBUG
static void small_size2bin_validate(void); static void small_size2bin_validate(void);
@ -1438,7 +1439,8 @@ arena_malloc(size_t size, bool zero)
/* Only handles large allocations that require more than page alignment. */ /* Only handles large allocations that require more than page alignment. */
void * void *
arena_palloc(arena_t *arena, size_t alignment, size_t size, size_t alloc_size) arena_palloc(arena_t *arena, size_t size, size_t alloc_size, size_t alignment,
bool zero)
{ {
void *ret; void *ret;
size_t offset; size_t offset;
@ -1448,7 +1450,7 @@ arena_palloc(arena_t *arena, size_t alignment, size_t size, size_t alloc_size)
assert((alignment & PAGE_MASK) == 0); assert((alignment & PAGE_MASK) == 0);
malloc_mutex_lock(&arena->lock); malloc_mutex_lock(&arena->lock);
ret = (void *)arena_run_alloc(arena, alloc_size, true, false); ret = (void *)arena_run_alloc(arena, alloc_size, true, zero);
if (ret == NULL) { if (ret == NULL) {
malloc_mutex_unlock(&arena->lock); malloc_mutex_unlock(&arena->lock);
return (NULL); return (NULL);
@ -1496,10 +1498,12 @@ arena_palloc(arena_t *arena, size_t alignment, size_t size, size_t alloc_size)
malloc_mutex_unlock(&arena->lock); malloc_mutex_unlock(&arena->lock);
#ifdef JEMALLOC_FILL #ifdef JEMALLOC_FILL
if (opt_junk) if (zero == false) {
memset(ret, 0xa5, size); if (opt_junk)
else if (opt_zero) memset(ret, 0xa5, size);
memset(ret, 0, size); else if (opt_zero)
memset(ret, 0, size);
}
#endif #endif
return (ret); return (ret);
} }
@ -1944,7 +1948,7 @@ arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
static void static void
arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr, arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t size, size_t oldsize) size_t oldsize, size_t size)
{ {
assert(size < oldsize); assert(size < oldsize);
@ -1979,27 +1983,29 @@ arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
static bool static bool
arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr, arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t size, size_t oldsize) size_t oldsize, size_t size, size_t extra, bool zero)
{ {
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT; size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
size_t npages = oldsize >> PAGE_SHIFT; size_t npages = oldsize >> PAGE_SHIFT;
size_t followsize;
assert(oldsize == (chunk->map[pageind].bits & ~PAGE_MASK)); assert(oldsize == (chunk->map[pageind].bits & ~PAGE_MASK));
/* Try to extend the run. */ /* Try to extend the run. */
assert(size > oldsize); assert(size + extra > oldsize);
malloc_mutex_lock(&arena->lock); malloc_mutex_lock(&arena->lock);
if (pageind + npages < chunk_npages && (chunk->map[pageind+npages].bits if (pageind + npages < chunk_npages && (chunk->map[pageind+npages].bits
& CHUNK_MAP_ALLOCATED) == 0 && (chunk->map[pageind+npages].bits & & CHUNK_MAP_ALLOCATED) == 0 && (followsize =
~PAGE_MASK) >= size - oldsize) { chunk->map[pageind+npages].bits & ~PAGE_MASK) >= size - oldsize) {
/* /*
* The next run is available and sufficiently large. Split the * The next run is available and sufficiently large. Split the
* following run, then merge the first part with the existing * following run, then merge the first part with the existing
* allocation. * allocation.
*/ */
size_t splitsize = (oldsize + followsize <= size + extra)
? followsize : size + extra - oldsize;
arena_run_split(arena, (arena_run_t *)((uintptr_t)chunk + arena_run_split(arena, (arena_run_t *)((uintptr_t)chunk +
((pageind+npages) << PAGE_SHIFT)), size - oldsize, true, ((pageind+npages) << PAGE_SHIFT)), splitsize, true, zero);
false);
chunk->map[pageind].bits = size | CHUNK_MAP_LARGE | chunk->map[pageind].bits = size | CHUNK_MAP_LARGE |
CHUNK_MAP_ALLOCATED; CHUNK_MAP_ALLOCATED;
@ -2037,11 +2043,12 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
* always fail if growing an object, and the following run is already in use. * always fail if growing an object, and the following run is already in use.
*/ */
static bool static bool
arena_ralloc_large(void *ptr, size_t size, size_t oldsize) arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
bool zero)
{ {
size_t psize; size_t psize;
psize = PAGE_CEILING(size); psize = PAGE_CEILING(size + extra);
if (psize == oldsize) { if (psize == oldsize) {
/* Same size class. */ /* Same size class. */
#ifdef JEMALLOC_FILL #ifdef JEMALLOC_FILL
@ -2067,14 +2074,15 @@ arena_ralloc_large(void *ptr, size_t size, size_t oldsize)
oldsize - size); oldsize - size);
} }
#endif #endif
arena_ralloc_large_shrink(arena, chunk, ptr, psize, arena_ralloc_large_shrink(arena, chunk, ptr, oldsize,
oldsize); psize);
return (false); return (false);
} else { } else {
bool ret = arena_ralloc_large_grow(arena, chunk, ptr, bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
psize, oldsize); oldsize, PAGE_CEILING(size),
psize - PAGE_CEILING(size), zero);
#ifdef JEMALLOC_FILL #ifdef JEMALLOC_FILL
if (ret == false && opt_zero) { if (ret == false && zero == false && opt_zero) {
memset((void *)((uintptr_t)ptr + oldsize), 0, memset((void *)((uintptr_t)ptr + oldsize), 0,
size - oldsize); size - oldsize);
} }
@ -2085,49 +2093,89 @@ arena_ralloc_large(void *ptr, size_t size, size_t oldsize)
} }
void * void *
arena_ralloc(void *ptr, size_t size, size_t oldsize) arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
bool zero)
{ {
void *ret;
size_t copysize;
/* Try to avoid moving the allocation. */ /*
* Avoid moving the allocation if the size class can be left the same.
*/
if (oldsize <= arena_maxclass) { if (oldsize <= arena_maxclass) {
if (oldsize <= small_maxclass) { if (oldsize <= small_maxclass) {
if (size <= small_maxclass && small_size2bin[size] == assert(choose_arena()->bins[small_size2bin[
small_size2bin[oldsize]) oldsize]].reg_size == oldsize);
goto IN_PLACE; if ((size + extra <= small_maxclass &&
small_size2bin[size + extra] ==
small_size2bin[oldsize]) || (size <= oldsize &&
size + extra >= oldsize)) {
#ifdef JEMALLOC_FILL
if (opt_junk && size < oldsize) {
memset((void *)((uintptr_t)ptr + size),
0x5a, oldsize - size);
}
#endif
return (ptr);
}
} else { } else {
assert(size <= arena_maxclass); assert(size <= arena_maxclass);
if (size > small_maxclass) { if (size + extra > small_maxclass) {
if (arena_ralloc_large(ptr, size, oldsize) == if (arena_ralloc_large(ptr, oldsize, size,
false) extra, zero) == false)
return (ptr); return (ptr);
} }
} }
} }
/* /* Reallocation would require a move. */
* If we get here, then size and oldsize are different enough that we return (NULL);
* need to move the object. In that case, fall back to allocating new }
* space and copying.
*/
ret = arena_malloc(size, false);
if (ret == NULL)
return (NULL);
/* Junk/zero-filling were already done by arena_malloc(). */ void *
arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
size_t alignment, bool zero)
{
void *ret;
size_t copysize;
/* Try to avoid moving the allocation. */
ret = arena_ralloc_no_move(ptr, oldsize, size, extra, zero);
if (ret != NULL)
return (ret);
/*
* size and oldsize are different enough that we need to move the
* object. In that case, fall back to allocating new space and
* copying.
*/
if (alignment != 0)
ret = ipalloc(size + extra, alignment, zero);
else
ret = arena_malloc(size + extra, zero);
if (ret == NULL) {
if (extra == 0)
return (NULL);
/* Try again, this time without extra. */
if (alignment != 0)
ret = ipalloc(size, alignment, zero);
else
ret = arena_malloc(size, zero);
if (ret == NULL)
return (NULL);
}
/* Junk/zero-filling were already done by ipalloc()/arena_malloc(). */
/*
* Copy at most size bytes (not size+extra), since the caller has no
* expectation that the extra bytes will be reliably preserved.
*/
copysize = (size < oldsize) ? size : oldsize; copysize = (size < oldsize) ? size : oldsize;
memcpy(ret, ptr, copysize); memcpy(ret, ptr, copysize);
idalloc(ptr); idalloc(ptr);
return (ret); return (ret);
IN_PLACE:
#ifdef JEMALLOC_FILL
if (opt_junk && size < oldsize)
memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize - size);
else if (opt_zero && size > oldsize)
memset((void *)((uintptr_t)ptr + oldsize), 0, size - oldsize);
#endif
return (ptr);
} }
bool bool

View File

@ -263,13 +263,12 @@ ckh_grow(ckh_t *ckh)
lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS; lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS;
while (true) { while (true) {
lg_curcells++; lg_curcells++;
tab = (ckhc_t *) ipalloc((ZU(1) << LG_CACHELINE), tab = (ckhc_t *)ipalloc(sizeof(ckhc_t) << lg_curcells,
sizeof(ckhc_t) << lg_curcells); ZU(1) << LG_CACHELINE, true);
if (tab == NULL) { if (tab == NULL) {
ret = true; ret = true;
goto RETURN; goto RETURN;
} }
memset(tab, 0, sizeof(ckhc_t) << lg_curcells);
/* Swap in new table. */ /* Swap in new table. */
ttab = ckh->tab; ttab = ckh->tab;
ckh->tab = tab; ckh->tab = tab;
@ -305,8 +304,8 @@ ckh_shrink(ckh_t *ckh)
*/ */
lg_prevbuckets = ckh->lg_curbuckets; lg_prevbuckets = ckh->lg_curbuckets;
lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1; lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
tab = (ckhc_t *)ipalloc((ZU(1) << LG_CACHELINE), tab = (ckhc_t *)ipalloc(sizeof(ckhc_t) << lg_curcells,
sizeof(ckhc_t) << lg_curcells); ZU(1) << LG_CACHELINE, true);
if (tab == NULL) { if (tab == NULL) {
/* /*
* An OOM error isn't worth propagating, since it doesn't * An OOM error isn't worth propagating, since it doesn't
@ -314,7 +313,6 @@ ckh_shrink(ckh_t *ckh)
*/ */
return; return;
} }
memset(tab, 0, sizeof(ckhc_t) << lg_curcells);
/* Swap in new table. */ /* Swap in new table. */
ttab = ckh->tab; ttab = ckh->tab;
ckh->tab = tab; ckh->tab = tab;
@ -377,13 +375,12 @@ ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)
ckh->hash = hash; ckh->hash = hash;
ckh->keycomp = keycomp; ckh->keycomp = keycomp;
ckh->tab = (ckhc_t *)ipalloc((ZU(1) << LG_CACHELINE), ckh->tab = (ckhc_t *)ipalloc(sizeof(ckhc_t) << lg_mincells,
sizeof(ckhc_t) << lg_mincells); (ZU(1) << LG_CACHELINE), true);
if (ckh->tab == NULL) { if (ckh->tab == NULL) {
ret = true; ret = true;
goto RETURN; goto RETURN;
} }
memset(ckh->tab, 0, sizeof(ckhc_t) << lg_mincells);
#ifdef JEMALLOC_DEBUG #ifdef JEMALLOC_DEBUG
ckh->magic = CKH_MAGIG; ckh->magic = CKH_MAGIG;

View File

@ -69,12 +69,11 @@ huge_malloc(size_t size, bool zero)
/* Only handles large allocations that require more than chunk alignment. */ /* Only handles large allocations that require more than chunk alignment. */
void * void *
huge_palloc(size_t alignment, size_t size) huge_palloc(size_t size, size_t alignment, bool zero)
{ {
void *ret; void *ret;
size_t alloc_size, chunk_size, offset; size_t alloc_size, chunk_size, offset;
extent_node_t *node; extent_node_t *node;
bool zero;
/* /*
* This allocation requires alignment that is even larger than chunk * This allocation requires alignment that is even larger than chunk
@ -98,7 +97,6 @@ huge_palloc(size_t alignment, size_t size)
if (node == NULL) if (node == NULL)
return (NULL); return (NULL);
zero = false;
ret = chunk_alloc(alloc_size, false, &zero); ret = chunk_alloc(alloc_size, false, &zero);
if (ret == NULL) { if (ret == NULL) {
base_node_dealloc(node); base_node_dealloc(node);
@ -142,45 +140,80 @@ huge_palloc(size_t alignment, size_t size)
malloc_mutex_unlock(&huge_mtx); malloc_mutex_unlock(&huge_mtx);
#ifdef JEMALLOC_FILL #ifdef JEMALLOC_FILL
if (opt_junk) if (zero == false) {
memset(ret, 0xa5, chunk_size); if (opt_junk)
else if (opt_zero) memset(ret, 0xa5, chunk_size);
memset(ret, 0, chunk_size); else if (opt_zero)
memset(ret, 0, chunk_size);
}
#endif #endif
return (ret); return (ret);
} }
void * void *
huge_ralloc(void *ptr, size_t size, size_t oldsize) huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
{ {
void *ret;
size_t copysize;
/* Avoid moving the allocation if the size class would not change. */ /*
if (oldsize > arena_maxclass && * Avoid moving the allocation if the size class can be left the same.
CHUNK_CEILING(size) == CHUNK_CEILING(oldsize)) { */
if (oldsize > arena_maxclass
&& CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
&& CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
assert(CHUNK_CEILING(oldsize) == oldsize);
#ifdef JEMALLOC_FILL #ifdef JEMALLOC_FILL
if (opt_junk && size < oldsize) { if (opt_junk && size < oldsize) {
memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize memset((void *)((uintptr_t)ptr + size), 0x5a,
- size); oldsize - size);
} else if (opt_zero && size > oldsize) {
memset((void *)((uintptr_t)ptr + oldsize), 0, size
- oldsize);
} }
#endif #endif
return (ptr); return (ptr);
} }
/* /* Reallocation would require a move. */
* If we get here, then size and oldsize are different enough that we return (NULL);
* need to use a different size class. In that case, fall back to }
* allocating new space and copying.
*/
ret = huge_malloc(size, false);
if (ret == NULL)
return (NULL);
void *
huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
size_t alignment, bool zero)
{
void *ret;
size_t copysize;
/* Try to avoid moving the allocation. */
ret = huge_ralloc_no_move(ptr, oldsize, size, extra);
if (ret != NULL)
return (ret);
/*
* size and oldsize are different enough that we need to use a
* different size class. In that case, fall back to allocating new
* space and copying.
*/
if (alignment != 0)
ret = huge_palloc(size + extra, alignment, zero);
else
ret = huge_malloc(size + extra, zero);
if (ret == NULL) {
if (extra == 0)
return (NULL);
/* Try again, this time without extra. */
if (alignment != 0)
ret = huge_palloc(size, alignment, zero);
else
ret = huge_malloc(size, zero);
if (ret == NULL)
return (NULL);
}
/*
* Copy at most size bytes (not size+extra), since the caller has no
* expectation that the extra bytes will be reliably preserved.
*/
copysize = (size < oldsize) ? size : oldsize; copysize = (size < oldsize) ? size : oldsize;
memcpy(ret, ptr, copysize); memcpy(ret, ptr, copysize);
idalloc(ptr); idalloc(ptr);

View File

@ -854,18 +854,20 @@ JEMALLOC_P(posix_memalign)(void **memptr, size_t alignment, size_t size)
} else { } else {
if (prof_promote && (uintptr_t)cnt != if (prof_promote && (uintptr_t)cnt !=
(uintptr_t)1U && size <= small_maxclass) { (uintptr_t)1U && size <= small_maxclass) {
result = ipalloc(alignment, result = ipalloc(small_maxclass+1,
small_maxclass+1); alignment, false);
if (result != NULL) { if (result != NULL) {
arena_prof_promoted(result, arena_prof_promoted(result,
size); size);
} }
} else } else {
result = ipalloc(alignment, size); result = ipalloc(size, alignment,
false);
}
} }
} else } else
#endif #endif
result = ipalloc(alignment, size); result = ipalloc(size, alignment, false);
} }
if (result == NULL) { if (result == NULL) {
@ -1023,14 +1025,15 @@ JEMALLOC_P(realloc)(void *ptr, size_t size)
} }
if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
size <= small_maxclass) { size <= small_maxclass) {
ret = iralloc(ptr, small_maxclass+1); ret = iralloc(ptr, small_maxclass+1, 0, 0,
false, false);
if (ret != NULL) if (ret != NULL)
arena_prof_promoted(ret, size); arena_prof_promoted(ret, size);
} else } else
ret = iralloc(ptr, size); ret = iralloc(ptr, size, 0, 0, false, false);
} else } else
#endif #endif
ret = iralloc(ptr, size); ret = iralloc(ptr, size, 0, 0, false, false);
#ifdef JEMALLOC_PROF #ifdef JEMALLOC_PROF
OOM: OOM:
@ -1133,6 +1136,8 @@ JEMALLOC_P(malloc_usable_size)(const void *ptr)
{ {
size_t ret; size_t ret;
assert(malloc_initialized || malloc_initializer == pthread_self());
#ifdef JEMALLOC_IVSALLOC #ifdef JEMALLOC_IVSALLOC
ret = ivsalloc(ptr); ret = ivsalloc(ptr);
#else #else
@ -1204,6 +1209,184 @@ JEMALLOC_P(mallctlbymib)(const size_t *mib, size_t miblen, void *oldp,
return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen)); return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
} }
JEMALLOC_INLINE void *
iallocm(size_t size, size_t alignment, bool zero)
{
if (alignment != 0)
return (ipalloc(size, alignment, zero));
else if (zero)
return (icalloc(size));
else
return (imalloc(size));
}
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(allocm)(void **ptr, size_t *rsize, size_t size, int flags)
{
void *p;
size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
& (SIZE_T_MAX-1));
bool zero = flags & ALLOCM_ZERO;
#ifdef JEMALLOC_PROF
prof_thr_cnt_t *cnt;
#endif
assert(ptr != NULL);
assert(size != 0);
if (malloc_init())
goto OOM;
#ifdef JEMALLOC_PROF
if (opt_prof) {
if ((cnt = prof_alloc_prep(size)) == NULL)
goto OOM;
if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && size <=
small_maxclass) {
p = iallocm(small_maxclass+1, alignment, zero);
if (p == NULL)
goto OOM;
arena_prof_promoted(p, size);
} else {
p = iallocm(size, alignment, zero);
if (p == NULL)
goto OOM;
}
} else
#endif
{
p = iallocm(size, alignment, zero);
if (p == NULL)
goto OOM;
}
*ptr = p;
if (rsize != NULL)
*rsize = isalloc(p);
return (ALLOCM_SUCCESS);
OOM:
#ifdef JEMALLOC_XMALLOC
if (opt_xmalloc) {
malloc_write("<jemalloc>: Error in allocm(): "
"out of memory\n");
abort();
}
#endif
*ptr = NULL;
return (ALLOCM_ERR_OOM);
}
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(rallocm)(void **ptr, size_t *rsize, size_t size, size_t extra,
int flags)
{
void *p, *q;
size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
& (SIZE_T_MAX-1));
bool zero = flags & ALLOCM_ZERO;
bool no_move = flags & ALLOCM_NO_MOVE;
#ifdef JEMALLOC_PROF
size_t old_size;
prof_thr_cnt_t *cnt;
prof_ctx_t *old_ctx;
#endif
assert(ptr != NULL);
assert(*ptr != NULL);
assert(size != 0);
assert(SIZE_T_MAX - size >= extra);
assert(malloc_initialized || malloc_initializer == pthread_self());
p = *ptr;
#ifdef JEMALLOC_PROF
if (opt_prof) {
old_size = isalloc(p);
old_ctx = prof_ctx_get(p);
if ((cnt = prof_alloc_prep(size)) == NULL)
goto OOM;
if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && size <=
small_maxclass) {
q = iralloc(p, small_maxclass+1, (small_maxclass+1 >=
size+extra) ? 0 : size+extra - (small_maxclass+1),
alignment, zero, no_move);
if (q == NULL)
goto ERR;
arena_prof_promoted(q, size);
} else {
q = iralloc(p, size, extra, alignment, zero, no_move);
if (q == NULL)
goto ERR;
}
prof_realloc(q, cnt, p, old_size, old_ctx);
} else
#endif
{
q = iralloc(p, size, extra, alignment, zero, no_move);
if (q == NULL)
goto ERR;
}
*ptr = q;
if (rsize != NULL)
*rsize = isalloc(q);
return (ALLOCM_SUCCESS);
ERR:
if (no_move)
return (ALLOCM_ERR_NOT_MOVED);
#ifdef JEMALLOC_PROF
OOM:
#endif
#ifdef JEMALLOC_XMALLOC
if (opt_xmalloc) {
malloc_write("<jemalloc>: Error in rallocm(): "
"out of memory\n");
abort();
}
#endif
return (ALLOCM_ERR_OOM);
}
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(sallocm)(const void *ptr, size_t *rsize, int flags)
{
size_t sz;
assert(malloc_initialized || malloc_initializer == pthread_self());
#ifdef JEMALLOC_IVSALLOC
sz = ivsalloc(ptr);
#else
assert(ptr != NULL);
sz = isalloc(ptr);
#endif
assert(rsize != NULL);
*rsize = sz;
return (ALLOCM_SUCCESS);
}
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(dallocm)(void *ptr, int flags)
{
assert(ptr != NULL);
assert(malloc_initialized || malloc_initializer == pthread_self());
#ifdef JEMALLOC_PROF
if (opt_prof)
prof_free(ptr);
#endif
idalloc(ptr);
return (ALLOCM_SUCCESS);
}
/* /*
* End non-standard functions. * End non-standard functions.
*/ */

View File

@ -90,7 +90,7 @@ prof_sample_state_t prof_sample_state_oom;
r = (prof_sample_state_t *)pthread_getspecific( \ r = (prof_sample_state_t *)pthread_getspecific( \
prof_sample_state_tsd); \ prof_sample_state_tsd); \
if (r == NULL) { \ if (r == NULL) { \
r = ipalloc(CACHELINE, sizeof(prof_sample_state_t)); \ r = ipalloc(sizeof(prof_sample_state_t), CACHELINE); \
if (r == NULL) { \ if (r == NULL) { \
malloc_write("<jemalloc>: Error in heap " \ malloc_write("<jemalloc>: Error in heap " \
"profiler: out of memory; subsequent heap " \ "profiler: out of memory; subsequent heap " \

View File

@ -209,7 +209,9 @@ tcache_create(arena_t *arena)
* Round up to the nearest multiple of the cacheline size, in order to * Round up to the nearest multiple of the cacheline size, in order to
* avoid the possibility of false cacheline sharing. * avoid the possibility of false cacheline sharing.
* *
* That this works relies on the same logic as in ipalloc(). * That this works relies on the same logic as in ipalloc(), but we
* cannot directly call ipalloc() here due to tcache bootstrapping
* issues.
*/ */
size = (size + CACHELINE_MASK) & (-CACHELINE); size = (size + CACHELINE_MASK) & (-CACHELINE);

jemalloc/test/allocm.c (new file, 133 lines)
View File

@ -0,0 +1,133 @@
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#define JEMALLOC_MANGLE
#include "jemalloc/jemalloc.h"
#define CHUNK 0x400000
/* #define MAXALIGN ((size_t)0x80000000000LLU) */
#define MAXALIGN ((size_t)0x2000000LLU)
#define NITER 4
int
main(void)
{
int r;
void *p;
size_t sz, alignment, total, tsz;
unsigned i;
void *ps[NITER];
fprintf(stderr, "Test begin\n");
sz = 0;
r = JEMALLOC_P(allocm)(&p, &sz, 42, 0);
if (r != ALLOCM_SUCCESS) {
fprintf(stderr, "Unexpected allocm() error\n");
abort();
}
if (sz < 42)
fprintf(stderr, "Real size smaller than expected\n");
if (JEMALLOC_P(dallocm)(p, 0) != ALLOCM_SUCCESS)
fprintf(stderr, "Unexpected dallocm() error\n");
r = JEMALLOC_P(allocm)(&p, NULL, 42, 0);
if (r != ALLOCM_SUCCESS) {
fprintf(stderr, "Unexpected allocm() error\n");
abort();
}
if (JEMALLOC_P(dallocm)(p, 0) != ALLOCM_SUCCESS)
fprintf(stderr, "Unexpected dallocm() error\n");
r = JEMALLOC_P(allocm)(&p, NULL, 42, ALLOCM_ZERO);
if (r != ALLOCM_SUCCESS) {
fprintf(stderr, "Unexpected allocm() error\n");
abort();
}
if (JEMALLOC_P(dallocm)(p, 0) != ALLOCM_SUCCESS)
fprintf(stderr, "Unexpected dallocm() error\n");
#if LG_SIZEOF_PTR == 3
alignment = 0x8000000000000000LLU;
sz = 0x8000000000000000LLU;
#else
alignment = 0x80000000LU;
sz = 0x80000000LU;
#endif
r = JEMALLOC_P(allocm)(&p, NULL, sz, ALLOCM_ALIGN(alignment));
if (r == ALLOCM_SUCCESS) {
fprintf(stderr,
"Expected error for allocm(&p, %zu, 0x%x)\n",
sz, ALLOCM_ALIGN(alignment));
}
#if LG_SIZEOF_PTR == 3
alignment = 0x4000000000000000LLU;
sz = 0x8400000000000001LLU;
#else
alignment = 0x40000000LU;
sz = 0x84000001LU;
#endif
r = JEMALLOC_P(allocm)(&p, NULL, sz, ALLOCM_ALIGN(alignment));
if (r == ALLOCM_SUCCESS) {
fprintf(stderr,
"Expected error for allocm(&p, %zu, 0x%x)\n",
sz, ALLOCM_ALIGN(alignment));
}
alignment = 0x10LLU;
#if LG_SIZEOF_PTR == 3
sz = 0xfffffffffffffff0LLU;
#else
sz = 0xfffffff0LU;
#endif
r = JEMALLOC_P(allocm)(&p, NULL, sz, ALLOCM_ALIGN(alignment));
if (r == ALLOCM_SUCCESS) {
fprintf(stderr,
"Expected error for allocm(&p, %zu, 0x%x)\n",
sz, ALLOCM_ALIGN(alignment));
}
for (i = 0; i < NITER; i++)
ps[i] = NULL;
for (alignment = 8;
alignment <= MAXALIGN;
alignment <<= 1) {
total = 0;
fprintf(stderr, "Alignment: %zu\n", alignment);
for (sz = 1;
sz < 3 * alignment && sz < (1U << 31);
sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
for (i = 0; i < NITER; i++) {
r = JEMALLOC_P(allocm)(&ps[i], NULL, sz,
ALLOCM_ALIGN(alignment) | ALLOCM_ZERO);
if (r != ALLOCM_SUCCESS) {
fprintf(stderr,
"Error for size %zu (0x%zx): %d\n",
sz, sz, r);
exit(1);
}
if ((uintptr_t)p & (alignment-1)) {
fprintf(stderr,
"%p inadequately aligned for"
" alignment: %zu\n", p, alignment);
}
JEMALLOC_P(sallocm)(ps[i], &tsz, 0);
total += tsz;
if (total >= (MAXALIGN << 1))
break;
}
for (i = 0; i < NITER; i++) {
if (ps[i] != NULL) {
JEMALLOC_P(dallocm)(ps[i], 0);
ps[i] = NULL;
}
}
}
}
fprintf(stderr, "Test end\n");
return (0);
}

jemalloc/test/allocm.exp (new file, 25 lines)
View File

@ -0,0 +1,25 @@
Test begin
Alignment: 8
Alignment: 16
Alignment: 32
Alignment: 64
Alignment: 128
Alignment: 256
Alignment: 512
Alignment: 1024
Alignment: 2048
Alignment: 4096
Alignment: 8192
Alignment: 16384
Alignment: 32768
Alignment: 65536
Alignment: 131072
Alignment: 262144
Alignment: 524288
Alignment: 1048576
Alignment: 2097152
Alignment: 4194304
Alignment: 8388608
Alignment: 16777216
Alignment: 33554432
Test end

View File

@ -7,7 +7,7 @@
#define JEMALLOC_MANGLE #define JEMALLOC_MANGLE
#include "jemalloc/jemalloc.h" #include "jemalloc/jemalloc.h"
#define CHUNK 0x100000 #define CHUNK 0x400000
/* #define MAXALIGN ((size_t)0x80000000000LLU) */ /* #define MAXALIGN ((size_t)0x80000000000LLU) */
#define MAXALIGN ((size_t)0x2000000LLU) #define MAXALIGN ((size_t)0x2000000LLU)
#define NITER 4 #define NITER 4
@ -117,6 +117,5 @@ main(void)
} }
fprintf(stderr, "Test end\n"); fprintf(stderr, "Test end\n");
return (0);
return 0;
} }

jemalloc/test/rallocm.c (new file, 117 lines)
View File

@ -0,0 +1,117 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define JEMALLOC_MANGLE
#include "jemalloc/jemalloc.h"
int
main(void)
{
void *p, *q;
size_t sz, tsz;
int r;
fprintf(stderr, "Test begin\n");
r = allocm(&p, &sz, 42, 0);
if (r != ALLOCM_SUCCESS) {
fprintf(stderr, "Unexpected allocm() error\n");
abort();
}
q = p;
r = rallocm(&q, &tsz, sz, 0, ALLOCM_NO_MOVE);
if (r != ALLOCM_SUCCESS)
fprintf(stderr, "Unexpected rallocm() error\n");
if (q != p)
fprintf(stderr, "Unexpected object move\n");
if (tsz != sz) {
fprintf(stderr, "Unexpected size change: %zu --> %zu\n",
sz, tsz);
}
q = p;
r = rallocm(&q, &tsz, sz, 5, ALLOCM_NO_MOVE);
if (r != ALLOCM_SUCCESS)
fprintf(stderr, "Unexpected rallocm() error\n");
if (q != p)
fprintf(stderr, "Unexpected object move\n");
if (tsz != sz) {
fprintf(stderr, "Unexpected size change: %zu --> %zu\n",
sz, tsz);
}
q = p;
r = rallocm(&q, &tsz, sz + 5, 0, ALLOCM_NO_MOVE);
if (r != ALLOCM_ERR_NOT_MOVED)
fprintf(stderr, "Unexpected rallocm() result\n");
if (q != p)
fprintf(stderr, "Unexpected object move\n");
if (tsz != sz) {
fprintf(stderr, "Unexpected size change: %zu --> %zu\n",
sz, tsz);
}
q = p;
r = rallocm(&q, &tsz, sz + 5, 0, 0);
if (r != ALLOCM_SUCCESS)
fprintf(stderr, "Unexpected rallocm() error\n");
if (q == p)
fprintf(stderr, "Expected object move\n");
if (tsz == sz) {
fprintf(stderr, "Expected size change: %zu --> %zu\n",
sz, tsz);
}
p = q;
sz = tsz;
r = rallocm(&q, &tsz, 8192, 0, 0);
if (r != ALLOCM_SUCCESS)
fprintf(stderr, "Unexpected rallocm() error\n");
if (q == p)
fprintf(stderr, "Expected object move\n");
if (tsz == sz) {
fprintf(stderr, "Expected size change: %zu --> %zu\n",
sz, tsz);
}
p = q;
sz = tsz;
r = rallocm(&q, &tsz, 16384, 0, 0);
if (r != ALLOCM_SUCCESS)
fprintf(stderr, "Unexpected rallocm() error\n");
if (tsz == sz) {
fprintf(stderr, "Expected size change: %zu --> %zu\n",
sz, tsz);
}
p = q;
sz = tsz;
r = rallocm(&q, &tsz, 8192, 0, ALLOCM_NO_MOVE);
if (r != ALLOCM_SUCCESS)
fprintf(stderr, "Unexpected rallocm() error\n");
if (q != p)
fprintf(stderr, "Unexpected object move\n");
if (tsz == sz) {
fprintf(stderr, "Expected size change: %zu --> %zu\n",
sz, tsz);
}
sz = tsz;
r = rallocm(&q, &tsz, 16384, 0, ALLOCM_NO_MOVE);
if (r != ALLOCM_SUCCESS)
fprintf(stderr, "Unexpected rallocm() error\n");
if (q != p)
fprintf(stderr, "Unexpected object move\n");
if (tsz == sz) {
fprintf(stderr, "Expected size change: %zu --> %zu\n",
sz, tsz);
}
sz = tsz;
dallocm(p, 0);
fprintf(stderr, "Test end\n");
return (0);
}

View File

@ -0,0 +1,2 @@
Test begin
Test end