diff --git a/Makefile.in b/Makefile.in index cd137fd9..af60a21c 100644 --- a/Makefile.in +++ b/Makefile.in @@ -112,13 +112,16 @@ TESTS_UNIT := $(srcroot)test/unit/bitmap.c $(srcroot)test/unit/math.c \ $(srcroot)test/unit/SFMT.c $(srcroot)test/unit/tsd.c TESTS_INTEGRATION := $(srcroot)test/integration/aligned_alloc.c \ $(srcroot)test/integration/allocated.c \ - $(srcroot)test/integration/ALLOCM_ARENA.c \ + $(srcroot)test/integration/mallocx.c \ $(srcroot)test/integration/mremap.c \ $(srcroot)test/integration/posix_memalign.c \ + $(srcroot)test/integration/rallocx.c \ $(srcroot)test/integration/thread_arena.c \ - $(srcroot)test/integration/thread_tcache_enabled.c + $(srcroot)test/integration/thread_tcache_enabled.c \ + $(srcroot)test/integration/xallocx.c ifeq ($(enable_experimental), 1) TESTS_INTEGRATION += $(srcroot)test/integration/allocm.c \ + $(srcroot)test/integration/ALLOCM_ARENA.c \ $(srcroot)test/integration/rallocm.c endif TESTS_STRESS := diff --git a/configure.ac b/configure.ac index 02842b63..724bc1a2 100644 --- a/configure.ac +++ b/configure.ac @@ -417,7 +417,7 @@ AC_PROG_RANLIB AC_PATH_PROG([LD], [ld], [false], [$PATH]) AC_PATH_PROG([AUTOCONF], [autoconf], [false], [$PATH]) -public_syms="malloc_conf malloc_message malloc calloc posix_memalign aligned_alloc realloc free malloc_usable_size malloc_stats_print mallctl mallctlnametomib mallctlbymib" +public_syms="malloc_conf malloc_message malloc calloc posix_memalign aligned_alloc realloc free mallocx rallocx xallocx sallocx dallocx nallocx mallctl mallctlnametomib mallctlbymib malloc_stats_print malloc_usable_size" dnl Check for allocator-related functions that should be wrapped. AC_CHECK_FUNC([memalign], diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in index 596f6458..d6f72722 100644 --- a/doc/jemalloc.xml.in +++ b/doc/jemalloc.xml.in @@ -33,11 +33,17 @@ aligned_alloc realloc free - malloc_usable_size - malloc_stats_print + mallocx + rallocx + xallocx + sallocx + dallocx + nallocx mallctl mallctlnametomib mallctlbymib + malloc_stats_print + malloc_usable_size allocm rallocm sallocm @@ -92,16 +98,37 @@ Non-standard API - size_t malloc_usable_size - const void *ptr + void *mallocx + size_t size + int flags - void malloc_stats_print - void (*write_cb) - void *, const char * - - void *cbopaque - const char *opts + void *rallocx + void *ptr + size_t size + int flags + + + size_t xallocx + void *ptr + size_t size + size_t extra + int flags + + + size_t sallocx + void *ptr + int flags + + + void dallocx + void *ptr + int flags + + + size_t nallocx + size_t size + int flags int mallctl @@ -126,6 +153,18 @@ void *newp size_t newlen + + void malloc_stats_print + void (*write_cb) + void *, const char * + + void *cbopaque + const char *opts + + + size_t malloc_usable_size + const void *ptr + void (*malloc_message) void *cbopaque @@ -225,41 +264,99 @@ Non-standard API + The mallocx, + rallocx, + xallocx, + sallocx, + dallocx, and + nallocx functions all have a + flags argument that can be used to specify + options. The functions only check the options that are contextually + relevant. Use bitwise or (|) operations to + specify one or more of the following: + + + MALLOCX_LG_ALIGN(la) + - The malloc_usable_size function - returns the usable size of the allocation pointed to by - ptr. The return value may be larger than the size - that was requested during allocation. The - malloc_usable_size function is not a - mechanism for in-place realloc; rather - it is provided solely as a tool for introspection purposes. 
Any - discrepancy between the requested allocation size and the size reported - by malloc_usable_size should not be - depended on, since such behavior is entirely implementation-dependent. + Align the memory allocation to start at an address + that is a multiple of (1 << + la). This macro does not validate + that la is within the valid + range. + + + MALLOCX_ALIGN(a) + + + Align the memory allocation to start at an address + that is a multiple of a, where + a is a power of two. This macro does not + validate that a is a power of 2. + + + + MALLOCX_ZERO + + Initialize newly allocated memory to contain zero + bytes. In the growing reallocation case, the real size prior to + reallocation defines the boundary between untouched bytes and those + that are initialized to contain zero bytes. If this macro is + absent, newly allocated memory is uninitialized. + + + MALLOCX_ARENA(a) + + + Use the arena specified by the index + a (and by necessity bypass the thread + cache). This macro has no effect for huge regions, nor for regions + that were allocated via an arena other than the one specified. + This macro does not validate that a + specifies an arena index in the valid range. + + - The malloc_stats_print function - writes human-readable summary statistics via the - write_cb callback function pointer and - cbopaque data passed to - write_cb, or - malloc_message if - write_cb is NULL. This - function can be called repeatedly. General information that never - changes during execution can be omitted by specifying "g" as a character - within the opts string. Note that - malloc_message uses the - mallctl* functions internally, so - inconsistent statistics can be reported if multiple threads use these - functions simultaneously. If is - specified during configuration, “m” and “a” can - be specified to omit merged arena and per arena statistics, respectively; - “b” and “l” can be specified to omit per size - class statistics for bins and large objects, respectively. Unrecognized - characters are silently ignored. Note that thread caching may prevent - some statistics from being completely up to date, since extra locking - would be required to merge counters that track thread cache operations. - + The mallocx function allocates at + least size bytes of memory, and returns a pointer + to the base address of the allocation. Behavior is undefined if + size is 0. + + The rallocx function resizes the + allocation at ptr to be at least + size bytes, and returns a pointer to the base + address of the resulting allocation, which may or may not have moved from + its original location. Behavior is undefined if + size is 0. + + The xallocx function resizes the + allocation at ptr in place to be at least + size bytes, and returns the real size of the + allocation. If extra is non-zero, an attempt is + made to resize the allocation to be at least (size + + extra) bytes, though inability to allocate + the extra byte(s) will not by itself result in failure to resize. + Behavior is undefined if size is + 0, or if (size + extra + > SIZE_T_MAX). + + The sallocx function returns the + real size of the allocation at ptr. + + The dallocx function causes the + memory referenced by ptr to be made available for + future allocations. + + The nallocx function allocates no + memory, but it performs the same size computation as the + mallocx function, and returns the real + size of the allocation that would result from the equivalent + mallocx function call. Behavior is + undefined if size is + 0. 
The mallctl function provides a general interface for introspecting the memory allocator, as well as @@ -314,6 +411,41 @@ for (i = 0; i < nbins; i++) { mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0); /* Do something with bin_size... */ }]]> + + The malloc_stats_print function + writes human-readable summary statistics via the + write_cb callback function pointer and + cbopaque data passed to + write_cb, or + malloc_message if + write_cb is NULL. This + function can be called repeatedly. General information that never + changes during execution can be omitted by specifying "g" as a character + within the opts string. Note that + malloc_message uses the + mallctl* functions internally, so + inconsistent statistics can be reported if multiple threads use these + functions simultaneously. If is + specified during configuration, “m” and “a” can + be specified to omit merged arena and per arena statistics, respectively; + “b” and “l” can be specified to omit per size + class statistics for bins and large objects, respectively. Unrecognized + characters are silently ignored. Note that thread caching may prevent + some statistics from being completely up to date, since extra locking + would be required to merge counters that track thread cache operations. + + + The malloc_usable_size function + returns the usable size of the allocation pointed to by + ptr. The return value may be larger than the size + that was requested during allocation. The + malloc_usable_size function is not a + mechanism for in-place realloc; rather + it is provided solely as a tool for introspection purposes. Any + discrepancy between the requested allocation size and the size reported + by malloc_usable_size should not be + depended on, since such behavior is entirely implementation-dependent. + Experimental API @@ -398,7 +530,7 @@ for (i = 0; i < nbins; i++) { rsize is not NULL. If extra is non-zero, an attempt is made to resize the allocation to be at least size + + language="C">(size + extra) bytes, though inability to allocate the extra byte(s) will not by itself result in failure. Behavior is undefined if size is 0, or if @@ -936,7 +1068,8 @@ for (i = 0; i < nbins; i++) { Zero filling enabled/disabled. If enabled, each byte of uninitialized allocated memory will be initialized to 0. Note that this initialization only happens once for each byte, so - realloc and + realloc, + rallocx and rallocm calls do not zero memory that was previously allocated. This is intended for debugging and will impact performance negatively. This option is disabled by default. @@ -2039,9 +2172,26 @@ malloc_conf = "xmalloc:true";]]> Non-standard API - The malloc_usable_size function - returns the usable size of the allocation pointed to by - ptr. + The mallocx and + rallocx functions return a pointer to + the allocated memory if successful; otherwise a NULL + pointer is returned to indicate insufficient contiguous memory was + available to service the allocation request. + + The xallocx function returns the + real size of the resulting resized allocation pointed to by + ptr, which is a value less than + size if the allocation could not be adequately + grown in place. + + The sallocx function returns the + real size of the allocation pointed to by ptr. + + + The nallocx returns the real size + that would result from a successful equivalent + mallocx function call, or zero if + insufficient memory is available to perform the size computation. 
The mallctl, mallctlnametomib, and @@ -2092,6 +2242,10 @@ malloc_conf = "xmalloc:true";]]> + + The malloc_usable_size function + returns the usable size of the allocation pointed to by + ptr. Experimental API diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in index 3dd9761d..f380bbfb 100644 --- a/include/jemalloc/internal/jemalloc_internal.h.in +++ b/include/jemalloc/internal/jemalloc_internal.h.in @@ -228,6 +228,7 @@ static const bool config_ivsalloc = #include "jemalloc/internal/jemalloc_internal_macros.h" +#define MALLOCX_LG_ALIGN_MASK ((int)0x3f) #define ALLOCM_LG_ALIGN_MASK ((int)0x3f) /* Smallest size class to support. */ @@ -731,22 +732,22 @@ choose_arena(arena_t *arena) #include "jemalloc/internal/quarantine.h" #ifndef JEMALLOC_ENABLE_INLINE -void *imallocx(size_t size, bool try_tcache, arena_t *arena); +void *imalloct(size_t size, bool try_tcache, arena_t *arena); void *imalloc(size_t size); -void *icallocx(size_t size, bool try_tcache, arena_t *arena); +void *icalloct(size_t size, bool try_tcache, arena_t *arena); void *icalloc(size_t size); -void *ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache, +void *ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache, arena_t *arena); void *ipalloc(size_t usize, size_t alignment, bool zero); size_t isalloc(const void *ptr, bool demote); size_t ivsalloc(const void *ptr, bool demote); size_t u2rz(size_t usize); size_t p2rz(const void *ptr); -void idallocx(void *ptr, bool try_tcache); +void idalloct(void *ptr, bool try_tcache); void idalloc(void *ptr); -void iqallocx(void *ptr, bool try_tcache); +void iqalloct(void *ptr, bool try_tcache); void iqalloc(void *ptr); -void *irallocx(void *ptr, size_t size, size_t extra, size_t alignment, +void *iralloct(void *ptr, size_t size, size_t extra, size_t alignment, bool zero, bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena); void *iralloc(void *ptr, size_t size, size_t extra, size_t alignment, @@ -756,7 +757,7 @@ malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t) #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_)) JEMALLOC_ALWAYS_INLINE void * -imallocx(size_t size, bool try_tcache, arena_t *arena) +imalloct(size_t size, bool try_tcache, arena_t *arena) { assert(size != 0); @@ -771,11 +772,11 @@ JEMALLOC_ALWAYS_INLINE void * imalloc(size_t size) { - return (imallocx(size, true, NULL)); + return (imalloct(size, true, NULL)); } JEMALLOC_ALWAYS_INLINE void * -icallocx(size_t size, bool try_tcache, arena_t *arena) +icalloct(size_t size, bool try_tcache, arena_t *arena) { if (size <= arena_maxclass) @@ -788,11 +789,11 @@ JEMALLOC_ALWAYS_INLINE void * icalloc(size_t size) { - return (icallocx(size, true, NULL)); + return (icalloct(size, true, NULL)); } JEMALLOC_ALWAYS_INLINE void * -ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache, +ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache, arena_t *arena) { void *ret; @@ -820,7 +821,7 @@ JEMALLOC_ALWAYS_INLINE void * ipalloc(size_t usize, size_t alignment, bool zero) { - return (ipallocx(usize, alignment, zero, true, NULL)); + return (ipalloct(usize, alignment, zero, true, NULL)); } /* @@ -881,7 +882,7 @@ p2rz(const void *ptr) } JEMALLOC_ALWAYS_INLINE void -idallocx(void *ptr, bool try_tcache) +idalloct(void *ptr, bool try_tcache) { arena_chunk_t *chunk; @@ -898,28 +899,28 @@ JEMALLOC_ALWAYS_INLINE void idalloc(void *ptr) { - idallocx(ptr, true); + idalloct(ptr, true); } 
JEMALLOC_ALWAYS_INLINE void -iqallocx(void *ptr, bool try_tcache) +iqalloct(void *ptr, bool try_tcache) { if (config_fill && opt_quarantine) quarantine(ptr); else - idallocx(ptr, try_tcache); + idalloct(ptr, try_tcache); } JEMALLOC_ALWAYS_INLINE void iqalloc(void *ptr) { - iqallocx(ptr, true); + iqalloct(ptr, true); } JEMALLOC_ALWAYS_INLINE void * -irallocx(void *ptr, size_t size, size_t extra, size_t alignment, bool zero, +iralloct(void *ptr, size_t size, size_t extra, size_t alignment, bool zero, bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena) { void *ret; @@ -943,7 +944,7 @@ irallocx(void *ptr, size_t size, size_t extra, size_t alignment, bool zero, usize = sa2u(size + extra, alignment); if (usize == 0) return (NULL); - ret = ipallocx(usize, alignment, zero, try_tcache_alloc, arena); + ret = ipalloct(usize, alignment, zero, try_tcache_alloc, arena); if (ret == NULL) { if (extra == 0) return (NULL); @@ -951,7 +952,7 @@ irallocx(void *ptr, size_t size, size_t extra, size_t alignment, bool zero, usize = sa2u(size, alignment); if (usize == 0) return (NULL); - ret = ipallocx(usize, alignment, zero, try_tcache_alloc, + ret = ipalloct(usize, alignment, zero, try_tcache_alloc, arena); if (ret == NULL) return (NULL); @@ -963,7 +964,7 @@ irallocx(void *ptr, size_t size, size_t extra, size_t alignment, bool zero, */ copysize = (size < oldsize) ? size : oldsize; memcpy(ret, ptr, copysize); - iqallocx(ptr, try_tcache_dalloc); + iqalloct(ptr, try_tcache_dalloc); return (ret); } @@ -992,7 +993,7 @@ iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero, bool no_move) { - return (irallocx(ptr, size, extra, alignment, zero, no_move, true, true, + return (iralloct(ptr, size, extra, alignment, zero, no_move, true, true, NULL)); } diff --git a/include/jemalloc/internal/private_symbols.txt b/include/jemalloc/internal/private_symbols.txt index 9fbc625f..541e1b2c 100644 --- a/include/jemalloc/internal/private_symbols.txt +++ b/include/jemalloc/internal/private_symbols.txt @@ -207,17 +207,17 @@ huge_ralloc_no_move huge_salloc iallocm icalloc -icallocx +icalloct idalloc -idallocx +idalloct imalloc -imallocx +imalloct ipalloc -ipallocx +ipalloct iqalloc -iqallocx +iqalloct iralloc -irallocx +iralloct isalloc isthreaded ivsalloc diff --git a/include/jemalloc/internal/public_symbols.txt b/include/jemalloc/internal/public_symbols.txt index 7d097422..e27c0e5b 100644 --- a/include/jemalloc/internal/public_symbols.txt +++ b/include/jemalloc/internal/public_symbols.txt @@ -6,11 +6,17 @@ posix_memalign aligned_alloc realloc free -malloc_usable_size -malloc_stats_print +mallocx +rallocx +xallocx +sallocx +dallocx +nallocx mallctl mallctlnametomib mallctlbymib +malloc_stats_print +malloc_usable_size memalign valloc allocm diff --git a/include/jemalloc/jemalloc_macros.h.in b/include/jemalloc/jemalloc_macros.h.in index d1455319..9773bcbc 100644 --- a/include/jemalloc/jemalloc_macros.h.in +++ b/include/jemalloc/jemalloc_macros.h.in @@ -8,6 +8,17 @@ #define JEMALLOC_VERSION_NREV @jemalloc_version_nrev@ #define JEMALLOC_VERSION_GID "@jemalloc_version_gid@" +# define MALLOCX_LG_ALIGN(la) (la) +# if LG_SIZEOF_PTR == 2 +# define MALLOCX_ALIGN(a) (ffs(a)-1) +# else +# define MALLOCX_ALIGN(a) \ + ((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31) +# endif +# define MALLOCX_ZERO ((int)0x40) +/* Bias arena index bits so that 0 encodes "MALLOCX_ARENA() unspecified". 
*/ +# define MALLOCX_ARENA(a) ((int)(((a)+1) << 8)) + #ifdef JEMALLOC_EXPERIMENTAL # define ALLOCM_LG_ALIGN(la) (la) # if LG_SIZEOF_PTR == 2 @@ -39,11 +50,17 @@ # undef je_aligned_alloc # undef je_realloc # undef je_free -# undef je_malloc_usable_size -# undef je_malloc_stats_print +# undef je_mallocx +# undef je_rallocx +# undef je_xallocx +# undef je_sallocx +# undef je_dallocx +# undef je_nallocx # undef je_mallctl # undef je_mallctlnametomib # undef je_mallctlbymib +# undef je_malloc_stats_print +# undef je_malloc_usable_size # undef je_memalign # undef je_valloc # undef je_allocm diff --git a/include/jemalloc/jemalloc_mangle.h.in b/include/jemalloc/jemalloc_mangle.h.in index 215de9d0..7018a752 100644 --- a/include/jemalloc/jemalloc_mangle.h.in +++ b/include/jemalloc/jemalloc_mangle.h.in @@ -17,11 +17,17 @@ # define aligned_alloc je_aligned_alloc # define realloc je_realloc # define free je_free -# define malloc_usable_size je_malloc_usable_size -# define malloc_stats_print je_malloc_stats_print +# define mallocx je_mallocx +# define rallocx je_rallocx +# define xallocx je_xallocx +# define sallocx je_sallocx +# define dallocx je_dallocx +# define nallocx je_nallocx # define mallctl je_mallctl # define mallctlnametomib je_mallctlnametomib # define mallctlbymib je_mallctlbymib +# define malloc_stats_print je_malloc_stats_print +# define malloc_usable_size je_malloc_usable_size # define memalign je_memalign # define valloc je_valloc # ifdef JEMALLOC_EXPERIMENTAL @@ -56,6 +62,12 @@ # undef je_mallctlbymib # undef je_memalign # undef je_valloc +# undef je_mallocx +# undef je_rallocx +# undef je_xallocx +# undef je_sallocx +# undef je_dallocx +# undef je_nallocx # ifdef JEMALLOC_EXPERIMENTAL # undef je_allocm # undef je_rallocm diff --git a/include/jemalloc/jemalloc_protos.h.in b/include/jemalloc/jemalloc_protos.h.in index 3dad8596..25446de3 100644 --- a/include/jemalloc/jemalloc_protos.h.in +++ b/include/jemalloc/jemalloc_protos.h.in @@ -17,6 +17,25 @@ JEMALLOC_EXPORT void *@je_@aligned_alloc(size_t alignment, size_t size) JEMALLOC_EXPORT void *@je_@realloc(void *ptr, size_t size); JEMALLOC_EXPORT void @je_@free(void *ptr); +JEMALLOC_EXPORT void *@je_@mallocx(size_t size, int flags); +JEMALLOC_EXPORT void *@je_@rallocx(void *ptr, size_t size, int flags); +JEMALLOC_EXPORT size_t @je_@xallocx(void *ptr, size_t size, size_t extra, + int flags); +JEMALLOC_EXPORT size_t @je_@sallocx(const void *ptr, int flags); +JEMALLOC_EXPORT void @je_@dallocx(void *ptr, int flags); +JEMALLOC_EXPORT size_t @je_@nallocx(size_t size, int flags); + +JEMALLOC_EXPORT int @je_@mallctl(const char *name, void *oldp, + size_t *oldlenp, void *newp, size_t newlen); +JEMALLOC_EXPORT int @je_@mallctlnametomib(const char *name, size_t *mibp, + size_t *miblenp); +JEMALLOC_EXPORT int @je_@mallctlbymib(const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen); +JEMALLOC_EXPORT void @je_@malloc_stats_print(void (*write_cb)(void *, + const char *), void *@je_@cbopaque, const char *opts); +JEMALLOC_EXPORT size_t @je_@malloc_usable_size( + JEMALLOC_USABLE_SIZE_CONST void *ptr); + #ifdef JEMALLOC_OVERRIDE_MEMALIGN JEMALLOC_EXPORT void * @je_@memalign(size_t alignment, size_t size) JEMALLOC_ATTR(malloc); @@ -26,17 +45,6 @@ JEMALLOC_EXPORT void * @je_@memalign(size_t alignment, size_t size) JEMALLOC_EXPORT void * @je_@valloc(size_t size) JEMALLOC_ATTR(malloc); #endif -JEMALLOC_EXPORT size_t @je_@malloc_usable_size( - JEMALLOC_USABLE_SIZE_CONST void *ptr); -JEMALLOC_EXPORT void 
@je_@malloc_stats_print(void (*write_cb)(void *, - const char *), void *@je_@cbopaque, const char *opts); -JEMALLOC_EXPORT int @je_@mallctl(const char *name, void *oldp, - size_t *oldlenp, void *newp, size_t newlen); -JEMALLOC_EXPORT int @je_@mallctlnametomib(const char *name, size_t *mibp, - size_t *miblenp); -JEMALLOC_EXPORT int @je_@mallctlbymib(const size_t *mib, size_t miblen, - void *oldp, size_t *oldlenp, void *newp, size_t newlen); - #ifdef JEMALLOC_EXPERIMENTAL JEMALLOC_EXPORT int @je_@allocm(void **ptr, size_t *rsize, size_t size, int flags) JEMALLOC_ATTR(nonnull(1)); diff --git a/src/arena.c b/src/arena.c index 145de863..4a460130 100644 --- a/src/arena.c +++ b/src/arena.c @@ -2031,7 +2031,7 @@ arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size, size_t usize = sa2u(size + extra, alignment); if (usize == 0) return (NULL); - ret = ipallocx(usize, alignment, zero, try_tcache_alloc, arena); + ret = ipalloct(usize, alignment, zero, try_tcache_alloc, arena); } else ret = arena_malloc(arena, size + extra, zero, try_tcache_alloc); @@ -2043,7 +2043,7 @@ arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size, size_t usize = sa2u(size, alignment); if (usize == 0) return (NULL); - ret = ipallocx(usize, alignment, zero, try_tcache_alloc, + ret = ipalloct(usize, alignment, zero, try_tcache_alloc, arena); } else ret = arena_malloc(arena, size, zero, try_tcache_alloc); @@ -2061,7 +2061,7 @@ arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size, copysize = (size < oldsize) ? size : oldsize; VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize); memcpy(ret, ptr, copysize); - iqallocx(ptr, try_tcache_dalloc); + iqalloct(ptr, try_tcache_dalloc); return (ret); } diff --git a/src/huge.c b/src/huge.c index 443b4007..33fab684 100644 --- a/src/huge.c +++ b/src/huge.c @@ -181,7 +181,7 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra, #endif { memcpy(ret, ptr, copysize); - iqallocx(ptr, try_tcache_dalloc); + iqalloct(ptr, try_tcache_dalloc); } return (ret); } diff --git a/src/jemalloc.c b/src/jemalloc.c index f13a7d8c..f8c8119d 100644 --- a/src/jemalloc.c +++ b/src/jemalloc.c @@ -1337,28 +1337,363 @@ JEMALLOC_EXPORT void *(* __memalign_hook)(size_t alignment, size_t size) = * Begin non-standard functions. */ -size_t -je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) +JEMALLOC_ALWAYS_INLINE_C void * +imallocx(size_t usize, size_t alignment, bool zero, bool try_tcache, + arena_t *arena) { - size_t ret; + + assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize, + alignment))); + + if (alignment != 0) + return (ipalloct(usize, alignment, zero, try_tcache, arena)); + else if (zero) + return (icalloct(usize, try_tcache, arena)); + else + return (imalloct(usize, try_tcache, arena)); +} + +void * +je_mallocx(size_t size, int flags) +{ + void *p; + size_t usize; + size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK) + & (SIZE_T_MAX-1)); + bool zero = flags & MALLOCX_ZERO; + unsigned arena_ind = ((unsigned)(flags >> 8)) - 1; + arena_t *arena; + bool try_tcache; + + assert(size != 0); + + if (malloc_init()) + goto label_oom; + + if (arena_ind != UINT_MAX) { + arena = arenas[arena_ind]; + try_tcache = false; + } else { + arena = NULL; + try_tcache = true; + } + + usize = (alignment == 0) ? 
s2u(size) : sa2u(size, alignment); + if (usize == 0) + goto label_oom; + + if (config_prof && opt_prof) { + prof_thr_cnt_t *cnt; + + PROF_ALLOC_PREP(1, usize, cnt); + if (cnt == NULL) + goto label_oom; + if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <= + SMALL_MAXCLASS) { + size_t usize_promoted = (alignment == 0) ? + s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1, + alignment); + assert(usize_promoted != 0); + p = imallocx(usize_promoted, alignment, zero, + try_tcache, arena); + if (p == NULL) + goto label_oom; + arena_prof_promoted(p, usize); + } else { + p = imallocx(usize, alignment, zero, try_tcache, arena); + if (p == NULL) + goto label_oom; + } + prof_malloc(p, usize, cnt); + } else { + p = imallocx(usize, alignment, zero, try_tcache, arena); + if (p == NULL) + goto label_oom; + } + + if (config_stats) { + assert(usize == isalloc(p, config_prof)); + thread_allocated_tsd_get()->allocated += usize; + } + UTRACE(0, size, p); + JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero); + return (p); +label_oom: + if (config_xmalloc && opt_xmalloc) { + malloc_write(": Error in mallocx(): out of memory\n"); + abort(); + } + UTRACE(0, size, 0); + return (NULL); +} + +void * +je_rallocx(void *ptr, size_t size, int flags) +{ + void *p; + size_t usize; + size_t old_size; + UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); + size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK) + & (SIZE_T_MAX-1)); + bool zero = flags & MALLOCX_ZERO; + unsigned arena_ind = ((unsigned)(flags >> 8)) - 1; + bool try_tcache_alloc, try_tcache_dalloc; + arena_t *arena; + + assert(ptr != NULL); + assert(size != 0); + assert(malloc_initialized || IS_INITIALIZER); + malloc_thread_init(); + + if (arena_ind != UINT_MAX) { + arena_chunk_t *chunk; + try_tcache_alloc = false; + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + try_tcache_dalloc = (chunk == ptr || chunk->arena != + arenas[arena_ind]); + arena = arenas[arena_ind]; + } else { + try_tcache_alloc = true; + try_tcache_dalloc = true; + arena = NULL; + } + + if (config_prof && opt_prof) { + prof_thr_cnt_t *cnt; + + usize = (alignment == 0) ? s2u(size) : sa2u(size, + alignment); + prof_ctx_t *old_ctx = prof_ctx_get(ptr); + old_size = isalloc(ptr, true); + if (config_valgrind && opt_valgrind) + old_rzsize = p2rz(ptr); + PROF_ALLOC_PREP(1, usize, cnt); + if (cnt == NULL) + goto label_oom; + if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <= + SMALL_MAXCLASS) { + p = iralloct(ptr, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >= + size) ? 
0 : size - (SMALL_MAXCLASS+1), alignment, + zero, false, try_tcache_alloc, try_tcache_dalloc, + arena); + if (p == NULL) + goto label_oom; + if (usize < PAGE) + arena_prof_promoted(p, usize); + } else { + p = iralloct(ptr, size, 0, alignment, zero, false, + try_tcache_alloc, try_tcache_dalloc, arena); + if (p == NULL) + goto label_oom; + } + prof_realloc(p, usize, cnt, old_size, old_ctx); + } else { + if (config_stats) { + old_size = isalloc(ptr, false); + if (config_valgrind && opt_valgrind) + old_rzsize = u2rz(old_size); + } else if (config_valgrind && opt_valgrind) { + old_size = isalloc(ptr, false); + old_rzsize = u2rz(old_size); + } + p = iralloct(ptr, size, 0, alignment, zero, false, + try_tcache_alloc, try_tcache_dalloc, arena); + if (p == NULL) + goto label_oom; + if (config_stats || (config_valgrind && opt_valgrind)) + usize = isalloc(p, config_prof); + } + + if (config_stats) { + thread_allocated_t *ta; + ta = thread_allocated_tsd_get(); + ta->allocated += usize; + ta->deallocated += old_size; + } + UTRACE(ptr, size, p); + JEMALLOC_VALGRIND_REALLOC(p, usize, ptr, old_size, old_rzsize, zero); + return (p); +label_oom: + if (config_xmalloc && opt_xmalloc) { + malloc_write(": Error in rallocx(): out of memory\n"); + abort(); + } + UTRACE(ptr, size, 0); + return (NULL); +} + +size_t +je_xallocx(void *ptr, size_t size, size_t extra, int flags) +{ + size_t usize; + size_t old_size; + UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); + size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK) + & (SIZE_T_MAX-1)); + bool zero = flags & MALLOCX_ZERO; + unsigned arena_ind = ((unsigned)(flags >> 8)) - 1; + bool try_tcache_alloc, try_tcache_dalloc; + arena_t *arena; + + assert(ptr != NULL); + assert(size != 0); + assert(SIZE_T_MAX - size >= extra); + assert(malloc_initialized || IS_INITIALIZER); + malloc_thread_init(); + + if (arena_ind != UINT_MAX) { + arena_chunk_t *chunk; + try_tcache_alloc = false; + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + try_tcache_dalloc = (chunk == ptr || chunk->arena != + arenas[arena_ind]); + arena = arenas[arena_ind]; + } else { + try_tcache_alloc = true; + try_tcache_dalloc = true; + arena = NULL; + } + + if (config_prof && opt_prof) { + prof_thr_cnt_t *cnt; + + /* + * usize isn't knowable before iralloc() returns when extra is + * non-zero. Therefore, compute its maximum possible value and + * use that in PROF_ALLOC_PREP() to decide whether to capture a + * backtrace. prof_realloc() will use the actual usize to + * decide whether to sample. + */ + size_t max_usize = (alignment == 0) ? s2u(size+extra) : + sa2u(size+extra, alignment); + prof_ctx_t *old_ctx = prof_ctx_get(ptr); + old_size = isalloc(ptr, true); + if (config_valgrind && opt_valgrind) + old_rzsize = p2rz(ptr); + PROF_ALLOC_PREP(1, max_usize, cnt); + if (cnt == NULL) { + usize = isalloc(ptr, config_prof); + goto label_not_moved; + } + /* + * Use minimum usize to determine whether promotion may happen. + */ + if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U + && ((alignment == 0) ? s2u(size) : sa2u(size, alignment)) + <= SMALL_MAXCLASS) { + if (iralloct(ptr, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >= + size+extra) ? 
0 : size+extra - (SMALL_MAXCLASS+1), + alignment, zero, true, try_tcache_alloc, + try_tcache_dalloc, arena) == NULL) + goto label_not_moved; + if (max_usize < PAGE) { + usize = max_usize; + arena_prof_promoted(ptr, usize); + } else + usize = isalloc(ptr, config_prof); + } else { + if (iralloct(ptr, size, extra, alignment, zero, true, + try_tcache_alloc, try_tcache_dalloc, arena) == NULL) + goto label_not_moved; + usize = isalloc(ptr, config_prof); + } + prof_realloc(ptr, usize, cnt, old_size, old_ctx); + } else { + if (config_stats) { + old_size = isalloc(ptr, false); + if (config_valgrind && opt_valgrind) + old_rzsize = u2rz(old_size); + } else if (config_valgrind && opt_valgrind) { + old_size = isalloc(ptr, false); + old_rzsize = u2rz(old_size); + } + if (iralloct(ptr, size, extra, alignment, zero, true, + try_tcache_alloc, try_tcache_dalloc, arena) == NULL) { + usize = isalloc(ptr, config_prof); + goto label_not_moved; + } + usize = isalloc(ptr, config_prof); + } + + if (config_stats) { + thread_allocated_t *ta; + ta = thread_allocated_tsd_get(); + ta->allocated += usize; + ta->deallocated += old_size; + } + JEMALLOC_VALGRIND_REALLOC(ptr, usize, ptr, old_size, old_rzsize, zero); +label_not_moved: + UTRACE(ptr, size, ptr); + return (usize); +} + +size_t +je_sallocx(const void *ptr, int flags) +{ + size_t usize; assert(malloc_initialized || IS_INITIALIZER); malloc_thread_init(); if (config_ivsalloc) - ret = ivsalloc(ptr, config_prof); - else - ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0; + usize = ivsalloc(ptr, config_prof); + else { + assert(ptr != NULL); + usize = isalloc(ptr, config_prof); + } - return (ret); + return (usize); } void -je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque, - const char *opts) +je_dallocx(void *ptr, int flags) { + size_t usize; + UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0); + unsigned arena_ind = ((unsigned)(flags >> 8)) - 1; + bool try_tcache; - stats_print(write_cb, cbopaque, opts); + assert(ptr != NULL); + assert(malloc_initialized || IS_INITIALIZER); + + if (arena_ind != UINT_MAX) { + arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + try_tcache = (chunk == ptr || chunk->arena != + arenas[arena_ind]); + } else + try_tcache = true; + + UTRACE(ptr, 0, 0); + if (config_stats || config_valgrind) + usize = isalloc(ptr, config_prof); + if (config_prof && opt_prof) { + if (config_stats == false && config_valgrind == false) + usize = isalloc(ptr, config_prof); + prof_free(ptr, usize); + } + if (config_stats) + thread_allocated_tsd_get()->deallocated += usize; + if (config_valgrind && opt_valgrind) + rzsize = p2rz(ptr); + iqalloct(ptr, try_tcache); + JEMALLOC_VALGRIND_FREE(ptr, rzsize); +} + +size_t +je_nallocx(size_t size, int flags) +{ + size_t usize; + size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK) + & (SIZE_T_MAX-1)); + + assert(size != 0); + + if (malloc_init()) + return (0); + + usize = (alignment == 0) ? 
s2u(size) : sa2u(size, alignment); + return (usize); } int @@ -1393,6 +1728,30 @@ je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen)); } +void +je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque, + const char *opts) +{ + + stats_print(write_cb, cbopaque, opts); +} + +size_t +je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) +{ + size_t ret; + + assert(malloc_initialized || IS_INITIALIZER); + malloc_thread_init(); + + if (config_ivsalloc) + ret = ivsalloc(ptr, config_prof); + else + ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0; + + return (ret); +} + /* * End non-standard functions. */ @@ -1402,284 +1761,65 @@ je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, */ #ifdef JEMALLOC_EXPERIMENTAL -JEMALLOC_ALWAYS_INLINE_C void * -iallocm(size_t usize, size_t alignment, bool zero, bool try_tcache, - arena_t *arena) -{ - - assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize, - alignment))); - - if (alignment != 0) - return (ipallocx(usize, alignment, zero, try_tcache, arena)); - else if (zero) - return (icallocx(usize, try_tcache, arena)); - else - return (imallocx(usize, try_tcache, arena)); -} - int je_allocm(void **ptr, size_t *rsize, size_t size, int flags) { void *p; - size_t usize; - size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK) - & (SIZE_T_MAX-1)); - bool zero = flags & ALLOCM_ZERO; - unsigned arena_ind = ((unsigned)(flags >> 8)) - 1; - arena_t *arena; - bool try_tcache; assert(ptr != NULL); - assert(size != 0); - if (malloc_init()) - goto label_oom; - - if (arena_ind != UINT_MAX) { - arena = arenas[arena_ind]; - try_tcache = false; - } else { - arena = NULL; - try_tcache = true; - } - - usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment); - if (usize == 0) - goto label_oom; - - if (config_prof && opt_prof) { - prof_thr_cnt_t *cnt; - - PROF_ALLOC_PREP(1, usize, cnt); - if (cnt == NULL) - goto label_oom; - if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <= - SMALL_MAXCLASS) { - size_t usize_promoted = (alignment == 0) ? 
- s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1, - alignment); - assert(usize_promoted != 0); - p = iallocm(usize_promoted, alignment, zero, - try_tcache, arena); - if (p == NULL) - goto label_oom; - arena_prof_promoted(p, usize); - } else { - p = iallocm(usize, alignment, zero, try_tcache, arena); - if (p == NULL) - goto label_oom; - } - prof_malloc(p, usize, cnt); - } else { - p = iallocm(usize, alignment, zero, try_tcache, arena); - if (p == NULL) - goto label_oom; - } + p = je_mallocx(size, flags); + if (p == NULL) + return (ALLOCM_ERR_OOM); if (rsize != NULL) - *rsize = usize; - + *rsize = isalloc(p, config_prof); *ptr = p; - if (config_stats) { - assert(usize == isalloc(p, config_prof)); - thread_allocated_tsd_get()->allocated += usize; - } - UTRACE(0, size, p); - JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero); return (ALLOCM_SUCCESS); -label_oom: - if (config_xmalloc && opt_xmalloc) { - malloc_write(": Error in allocm(): " - "out of memory\n"); - abort(); - } - *ptr = NULL; - UTRACE(0, size, 0); - return (ALLOCM_ERR_OOM); } int je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags) { - void *p, *q; - size_t usize; - size_t old_size; - UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); - size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK) - & (SIZE_T_MAX-1)); - bool zero = flags & ALLOCM_ZERO; + int ret; bool no_move = flags & ALLOCM_NO_MOVE; - unsigned arena_ind = ((unsigned)(flags >> 8)) - 1; - bool try_tcache_alloc, try_tcache_dalloc; - arena_t *arena; assert(ptr != NULL); assert(*ptr != NULL); assert(size != 0); assert(SIZE_T_MAX - size >= extra); - assert(malloc_initialized || IS_INITIALIZER); - malloc_thread_init(); - if (arena_ind != UINT_MAX) { - arena_chunk_t *chunk; - try_tcache_alloc = false; - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(*ptr); - try_tcache_dalloc = (chunk == *ptr || chunk->arena != - arenas[arena_ind]); - arena = arenas[arena_ind]; - } else { - try_tcache_alloc = true; - try_tcache_dalloc = true; - arena = NULL; - } - - p = *ptr; - if (config_prof && opt_prof) { - prof_thr_cnt_t *cnt; - - /* - * usize isn't knowable before iralloc() returns when extra is - * non-zero. Therefore, compute its maximum possible value and - * use that in PROF_ALLOC_PREP() to decide whether to capture a - * backtrace. prof_realloc() will use the actual usize to - * decide whether to sample. - */ - size_t max_usize = (alignment == 0) ? s2u(size+extra) : - sa2u(size+extra, alignment); - prof_ctx_t *old_ctx = prof_ctx_get(p); - old_size = isalloc(p, true); - if (config_valgrind && opt_valgrind) - old_rzsize = p2rz(p); - PROF_ALLOC_PREP(1, max_usize, cnt); - if (cnt == NULL) - goto label_oom; - /* - * Use minimum usize to determine whether promotion may happen. - */ - if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U - && ((alignment == 0) ? s2u(size) : sa2u(size, alignment)) - <= SMALL_MAXCLASS) { - q = irallocx(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >= - size+extra) ? 
0 : size+extra - (SMALL_MAXCLASS+1), - alignment, zero, no_move, try_tcache_alloc, - try_tcache_dalloc, arena); - if (q == NULL) - goto label_err; - if (max_usize < PAGE) { - usize = max_usize; - arena_prof_promoted(q, usize); - } else - usize = isalloc(q, config_prof); - } else { - q = irallocx(p, size, extra, alignment, zero, no_move, - try_tcache_alloc, try_tcache_dalloc, arena); - if (q == NULL) - goto label_err; - usize = isalloc(q, config_prof); - } - prof_realloc(q, usize, cnt, old_size, old_ctx); + if (no_move) { + size_t usize = je_xallocx(*ptr, size, extra, flags); + ret = (usize >= size) ? ALLOCM_SUCCESS : ALLOCM_ERR_NOT_MOVED; if (rsize != NULL) *rsize = usize; } else { - if (config_stats) { - old_size = isalloc(p, false); - if (config_valgrind && opt_valgrind) - old_rzsize = u2rz(old_size); - } else if (config_valgrind && opt_valgrind) { - old_size = isalloc(p, false); - old_rzsize = u2rz(old_size); - } - q = irallocx(p, size, extra, alignment, zero, no_move, - try_tcache_alloc, try_tcache_dalloc, arena); - if (q == NULL) - goto label_err; - if (config_stats) - usize = isalloc(q, config_prof); - if (rsize != NULL) { - if (config_stats == false) - usize = isalloc(q, config_prof); - *rsize = usize; - } + void *p = je_rallocx(*ptr, size+extra, flags); + if (p != NULL) { + *ptr = p; + ret = ALLOCM_SUCCESS; + } else + ret = ALLOCM_ERR_OOM; + if (rsize != NULL) + *rsize = isalloc(*ptr, config_prof); } - - *ptr = q; - if (config_stats) { - thread_allocated_t *ta; - ta = thread_allocated_tsd_get(); - ta->allocated += usize; - ta->deallocated += old_size; - } - UTRACE(p, size, q); - JEMALLOC_VALGRIND_REALLOC(q, usize, p, old_size, old_rzsize, zero); - return (ALLOCM_SUCCESS); -label_err: - if (no_move) { - UTRACE(p, size, q); - return (ALLOCM_ERR_NOT_MOVED); - } -label_oom: - if (config_xmalloc && opt_xmalloc) { - malloc_write(": Error in rallocm(): " - "out of memory\n"); - abort(); - } - UTRACE(p, size, 0); - return (ALLOCM_ERR_OOM); + return (ret); } int je_sallocm(const void *ptr, size_t *rsize, int flags) { - size_t sz; - assert(malloc_initialized || IS_INITIALIZER); - malloc_thread_init(); - - if (config_ivsalloc) - sz = ivsalloc(ptr, config_prof); - else { - assert(ptr != NULL); - sz = isalloc(ptr, config_prof); - } assert(rsize != NULL); - *rsize = sz; - + *rsize = je_sallocx(ptr, flags); return (ALLOCM_SUCCESS); } int je_dallocm(void *ptr, int flags) { - size_t usize; - UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0); - unsigned arena_ind = ((unsigned)(flags >> 8)) - 1; - bool try_tcache; - - assert(ptr != NULL); - assert(malloc_initialized || IS_INITIALIZER); - - if (arena_ind != UINT_MAX) { - arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - try_tcache = (chunk == ptr || chunk->arena != - arenas[arena_ind]); - } else - try_tcache = true; - - UTRACE(ptr, 0, 0); - if (config_stats || config_valgrind) - usize = isalloc(ptr, config_prof); - if (config_prof && opt_prof) { - if (config_stats == false && config_valgrind == false) - usize = isalloc(ptr, config_prof); - prof_free(ptr, usize); - } - if (config_stats) - thread_allocated_tsd_get()->deallocated += usize; - if (config_valgrind && opt_valgrind) - rzsize = p2rz(ptr); - iqallocx(ptr, try_tcache); - JEMALLOC_VALGRIND_FREE(ptr, rzsize); + je_dallocx(ptr, flags); return (ALLOCM_SUCCESS); } @@ -1687,18 +1827,10 @@ int je_nallocm(size_t *rsize, size_t size, int flags) { size_t usize; - size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK) - & (SIZE_T_MAX-1)); - assert(size != 0); - - if (malloc_init()) - 
return (ALLOCM_ERR_OOM); - - usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment); + usize = je_nallocx(size, flags); if (usize == 0) return (ALLOCM_ERR_OOM); - if (rsize != NULL) *rsize = usize; return (ALLOCM_SUCCESS); diff --git a/src/tcache.c b/src/tcache.c index 88ec4810..6de92960 100644 --- a/src/tcache.c +++ b/src/tcache.c @@ -292,7 +292,7 @@ tcache_create(arena_t *arena) else if (size <= tcache_maxclass) tcache = (tcache_t *)arena_malloc_large(arena, size, true); else - tcache = (tcache_t *)icallocx(size, false, arena); + tcache = (tcache_t *)icalloct(size, false, arena); if (tcache == NULL) return (NULL); @@ -366,7 +366,7 @@ tcache_destroy(tcache_t *tcache) arena_dalloc_large(arena, chunk, tcache); } else - idallocx(tcache, false); + idalloct(tcache, false); } void diff --git a/src/tsd.c b/src/tsd.c index 8431751f..700caabf 100644 --- a/src/tsd.c +++ b/src/tsd.c @@ -21,7 +21,7 @@ void malloc_tsd_dalloc(void *wrapper) { - idallocx(wrapper, false); + idalloct(wrapper, false); } void diff --git a/test/integration/mallocx.c b/test/integration/mallocx.c new file mode 100644 index 00000000..f12855e7 --- /dev/null +++ b/test/integration/mallocx.c @@ -0,0 +1,149 @@ +#include "test/jemalloc_test.h" + +#define CHUNK 0x400000 +/* #define MAXALIGN ((size_t)UINT64_C(0x80000000000)) */ +#define MAXALIGN ((size_t)0x2000000LU) +#define NITER 4 + +TEST_BEGIN(test_basic) +{ + size_t nsz, rsz, sz; + void *p; + + sz = 42; + nsz = nallocx(sz, 0); + assert_zu_ne(nsz, 0, "Unexpected nallocx() error"); + p = mallocx(sz, 0); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + rsz = sallocx(p, 0); + assert_zu_ge(rsz, sz, "Real size smaller than expected"); + assert_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch"); + dallocx(p, 0); + + p = mallocx(sz, 0); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + dallocx(p, 0); + + nsz = nallocx(sz, MALLOCX_ZERO); + assert_zu_ne(nsz, 0, "Unexpected nallocx() error"); + p = mallocx(sz, MALLOCX_ZERO); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + rsz = sallocx(p, 0); + assert_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch"); + dallocx(p, 0); +} +TEST_END + +TEST_BEGIN(test_alignment_errors) +{ + void *p; + size_t nsz, sz, alignment; + +#if LG_SIZEOF_PTR == 3 + alignment = UINT64_C(0x8000000000000000); + sz = UINT64_C(0x8000000000000000); +#else + alignment = 0x80000000LU; + sz = 0x80000000LU; +#endif + nsz = nallocx(sz, MALLOCX_ALIGN(alignment)); + assert_zu_eq(nsz, 0, "Expected error for nallocx(%zu, %#x)", sz, + MALLOCX_ALIGN(alignment)); + p = mallocx(sz, MALLOCX_ALIGN(alignment)); + assert_ptr_null(p, "Expected error for mallocx(%zu, %#x)", sz, + MALLOCX_ALIGN(alignment)); + +#if LG_SIZEOF_PTR == 3 + alignment = UINT64_C(0x4000000000000000); + sz = UINT64_C(0x8400000000000001); +#else + alignment = 0x40000000LU; + sz = 0x84000001LU; +#endif + nsz = nallocx(sz, MALLOCX_ALIGN(alignment)); + assert_zu_ne(nsz, 0, "Unexpected nallocx() error"); + p = mallocx(sz, MALLOCX_ALIGN(alignment)); + assert_ptr_null(p, "Expected error for mallocx(%zu, %#x)", sz, + MALLOCX_ALIGN(alignment)); + + alignment = 0x10LU; +#if LG_SIZEOF_PTR == 3 + sz = UINT64_C(0xfffffffffffffff0); +#else + sz = 0xfffffff0LU; +#endif + nsz = nallocx(sz, MALLOCX_ALIGN(alignment)); + assert_zu_eq(nsz, 0, "Expected error for nallocx(%zu, %#x)", sz, + MALLOCX_ALIGN(alignment)); + nsz = nallocx(sz, MALLOCX_ALIGN(alignment)); + assert_zu_eq(nsz, 0, "Expected error for nallocx(%zu, %#x)", sz, + MALLOCX_ALIGN(alignment)); + p = mallocx(sz, MALLOCX_ALIGN(alignment)); + 
assert_ptr_null(p, "Expected error for mallocx(%zu, %#x)", sz, + MALLOCX_ALIGN(alignment)); +} +TEST_END + +TEST_BEGIN(test_alignment_and_size) +{ + size_t nsz, rsz, sz, alignment, total; + unsigned i; + void *ps[NITER]; + + for (i = 0; i < NITER; i++) + ps[i] = NULL; + + for (alignment = 8; + alignment <= MAXALIGN; + alignment <<= 1) { + total = 0; + for (sz = 1; + sz < 3 * alignment && sz < (1U << 31); + sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { + for (i = 0; i < NITER; i++) { + nsz = nallocx(sz, MALLOCX_ALIGN(alignment) | + MALLOCX_ZERO); + assert_zu_ne(nsz, 0, + "nallocx() error for alignment=%zu, " + "size=%zu (%#zx)", alignment, sz, sz); + ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) | + MALLOCX_ZERO); + assert_ptr_not_null(ps[i], + "mallocx() error for alignment=%zu, " + "size=%zu (%#zx)", alignment, sz, sz); + rsz = sallocx(ps[i], 0); + assert_zu_ge(rsz, sz, + "Real size smaller than expected for " + "alignment=%zu, size=%zu", alignment, sz); + assert_zu_eq(nsz, rsz, + "nallocx()/sallocx() size mismatch for " + "alignment=%zu, size=%zu", alignment, sz); + assert_ptr_null( + (void *)((uintptr_t)ps[i] & (alignment-1)), + "%p inadequately aligned for" + " alignment=%zu, size=%zu", ps[i], + alignment, sz); + total += rsz; + if (total >= (MAXALIGN << 1)) + break; + } + for (i = 0; i < NITER; i++) { + if (ps[i] != NULL) { + dallocx(ps[i], 0); + ps[i] = NULL; + } + } + } + } +} +TEST_END + +int +main(void) +{ + + return (test( + test_basic, + test_alignment_errors, + test_alignment_and_size)); +} diff --git a/test/integration/rallocm.c b/test/integration/rallocm.c index c13cd699..33c11bb7 100644 --- a/test/integration/rallocm.c +++ b/test/integration/rallocm.c @@ -1,5 +1,3 @@ -#include - #include "test/jemalloc_test.h" TEST_BEGIN(test_same_size) diff --git a/test/integration/rallocx.c b/test/integration/rallocx.c new file mode 100644 index 00000000..cc9138ba --- /dev/null +++ b/test/integration/rallocx.c @@ -0,0 +1,51 @@ +#include "test/jemalloc_test.h" + +TEST_BEGIN(test_grow_and_shrink) +{ + void *p, *q; + size_t tsz; +#define NCYCLES 3 + unsigned i, j; +#define NSZS 2500 + size_t szs[NSZS]; +#define MAXSZ ZU(12 * 1024 * 1024) + + p = mallocx(1, 0); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + szs[0] = sallocx(p, 0); + + for (i = 0; i < NCYCLES; i++) { + for (j = 1; j < NSZS && szs[j-1] < MAXSZ; j++) { + q = rallocx(p, szs[j-1]+1, 0); + assert_ptr_not_null(q, + "Unexpected rallocx() error for size=%zu-->%zu", + szs[j-1], szs[j-1]+1); + szs[j] = sallocx(q, 0); + assert_zu_ne(szs[j], szs[j-1]+1, + "Expected size to at least: %zu", szs[j-1]+1); + p = q; + } + + for (j--; j > 0; j--) { + q = rallocx(p, szs[j-1], 0); + assert_ptr_not_null(q, + "Unexpected rallocx() error for size=%zu-->%zu", + szs[j], szs[j-1]); + tsz = sallocx(q, 0); + assert_zu_eq(tsz, szs[j-1], + "Expected size=%zu, got size=%zu", szs[j-1], tsz); + p = q; + } + } + + dallocx(p, 0); +} +TEST_END + +int +main(void) +{ + + return (test( + test_grow_and_shrink)); +} diff --git a/test/integration/xallocx.c b/test/integration/xallocx.c new file mode 100644 index 00000000..ab4cf945 --- /dev/null +++ b/test/integration/xallocx.c @@ -0,0 +1,59 @@ +#include "test/jemalloc_test.h" + +TEST_BEGIN(test_same_size) +{ + void *p; + size_t sz, tsz; + + p = mallocx(42, 0); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + sz = sallocx(p, 0); + + tsz = xallocx(p, sz, 0, 0); + assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz); + + dallocx(p, 0); +} +TEST_END + +TEST_BEGIN(test_extra_no_move) +{ + 
void *p;
+	size_t sz, tsz;
+
+	p = mallocx(42, 0);
+	assert_ptr_not_null(p, "Unexpected mallocx() error");
+	sz = sallocx(p, 0);
+
+	tsz = xallocx(p, sz, sz-42, 0);
+	assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
+
+	dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_no_move_fail)
+{
+	void *p;
+	size_t sz, tsz;
+
+	p = mallocx(42, 0);
+	assert_ptr_not_null(p, "Unexpected mallocx() error");
+	sz = sallocx(p, 0);
+
+	tsz = xallocx(p, sz + 5, 0, 0);
+	assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
+
+	dallocx(p, 0);
+}
+TEST_END
+
+int
+main(void)
+{
+
+	return (test(
+	    test_same_size,
+	    test_extra_no_move,
+	    test_no_move_fail));
+}
diff --git a/test/unit/mq.c b/test/unit/mq.c
index 01e72fd1..e6cba101 100644
--- a/test/unit/mq.c
+++ b/test/unit/mq.c
@@ -39,8 +39,7 @@ thd_receiver_start(void *arg)
 	for (i = 0; i < (NSENDERS * NMSGS); i++) {
 		mq_msg_t *msg = mq_get(mq);
 		assert_ptr_not_null(msg, "mq_get() should never return NULL");
-		assert_d_eq(jet_dallocm(msg, 0), ALLOCM_SUCCESS,
-		    "Unexpected dallocm() failure");
+		jet_dallocx(msg, 0);
 	}
 	return (NULL);
 }
@@ -54,8 +53,8 @@ thd_sender_start(void *arg)
 	for (i = 0; i < NMSGS; i++) {
 		mq_msg_t *msg;
 		void *p;
-		assert_d_eq(jet_allocm(&p, NULL, sizeof(mq_msg_t), 0),
-		    ALLOCM_SUCCESS, "Unexpected allocm() failure");
+		p = jet_mallocx(sizeof(mq_msg_t), 0);
+		assert_ptr_not_null(p, "Unexpected mallocx() failure");
 		msg = (mq_msg_t *)p;
 		mq_put(mq, msg);
 	}
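For code migrating off the experimental API, the je_allocm()/je_rallocm() shims above define the correspondence precisely, and the flag bit layouts are identical per the MALLOCX_*/ALLOCM_* macros in jemalloc_macros.h.in. A hedged sketch of the translation (migrate_grow() and its size arguments are placeholders; both sizes must be non-zero):

#include <jemalloc/jemalloc.h>

void *
migrate_grow(size_t sz, size_t new_sz)
{
	/* Was: allocm(&p, &rsize, sz, ALLOCM_LG_ALIGN(4) | ALLOCM_ZERO). */
	void *p = mallocx(sz, MALLOCX_LG_ALIGN(4) | MALLOCX_ZERO);
	size_t usize;

	if (p == NULL)
		return (NULL);		/* Was ALLOCM_ERR_OOM. */
	usize = sallocx(p, 0);		/* Was the *rsize out parameter. */
	(void)usize;

	/* Was: rallocm(&p, &rsize, new_sz, 0, ALLOCM_NO_MOVE). */
	if (xallocx(p, new_sz, 0, 0) < new_sz) {
		/*
		 * Could not grow in place; was ALLOCM_ERR_NOT_MOVED. A
		 * moving resize would use rallocx() instead.
		 */
	}
	return (p);
}

dallocm(p, 0) maps one-for-one onto dallocx(p, 0), as the je_dallocm() shim shows.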