Merge branch 'dev'

Jason Evans 2014-03-31 09:33:19 -07:00
commit 46c0af68bd
29 changed files with 177 additions and 88 deletions

View File

@ -5,6 +5,30 @@ found in the git revision history:
https://github.com/jemalloc/jemalloc
* 3.6.0 (March 31, 2014)
This version contains a critical bug fix for a regression present in 3.5.0 and
3.5.1.
Bug fixes:
- Fix a regression in arena_chunk_alloc() that caused crashes during
small/large allocation if chunk allocation failed. In the absence of this
bug, chunk allocation failure would result in allocation failure, e.g. NULL
return from malloc(). This regression was introduced in 3.5.0.
- Fix backtracing for gcc intrinsics-based backtracing by specifying
-fno-omit-frame-pointer to gcc. Note that the application (and all the
libraries it links to) must also be compiled with this option for
backtracing to be reliable.
- Use dss allocation precedence for huge allocations as well as small/large
allocations.
- Fix test assertion failure message formatting. This bug did not manifest on
x86_64 systems because of implementation subtleties in va_list.
- Fix inconsequential test failures for hash and SFMT code.
New features:
- Support heap profiling on FreeBSD. This feature depends on the proc
filesystem being mounted during heap profile dumping.
* 3.5.1 (February 25, 2014)
This version primarily addresses minor bugs in test code.

View File

@ -141,7 +141,7 @@ TESTS_INTEGRATION := $(srcroot)test/integration/aligned_alloc.c \
$(srcroot)test/integration/xallocx.c
ifeq ($(enable_experimental), 1)
TESTS_INTEGRATION += $(srcroot)test/integration/allocm.c \
$(srcroot)test/integration/ALLOCM_ARENA.c \
$(srcroot)test/integration/MALLOCX_ARENA.c \
$(srcroot)test/integration/rallocm.c
endif
TESTS_STRESS :=

View File

@ -4197,8 +4197,12 @@ sub FindLibrary {
# For libc libraries, the copy in /usr/lib/debug contains debugging symbols
sub DebuggingLibrary {
my $file = shift;
if ($file =~ m|^/| && -f "/usr/lib/debug$file") {
return "/usr/lib/debug$file";
if ($file =~ m|^/|) {
if (-f "/usr/lib/debug$file") {
return "/usr/lib/debug$file";
} elsif (-f "/usr/lib/debug$file.debug") {
return "/usr/lib/debug$file.debug";
}
}
return undef;
}
@ -4360,6 +4364,19 @@ sub ParseLibraries {
$finish = HexExtend($2);
$offset = $zero_offset;
$lib = $3;
}
# FreeBSD 10.0 virtual memory map /proc/curproc/map as defined in
# function procfs_doprocmap (sys/fs/procfs/procfs_map.c)
#
# Example:
# 0x800600000 0x80061a000 26 0 0xfffff800035a0000 r-x 75 33 0x1004 COW NC vnode /libexec/ld-elf.so.1 NCH -1
elsif ($l =~ /^(0x$h)\s(0x$h)\s\d+\s\d+\s0x$h\sr-x\s\d+\s\d+\s0x\d+\s(COW|NCO)\s(NC|NNC)\svnode\s(\S+\.so(\.\d+)*)/) {
$start = HexExtend($1);
$finish = HexExtend($2);
$offset = $zero_offset;
$lib = FindLibrary($5);
} else {
next;
}
@ -4382,6 +4399,7 @@ sub ParseLibraries {
}
}
if($main::opt_debug) { printf STDERR "$start:$finish ($offset) $lib\n"; }
push(@{$result}, [$lib, $start, $finish, $offset]);
}
@ -4589,6 +4607,12 @@ sub ExtractSymbols {
my $finish = $lib->[2];
my $offset = $lib->[3];
# Use debug library if it exists
my $debug_libname = DebuggingLibrary($libname);
if ($debug_libname) {
$libname = $debug_libname;
}
# Get list of pcs that belong in this library.
my $contained = [];
my ($start_pc_index, $finish_pc_index);
@ -5019,7 +5043,7 @@ sub GetProcedureBoundariesViaNm {
# Tag this routine with the starting address in case the image
# has multiple occurrences of this routine. We use a syntax
# that resembles template paramters that are automatically
# that resembles template parameters that are automatically
# stripped out by ShortFunctionName()
$this_routine .= "<$start_val>";

View File

@ -150,6 +150,11 @@ if test "x$EXTRA_CFLAGS" != "x" ; then
fi
AC_PROG_CPP
AC_C_BIGENDIAN([ac_cv_big_endian=1], [ac_cv_big_endian=0])
if test "x${ac_cv_big_endian}" = "x1" ; then
AC_DEFINE_UNQUOTED([JEMALLOC_BIG_ENDIAN], [ ])
fi
AC_CHECK_SIZEOF([void *])
if test "x${ac_cv_sizeof_void_p}" = "x8" ; then
LG_SIZEOF_PTR=3
@ -742,22 +747,6 @@ if test "x$backtrace_method" = "x" -a "x$enable_prof_libgcc" = "x1" \
-a "x$GCC" = "xyes" ; then
AC_CHECK_HEADERS([unwind.h], , [enable_prof_libgcc="0"])
AC_CHECK_LIB([gcc], [_Unwind_Backtrace], [LIBS="$LIBS -lgcc"], [enable_prof_libgcc="0"])
dnl The following is conservative, in that it only has entries for CPUs on
dnl which jemalloc has been tested.
AC_MSG_CHECKING([libgcc-based backtracing reliability on ${host_cpu}])
case "${host_cpu}" in
i[[3456]]86)
AC_MSG_RESULT([unreliable])
enable_prof_libgcc="0";
;;
x86_64)
AC_MSG_RESULT([reliable])
;;
*)
AC_MSG_RESULT([unreliable])
enable_prof_libgcc="0";
;;
esac
if test "x${enable_prof_libgcc}" = "x1" ; then
backtrace_method="libgcc"
AC_DEFINE([JEMALLOC_PROF_LIBGCC], [ ])
@ -779,6 +768,7 @@ fi
)
if test "x$backtrace_method" = "x" -a "x$enable_prof_gcc" = "x1" \
-a "x$GCC" = "xyes" ; then
JE_CFLAGS_APPEND([-fno-omit-frame-pointer])
backtrace_method="gcc intrinsics"
AC_DEFINE([JEMALLOC_PROF_GCC], [ ])
else

View File

@ -1439,8 +1439,12 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<listitem><para>Set the precedence of dss allocation as related to mmap
allocation for arena &lt;i&gt;, or for all arenas if &lt;i&gt; equals
<link
linkend="arenas.narenas"><mallctl>arenas.narenas</mallctl></link>. See
<link linkend="opt.dss"><mallctl>opt.dss</mallctl></link> for supported
linkend="arenas.narenas"><mallctl>arenas.narenas</mallctl></link>. Note
that even during huge allocation this setting is read from the arena
that would be chosen for small or large allocation so that applications
can depend on consistent dss versus mmap allocation regardless of
allocation size. See <link
linkend="opt.dss"><mallctl>opt.dss</mallctl></link> for supported
settings.
</para></listitem>
</varlistentry>
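
The documentation change above states that huge allocations now honor the same per-arena dss precedence as small/large allocations. Below is a minimal usage sketch (not part of the patch) that exercises the setting through the public API; the arena index 0, the "primary" precedence value, and the 8 MiB request size are illustrative assumptions.

#include <stdio.h>
#include <stdlib.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	/* Ask arena 0 to prefer dss (sbrk) over mmap; "primary" is one of the
	 * documented opt.dss settings ("disabled", "primary", "secondary"). */
	const char *dss = "primary";
	if (mallctl("arena.0.dss", NULL, NULL, (void *)&dss, sizeof(dss)) != 0)
		fprintf(stderr, "arena.0.dss could not be set (dss disabled?)\n");

	/* With this change, a huge (multi-chunk) allocation made from arena 0
	 * also follows the precedence configured above rather than a global
	 * default. */
	void *p = mallocx((size_t)8 << 20, MALLOCX_ARENA(0));
	if (p != NULL)
		dallocx(p, 0);
	return (0);
}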

View File

@ -320,7 +320,7 @@ hash_x64_128(const void *key, const int len, const uint32_t seed,
JEMALLOC_INLINE void
hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2])
{
#if (LG_SIZEOF_PTR == 3)
#if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN))
hash_x64_128(key, len, seed, (uint64_t *)r_hash);
#else
uint64_t hashes[2];

View File

@ -17,18 +17,20 @@ extern size_t huge_allocated;
/* Protects chunk-related data structures. */
extern malloc_mutex_t huge_mtx;
void *huge_malloc(size_t size, bool zero);
void *huge_palloc(size_t size, size_t alignment, bool zero);
void *huge_malloc(size_t size, bool zero, dss_prec_t dss_prec);
void *huge_palloc(size_t size, size_t alignment, bool zero,
dss_prec_t dss_prec);
bool huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
size_t extra);
void *huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
size_t alignment, bool zero, bool try_tcache_dalloc);
size_t alignment, bool zero, bool try_tcache_dalloc, dss_prec_t dss_prec);
#ifdef JEMALLOC_JET
typedef void (huge_dalloc_junk_t)(void *, size_t);
extern huge_dalloc_junk_t *huge_dalloc_junk;
#endif
void huge_dalloc(void *ptr, bool unmap);
size_t huge_salloc(const void *ptr);
dss_prec_t huge_dss_prec_get(arena_t *arena);
prof_ctx_t *huge_prof_ctx_get(const void *ptr);
void huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
bool huge_boot(void);

View File

@ -770,7 +770,7 @@ imalloct(size_t size, bool try_tcache, arena_t *arena)
if (size <= arena_maxclass)
return (arena_malloc(arena, size, false, try_tcache));
else
return (huge_malloc(size, false));
return (huge_malloc(size, false, huge_dss_prec_get(arena)));
}
JEMALLOC_ALWAYS_INLINE void *
@ -787,7 +787,7 @@ icalloct(size_t size, bool try_tcache, arena_t *arena)
if (size <= arena_maxclass)
return (arena_malloc(arena, size, true, try_tcache));
else
return (huge_malloc(size, true));
return (huge_malloc(size, true, huge_dss_prec_get(arena)));
}
JEMALLOC_ALWAYS_INLINE void *
@ -813,9 +813,9 @@ ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache,
ret = arena_palloc(choose_arena(arena), usize,
alignment, zero);
} else if (alignment <= chunksize)
ret = huge_malloc(usize, zero);
ret = huge_malloc(usize, zero, huge_dss_prec_get(arena));
else
ret = huge_palloc(usize, alignment, zero);
ret = huge_palloc(usize, alignment, zero, huge_dss_prec_get(arena));
}
assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
@ -984,7 +984,7 @@ iralloct(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
try_tcache_dalloc));
} else {
return (huge_ralloc(ptr, oldsize, size, extra,
alignment, zero, try_tcache_dalloc));
alignment, zero, try_tcache_dalloc, huge_dss_prec_get(arena)));
}
}

View File

@ -190,6 +190,9 @@
/* C99 restrict keyword supported. */
#undef JEMALLOC_HAS_RESTRICT
/* For use by hash code. */
#undef JEMALLOC_BIG_ENDIAN
/* sizeof(int) == 2^LG_SIZEOF_INT. */
#undef LG_SIZEOF_INT

View File

@ -197,6 +197,7 @@ huge_allocated
huge_boot
huge_dalloc
huge_dalloc_junk
huge_dss_prec_get
huge_malloc
huge_mtx
huge_ndalloc

View File

@ -614,8 +614,11 @@ arena_chunk_alloc(arena_t *arena)
if (arena->spare != NULL)
chunk = arena_chunk_init_spare(arena);
else
else {
chunk = arena_chunk_init_hard(arena);
if (chunk == NULL)
return (NULL);
}
/* Insert the run into the runs_avail tree. */
arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias,
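
The hunk above is the critical 3.6.0 fix: when arena_chunk_init_hard() fails, arena_chunk_alloc() now returns NULL instead of continuing with an invalid chunk. A minimal user-level sketch (not part of the patch) of the behavior this restores, graceful allocation failure rather than a crash; the 1 GiB request size is an arbitrary assumption and whether it fails depends on the process's memory limits.

#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	/* If jemalloc cannot obtain a new chunk from the system, the failure
	 * now propagates out as a NULL return from malloc() rather than a
	 * crash inside arena_chunk_alloc(). */
	void *p = malloc((size_t)1 << 30);
	if (p == NULL) {
		fprintf(stderr, "allocation failed cleanly\n");
		return (1);
	}
	free(p);
	return (0);
}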

View File

@ -16,14 +16,14 @@ malloc_mutex_t huge_mtx;
static extent_tree_t huge;
void *
huge_malloc(size_t size, bool zero)
huge_malloc(size_t size, bool zero, dss_prec_t dss_prec)
{
return (huge_palloc(size, chunksize, zero));
return (huge_palloc(size, chunksize, zero, dss_prec));
}
void *
huge_palloc(size_t size, size_t alignment, bool zero)
huge_palloc(size_t size, size_t alignment, bool zero, dss_prec_t dss_prec)
{
void *ret;
size_t csize;
@ -48,8 +48,7 @@ huge_palloc(size_t size, size_t alignment, bool zero)
* it is possible to make correct junk/zero fill decisions below.
*/
is_zeroed = zero;
ret = chunk_alloc(csize, alignment, false, &is_zeroed,
chunk_dss_prec_get());
ret = chunk_alloc(csize, alignment, false, &is_zeroed, dss_prec);
if (ret == NULL) {
base_node_dealloc(node);
return (NULL);
@ -98,7 +97,7 @@ huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
void *
huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
size_t alignment, bool zero, bool try_tcache_dalloc)
size_t alignment, bool zero, bool try_tcache_dalloc, dss_prec_t dss_prec)
{
void *ret;
size_t copysize;
@ -113,18 +112,18 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
* space and copying.
*/
if (alignment > chunksize)
ret = huge_palloc(size + extra, alignment, zero);
ret = huge_palloc(size + extra, alignment, zero, dss_prec);
else
ret = huge_malloc(size + extra, zero);
ret = huge_malloc(size + extra, zero, dss_prec);
if (ret == NULL) {
if (extra == 0)
return (NULL);
/* Try again, this time without extra. */
if (alignment > chunksize)
ret = huge_palloc(size, alignment, zero);
ret = huge_palloc(size, alignment, zero, dss_prec);
else
ret = huge_malloc(size, zero);
ret = huge_malloc(size, zero, dss_prec);
if (ret == NULL)
return (NULL);
@ -264,6 +263,13 @@ huge_salloc(const void *ptr)
return (ret);
}
dss_prec_t
huge_dss_prec_get(arena_t *arena)
{
return (arena_dss_prec_get(choose_arena(arena)));
}
prof_ctx_t *
huge_prof_ctx_get(const void *ptr)
{

View File

@ -2076,7 +2076,7 @@ a0alloc(size_t size, bool zero)
if (size <= arena_maxclass)
return (arena_malloc(arenas[0], size, zero, false));
else
return (huge_malloc(size, zero));
return (huge_malloc(size, zero, huge_dss_prec_get(arenas[0])));
}
void *

View File

@ -935,9 +935,12 @@ prof_dump_maps(bool propagate_err)
char filename[PATH_MAX + 1];
cassert(config_prof);
#ifdef __FreeBSD__
malloc_snprintf(filename, sizeof(filename), "/proc/curproc/map");
#else
malloc_snprintf(filename, sizeof(filename), "/proc/%d/maps",
(int)getpid());
#endif
mfd = open(filename, O_RDONLY);
if (mfd != -1) {
ssize_t nread;

View File

@ -61,7 +61,7 @@
* @return output
*/
JEMALLOC_ALWAYS_INLINE
static vector unsigned int vec_recursion(vector unsigned int a,
vector unsigned int vec_recursion(vector unsigned int a,
vector unsigned int b,
vector unsigned int c,
vector unsigned int d) {

View File

@ -1,13 +1,19 @@
#define ASSERT_BUFSIZE 256
#define assert_cmp(t, a, b, cmp, neg_cmp, pri, fmt...) do { \
t a_ = (a); \
t b_ = (b); \
if (!(a_ cmp b_)) { \
p_test_fail( \
char prefix[ASSERT_BUFSIZE]; \
char message[ASSERT_BUFSIZE]; \
malloc_snprintf(prefix, sizeof(prefix), \
"%s:%s:%d: Failed assertion: " \
"(%s) "#cmp" (%s) --> " \
"%"pri" "#neg_cmp" %"pri": ", \
__func__, __FILE__, __LINE__, \
#a, #b, a_, b_, fmt); \
#a, #b, a_, b_); \
malloc_snprintf(message, sizeof(message), fmt); \
p_test_fail(prefix, message); \
} \
} while (0)
@ -208,24 +214,32 @@
bool a_ = (a); \
bool b_ = (b); \
if (!(a_ == b_)) { \
p_test_fail( \
char prefix[ASSERT_BUFSIZE]; \
char message[ASSERT_BUFSIZE]; \
malloc_snprintf(prefix, sizeof(prefix), \
"%s:%s:%d: Failed assertion: " \
"(%s) == (%s) --> %s != %s: ", \
__func__, __FILE__, __LINE__, \
#a, #b, a_ ? "true" : "false", \
b_ ? "true" : "false", fmt); \
b_ ? "true" : "false"); \
malloc_snprintf(message, sizeof(message), fmt); \
p_test_fail(prefix, message); \
} \
} while (0)
#define assert_b_ne(a, b, fmt...) do { \
bool a_ = (a); \
bool b_ = (b); \
if (!(a_ != b_)) { \
p_test_fail( \
char prefix[ASSERT_BUFSIZE]; \
char message[ASSERT_BUFSIZE]; \
malloc_snprintf(prefix, sizeof(prefix), \
"%s:%s:%d: Failed assertion: " \
"(%s) != (%s) --> %s == %s: ", \
__func__, __FILE__, __LINE__, \
#a, #b, a_ ? "true" : "false", \
b_ ? "true" : "false", fmt); \
b_ ? "true" : "false"); \
malloc_snprintf(message, sizeof(message), fmt); \
p_test_fail(prefix, message); \
} \
} while (0)
#define assert_true(a, fmt...) assert_b_eq(a, true, fmt)
@ -233,26 +247,39 @@
#define assert_str_eq(a, b, fmt...) do { \
if (strcmp((a), (b))) { \
p_test_fail( \
char prefix[ASSERT_BUFSIZE]; \
char message[ASSERT_BUFSIZE]; \
malloc_snprintf(prefix, sizeof(prefix), \
"%s:%s:%d: Failed assertion: " \
"(%s) same as (%s) --> " \
"\"%s\" differs from \"%s\": ", \
__func__, __FILE__, __LINE__, #a, #b, a, b, fmt); \
__func__, __FILE__, __LINE__, #a, #b, a, b); \
malloc_snprintf(message, sizeof(message), fmt); \
p_test_fail(prefix, message); \
} \
} while (0)
#define assert_str_ne(a, b, fmt...) do { \
if (!strcmp((a), (b))) { \
p_test_fail( \
char prefix[ASSERT_BUFSIZE]; \
char message[ASSERT_BUFSIZE]; \
malloc_snprintf(prefix, sizeof(prefix), \
"%s:%s:%d: Failed assertion: " \
"(%s) differs from (%s) --> " \
"\"%s\" same as \"%s\": ", \
__func__, __FILE__, __LINE__, #a, #b, a, b, fmt); \
__func__, __FILE__, __LINE__, #a, #b, a, b); \
malloc_snprintf(message, sizeof(message), fmt); \
p_test_fail(prefix, message); \
} \
} while (0)
#define assert_not_reached(fmt...) do { \
p_test_fail("%s:%s:%d: Unreachable code reached: ", \
__func__, __FILE__, __LINE__, fmt); \
char prefix[ASSERT_BUFSIZE]; \
char message[ASSERT_BUFSIZE]; \
malloc_snprintf(prefix, sizeof(prefix), \
"%s:%s:%d: Unreachable code reached: ", \
__func__, __FILE__, __LINE__); \
malloc_snprintf(message, sizeof(message), fmt); \
p_test_fail(prefix, message); \
} while (0)
/*
@ -299,4 +326,4 @@ void test_fail(const char *format, ...) JEMALLOC_ATTR(format(printf, 1, 2));
test_status_t p_test(test_t* t, ...);
void p_test_init(const char *name);
void p_test_fini(void);
void p_test_fail(const char *format, ...);
void p_test_fail(const char *prefix, const char *message);
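
The assertion macros above no longer forward the user-supplied format string through p_test_fail()'s variable arguments, which relied on va_list behavior that only happened to work on x86_64; each macro now formats a location prefix and the user message into separate fixed-size buffers and passes both strings. A standalone sketch of the same pattern follows, with check_eq() and report_fail() as hypothetical stand-ins for the assert_*() macros and p_test_fail().

#include <stdarg.h>
#include <stdio.h>

#define BUFSIZE 256

static void
report_fail(const char *prefix, const char *message)
{
	/* Stand-in for p_test_fail(): both parts are already formatted. */
	fprintf(stderr, "%s%s\n", prefix, message);
}

static void
check_eq(int a, int b, const char *file, int line, const char *fmt, ...)
{
	if (a != b) {
		char prefix[BUFSIZE], message[BUFSIZE];
		va_list ap;

		/* Format the location prefix and the caller's message into
		 * separate buffers; the va_list is consumed exactly once. */
		snprintf(prefix, sizeof(prefix),
		    "%s:%d: Failed assertion: (%d) == (%d): ", file, line, a, b);
		va_start(ap, fmt);
		vsnprintf(message, sizeof(message), fmt, ap);
		va_end(ap);
		report_fail(prefix, message);
	}
}

int
main(void)
{
	check_eq(1, 2, __FILE__, __LINE__, "expected %s", "equality");
	return (0);
}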

View File

@ -8,7 +8,7 @@ thd_start(void *arg)
unsigned thread_ind = (unsigned)(uintptr_t)arg;
unsigned arena_ind;
void *p;
size_t rsz, sz;
size_t sz;
sz = sizeof(arena_ind);
assert_d_eq(mallctl("arenas.extend", &arena_ind, &sz, NULL, 0), 0,
@ -27,9 +27,9 @@ thd_start(void *arg)
sizeof(const char *)), 0, "Error in mallctlbymib()");
}
assert_d_eq(allocm(&p, &rsz, 1, ALLOCM_ARENA(arena_ind)),
ALLOCM_SUCCESS, "Unexpected allocm() error");
dallocm(p, 0);
p = mallocx(1, MALLOCX_ARENA(arena_ind));
assert_ptr_not_null(p, "Unexpected mallocx() error");
dallocx(p, 0);
return (NULL);
}

View File

@ -1,8 +1,7 @@
#include "test/jemalloc_test.h"
#define CHUNK 0x400000
/* #define MAXALIGN ((size_t)UINT64_C(0x80000000000)) */
#define MAXALIGN ((size_t)0x2000000LU)
#define MAXALIGN (((size_t)1) << 25)
#define NITER 4
TEST_BEGIN(test_basic)

View File

@ -1,8 +1,7 @@
#include "test/jemalloc_test.h"
#define CHUNK 0x400000
/* #define MAXALIGN ((size_t)UINT64_C(0x80000000000)) */
#define MAXALIGN ((size_t)0x2000000LU)
#define MAXALIGN (((size_t)1) << 25)
#define NITER 4
TEST_BEGIN(test_basic)

View File

@ -112,7 +112,7 @@ TEST_BEGIN(test_align)
{
void *p, *q;
size_t align;
#define MAX_ALIGN (ZU(1) << 29)
#define MAX_ALIGN (ZU(1) << 25)
align = ZU(1);
p = mallocx(1, MALLOCX_ALIGN(align));
@ -137,7 +137,7 @@ TEST_BEGIN(test_lg_align_and_zero)
{
void *p, *q;
size_t lg_align, sz;
#define MAX_LG_ALIGN 29
#define MAX_LG_ALIGN 25
#define MAX_VALIDATE (ZU(1) << 22)
lg_align = ZU(0);

View File

@ -49,6 +49,9 @@
#include "test/jemalloc_test.h"
#include "test/SFMT-params.h"
#if defined(JEMALLOC_BIG_ENDIAN) && !defined(BIG_ENDIAN64)
#define BIG_ENDIAN64 1
#endif
#if defined(__BIG_ENDIAN__) && !defined(__amd64) && !defined(BIG_ENDIAN64)
#define BIG_ENDIAN64 1
#endif

View File

@ -86,15 +86,9 @@ p_test(test_t* t, ...)
}
void
p_test_fail(const char *format, ...)
p_test_fail(const char *prefix, const char *message)
{
va_list ap;
va_start(ap, format);
malloc_vcprintf(NULL, NULL, format, ap);
format = va_arg(ap, const char *);
malloc_vcprintf(NULL, NULL, format, ap);
va_end(ap);
malloc_printf("\n");
malloc_cprintf(NULL, NULL, "%s%s\n", prefix, message);
test_status = test_status_fail;
}

View File

@ -1576,7 +1576,7 @@ TEST_BEGIN(test_by_array_64)
for (i = 0; i < BLOCK_SIZE64; i++) {
if (i < COUNT_1) {
assert_u64_eq(array64[i], init_by_array_64_expected[i],
"Output mismatch for i=%d");
"Output mismatch for i=%d", i);
}
r = gen_rand64(ctx);
assert_u64_eq(r, array64[i],

View File

@ -29,7 +29,7 @@ TEST_BEGIN(test_count_insert_search_remove)
assert_false(ckh_new(&ckh, 2, ckh_string_hash, ckh_string_keycomp),
"Unexpected ckh_new() error");
assert_zu_eq(ckh_count(&ckh), 0,
"ckh_count() should return %zu, but it returned %zu", 0,
"ckh_count() should return %zu, but it returned %zu", ZU(0),
ckh_count(&ckh));
/* Insert. */
@ -101,11 +101,11 @@ TEST_END
TEST_BEGIN(test_insert_iter_remove)
{
#define NITEMS 1000
#define NITEMS ZU(1000)
ckh_t ckh;
void **p[NITEMS];
void *q, *r;
unsigned i;
size_t i;
assert_false(ckh_new(&ckh, 2, ckh_pointer_hash, ckh_pointer_keycomp),
"Unexpected ckh_new() error");
@ -116,7 +116,7 @@ TEST_BEGIN(test_insert_iter_remove)
}
for (i = 0; i < NITEMS; i++) {
unsigned j;
size_t j;
for (j = i; j < NITEMS; j++) {
assert_false(ckh_insert(&ckh, p[j], p[j]),
@ -152,7 +152,7 @@ TEST_BEGIN(test_insert_iter_remove)
for (tabind = 0; ckh_iter(&ckh, &tabind, &q, &r) ==
false;) {
unsigned k;
size_t k;
assert_ptr_eq(q, r, "Key and val not equal");
@ -188,7 +188,7 @@ TEST_BEGIN(test_insert_iter_remove)
}
assert_zu_eq(ckh_count(&ckh), 0,
"ckh_count() should return %zu, but it returned %zu", 0,
"ckh_count() should return %zu, but it returned %zu", ZU(0),
ckh_count(&ckh));
ckh_delete(&ckh);
#undef NITEMS

View File

@ -122,9 +122,15 @@ hash_variant_verify(hash_variant_t variant)
(final[3] << 24);
switch (variant) {
#ifdef JEMALLOC_BIG_ENDIAN
case hash_variant_x86_32: expected = 0x6213303eU; break;
case hash_variant_x86_128: expected = 0x266820caU; break;
case hash_variant_x64_128: expected = 0xcc622b6fU; break;
#else
case hash_variant_x86_32: expected = 0xb0f57ee3U; break;
case hash_variant_x86_128: expected = 0xb3ece62aU; break;
case hash_variant_x64_128: expected = 0x6384ba69U; break;
#endif
default: not_reached();
}

View File

@ -73,7 +73,7 @@ test_junk(size_t sz_min, size_t sz_max)
if (sz_prev > 0) {
assert_c_eq(s[0], 'a',
"Previously allocated byte %zu/%zu is corrupted",
0, sz_prev);
ZU(0), sz_prev);
assert_c_eq(s[sz_prev-1], 'a',
"Previously allocated byte %zu/%zu is corrupted",
sz_prev-1, sz_prev);

View File

@ -21,7 +21,7 @@ quarantine_clear(void)
TEST_BEGIN(test_quarantine)
{
#define SZ 256
#define SZ ZU(256)
#define NQUARANTINED (QUARANTINE_SIZE/SZ)
void *quarantined[NQUARANTINED+1];
size_t i, j;

View File

@ -48,8 +48,9 @@ TEST_BEGIN(test_rtree_bits)
assert_u_eq(rtree_get(rtree, keys[k]), 1,
"rtree_get() should return previously set "
"value and ignore insignificant key bits; "
"i=%u, j=%u, k=%u, set key=%#x, "
"get key=%#x", i, j, k, keys[j], keys[k]);
"i=%u, j=%u, k=%u, set key=%#"PRIxPTR", "
"get key=%#"PRIxPTR, i, j, k, keys[j],
keys[k]);
}
assert_u_eq(rtree_get(rtree,
(((uintptr_t)1) << (sizeof(uintptr_t)*8-i))), 0,

View File

@ -20,7 +20,7 @@ test_zero(size_t sz_min, size_t sz_max)
if (sz_prev > 0) {
assert_c_eq(s[0], 'a',
"Previously allocated byte %zu/%zu is corrupted",
0, sz_prev);
ZU(0), sz_prev);
assert_c_eq(s[sz_prev-1], 'a',
"Previously allocated byte %zu/%zu is corrupted",
sz_prev-1, sz_prev);