diff --git a/Makefile.in b/Makefile.in
index e411804a..800dd08d 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -142,7 +142,8 @@ TESTS_INTEGRATION := $(srcroot)test/integration/aligned_alloc.c \
$(srcroot)test/integration/rallocx.c \
$(srcroot)test/integration/thread_arena.c \
$(srcroot)test/integration/thread_tcache_enabled.c \
- $(srcroot)test/integration/xallocx.c
+ $(srcroot)test/integration/xallocx.c \
+ $(srcroot)test/integration/chunk.c
TESTS_STRESS :=
TESTS := $(TESTS_UNIT) $(TESTS_INTEGRATION) $(TESTS_STRESS)
diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in
index 78e9b3c6..a7c38b55 100644
--- a/doc/jemalloc.xml.in
+++ b/doc/jemalloc.xml.in
@@ -1283,6 +1283,69 @@ malloc_conf = "xmalloc:true";]]>
+
+
+ arena.<i>.chunk.alloc
+ (chunk_alloc_t *)
+ rw
+
+ Get or set the chunk allocation function for arena
+ <i>. If setting, the chunk deallocation function should
+ also be set via
+ arena.<i>.chunk.dealloc to a companion
+ function that knows how to deallocate the chunks.
+
+ typedef void *(chunk_alloc_t)
+ size_t size
+ size_t alignment
+ bool *zero
+ unsigned arena_ind
+
+ A chunk allocation function conforms to the chunk_alloc_t
+ type and upon success returns a pointer to size
+ bytes of memory on behalf of arena arena_ind such
+ that the chunk's base address is a multiple of
+ alignment, as well as setting
+ *zero to indicate whether the chunk is zeroed.
+ Upon error the function returns NULL and leaves
+ *zero unmodified. The
+ size parameter is always a multiple of the chunk
+ size. The alignment parameter is always a power
+ of two at least as large as the chunk size. Zeroing is mandatory if
+ *zero is true upon function
+ entry.
+
+
+
+
+ arena.<i>.chunk.dealloc
+ (chunk_dealloc_t *)
+ rw
+
+ Get or set the chunk deallocation function for arena
+ <i>. If setting, the chunk deallocation function must
+ be capable of deallocating all extant chunks associated with arena
+ <i>, usually by passing unknown chunks to the deallocation
+ function that was replaced. In practice, it is feasible to control
+ allocation for arenas created via arenas.extend such
+ that all chunks originate from an application-supplied chunk allocator
+ (by setting custom chunk allocation/deallocation functions just after
+ arena creation), but the automatically created arenas may have already
+ created chunks prior to the application having an opportunity to take
+ over chunk allocation.
+
+        typedef bool (chunk_dealloc_t)
+ void *chunk
+ size_t size
+ unsigned arena_ind
+
+ A chunk deallocation function conforms to the
+ chunk_dealloc_t type and deallocates a
+ chunk of given size on
+ behalf of arena arena_ind.
+
+
arenas.narenas
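
A minimal sketch of a hook pair conforming to the chunk_alloc_t and chunk_dealloc_t contracts documented above, assuming POSIX mmap()/munmap() with MAP_ANONYMOUS; the names my_chunk_alloc and my_chunk_dealloc are hypothetical, and the bool returned by the deallocation hook is not inspected anywhere in this patch:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

/*
 * Hypothetical chunk_alloc_t: per the documentation, size is a multiple of
 * the chunk size and alignment is a power of two no smaller than the chunk
 * size.
 */
static void *
my_chunk_alloc(size_t size, size_t alignment, bool *zero, unsigned arena_ind)
{
	void *map;
	uintptr_t addr, base;
	size_t map_size = size + alignment;

	/* Over-map so that an aligned chunk can be carved out. */
	map = mmap(NULL, map_size, PROT_READ|PROT_WRITE,
	    MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
	if (map == MAP_FAILED)
		return (NULL);
	addr = (uintptr_t)map;
	base = (addr + alignment - 1) & ~((uintptr_t)alignment - 1);
	/* Trim the unaligned head and the unused tail of the mapping. */
	if (base != addr)
		munmap(map, base - addr);
	if (base + size != addr + map_size)
		munmap((void *)(base + size), addr + map_size - (base + size));
	*zero = true;	/* Fresh anonymous mappings are zero-filled. */
	return ((void *)base);
}

/* Hypothetical chunk_dealloc_t companion for my_chunk_alloc. */
static bool
my_chunk_dealloc(void *chunk, size_t size, unsigned arena_ind)
{
	munmap(chunk, size);
	return (false);
}

Over-mapping by alignment bytes and trimming the head and tail is the simplest way to satisfy an arbitrary power-of-two alignment without assuming a particular chunk size.
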
diff --git a/include/jemalloc/internal/arena.h b/include/jemalloc/internal/arena.h
index 605a87e5..d50159b3 100644
--- a/include/jemalloc/internal/arena.h
+++ b/include/jemalloc/internal/arena.h
@@ -370,6 +370,12 @@ struct arena_s {
*/
arena_avail_tree_t runs_avail;
+ /*
+	 * User-configurable chunk allocation and deallocation functions.
+ */
+ chunk_alloc_t *chunk_alloc;
+ chunk_dealloc_t *chunk_dealloc;
+
/* bins is used to store trees of free regions. */
arena_bin_t bins[NBINS];
};
diff --git a/include/jemalloc/internal/chunk.h b/include/jemalloc/internal/chunk.h
index 87d8700d..cea0e8ae 100644
--- a/include/jemalloc/internal/chunk.h
+++ b/include/jemalloc/internal/chunk.h
@@ -43,10 +43,12 @@ extern size_t chunk_npages;
extern size_t map_bias; /* Number of arena chunk header pages. */
extern size_t arena_maxclass; /* Max size class for arenas. */
-void *chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
- dss_prec_t dss_prec);
+void *chunk_alloc(arena_t *arena, size_t size, size_t alignment, bool base,
+ bool *zero, dss_prec_t dss_prec);
+void *chunk_alloc_default(size_t size, size_t alignment, bool *zero,
+ unsigned arena_ind);
void chunk_unmap(void *chunk, size_t size);
-void chunk_dealloc(void *chunk, size_t size, bool unmap);
+void chunk_dealloc(arena_t *arena, void *chunk, size_t size, bool unmap);
bool chunk_boot(void);
void chunk_prefork(void);
void chunk_postfork_parent(void);
diff --git a/include/jemalloc/internal/extent.h b/include/jemalloc/internal/extent.h
index ba95ca81..000ef6d5 100644
--- a/include/jemalloc/internal/extent.h
+++ b/include/jemalloc/internal/extent.h
@@ -24,6 +24,9 @@ struct extent_node_s {
/* Total region size. */
size_t size;
+	/* Arena from which this extent came, if any. */
+ arena_t *arena;
+
/* True if zero-filled; used by chunk recycling code. */
bool zeroed;
};
diff --git a/include/jemalloc/internal/huge.h b/include/jemalloc/internal/huge.h
index a2b9c779..ab8d44a2 100644
--- a/include/jemalloc/internal/huge.h
+++ b/include/jemalloc/internal/huge.h
@@ -17,13 +17,15 @@ extern size_t huge_allocated;
/* Protects chunk-related data structures. */
extern malloc_mutex_t huge_mtx;
-void *huge_malloc(size_t size, bool zero, dss_prec_t dss_prec);
-void *huge_palloc(size_t size, size_t alignment, bool zero,
+void *huge_malloc(arena_t *arena, size_t size, bool zero,
+ dss_prec_t dss_prec);
+void *huge_palloc(arena_t *arena, size_t size, size_t alignment, bool zero,
dss_prec_t dss_prec);
bool huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
size_t extra);
-void *huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
- size_t alignment, bool zero, bool try_tcache_dalloc, dss_prec_t dss_prec);
+void *huge_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
+ size_t extra, size_t alignment, bool zero, bool try_tcache_dalloc,
+ dss_prec_t dss_prec);
#ifdef JEMALLOC_JET
typedef void (huge_dalloc_junk_t)(void *, size_t);
extern huge_dalloc_junk_t *huge_dalloc_junk;
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
index dc77b5a1..9e779c65 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -702,7 +702,8 @@ imalloct(size_t size, bool try_tcache, arena_t *arena)
if (size <= arena_maxclass)
return (arena_malloc(arena, size, false, try_tcache));
else
- return (huge_malloc(size, false, huge_dss_prec_get(arena)));
+ return (huge_malloc(arena, size, false,
+ huge_dss_prec_get(arena)));
}
JEMALLOC_ALWAYS_INLINE void *
@@ -719,7 +720,8 @@ icalloct(size_t size, bool try_tcache, arena_t *arena)
if (size <= arena_maxclass)
return (arena_malloc(arena, size, true, try_tcache));
else
- return (huge_malloc(size, true, huge_dss_prec_get(arena)));
+ return (huge_malloc(arena, size, true,
+ huge_dss_prec_get(arena)));
}
JEMALLOC_ALWAYS_INLINE void *
@@ -745,9 +747,11 @@ ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache,
ret = arena_palloc(choose_arena(arena), usize,
alignment, zero);
} else if (alignment <= chunksize)
- ret = huge_malloc(usize, zero, huge_dss_prec_get(arena));
+ ret = huge_malloc(arena, usize, zero,
+ huge_dss_prec_get(arena));
else
- ret = huge_palloc(usize, alignment, zero, huge_dss_prec_get(arena));
+ ret = huge_palloc(arena, usize, alignment, zero,
+ huge_dss_prec_get(arena));
}
assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
@@ -915,7 +919,7 @@ iralloct(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
alignment, zero, try_tcache_alloc,
try_tcache_dalloc));
} else {
- return (huge_ralloc(ptr, oldsize, size, extra,
+ return (huge_ralloc(arena, ptr, oldsize, size, extra,
alignment, zero, try_tcache_dalloc, huge_dss_prec_get(arena)));
}
}
diff --git a/include/jemalloc/internal/private_symbols.txt b/include/jemalloc/internal/private_symbols.txt
index ccbb3a90..589b56a1 100644
--- a/include/jemalloc/internal/private_symbols.txt
+++ b/include/jemalloc/internal/private_symbols.txt
@@ -104,6 +104,7 @@ buferror
choose_arena
choose_arena_hard
chunk_alloc
+chunk_alloc_default
chunk_alloc_dss
chunk_alloc_mmap
chunk_boot
diff --git a/include/jemalloc/jemalloc_protos.h.in b/include/jemalloc/jemalloc_protos.h.in
index 59aeee11..8e945fa5 100644
--- a/include/jemalloc/jemalloc_protos.h.in
+++ b/include/jemalloc/jemalloc_protos.h.in
@@ -44,3 +44,6 @@ JEMALLOC_EXPORT void * @je_@memalign(size_t alignment, size_t size)
#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_EXPORT void * @je_@valloc(size_t size) JEMALLOC_ATTR(malloc);
#endif
+
+typedef void *(chunk_alloc_t)(size_t, size_t, bool *, unsigned);
+typedef bool (chunk_dealloc_t)(void *, size_t, unsigned);
diff --git a/src/arena.c b/src/arena.c
index d956be3e..6db2b630 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -570,8 +570,8 @@ arena_chunk_init_hard(arena_t *arena)
zero = false;
malloc_mutex_unlock(&arena->lock);
- chunk = (arena_chunk_t *)chunk_alloc(chunksize, chunksize, false,
- &zero, arena->dss_prec);
+ chunk = (arena_chunk_t *)chunk_alloc(arena, chunksize, chunksize,
+ false, &zero, arena->dss_prec);
malloc_mutex_lock(&arena->lock);
if (chunk == NULL)
return (NULL);
@@ -668,7 +668,7 @@ arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
arena->spare = chunk;
malloc_mutex_unlock(&arena->lock);
- chunk_dealloc((void *)spare, chunksize, true);
+ chunk_dealloc(arena, (void *)spare, chunksize, true);
malloc_mutex_lock(&arena->lock);
if (config_stats)
arena->stats.mapped -= chunksize;
@@ -2319,6 +2319,8 @@ arena_new(arena_t *arena, unsigned ind)
arena->ind = ind;
arena->nthreads = 0;
+ arena->chunk_alloc = chunk_alloc_default;
+ arena->chunk_dealloc = (chunk_dealloc_t *)chunk_unmap;
if (malloc_mutex_init(&arena->lock))
return (true);
diff --git a/src/base.c b/src/base.c
index 03dcf8f4..e8b312ef 100644
--- a/src/base.c
+++ b/src/base.c
@@ -32,7 +32,7 @@ base_pages_alloc(size_t minsize)
assert(minsize != 0);
csize = CHUNK_CEILING(minsize);
zero = false;
- base_pages = chunk_alloc(csize, chunksize, true, &zero,
+ base_pages = chunk_alloc(NULL, csize, chunksize, true, &zero,
chunk_dss_prec_get());
if (base_pages == NULL)
return (true);
diff --git a/src/chunk.c b/src/chunk.c
index 246324a2..8bb07229 100644
--- a/src/chunk.c
+++ b/src/chunk.c
@@ -104,7 +104,7 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
malloc_mutex_unlock(&chunks_mtx);
node = base_node_alloc();
if (node == NULL) {
- chunk_dealloc(ret, size, true);
+ chunk_dealloc(NULL, ret, size, true);
return (NULL);
}
malloc_mutex_lock(&chunks_mtx);
@@ -141,8 +141,8 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
* takes advantage of this to avoid demanding zeroed chunks, but taking
* advantage of them if they are returned.
*/
-void *
-chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
+static void *
+chunk_alloc_core(size_t size, size_t alignment, bool base, bool *zero,
dss_prec_t dss_prec)
{
void *ret;
@@ -156,32 +156,56 @@ chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
if (have_dss && dss_prec == dss_prec_primary) {
if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
alignment, base, zero)) != NULL)
- goto label_return;
+ return (ret);
if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
- goto label_return;
+ return (ret);
}
/* mmap. */
if ((ret = chunk_recycle(&chunks_szad_mmap, &chunks_ad_mmap, size,
alignment, base, zero)) != NULL)
- goto label_return;
+ return (ret);
if ((ret = chunk_alloc_mmap(size, alignment, zero)) != NULL)
- goto label_return;
+ return (ret);
/* "secondary" dss. */
if (have_dss && dss_prec == dss_prec_secondary) {
if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
alignment, base, zero)) != NULL)
- goto label_return;
+ return (ret);
if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
- goto label_return;
+ return (ret);
}
/* All strategies for allocation failed. */
- ret = NULL;
-label_return:
+ return (NULL);
+}
+
+/*
+ * Default arena chunk allocation routine in the absence of user-override.
+ */
+void *
+chunk_alloc_default(size_t size, size_t alignment, bool *zero,
+ unsigned arena_ind)
+{
+
+ return (chunk_alloc_core(size, alignment, false, zero,
+ arenas[arena_ind]->dss_prec));
+}
+
+void *
+chunk_alloc(arena_t *arena, size_t size, size_t alignment, bool base,
+ bool *zero, dss_prec_t dss_prec)
+{
+ void *ret;
+
+ if (arena)
+ ret = arena->chunk_alloc(size, alignment, zero, arena->ind);
+ else
+ ret = chunk_alloc_core(size, alignment, base, zero, dss_prec);
+
if (ret != NULL) {
if (config_ivsalloc && base == false) {
if (rtree_set(chunks_rtree, (uintptr_t)ret, 1)) {
- chunk_dealloc(ret, size, true);
+ chunk_dealloc(arena, ret, size, true);
return (NULL);
}
}
@@ -312,7 +336,7 @@ chunk_unmap(void *chunk, size_t size)
}
void
-chunk_dealloc(void *chunk, size_t size, bool unmap)
+chunk_dealloc(arena_t *arena, void *chunk, size_t size, bool unmap)
{
assert(chunk != NULL);
@@ -329,8 +353,12 @@ chunk_dealloc(void *chunk, size_t size, bool unmap)
malloc_mutex_unlock(&chunks_mtx);
}
- if (unmap)
- chunk_unmap(chunk, size);
+ if (unmap) {
+ if (arena)
+ arena->chunk_dealloc(chunk, size, arena->ind);
+ else
+ chunk_unmap(chunk, size);
+ }
}
bool
diff --git a/src/ctl.c b/src/ctl.c
index 9ee5de9f..395c32a1 100644
--- a/src/ctl.c
+++ b/src/ctl.c
@@ -113,6 +113,8 @@ CTL_PROTO(opt_prof_accum)
CTL_PROTO(arena_i_purge)
static void arena_purge(unsigned arena_ind);
CTL_PROTO(arena_i_dss)
+CTL_PROTO(arena_i_chunk_alloc)
+CTL_PROTO(arena_i_chunk_dealloc)
INDEX_PROTO(arena_i)
CTL_PROTO(arenas_bin_i_size)
CTL_PROTO(arenas_bin_i_nregs)
@@ -251,9 +253,15 @@ static const ctl_named_node_t opt_node[] = {
{NAME("prof_accum"), CTL(opt_prof_accum)}
};
+static const ctl_named_node_t chunk_node[] = {
+ {NAME("alloc"), CTL(arena_i_chunk_alloc)},
+ {NAME("dealloc"), CTL(arena_i_chunk_dealloc)}
+};
+
static const ctl_named_node_t arena_i_node[] = {
{NAME("purge"), CTL(arena_i_purge)},
- {NAME("dss"), CTL(arena_i_dss)}
+ {NAME("dss"), CTL(arena_i_dss)},
+	{NAME("chunk"), CHILD(named, chunk)}
};
static const ctl_named_node_t super_arena_i_node[] = {
{NAME(""), CHILD(named, arena_i)}
@@ -1368,6 +1376,57 @@ label_return:
return (ret);
}
+static int
+arena_i_chunk_alloc_ctl(const size_t *mib, size_t miblen, void *oldp,
+    size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ unsigned arena_ind = mib[1];
+ arena_t *arena;
+
+ malloc_mutex_lock(&ctl_mtx);
+ if (arena_ind < narenas_total && (arena = arenas[arena_ind]) != NULL) {
+ malloc_mutex_lock(&arena->lock);
+ READ(arena->chunk_alloc, chunk_alloc_t *);
+ WRITE(arena->chunk_alloc, chunk_alloc_t *);
+ } else {
+ ret = EFAULT;
+ goto label_outer_return;
+ }
+ ret = 0;
+label_return:
+ malloc_mutex_unlock(&arena->lock);
+label_outer_return:
+ malloc_mutex_unlock(&ctl_mtx);
+ return (ret);
+}
+
+static int
+arena_i_chunk_dealloc_ctl(const size_t *mib, size_t miblen, void *oldp,
+    size_t *oldlenp, void *newp, size_t newlen)
+{
+	int ret;
+ unsigned arena_ind = mib[1];
+ arena_t *arena;
+
+ malloc_mutex_lock(&ctl_mtx);
+ if (arena_ind < narenas_total && (arena = arenas[arena_ind]) != NULL) {
+ malloc_mutex_lock(&arena->lock);
+ READ(arena->chunk_dealloc, chunk_dealloc_t *);
+ WRITE(arena->chunk_dealloc, chunk_dealloc_t *);
+ } else {
+ ret = EFAULT;
+ goto label_outer_return;
+ }
+ ret = 0;
+label_return:
+ malloc_mutex_unlock(&arena->lock);
+label_outer_return:
+ malloc_mutex_unlock(&ctl_mtx);
+ return (ret);
+}
+
static const ctl_named_node_t *
arena_i_index(const size_t *mib, size_t miblen, size_t i)
{
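
On the application side, the handlers above are driven through mallctl() like any other arena.<i> control. The following sketch follows the workflow the documentation recommends for arenas created via arenas.extend; it assumes an unprefixed jemalloc build with mallocx()/MALLOCX_ARENA() available, install_chunk_hooks() is a hypothetical helper, and my_chunk_alloc/my_chunk_dealloc are the hooks sketched earlier:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
install_chunk_hooks(void)
{
	unsigned arena_ind;
	size_t sz = sizeof(arena_ind);
	char name[64];
	chunk_alloc_t *alloc_hook = my_chunk_alloc;
	chunk_dealloc_t *dealloc_hook = my_chunk_dealloc;
	void *p;

	/* Create a fresh arena so that every chunk it maps can come from the
	 * application-supplied allocator. */
	if (mallctl("arenas.extend", &arena_ind, &sz, NULL, 0) != 0)
		return (1);

	/* Install both hooks before the arena allocates its first chunk. */
	snprintf(name, sizeof(name), "arena.%u.chunk.alloc", arena_ind);
	if (mallctl(name, NULL, NULL, &alloc_hook, sizeof(alloc_hook)) != 0)
		return (1);
	snprintf(name, sizeof(name), "arena.%u.chunk.dealloc", arena_ind);
	if (mallctl(name, NULL, NULL, &dealloc_hook, sizeof(dealloc_hook)) != 0)
		return (1);

	/* Allocations directed at the new arena now obtain their chunks via
	 * my_chunk_alloc. */
	p = mallocx(4096, MALLOCX_ARENA(arena_ind));
	if (p == NULL)
		return (1);
	dallocx(p, 0);
	return (0);
}

Installing both hooks immediately after arenas.extend, before anything is allocated from the new arena, is what makes the arena.<i>.chunk.dealloc requirement of being able to deallocate all extant chunks straightforward to satisfy.
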
diff --git a/src/huge.c b/src/huge.c
index e725fd90..ab05c905 100644
--- a/src/huge.c
+++ b/src/huge.c
@@ -16,14 +16,15 @@ malloc_mutex_t huge_mtx;
static extent_tree_t huge;
void *
-huge_malloc(size_t size, bool zero, dss_prec_t dss_prec)
+huge_malloc(arena_t *arena, size_t size, bool zero, dss_prec_t dss_prec)
{
- return (huge_palloc(size, chunksize, zero, dss_prec));
+ return (huge_palloc(arena, size, chunksize, zero, dss_prec));
}
void *
-huge_palloc(size_t size, size_t alignment, bool zero, dss_prec_t dss_prec)
+huge_palloc(arena_t *arena, size_t size, size_t alignment, bool zero,
+ dss_prec_t dss_prec)
{
void *ret;
size_t csize;
@@ -48,7 +49,7 @@ huge_palloc(size_t size, size_t alignment, bool zero, dss_prec_t dss_prec)
* it is possible to make correct junk/zero fill decisions below.
*/
is_zeroed = zero;
- ret = chunk_alloc(csize, alignment, false, &is_zeroed, dss_prec);
+ ret = chunk_alloc(arena, csize, alignment, false, &is_zeroed, dss_prec);
if (ret == NULL) {
base_node_dealloc(node);
return (NULL);
@@ -57,6 +58,7 @@ huge_palloc(size_t size, size_t alignment, bool zero, dss_prec_t dss_prec)
/* Insert node into huge. */
node->addr = ret;
node->size = csize;
+ node->arena = arena;
malloc_mutex_lock(&huge_mtx);
extent_tree_ad_insert(&huge, node);
@@ -96,8 +98,9 @@ huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
}
void *
-huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
- size_t alignment, bool zero, bool try_tcache_dalloc, dss_prec_t dss_prec)
+huge_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
+ size_t extra, size_t alignment, bool zero, bool try_tcache_dalloc,
+ dss_prec_t dss_prec)
{
void *ret;
size_t copysize;
@@ -112,18 +115,18 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
* space and copying.
*/
if (alignment > chunksize)
- ret = huge_palloc(size + extra, alignment, zero, dss_prec);
+ ret = huge_palloc(arena, size + extra, alignment, zero, dss_prec);
else
- ret = huge_malloc(size + extra, zero, dss_prec);
+ ret = huge_malloc(arena, size + extra, zero, dss_prec);
if (ret == NULL) {
if (extra == 0)
return (NULL);
/* Try again, this time without extra. */
if (alignment > chunksize)
- ret = huge_palloc(size, alignment, zero, dss_prec);
+ ret = huge_palloc(arena, size, alignment, zero, dss_prec);
else
- ret = huge_malloc(size, zero, dss_prec);
+ ret = huge_malloc(arena, size, zero, dss_prec);
if (ret == NULL)
return (NULL);
@@ -238,7 +241,7 @@ huge_dalloc(void *ptr, bool unmap)
if (unmap)
huge_dalloc_junk(node->addr, node->size);
- chunk_dealloc(node->addr, node->size, unmap);
+ chunk_dealloc(node->arena, node->addr, node->size, unmap);
base_node_dealloc(node);
}
diff --git a/src/jemalloc.c b/src/jemalloc.c
index 289d7f74..e0f9275f 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -1983,7 +1983,7 @@ a0alloc(size_t size, bool zero)
if (size <= arena_maxclass)
return (arena_malloc(arenas[0], size, zero, false));
else
- return (huge_malloc(size, zero, huge_dss_prec_get(arenas[0])));
+ return (huge_malloc(NULL, size, zero, huge_dss_prec_get(arenas[0])));
}
void *
diff --git a/test/integration/chunk.c b/test/integration/chunk.c
new file mode 100644
index 00000000..13659894
--- /dev/null
+++ b/test/integration/chunk.c
@@ -0,0 +1,61 @@
+#include "test/jemalloc_test.h"
+
+chunk_alloc_t *old_alloc;
+chunk_dealloc_t *old_dealloc;
+
+bool
+chunk_dealloc(void *chunk, size_t size, unsigned arena_ind)
+{
+
+ return (old_dealloc(chunk, size, arena_ind));
+}
+
+void *
+chunk_alloc(size_t size, size_t alignment, bool *zero, unsigned arena_ind)
+{
+
+ return (old_alloc(size, alignment, zero, arena_ind));
+}
+
+TEST_BEGIN(test_chunk)
+{
+ void *p;
+ chunk_alloc_t *new_alloc;
+ chunk_dealloc_t *new_dealloc;
+ size_t old_size, new_size;
+
+ new_alloc = chunk_alloc;
+ new_dealloc = chunk_dealloc;
+ old_size = sizeof(chunk_alloc_t *);
+ new_size = sizeof(chunk_alloc_t *);
+
+ assert_d_eq(mallctl("arena.0.chunk.alloc", &old_alloc,
+ &old_size, &new_alloc, new_size), 0,
+ "Unexpected alloc error");
+	assert_ptr_ne(old_alloc, new_alloc,
+	    "Unexpected default chunk allocation function");
+ assert_d_eq(mallctl("arena.0.chunk.dealloc", &old_dealloc,
+ &old_size, &new_dealloc, new_size), 0,
+ "Unexpected dealloc error");
+	assert_ptr_ne(old_dealloc, new_dealloc,
+	    "Unexpected default chunk deallocation function");
+
+ p = mallocx(42, 0);
+ assert_ptr_ne(p, NULL, "Unexpected alloc error");
+ free(p);
+
+ assert_d_eq(mallctl("arena.0.chunk.alloc", NULL,
+ NULL, &old_alloc, old_size), 0,
+ "Unexpected alloc error");
+ assert_d_eq(mallctl("arena.0.chunk.dealloc", NULL,
+ NULL, &old_dealloc, old_size), 0,
+ "Unexpected dealloc error");
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(test_chunk));
+}