diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in
index 185f955a..74daf6a8 100644
--- a/doc/jemalloc.xml.in
+++ b/doc/jemalloc.xml.in
@@ -2087,12 +2087,12 @@ typedef struct {
r-
[]
- Total number of bytes in active chunks mapped by the
- allocator. This is a multiple of the chunk size, and is larger than
- stats.active.
- This does not include inactive chunks, even those that contain unused
- dirty pages, which means that there is no strict ordering between this
- and stats.resident.
+ Total number of bytes in active extents mapped by the
+ allocator. This is larger than stats.active. This
+ does not include inactive extents, even those that contain unused dirty
+ pages, which means that there is no strict ordering between this and
+ stats.resident.
@@ -2737,9 +2737,10 @@ MAPPED_LIBRARIES:
To dump core whenever a problem occurs:
ln -s 'abort:true' /etc/malloc.conf
- To specify in the source a chunk size that is 16 MiB:
-malloc_conf = "lg_chunk:24";]]>
+ To specify in the source that only one arena should be automatically
+ created:
+malloc_conf = "narenas:1";]]>
SEE ALSO
diff --git a/include/jemalloc/internal/jemalloc_internal_defs.h.in b/include/jemalloc/internal/jemalloc_internal_defs.h.in
index 6721bc85..49e2cf06 100644
--- a/include/jemalloc/internal/jemalloc_internal_defs.h.in
+++ b/include/jemalloc/internal/jemalloc_internal_defs.h.in
@@ -137,7 +137,7 @@
#undef JEMALLOC_TCACHE
/*
- * JEMALLOC_DSS enables use of sbrk(2) to allocate chunks from the data storage
+ * JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage
* segment (DSS).
*/
#undef JEMALLOC_DSS
@@ -176,7 +176,7 @@
#undef JEMALLOC_MAPS_COALESCE
/*
- * If defined, use munmap() to unmap freed chunks, rather than storing them for
+ * If defined, use munmap() to unmap freed extents, rather than storing them for
* later reuse. This is disabled by default on Linux because common sequences
* of mmap()/munmap() calls will cause virtual memory map holes.
*/
diff --git a/include/jemalloc/internal/rtree.h b/include/jemalloc/internal/rtree.h
index e62ab6b9..af52f9ff 100644
--- a/include/jemalloc/internal/rtree.h
+++ b/include/jemalloc/internal/rtree.h
@@ -1,6 +1,6 @@
/*
* This radix tree implementation is tailored to the singular purpose of
- * associating metadata with chunks that are currently owned by jemalloc.
+ * associating metadata with extents that are currently owned by jemalloc.
*
*******************************************************************************
*/
diff --git a/include/jemalloc/internal/stats.h b/include/jemalloc/internal/stats.h
index d5eea8e7..da019605 100644
--- a/include/jemalloc/internal/stats.h
+++ b/include/jemalloc/internal/stats.h
@@ -5,7 +5,6 @@ typedef struct tcache_bin_stats_s tcache_bin_stats_t;
typedef struct malloc_bin_stats_s malloc_bin_stats_t;
typedef struct malloc_large_stats_s malloc_large_stats_t;
typedef struct arena_stats_s arena_stats_t;
-typedef struct chunk_stats_s chunk_stats_t;
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
@@ -76,7 +75,7 @@ struct malloc_large_stats_s {
*/
uint64_t nrequests;
- /* Current number of (multi-)chunk allocations of this size class. */
+ /* Current number of allocations of this size class. */
size_t curlextents;
};
diff --git a/src/zone.c b/src/zone.c
index 4609503a..ca235da4 100644
--- a/src/zone.c
+++ b/src/zone.c
@@ -54,7 +54,7 @@ zone_size(malloc_zone_t *zone, void *ptr)
* our zone into two parts, and use one as the default allocator and
* the other as the default deallocator/reallocator. Since that will
* not work in practice, we must check all pointers to assure that they
- * reside within a mapped chunk before determining size.
+ * reside within a mapped extent before determining size.
*/
return (ivsalloc(tsdn_fetch(), ptr));
}
diff --git a/test/integration/aligned_alloc.c b/test/integration/aligned_alloc.c
index 60900148..ec2f5a7b 100644
--- a/test/integration/aligned_alloc.c
+++ b/test/integration/aligned_alloc.c
@@ -1,6 +1,5 @@
#include "test/jemalloc_test.h"
-#define CHUNK 0x400000
/* #define MAXALIGN ((size_t)UINT64_C(0x80000000000)) */
#define MAXALIGN ((size_t)0x2000000LU)
#define NITER 4
diff --git a/test/integration/posix_memalign.c b/test/integration/posix_memalign.c
index 19741c6c..d5e39b63 100644
--- a/test/integration/posix_memalign.c
+++ b/test/integration/posix_memalign.c
@@ -1,6 +1,5 @@
#include "test/jemalloc_test.h"
-#define CHUNK 0x400000
/* #define MAXALIGN ((size_t)UINT64_C(0x80000000000)) */
#define MAXALIGN ((size_t)0x2000000LU)
#define NITER 4
diff --git a/test/unit/mallctl.c b/test/unit/mallctl.c
index 8eb5a60c..1954bfc5 100644
--- a/test/unit/mallctl.c
+++ b/test/unit/mallctl.c
@@ -625,7 +625,7 @@ TEST_END
TEST_BEGIN(test_arenas_lextent_constants)
{
-#define TEST_ARENAS_HCHUNK_CONSTANT(t, name, expected) do { \
+#define TEST_ARENAS_LEXTENT_CONSTANT(t, name, expected) do { \
t name; \
size_t sz = sizeof(t); \
assert_d_eq(mallctl("arenas.lextent.0."#name, &name, &sz, NULL, \
@@ -633,9 +633,9 @@ TEST_BEGIN(test_arenas_lextent_constants)
assert_zu_eq(name, expected, "Incorrect "#name" size"); \
} while (0)
- TEST_ARENAS_HCHUNK_CONSTANT(size_t, size, LARGE_MINCLASS);
+ TEST_ARENAS_LEXTENT_CONSTANT(size_t, size, LARGE_MINCLASS);
-#undef TEST_ARENAS_HCHUNK_CONSTANT
+#undef TEST_ARENAS_LEXTENT_CONSTANT
}
TEST_END