Remove opt_dss and opt_mmap, such that if DSS allocation is enabled, both DSS and heap allocation are always enabled.
This commit is contained in:
Jason Evans 2009-12-29 00:09:15 -08:00
parent 5463a5298d
commit 6d7bb5357a
2 changed files with 20 additions and 92 deletions

View File

@ -193,14 +193,6 @@ Double/halve the size of the maximum size class that is a multiple of the
cacheline size (64).
Above this size, subpage spacing (256 bytes) is used for size classes.
The default value is 512 bytes.
@roff_dss@.It D
@roff_dss@Use
@roff_dss@.Xr sbrk 2
@roff_dss@to acquire memory in the data storage segment (DSS).
@roff_dss@This option is enabled by default.
@roff_dss@See the
@roff_dss@.Dq M
@roff_dss@option for related information and interactions.
.It F
Double/halve the per-arena maximum number of dirty unused pages that are
allowed to accumulate before informing the kernel about at least half of those
@ -237,22 +229,6 @@ will prevent any dirty unused pages from accumulating.
.It K
Double/halve the virtual memory chunk size.
The default chunk size is 1 MB.
@roff_dss@.It M
@roff_dss@Use
@roff_dss@.Xr mmap 2
@roff_dss@to acquire anonymously mapped memory.
@roff_dss@This option is enabled by default.
@roff_dss@If both the
@roff_dss@.Dq D
@roff_dss@and
@roff_dss@.Dq M
@roff_dss@options are enabled, the allocator prefers the DSS over anonymous
@roff_dss@mappings, but allocation only fails if memory cannot be acquired via
@roff_dss@either method.
@roff_dss@If neither option is enabled, then the
@roff_dss@.Dq M
@roff_dss@option is implicitly enabled in order to assure that there is a method
@roff_dss@for acquiring memory.
.It N
Double/halve the number of arenas.
The default number of arenas is two times the number of CPUs, or one if there
@ -342,9 +318,8 @@ The default value is 128 bytes.
@roff_dss@This allocator uses both @roff_dss@This allocator uses both
@roff_dss@.Xr sbrk 2 @roff_dss@.Xr sbrk 2
@roff_dss@and @roff_dss@and
@roff_dss@.Xr mmap 2 @roff_dss@.Xr mmap 2 ,
@roff_dss@by default, but it can be configured at run time to use only one or @roff_dss@in that order of preference.
@roff_dss@the other.
.Pp
This allocator uses multiple arenas in order to reduce lock contention for
threaded programs on multi-processor systems.

View File

@ -1020,10 +1020,6 @@ static bool opt_abort = false;
static bool opt_junk = false; static bool opt_junk = false;
# endif # endif
#endif #endif
#ifdef JEMALLOC_DSS
static bool opt_dss = true;
static bool opt_mmap = true;
#endif
#ifdef JEMALLOC_MAG #ifdef JEMALLOC_MAG
static bool opt_mag = true; static bool opt_mag = true;
static size_t opt_mag_size_2pow = MAG_SIZE_2POW_DEFAULT; static size_t opt_mag_size_2pow = MAG_SIZE_2POW_DEFAULT;
@ -1471,12 +1467,10 @@ base_pages_alloc(size_t minsize)
{ {
#ifdef JEMALLOC_DSS #ifdef JEMALLOC_DSS
if (opt_dss) {
if (base_pages_alloc_dss(minsize) == false) if (base_pages_alloc_dss(minsize) == false)
return (false); return (false);
}
if (opt_mmap && minsize != 0) if (minsize != 0)
#endif #endif
{ {
if (base_pages_alloc_mmap(minsize) == false) if (base_pages_alloc_mmap(minsize) == false)
@ -1943,7 +1937,6 @@ chunk_alloc(size_t size, bool zero)
assert((size & chunksize_mask) == 0); assert((size & chunksize_mask) == 0);
#ifdef JEMALLOC_DSS #ifdef JEMALLOC_DSS
if (opt_dss) {
ret = chunk_recycle_dss(size, zero); ret = chunk_recycle_dss(size, zero);
if (ret != NULL) { if (ret != NULL) {
goto RETURN; goto RETURN;
@ -1952,15 +1945,11 @@ chunk_alloc(size_t size, bool zero)
ret = chunk_alloc_dss(size); ret = chunk_alloc_dss(size);
if (ret != NULL) if (ret != NULL)
goto RETURN; goto RETURN;
}
if (opt_mmap)
#endif #endif
{
ret = chunk_alloc_mmap(size); ret = chunk_alloc_mmap(size);
if (ret != NULL) if (ret != NULL)
goto RETURN; goto RETURN;
}
/* All strategies for allocation failed. */ /* All strategies for allocation failed. */
ret = NULL; ret = NULL;
@ -2108,12 +2097,9 @@ chunk_dealloc(void *chunk, size_t size)
#endif #endif
#ifdef JEMALLOC_DSS #ifdef JEMALLOC_DSS
if (opt_dss) {
if (chunk_dealloc_dss(chunk, size) == false) if (chunk_dealloc_dss(chunk, size) == false)
return; return;
}
if (opt_mmap)
#endif #endif
chunk_dealloc_mmap(chunk, size); chunk_dealloc_mmap(chunk, size);
} }
@ -4455,7 +4441,7 @@ huge_dalloc(void *ptr)
/* Unmap chunk. */ /* Unmap chunk. */
#ifdef JEMALLOC_FILL #ifdef JEMALLOC_FILL
#ifdef JEMALLOC_DSS #ifdef JEMALLOC_DSS
if (opt_dss && opt_junk) if (opt_junk)
memset(node->addr, 0x5a, node->size); memset(node->addr, 0x5a, node->size);
#endif #endif
#endif #endif
@ -4696,17 +4682,11 @@ malloc_print_stats(void)
"\n", ""); "\n", "");
malloc_message("Boolean JEMALLOC_OPTIONS: ", malloc_message("Boolean JEMALLOC_OPTIONS: ",
opt_abort ? "A" : "a", "", ""); opt_abort ? "A" : "a", "", "");
#ifdef JEMALLOC_DSS
malloc_message(opt_dss ? "D" : "d", "", "", "");
#endif
#ifdef JEMALLOC_MAG #ifdef JEMALLOC_MAG
malloc_message(opt_mag ? "G" : "g", "", "", ""); malloc_message(opt_mag ? "G" : "g", "", "", "");
#endif #endif
#ifdef JEMALLOC_FILL #ifdef JEMALLOC_FILL
malloc_message(opt_junk ? "J" : "j", "", "", ""); malloc_message(opt_junk ? "J" : "j", "", "", "");
#endif
#ifdef JEMALLOC_DSS
malloc_message(opt_mmap ? "M" : "m", "", "", "");
#endif #endif
malloc_message("P", "", "", ""); malloc_message("P", "", "", "");
#ifdef JEMALLOC_TRACE #ifdef JEMALLOC_TRACE
@ -5110,16 +5090,6 @@ MALLOC_OUT:
- 1) - 1)
opt_cspace_max_2pow++; opt_cspace_max_2pow++;
break; break;
case 'd':
#ifdef JEMALLOC_DSS
opt_dss = false;
#endif
break;
case 'D':
#ifdef JEMALLOC_DSS
opt_dss = true;
#endif
break;
case 'f': case 'f':
opt_dirty_max >>= 1; opt_dirty_max >>= 1;
break; break;
@ -5159,16 +5129,6 @@ MALLOC_OUT:
(sizeof(size_t) << 3)) (sizeof(size_t) << 3))
opt_chunk_2pow++; opt_chunk_2pow++;
break; break;
case 'm':
#ifdef JEMALLOC_DSS
opt_mmap = false;
#endif
break;
case 'M':
#ifdef JEMALLOC_DSS
opt_mmap = true;
#endif
break;
case 'n': case 'n':
opt_narenas_lshift--; opt_narenas_lshift--;
break; break;
@ -5253,12 +5213,6 @@ MALLOC_OUT:
} }
} }
#ifdef JEMALLOC_DSS
/* Make sure that there is some method for acquiring memory. */
if (opt_dss == false && opt_mmap == false)
opt_mmap = true;
#endif
#ifdef JEMALLOC_TRACE #ifdef JEMALLOC_TRACE
if (opt_trace) { if (opt_trace) {
malloc_mutex_init(&trace_mtx); malloc_mutex_init(&trace_mtx);
@ -5379,7 +5333,6 @@ MALLOC_OUT:
* chunk-aligned. Doing this before allocating any other chunks allows * chunk-aligned. Doing this before allocating any other chunks allows
* the use of space that would otherwise be wasted. * the use of space that would otherwise be wasted.
*/ */
if (opt_dss)
base_pages_alloc(0); base_pages_alloc(0);
#endif #endif
base_nodes = NULL; base_nodes = NULL;