Port to FreeBSD.

Use FreeBSD-specific functions (_pthread_mutex_init_calloc_cb(),
_malloc_{pre,post}fork()) to avoid bootstrapping issues due to
allocation in libc and libthr.
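
For context, a condensed look at the hook (its wiring appears in the mutex-initialization hunk below, and base_calloc() is the new helper added alongside base_alloc()):

    /* Declared by FreeBSD's libthr: initializes a mutex, obtaining any memory
     * it needs from the caller-supplied calloc-like callback rather than from
     * malloc(). */
    int	_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
        void *(calloc_cb)(size_t, size_t));

    /* Inside malloc_mutex_init(): memory for the mutex comes from
     * base_calloc(), which hands out zeroed memory owned by jemalloc itself,
     * so initializing a mutex cannot recurse into the allocator that is still
     * bootstrapping. */
    if (_pthread_mutex_init_calloc_cb(mutex, base_calloc) != 0)
        return (true);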

Add malloc_strtoumax() and use it instead of strtoul().  Disable
validation code in malloc_vsnprintf() and malloc_strtoumax() until
jemalloc is initialized.  This is necessary because locale
initialization causes allocation for both vsnprintf() and strtoumax().
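
A rough illustration of the intended semantics (a hypothetical standalone check, not part of this commit; it assumes linking against the internal routine whose prototype is added in the header hunk below). Once jemalloc is initialized, debug builds cross-validate every call against strtoumax(3) in essentially this way:

    #include <assert.h>
    #include <inttypes.h>

    uintmax_t malloc_strtoumax(const char *nptr, char **endptr, int base);

    static void
    check_parity(const char *s, int base)
    {
        char *end_j, *end_c;

        /* Both parsers should agree on the value and consume the same span. */
        assert(malloc_strtoumax(s, &end_j, base) == strtoumax(s, &end_c, base));
        assert(end_j == end_c);
    }

    /* e.g. check_parity("  0x1f", 0); check_parity("0755", 0);
     *      check_parity("42junk", 10); */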

Force the lazy-lock feature on in order to avoid pthread_self(),
because it causes allocation.
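
For reference, lazy locking means mutex operations stay no-ops until the process actually creates a second thread, which jemalloc detects by interposing pthread_create() (the existing interposer is visible in the mutex hunk's context below). A simplified sketch of the idea, with approximate names:

    #include <dlfcn.h>
    #include <pthread.h>
    #include <stdbool.h>

    static bool isthreaded = false;

    /* Interposed pthread_create(): the first thread creation flips isthreaded
     * and forwards to the real implementation looked up via dlsym(). */
    int
    pthread_create(pthread_t *thread, const pthread_attr_t *attr,
        void *(*start_routine)(void *), void *arg)
    {
        static int (*real_create)(pthread_t *, const pthread_attr_t *,
            void *(*)(void *), void *);

        if (real_create == NULL) {
            real_create = (int (*)(pthread_t *, const pthread_attr_t *,
                void *(*)(void *), void *))dlsym(RTLD_NEXT, "pthread_create");
        }
        isthreaded = true;
        return (real_create(thread, attr, start_routine, arg));
    }

    /* While the process is single-threaded, locking is skipped entirely. */
    static void
    lazy_mutex_lock(pthread_mutex_t *mutex)
    {
        if (isthreaded)
            pthread_mutex_lock(mutex);
    }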

Use syscall(SYS_write, ...) rather than write(...), because libthr wraps
write() and causes allocation.  Without this workaround, it would not be
possible to print error messages in malloc_conf_init() without
substantially reworking bootstrapping.
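
The substitution itself is a one-liner (mirroring the wrtmessage() hunk below); the helper name here is illustrative, and it assumes <sys/syscall.h> and <unistd.h> are available:

    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static void
    write_stderr_raw(const char *s)
    {
        /* write(2) invoked via syscall(2), bypassing libthr's write()
         * wrapper so that printing an error during bootstrap cannot
         * trigger allocation. */
        (void)syscall(SYS_write, STDERR_FILENO, s, strlen(s));
    }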

Fix choose_arena_hard() to look at how many threads are assigned to the
candidate choice, rather than checking whether the arena is
uninitialized.  This bug potentially caused more arenas to be
initialized than necessary.
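
Condensed before/after of the check in choose_arena_hard() (full context in the hunk below):

    /* Before: tested whether the slot was uninitialized, which does not
     * distinguish an idle arena from a loaded one. */
    if (arenas[choose] == 0 || first_null == narenas) {

    /* After: reuse the candidate arena when no threads are assigned to it,
     * and only otherwise fall back to initializing a new arena. */
    if (arenas[choose]->nthreads == 0 || first_null == narenas) {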
 
Jason Evans 2012-02-02 22:04:57 -08:00
parent 6da5418ded
commit 41b6afb834
13 changed files with 237 additions and 47 deletions

README (10 lines changed)
View File

@@ -1,10 +1,10 @@
 jemalloc is a general-purpose scalable concurrent malloc(3) implementation.
 This distribution is a stand-alone "portable" implementation that currently
-targets Linux and Apple OS X. jemalloc is included as the default allocator in
-the FreeBSD and NetBSD operating systems, and it is used by the Mozilla Firefox
-web browser on Microsoft Windows-related platforms. Depending on your needs,
-one of the other divergent versions may suit your needs better than this
-distribution.
+targets FreeBSD, Linux and Apple OS X. jemalloc is included as the default
+allocator in the FreeBSD and NetBSD operating systems, and it is used by the
+Mozilla Firefox web browser on Microsoft Windows-related platforms. Depending
+on your needs, one of the other divergent versions may suit your needs better
+than this distribution.
 
 The COPYING file contains copyright and licensing information.

View File

@@ -777,6 +777,17 @@ if test "x$have__malloc_thread_cleanup" = "x1" ; then
   force_tls="1"
 fi
 
+dnl Check whether the BSD-specific _pthread_mutex_init_calloc_cb() exists. If
+dnl so, mutex initialization causes allocation, and we need to implement this
+dnl callback function in order to prevent recursive allocation.
+AC_CHECK_FUNC([_pthread_mutex_init_calloc_cb],
+              [have__pthread_mutex_init_calloc_cb="1"],
+              [have__pthread_mutex_init_calloc_cb="0"]
+             )
+if test "x$have__pthread_mutex_init_calloc_cb" = "x1" ; then
+  AC_DEFINE([JEMALLOC_MUTEX_INIT_CB])
+fi
+
 dnl Disable lazy locking by default.
 AC_ARG_ENABLE([lazy_lock],
   [AS_HELP_STRING([--enable-lazy-lock],

View File

@@ -10,6 +10,7 @@
 #ifdef JEMALLOC_H_EXTERNS
 
 void *base_alloc(size_t size);
+void *base_calloc(size_t number, size_t size);
 extent_node_t *base_node_alloc(void);
 void base_node_dealloc(extent_node_t *node);
 bool base_boot(void);

View File

@@ -1,5 +1,6 @@
 #include <sys/mman.h>
 #include <sys/param.h>
+#include <sys/syscall.h>
 #include <sys/time.h>
 #include <sys/types.h>
 #include <sys/uio.h>
@@ -370,6 +371,8 @@ extern malloc_mutex_t arenas_lock; /* Protects arenas initialization. */
 extern arena_t **arenas;
 extern unsigned narenas;
 
+extern bool malloc_initialized;
+
 arena_t *arenas_extend(unsigned ind);
 void arenas_cleanup(void *arg);
 arena_t *choose_arena_hard(void);

View File

@@ -6,9 +6,12 @@ typedef OSSpinLock malloc_mutex_t;
 #define MALLOC_MUTEX_INITIALIZER 0
 #else
 typedef pthread_mutex_t malloc_mutex_t;
-#  ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
+#  if (defined(PTHREAD_MUTEX_ADAPTIVE_NP) && \
+     defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP))
+#    define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP
 #    define MALLOC_MUTEX_INITIALIZER PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
 #  else
+#    define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
 #    define MALLOC_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
 #  endif
 #endif

View File

@@ -71,7 +71,7 @@ a_name##_tsd_set(a_type *val);
 #ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
 #define malloc_tsd_externs(a_name, a_type) \
 extern __thread a_type a_name##_tls; \
-extern __thread bool *a_name##_initialized; \
+extern __thread bool a_name##_initialized; \
 extern bool a_name##_booted;
 #elif (defined(JEMALLOC_TLS))
 #define malloc_tsd_externs(a_name, a_type) \

View File

@@ -85,6 +85,7 @@
 extern void (*je_malloc_message)(void *wcbopaque, const char *s);
 
 int buferror(int errnum, char *buf, size_t buflen);
+uintmax_t malloc_strtoumax(const char *nptr, char **endptr, int base);
 
 /*
  * malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating

View File

@@ -68,6 +68,20 @@
  */
 #undef JEMALLOC_MALLOC_THREAD_CLEANUP
 
+/*
+ * Defined if threaded initialization is known to be safe on this platform.
+ * Among other things, it must be possible to initialize a mutex without
+ * triggering allocation in order for threaded allocation to be safe.
+ */
+#undef JEMALLOC_THREADED_INIT
+
+/*
+ * Defined if the pthreads implementation defines
+ * _pthread_mutex_init_calloc_cb(), in which case the function is used in order
+ * to avoid recursive allocation during mutex initialization.
+ */
+#undef JEMALLOC_MUTEX_INIT_CB
+
 /* Defined if __attribute__((...)) syntax is supported. */
 #undef JEMALLOC_HAVE_ATTR
 #ifdef JEMALLOC_HAVE_ATTR

View File

@@ -66,6 +66,17 @@ base_alloc(size_t size)
     return (ret);
 }
 
+void *
+base_calloc(size_t number, size_t size)
+{
+    void *ret = base_alloc(number * size);
+
+    if (ret != NULL)
+        memset(ret, 0, number * size);
+
+    return (ret);
+}
+
 extent_node_t *
 base_node_alloc(void)
 {

View File

@@ -615,19 +615,19 @@ ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp,
                 goto RETURN;
             }
         } else {
-            unsigned long index;
+            uintmax_t index;
             const ctl_node_t *inode;
 
             /* Children are indexed. */
-            index = strtoul(elm, NULL, 10);
-            if (index == ULONG_MAX) {
+            index = malloc_strtoumax(elm, NULL, 10);
+            if (index == UINTMAX_MAX || index > SIZE_T_MAX) {
                 ret = ENOENT;
                 goto RETURN;
             }
 
             inode = &node->u.named.children[0];
             node = inode->u.indexed.index(mibp, *depthp,
-                index);
+                (size_t)index);
             if (node == NULL) {
                 ret = ENOENT;
                 goto RETURN;

View File

@@ -38,10 +38,18 @@ arena_t **arenas;
 unsigned narenas;
 
 /* Set to true once the allocator has been initialized. */
-static bool malloc_initialized = false;
+bool malloc_initialized = false;
 
+#ifdef JEMALLOC_THREADED_INIT
 /* Used to let the initializing thread recursively allocate. */
 static pthread_t malloc_initializer = (unsigned long)0;
+#  define INITIALIZER pthread_self()
+#  define IS_INITIALIZER (malloc_initializer == pthread_self())
+#else
+static bool malloc_initializer = false;
+#  define INITIALIZER true
+#  define IS_INITIALIZER malloc_initializer
+#endif
 
 /* Used to avoid initialization races. */
 static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;
@@ -127,7 +135,7 @@ choose_arena_hard(void)
         }
     }
 
-    if (arenas[choose] == 0 || first_null == narenas) {
+    if (arenas[choose]->nthreads == 0 || first_null == narenas) {
         /*
          * Use an unloaded arena, or the least loaded arena if
          * all arenas are already initialized.
@@ -413,22 +421,22 @@ malloc_conf_init(void)
 #define CONF_HANDLE_SIZE_T(o, n, min, max)                      \
             if (sizeof(#n)-1 == klen && strncmp(#n, k,          \
                 klen) == 0) {                                   \
-                unsigned long ul;                               \
+                uintmax_t um;                                   \
                 char *end;                                      \
                                                                 \
                 errno = 0;                                      \
-                ul = strtoul(v, &end, 0);                       \
+                um = malloc_strtoumax(v, &end, 0);              \
                 if (errno != 0 || (uintptr_t)end -              \
                     (uintptr_t)v != vlen) {                     \
                     malloc_conf_error(                          \
                         "Invalid conf value",                   \
                         k, klen, v, vlen);                      \
-                } else if (ul < min || ul > max) {              \
+                } else if (um < min || um > max) {              \
                     malloc_conf_error(                          \
                         "Out-of-range conf value",              \
                         k, klen, v, vlen);                      \
                 } else                                          \
-                    o = ul;                                     \
+                    o = um;                                     \
                 continue;                                       \
             }
 #define CONF_HANDLE_SSIZE_T(o, n, min, max)                     \
@@ -519,7 +527,7 @@ malloc_init_hard(void)
     arena_t *init_arenas[1];
 
     malloc_mutex_lock(&init_lock);
-    if (malloc_initialized || malloc_initializer == pthread_self()) {
+    if (malloc_initialized || IS_INITIALIZER) {
         /*
          * Another thread initialized the allocator before this one
          * acquired init_lock, or this thread is the initializing
@@ -528,7 +536,8 @@ malloc_init_hard(void)
         malloc_mutex_unlock(&init_lock);
         return (false);
     }
-    if (malloc_initializer != (unsigned long)0) {
+#ifdef JEMALLOC_THREADED_INIT
+    if (IS_INITIALIZER == false) {
         /* Busy-wait until the initializing thread completes. */
         do {
             malloc_mutex_unlock(&init_lock);
@@ -538,6 +547,8 @@ malloc_init_hard(void)
         malloc_mutex_unlock(&init_lock);
         return (false);
     }
+#endif
+    malloc_initializer = INITIALIZER;
 
 #ifdef DYNAMIC_PAGE_SHIFT
     /* Get page size. */
@@ -564,6 +575,7 @@ malloc_init_hard(void)
 
     malloc_conf_init();
 
+#ifndef JEMALLOC_MUTEX_INIT_CB
     /* Register fork handlers. */
     if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
         jemalloc_postfork_child) != 0) {
@@ -571,11 +583,7 @@ malloc_init_hard(void)
         if (opt_abort)
             abort();
     }
-
-    if (ctl_boot()) {
-        malloc_mutex_unlock(&init_lock);
-        return (true);
-    }
+#endif
 
     if (opt_stats_print) {
         /* Print statistics at exit. */
@@ -596,6 +604,11 @@ malloc_init_hard(void)
         return (true);
     }
 
+    if (ctl_boot()) {
+        malloc_mutex_unlock(&init_lock);
+        return (true);
+    }
+
     if (config_prof)
         prof_boot1();
@@ -654,7 +667,6 @@ malloc_init_hard(void)
     }
 
     /* Get number of CPUs. */
-    malloc_initializer = pthread_self();
     malloc_mutex_unlock(&init_lock);
     ncpus = malloc_ncpus();
     malloc_mutex_lock(&init_lock);
@@ -1018,8 +1030,7 @@ je_realloc(void *ptr, size_t size)
     }
 
     if (ptr != NULL) {
-        assert(malloc_initialized || malloc_initializer ==
-            pthread_self());
+        assert(malloc_initialized || IS_INITIALIZER);
 
         if (config_prof || config_stats)
             old_size = isalloc(ptr);
@@ -1124,8 +1135,7 @@ je_free(void *ptr)
     if (ptr != NULL) {
         size_t usize;
 
-        assert(malloc_initialized || malloc_initializer ==
-            pthread_self());
+        assert(malloc_initialized || IS_INITIALIZER);
 
         if (config_prof && opt_prof) {
             usize = isalloc(ptr);
@@ -1208,7 +1218,7 @@ je_malloc_usable_size(const void *ptr)
 {
     size_t ret;
 
-    assert(malloc_initialized || malloc_initializer == pthread_self());
+    assert(malloc_initialized || IS_INITIALIZER);
 
     if (config_ivsalloc)
         ret = ivsalloc(ptr);
@@ -1372,7 +1382,7 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
     assert(*ptr != NULL);
     assert(size != 0);
     assert(SIZE_T_MAX - size >= extra);
-    assert(malloc_initialized || malloc_initializer == pthread_self());
+    assert(malloc_initialized || IS_INITIALIZER);
 
     p = *ptr;
     if (config_prof && opt_prof) {
@@ -1457,7 +1467,7 @@ je_sallocm(const void *ptr, size_t *rsize, int flags)
 {
     size_t sz;
 
-    assert(malloc_initialized || malloc_initializer == pthread_self());
+    assert(malloc_initialized || IS_INITIALIZER);
 
     if (config_ivsalloc)
         sz = ivsalloc(ptr);
@@ -1479,7 +1489,7 @@ je_dallocm(void *ptr, int flags)
     size_t usize;
 
     assert(ptr != NULL);
-    assert(malloc_initialized || malloc_initializer == pthread_self());
+    assert(malloc_initialized || IS_INITIALIZER);
 
     if (config_stats)
         usize = isalloc(ptr);
@@ -1528,8 +1538,13 @@ je_nallocm(size_t *rsize, size_t size, int flags)
  * malloc during fork().
  */
 
+#ifndef JEMALLOC_MUTEX_INIT_CB
 void
 jemalloc_prefork(void)
+#else
+void
+_malloc_prefork(void)
+#endif
 {
     unsigned i;
@@ -1544,8 +1559,13 @@ jemalloc_prefork(void)
     chunk_dss_prefork();
 }
 
+#ifndef JEMALLOC_MUTEX_INIT_CB
 void
 jemalloc_postfork_parent(void)
+#else
+void
+_malloc_postfork(void)
+#endif
 {
     unsigned i;

View File

@@ -56,21 +56,25 @@ pthread_create(pthread_t *__restrict thread,
 /******************************************************************************/
 
+#ifdef JEMALLOC_MUTEX_INIT_CB
+int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
+    void *(calloc_cb)(size_t, size_t));
+#endif
+
 bool
 malloc_mutex_init(malloc_mutex_t *mutex)
 {
 #ifdef JEMALLOC_OSSPIN
     *mutex = 0;
+#elif (defined(JEMALLOC_MUTEX_INIT_CB))
+    if (_pthread_mutex_init_calloc_cb(mutex, base_calloc) != 0)
+        return (true);
 #else
     pthread_mutexattr_t attr;
 
     if (pthread_mutexattr_init(&attr) != 0)
         return (true);
-#ifdef PTHREAD_MUTEX_ADAPTIVE_NP
-    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
-#else
-    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_DEFAULT);
-#endif
+    pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE);
     if (pthread_mutex_init(mutex, &attr) != 0) {
         pthread_mutexattr_destroy(&attr);
         return (true);
@@ -99,10 +103,14 @@ void
 malloc_mutex_postfork_child(malloc_mutex_t *mutex)
 {
 
+#ifdef JEMALLOC_MUTEX_INIT_CB
+    malloc_mutex_unlock(mutex);
+#else
     if (malloc_mutex_init(mutex)) {
         malloc_printf("<jemalloc>: Error re-initializing mutex in "
             "child\n");
         if (opt_abort)
             abort();
     }
+#endif
 }

View File

@@ -44,7 +44,7 @@ JEMALLOC_CATTR(visibility("hidden"), static)
 void
 wrtmessage(void *cbopaque, const char *s)
 {
-    UNUSED int result = write(STDERR_FILENO, s, strlen(s));
+    UNUSED int result = syscall(SYS_write, STDERR_FILENO, s, strlen(s));
 }
 
 void (*je_malloc_message)(void *, const char *s)
@@ -69,6 +69,123 @@ buferror(int errnum, char *buf, size_t buflen)
 #endif
 }
 
+uintmax_t
+malloc_strtoumax(const char *nptr, char **endptr, int base)
+{
+    uintmax_t ret, digit;
+    int b;
+    bool neg;
+    const char *p, *ns;
+
+    if (base < 0 || base == 1 || base > 36) {
+        errno = EINVAL;
+        return (UINTMAX_MAX);
+    }
+    b = base;
+
+    /* Swallow leading whitespace and get sign, if any. */
+    neg = false;
+    p = nptr;
+    while (true) {
+        switch (*p) {
+        case '\t': case '\n': case '\v': case '\f': case '\r': case ' ':
+            p++;
+            break;
+        case '-':
+            neg = true;
+            /* Fall through. */
+        case '+':
+            p++;
+            /* Fall through. */
+        default:
+            goto PREFIX;
+        }
+    }
+
+    /* Get prefix, if any. */
+    PREFIX:
+    /*
+     * Note where the first non-whitespace/sign character is so that it is
+     * possible to tell whether any digits are consumed (e.g., " 0" vs.
+     * " -x").
+     */
+    ns = p;
+    if (*p == '0') {
+        switch (p[1]) {
+        case '0': case '1': case '2': case '3': case '4': case '5':
+        case '6': case '7':
+            if (b == 0)
+                b = 8;
+            if (b == 8)
+                p++;
+            break;
+        case 'x':
+            switch (p[2]) {
+            case '0': case '1': case '2': case '3': case '4':
+            case '5': case '6': case '7': case '8': case '9':
+            case 'A': case 'B': case 'C': case 'D': case 'E':
+            case 'F':
+            case 'a': case 'b': case 'c': case 'd': case 'e':
+            case 'f':
+                if (b == 0)
+                    b = 16;
+                if (b == 16)
+                    p += 2;
+                break;
+            default:
+                break;
+            }
+            break;
+        default:
+            break;
+        }
+    }
+    if (b == 0)
+        b = 10;
+
+    /* Convert. */
+    ret = 0;
+    while ((*p >= '0' && *p <= '9' && (digit = *p - '0') < b)
+        || (*p >= 'A' && *p <= 'Z' && (digit = 10 + *p - 'A') < b)
+        || (*p >= 'a' && *p <= 'z' && (digit = 10 + *p - 'a') < b)) {
+        uintmax_t pret = ret;
+        ret *= b;
+        ret += digit;
+        if (ret < pret) {
+            /* Overflow. */
+            errno = ERANGE;
+            return (UINTMAX_MAX);
+        }
+        p++;
+    }
+    if (neg)
+        ret = -ret;
+
+    if (endptr != NULL) {
+        if (p == ns) {
+            /* No characters were converted. */
+            *endptr = (char *)nptr;
+        } else
+            *endptr = (char *)p;
+    }
+
+    if (config_debug && malloc_initialized) {
+        uintmax_t tret;
+        int perrno;
+        char *pend;
+
+        perrno = errno;
+        if (endptr != NULL)
+            pend = *endptr;
+        tret = strtoumax(nptr, endptr, base);
+        assert(tret == ret);
+        assert(errno == perrno);
+        assert(endptr == NULL || *endptr == pend);
+    }
+
+    return (ret);
+}
+
 static char *
 u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p)
 {
@@ -220,7 +337,7 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
                 val = va_arg(ap, ptrdiff_t);    \
                 break;                          \
             case 'z':                           \
-                val = va_arg(ap, size_t);       \
+                val = va_arg(ap, ssize_t);      \
                 break;                          \
             case 'p': /* Synthetic; used for %p. */ \
                 val = va_arg(ap, uintptr_t);    \
@@ -289,10 +406,11 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
                 break;
             case '0': case '1': case '2': case '3': case '4':
             case '5': case '6': case '7': case '8': case '9': {
-                unsigned long uwidth;
+                uintmax_t uwidth;
                 errno = 0;
-                uwidth = strtoul(f, (char **)&f, 10);
-                assert(uwidth != ULONG_MAX || errno != ERANGE);
+                uwidth = malloc_strtoumax(f, (char **)&f, 10);
+                assert(uwidth != UINTMAX_MAX || errno !=
+                    ERANGE);
                 width = (int)uwidth;
                 if (*f == '.') {
                     f++;
@@ -314,10 +432,10 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
                 break;
             case '0': case '1': case '2': case '3': case '4':
             case '5': case '6': case '7': case '8': case '9': {
-                unsigned long uprec;
+                uintmax_t uprec;
                 errno = 0;
-                uprec = strtoul(f, (char **)&f, 10);
-                assert(uprec != ULONG_MAX || errno != ERANGE);
+                uprec = malloc_strtoumax(f, (char **)&f, 10);
+                assert(uprec != UINTMAX_MAX || errno != ERANGE);
                 prec = (int)uprec;
                 break;
             }
@@ -435,7 +553,7 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
         str[size - 1] = '\0';
     ret = i;
 
-    if (config_debug) {
+    if (config_debug && malloc_initialized) {
         char buf[MALLOC_PRINTF_BUFSIZE];
         int tret;