diff --git a/.gitignore b/.gitignore index 6607a5fd..4c408ec2 100644 --- a/.gitignore +++ b/.gitignore @@ -1,25 +1,72 @@ +/*.gcov.* + /autom4te.cache/ + +/bin/jemalloc.sh + /config.stamp /config.log /config.status /configure + /doc/html.xsl /doc/manpages.xsl /doc/jemalloc.xml /doc/jemalloc.html /doc/jemalloc.3 + /lib/ + /Makefile -/include/jemalloc/internal/jemalloc_internal\.h -/include/jemalloc/internal/size_classes\.h -/include/jemalloc/jemalloc\.h -/include/jemalloc/jemalloc_defs\.h -/test/jemalloc_test\.h + +/include/jemalloc/internal/jemalloc_internal.h +/include/jemalloc/internal/jemalloc_internal_defs.h +/include/jemalloc/internal/private_namespace.h +/include/jemalloc/internal/private_unnamespace.h +/include/jemalloc/internal/public_namespace.h +/include/jemalloc/internal/public_symbols.txt +/include/jemalloc/internal/public_unnamespace.h +/include/jemalloc/internal/size_classes.h +/include/jemalloc/jemalloc.h +/include/jemalloc/jemalloc_defs.h +/include/jemalloc/jemalloc_macros.h +/include/jemalloc/jemalloc_mangle.h +/include/jemalloc/jemalloc_mangle_jet.h +/include/jemalloc/jemalloc_protos.h +/include/jemalloc/jemalloc_protos_jet.h +/include/jemalloc/jemalloc_rename.h + /src/*.[od] -/test/*.[od] -/test/*.out -/test/[a-zA-Z_]* -!test/*.c -!test/*.exp +/src/*.gcda +/src/*.gcno + +/test/test.sh test/include/test/jemalloc_test.h test/include/test/jemalloc_test_defs.h + +/test/integration/[A-Za-z]* +!/test/integration/[A-Za-z]*.* +/test/integration/*.[od] +/test/integration/*.gcda +/test/integration/*.gcno +/test/integration/*.out + +/test/src/*.[od] +/test/src/*.gcda +/test/src/*.gcno + +/test/stress/[A-Za-z]* +!/test/stress/[A-Za-z]*.* +/test/stress/*.[od] +/test/stress/*.gcda +/test/stress/*.gcno +/test/stress/*.out + +/test/unit/[A-Za-z]* +!/test/unit/[A-Za-z]*.* +/test/unit/*.[od] +/test/unit/*.gcda +/test/unit/*.gcno +/test/unit/*.out + /VERSION -/bin/jemalloc.sh diff --git a/COPYING b/COPYING index 019e8132..bdda0feb 100644 --- a/COPYING +++ b/COPYING @@ -1,10 +1,10 @@ Unless otherwise specified, files in the jemalloc source distribution are subject to the following license: -------------------------------------------------------------------------------- -Copyright (C) 2002-2013 Jason Evans <jasone@canonware.com>. +Copyright (C) 2002-2014 Jason Evans <jasone@canonware.com>. All rights reserved. Copyright (C) 2007-2012 Mozilla Foundation. All rights reserved. -Copyright (C) 2009-2013 Facebook, Inc. All rights reserved. +Copyright (C) 2009-2014 Facebook, Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: diff --git a/ChangeLog b/ChangeLog index 0efc7426..c5e4198d 100644 --- a/ChangeLog +++ b/ChangeLog @@ -6,6 +6,59 @@ found in the git revision history: http://www.canonware.com/cgi-bin/gitweb.cgi?p=jemalloc.git git://canonware.com/jemalloc.git +* 3.5.0 (January 22, 2014) + + This version focuses on refactoring and automated testing, though it also + includes some non-trivial heap profiling optimizations not mentioned below. + + New features: + - Add the *allocx() API, which is a successor to the experimental *allocm() + API. The *allocx() functions are slightly simpler to use because they have + fewer parameters, they directly return the results of primary interest, and + mallocx()/rallocx() avoid the strict aliasing pitfall that + allocm()/rallocm() share with posix_memalign(). Note that *allocm() is + slated for removal in the next non-bugfix release. + - Add support for LinuxThreads.
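A usage sketch of the new *allocx() API summarized above (editorial illustration, not part of this commit; it assumes a default build in which the public symbols are unprefixed and declared in <jemalloc/jemalloc.h>):

    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int
    main(void)
    {
        /* At least 100 bytes, 64-byte aligned, zero-filled. */
        void *p = mallocx(100, MALLOCX_ALIGN(64) | MALLOCX_ZERO);
        if (p == NULL)
            return (1);
        /* sallocx() returns the real size directly rather than via an out
         * parameter as sallocm() does. */
        printf("real size: %zu\n", sallocx(p, 0));
        /* rallocx() also returns its result directly, so there is no
         * temptation to pass (void **)&typed_ptr as with rallocm(). */
        void *q = rallocx(p, 200, 0);
        if (q == NULL) {
            dallocx(p, 0);
            return (1);
        }
        dallocx(q, 0);
        return (0);
    }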
+ + Bug fixes: + - Unless heap profiling is enabled, disable floating point code and don't link + with libm. This, in combination with e.g. EXTRA_CFLAGS=-mno-sse on x64 + systems, makes it possible to completely disable floating point register + use. Some versions of glibc neglect to save/restore caller-saved floating + point registers during dynamic lazy symbol loading, and the symbol loading + code uses whatever malloc the application happens to have linked/loaded + with, the result being potential floating point register corruption. + - Report ENOMEM rather than EINVAL if an OOM occurs during heap profiling + backtrace creation in imemalign(). This bug impacted posix_memalign() and + aligned_alloc(). + - Fix a file descriptor leak in a prof_dump_maps() error path. + - Fix prof_dump() to close the dump file descriptor for all relevant error + paths. + - Fix rallocm() to use the arena specified by the ALLOCM_ARENA(s) flag for + allocation, not just deallocation. + - Fix a data race for large allocation stats counters. + - Fix a potential infinite loop during thread exit. This bug occurred on + Solaris, and could affect other platforms with similar pthreads TSD + implementations. + - Don't junk-fill reallocations unless usable size changes. This fixes a + violation of the *allocx()/*allocm() semantics. + - Fix growing large reallocation to junk fill new space. + - Fix huge deallocation to junk fill when munmap is disabled. + - Change the default private namespace prefix from empty to je_, and change + --with-private-namespace-prefix so that it prepends an additional prefix + rather than replacing je_. This reduces the likelihood of applications + which statically link jemalloc experiencing symbol name collisions. + - Add missing private namespace mangling (relevant when + --with-private-namespace is specified). + - Add and use JEMALLOC_INLINE_C so that static inline functions are marked as + static even for debug builds. + - Add a missing mutex unlock in a malloc_init_hard() error path. In practice + this error path is never executed. + - Fix numerous bugs in malloc_strtoumax() error handling/reporting. These + bugs had no impact except for malformed inputs. + - Fix numerous bugs in malloc_snprintf(). These bugs were not exercised by + existing calls, so they had no impact. + * 3.4.1 (October 20, 2013) Bug fixes: diff --git a/INSTALL b/INSTALL index 6e371ce5..841704d2 100644 --- a/INSTALL +++ b/INSTALL @@ -61,10 +61,10 @@ any of the following arguments (not a definitive list) to 'configure': allocator on OSX. --with-private-namespace=<prefix> - Prefix all library-private APIs with <prefix>. For shared libraries, + Prefix all library-private APIs with <prefix>je_. For shared libraries, symbol visibility mechanisms prevent these symbols from being exported, but for static libraries, naming collisions are a real possibility. By - default, the prefix is "" (empty string). + default, <prefix> is empty, which results in a symbol prefix of je_ . --with-install-suffix=<suffix> Append <suffix> to the base name of all installed files, such that multiple @@ -81,6 +81,19 @@ any of the following arguments (not a definitive list) to 'configure': performance hit, but is very useful during application development. Implies --enable-ivsalloc. +--enable-code-coverage + Enable code coverage support, for use during jemalloc test development.
+ Additional testing targets are available if this option is enabled: + + coverage + coverage_unit + coverage_integration + coverage_stress + + These targets do not clear code coverage results from previous runs, and + there are interactions between the various coverage targets, so it is + usually advisable to run 'make clean' between repeated code coverage runs. + --enable-ivsalloc Enable validation code, which verifies that pointers reside within jemalloc-owned chunks before dereferencing them. This incurs a substantial diff --git a/Makefile.in b/Makefile.in index 5909416e..67c4d5d8 100644 --- a/Makefile.in +++ b/Makefile.in @@ -47,6 +47,7 @@ cfghdrs_out := @cfghdrs_out@ cfgoutputs_in := @cfgoutputs_in@ cfgoutputs_out := @cfgoutputs_out@ enable_autogen := @enable_autogen@ +enable_code_coverage := @enable_code_coverage@ enable_experimental := @enable_experimental@ enable_zone_allocator := @enable_zone_allocator@ DSO_LDFLAGS = @DSO_LDFLAGS@ @@ -73,18 +74,17 @@ LIBJEMALLOC := $(LIBPREFIX)jemalloc$(install_suffix) # Lists of files. BINS := $(srcroot)bin/pprof $(objroot)bin/jemalloc.sh -CHDRS := $(objroot)include/jemalloc/jemalloc$(install_suffix).h \ - $(objroot)include/jemalloc/jemalloc_defs$(install_suffix).h -CSRCS := $(srcroot)src/jemalloc.c $(srcroot)src/arena.c $(srcroot)src/atomic.c \ - $(srcroot)src/base.c $(srcroot)src/bitmap.c $(srcroot)src/chunk.c \ - $(srcroot)src/chunk_dss.c $(srcroot)src/chunk_mmap.c \ - $(srcroot)src/ckh.c $(srcroot)src/ctl.c $(srcroot)src/extent.c \ - $(srcroot)src/hash.c $(srcroot)src/huge.c $(srcroot)src/mb.c \ - $(srcroot)src/mutex.c $(srcroot)src/prof.c $(srcroot)src/quarantine.c \ - $(srcroot)src/rtree.c $(srcroot)src/stats.c $(srcroot)src/tcache.c \ - $(srcroot)src/util.c $(srcroot)src/tsd.c +C_HDRS := $(objroot)include/jemalloc/jemalloc$(install_suffix).h +C_SRCS := $(srcroot)src/jemalloc.c $(srcroot)src/arena.c \ + $(srcroot)src/atomic.c $(srcroot)src/base.c $(srcroot)src/bitmap.c \ + $(srcroot)src/chunk.c $(srcroot)src/chunk_dss.c \ + $(srcroot)src/chunk_mmap.c $(srcroot)src/ckh.c $(srcroot)src/ctl.c \ + $(srcroot)src/extent.c $(srcroot)src/hash.c $(srcroot)src/huge.c \ + $(srcroot)src/mb.c $(srcroot)src/mutex.c $(srcroot)src/prof.c \ + $(srcroot)src/quarantine.c $(srcroot)src/rtree.c $(srcroot)src/stats.c \ + $(srcroot)src/tcache.c $(srcroot)src/util.c $(srcroot)src/tsd.c ifeq ($(enable_zone_allocator), 1) -CSRCS += $(srcroot)src/zone.c +C_SRCS += $(srcroot)src/zone.c endif ifeq ($(IMPORTLIB),$(SO)) STATIC_LIBS := $(objroot)lib/$(LIBJEMALLOC).$(A) @@ -103,27 +103,71 @@ DOCS_XML := $(objroot)doc/jemalloc$(install_suffix).xml DOCS_HTML := $(DOCS_XML:$(objroot)%.xml=$(srcroot)%.html) DOCS_MAN3 := $(DOCS_XML:$(objroot)%.xml=$(srcroot)%.3) DOCS := $(DOCS_HTML) $(DOCS_MAN3) -CTESTS := $(srcroot)test/aligned_alloc.c $(srcroot)test/allocated.c \ - $(srcroot)test/ALLOCM_ARENA.c $(srcroot)test/bitmap.c \ - $(srcroot)test/mremap.c $(srcroot)test/posix_memalign.c \ - $(srcroot)test/thread_arena.c $(srcroot)test/thread_tcache_enabled.c +C_TESTLIB_SRCS := $(srcroot)test/src/math.c $(srcroot)test/src/mtx.c \ + $(srcroot)test/src/SFMT.c $(srcroot)test/src/test.c \ + $(srcroot)test/src/thd.c +C_UTIL_INTEGRATION_SRCS := $(srcroot)src/util.c +TESTS_UNIT := $(srcroot)test/unit/bitmap.c \ + $(srcroot)test/unit/ckh.c \ + $(srcroot)test/unit/hash.c \ + $(srcroot)test/unit/junk.c \ + $(srcroot)test/unit/mallctl.c \ + $(srcroot)test/unit/math.c \ + $(srcroot)test/unit/mq.c \ + $(srcroot)test/unit/mtx.c \ + $(srcroot)test/unit/prof_accum.c \ + 
$(srcroot)test/unit/prof_gdump.c \ + $(srcroot)test/unit/prof_idump.c \ + $(srcroot)test/unit/ql.c \ + $(srcroot)test/unit/qr.c \ + $(srcroot)test/unit/quarantine.c \ + $(srcroot)test/unit/rb.c \ + $(srcroot)test/unit/rtree.c \ + $(srcroot)test/unit/SFMT.c \ + $(srcroot)test/unit/stats.c \ + $(srcroot)test/unit/tsd.c \ + $(srcroot)test/unit/util.c \ + $(srcroot)test/unit/zero.c +TESTS_INTEGRATION := $(srcroot)test/integration/aligned_alloc.c \ + $(srcroot)test/integration/allocated.c \ + $(srcroot)test/integration/mallocx.c \ + $(srcroot)test/integration/mremap.c \ + $(srcroot)test/integration/posix_memalign.c \ + $(srcroot)test/integration/rallocx.c \ + $(srcroot)test/integration/thread_arena.c \ + $(srcroot)test/integration/thread_tcache_enabled.c \ + $(srcroot)test/integration/xallocx.c ifeq ($(enable_experimental), 1) -CTESTS += $(srcroot)test/allocm.c $(srcroot)test/rallocm.c +TESTS_INTEGRATION += $(srcroot)test/integration/allocm.c \ + $(srcroot)test/integration/ALLOCM_ARENA.c \ + $(srcroot)test/integration/rallocm.c endif +TESTS_STRESS := +TESTS := $(TESTS_UNIT) $(TESTS_INTEGRATION) $(TESTS_STRESS) -COBJS := $(CSRCS:$(srcroot)%.c=$(objroot)%.$(O)) -CPICOBJS := $(CSRCS:$(srcroot)%.c=$(objroot)%.pic.$(O)) -CTESTOBJS := $(CTESTS:$(srcroot)%.c=$(objroot)%.$(O)) +C_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.$(O)) +C_PIC_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.pic.$(O)) +C_JET_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.jet.$(O)) +C_TESTLIB_UNIT_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.unit.$(O)) +C_TESTLIB_INTEGRATION_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.integration.$(O)) +C_UTIL_INTEGRATION_OBJS := $(C_UTIL_INTEGRATION_SRCS:$(srcroot)%.c=$(objroot)%.integration.$(O)) +C_TESTLIB_STRESS_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.stress.$(O)) +C_TESTLIB_OBJS := $(C_TESTLIB_UNIT_OBJS) $(C_TESTLIB_INTEGRATION_OBJS) $(C_UTIL_INTEGRATION_OBJS) $(C_TESTLIB_STRESS_OBJS) + +TESTS_UNIT_OBJS := $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%.$(O)) +TESTS_INTEGRATION_OBJS := $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%.$(O)) +TESTS_STRESS_OBJS := $(TESTS_STRESS:$(srcroot)%.c=$(objroot)%.$(O)) +TESTS_OBJS := $(TESTS_UNIT_OBJS) $(TESTS_INTEGRATION_OBJS) $(TESTS_STRESS_OBJS) .PHONY: all dist build_doc_html build_doc_man build_doc .PHONY: install_bin install_include install_lib .PHONY: install_doc_html install_doc_man install_doc install .PHONY: tests check clean distclean relclean -.SECONDARY : $(CTESTOBJS) +.SECONDARY : $(TESTS_OBJS) # Default target. -all: build +all: build_lib dist: build_doc @@ -141,30 +185,45 @@ build_doc: $(DOCS) # Include generated dependency files. 
# ifdef CC_MM --include $(COBJS:%.$(O)=%.d) --include $(CPICOBJS:%.$(O)=%.d) --include $(CTESTOBJS:%.$(O)=%.d) +-include $(C_OBJS:%.$(O)=%.d) +-include $(C_PIC_OBJS:%.$(O)=%.d) +-include $(C_JET_OBJS:%.$(O)=%.d) +-include $(C_TESTLIB_OBJS:%.$(O)=%.d) +-include $(TESTS_OBJS:%.$(O)=%.d) endif -$(COBJS): $(objroot)src/%.$(O): $(srcroot)src/%.c -$(CPICOBJS): $(objroot)src/%.pic.$(O): $(srcroot)src/%.c -$(CPICOBJS): CFLAGS += $(PIC_CFLAGS) -$(CTESTOBJS): $(objroot)test/%.$(O): $(srcroot)test/%.c -$(CTESTOBJS): CPPFLAGS += -I$(objroot)test +$(C_OBJS): $(objroot)src/%.$(O): $(srcroot)src/%.c +$(C_PIC_OBJS): $(objroot)src/%.pic.$(O): $(srcroot)src/%.c +$(C_PIC_OBJS): CFLAGS += $(PIC_CFLAGS) +$(C_JET_OBJS): $(objroot)src/%.jet.$(O): $(srcroot)src/%.c +$(C_JET_OBJS): CFLAGS += -DJEMALLOC_JET +$(C_TESTLIB_UNIT_OBJS): $(objroot)test/src/%.unit.$(O): $(srcroot)test/src/%.c +$(C_TESTLIB_UNIT_OBJS): CPPFLAGS += -DJEMALLOC_UNIT_TEST +$(C_TESTLIB_INTEGRATION_OBJS): $(objroot)test/src/%.integration.$(O): $(srcroot)test/src/%.c +$(C_TESTLIB_INTEGRATION_OBJS): CPPFLAGS += -DJEMALLOC_INTEGRATION_TEST +$(C_UTIL_INTEGRATION_OBJS): $(objroot)src/%.integration.$(O): $(srcroot)src/%.c +$(C_TESTLIB_STRESS_OBJS): $(objroot)test/src/%.stress.$(O): $(srcroot)test/src/%.c +$(C_TESTLIB_STRESS_OBJS): CPPFLAGS += -DJEMALLOC_STRESS_TEST -DJEMALLOC_STRESS_TESTLIB +$(C_TESTLIB_OBJS): CPPFLAGS += -I$(srcroot)test/include -I$(objroot)test/include +$(TESTS_UNIT_OBJS): CPPFLAGS += -DJEMALLOC_UNIT_TEST +$(TESTS_INTEGRATION_OBJS): CPPFLAGS += -DJEMALLOC_INTEGRATION_TEST +$(TESTS_STRESS_OBJS): CPPFLAGS += -DJEMALLOC_STRESS_TEST +$(TESTS_OBJS): $(objroot)test/%.$(O): $(srcroot)test/%.c +$(TESTS_OBJS): CPPFLAGS += -I$(srcroot)test/include -I$(objroot)test/include ifneq ($(IMPORTLIB),$(SO)) -$(COBJS): CPPFLAGS += -DDLLEXPORT +$(C_OBJS): CPPFLAGS += -DDLLEXPORT endif ifndef CC_MM -# Dependencies +# Dependencies. 
HEADER_DIRS = $(srcroot)include/jemalloc/internal \ $(objroot)include/jemalloc $(objroot)include/jemalloc/internal HEADERS = $(wildcard $(foreach dir,$(HEADER_DIRS),$(dir)/*.h)) -$(COBJS) $(CPICOBJS) $(CTESTOBJS): $(HEADERS) -$(CTESTOBJS): $(objroot)test/jemalloc_test.h +$(C_OBJS) $(C_PIC_OBJS) $(C_JET_OBJS) $(C_TESTLIB_OBJS) $(TESTS_OBJS): $(HEADERS) +$(TESTS_OBJS): $(objroot)test/unit/jemalloc_test.h endif -$(COBJS) $(CPICOBJS) $(CTESTOBJS): %.$(O): +$(C_OBJS) $(C_PIC_OBJS) $(C_JET_OBJS) $(C_TESTLIB_OBJS) $(TESTS_OBJS): %.$(O): @mkdir -p $(@D) $(CC) $(CFLAGS) -c $(CPPFLAGS) $(CTARGET) $< ifdef CC_MM @@ -177,27 +236,33 @@ ifneq ($(SOREV),$(SO)) ln -sf $( $(objroot)$${t}.out 2>&1; \ - if test -e "$(srcroot)$${t}.exp"; then \ - diff -w -u $(srcroot)$${t}.exp \ - $(objroot)$${t}.out >/dev/null 2>&1; \ - fail=$$?; \ - if test "$${fail}" -eq "1" ; then \ - failures=`expr $${failures} + 1`; \ - echo "*** FAIL ***"; \ - else \ - echo "pass"; \ - fi; \ - else \ - echo "*** FAIL *** (.exp file is missing)"; \ - failures=`expr $${failures} + 1`; \ - fi; \ - done; \ - echo "========================================="; \ - echo "Failures: $${failures}/$${total}"' +check_unit_dir: + @mkdir -p $(objroot)test/unit +check_integration_dir: + @mkdir -p $(objroot)test/integration +check_stress_dir: + @mkdir -p $(objroot)test/stress +check_dir: check_unit_dir check_integration_dir check_stress_dir + +check_unit: tests_unit check_unit_dir + $(SHELL) $(objroot)test/test.sh $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%) +check_integration: tests_integration check_integration_dir + $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%) +check_stress: tests_stress check_stress_dir + $(SHELL) $(objroot)test/test.sh $(TESTS_STRESS:$(srcroot)%.c=$(objroot)%) +check: tests check_dir + $(SHELL) $(objroot)test/test.sh $(TESTS:$(srcroot)%.c=$(objroot)%) + +ifeq ($(enable_code_coverage), 1) +coverage_unit: check_unit + $(SHELL) $(srcroot)coverage.sh $(srcroot)src jet $(C_JET_OBJS) + $(SHELL) $(srcroot)coverage.sh $(srcroot)test/src unit $(C_TESTLIB_UNIT_OBJS) + $(SHELL) $(srcroot)coverage.sh $(srcroot)test/unit unit $(TESTS_UNIT_OBJS) + +coverage_integration: check_integration + $(SHELL) $(srcroot)coverage.sh $(srcroot)src pic $(C_PIC_OBJS) + $(SHELL) $(srcroot)coverage.sh $(srcroot)src integration $(C_UTIL_INTEGRATION_OBJS) + $(SHELL) $(srcroot)coverage.sh $(srcroot)test/src integration $(C_TESTLIB_INTEGRATION_OBJS) + $(SHELL) $(srcroot)coverage.sh $(srcroot)test/integration integration $(TESTS_INTEGRATION_OBJS) + +coverage_stress: check_stress + $(SHELL) $(srcroot)coverage.sh $(srcroot)src pic $(C_PIC_OBJS) + $(SHELL) $(srcroot)coverage.sh $(srcroot)src jet $(C_JET_OBJS) + $(SHELL) $(srcroot)coverage.sh $(srcroot)test/src stress $(C_TESTLIB_STRESS_OBJS) + $(SHELL) $(srcroot)coverage.sh $(srcroot)test/stress stress $(TESTS_STRESS_OBJS) + +coverage: check + $(SHELL) $(srcroot)coverage.sh $(srcroot)src pic $(C_PIC_OBJS) + $(SHELL) $(srcroot)coverage.sh $(srcroot)src jet $(C_JET_OBJS) + $(SHELL) $(srcroot)coverage.sh $(srcroot)src integration $(C_UTIL_INTEGRATION_OBJS) + $(SHELL) $(srcroot)coverage.sh $(srcroot)test/src unit $(C_TESTLIB_UNIT_OBJS) + $(SHELL) $(srcroot)coverage.sh $(srcroot)test/src integration $(C_TESTLIB_INTEGRATION_OBJS) + $(SHELL) $(srcroot)coverage.sh $(srcroot)test/src stress $(C_TESTLIB_STRESS_OBJS) + $(SHELL) $(srcroot)coverage.sh $(srcroot)test/unit unit $(TESTS_UNIT_OBJS) + $(SHELL) $(srcroot)coverage.sh $(srcroot)test/integration integration $(TESTS_INTEGRATION_OBJS) + $(SHELL) 
$(srcroot)coverage.sh $(srcroot)test/stress stress $(TESTS_STRESS_OBJS) +endif clean: - rm -f $(COBJS) - rm -f $(CPICOBJS) - rm -f $(COBJS:%.$(O)=%.d) - rm -f $(CPICOBJS:%.$(O)=%.d) - rm -f $(CTESTOBJS:%.$(O)=%$(EXE)) - rm -f $(CTESTOBJS) - rm -f $(CTESTOBJS:%.$(O)=%.d) - rm -f $(CTESTOBJS:%.$(O)=%.out) + rm -f $(C_OBJS) + rm -f $(C_PIC_OBJS) + rm -f $(C_JET_OBJS) + rm -f $(C_TESTLIB_OBJS) + rm -f $(C_OBJS:%.$(O)=%.d) + rm -f $(C_OBJS:%.$(O)=%.gcda) + rm -f $(C_OBJS:%.$(O)=%.gcno) + rm -f $(C_PIC_OBJS:%.$(O)=%.d) + rm -f $(C_PIC_OBJS:%.$(O)=%.gcda) + rm -f $(C_PIC_OBJS:%.$(O)=%.gcno) + rm -f $(C_JET_OBJS:%.$(O)=%.d) + rm -f $(C_JET_OBJS:%.$(O)=%.gcda) + rm -f $(C_JET_OBJS:%.$(O)=%.gcno) + rm -f $(C_TESTLIB_OBJS:%.$(O)=%.d) + rm -f $(C_TESTLIB_OBJS:%.$(O)=%.gcda) + rm -f $(C_TESTLIB_OBJS:%.$(O)=%.gcno) + rm -f $(TESTS_OBJS:%.$(O)=%$(EXE)) + rm -f $(TESTS_OBJS) + rm -f $(TESTS_OBJS:%.$(O)=%.d) + rm -f $(TESTS_OBJS:%.$(O)=%.gcda) + rm -f $(TESTS_OBJS:%.$(O)=%.gcno) + rm -f $(TESTS_OBJS:%.$(O)=%.out) rm -f $(DSOS) $(STATIC_LIBS) + rm -f $(objroot)*.gcov.* distclean: clean rm -rf $(objroot)autom4te.cache + rm -f $(objroot)bin/jemalloc.sh rm -f $(objroot)config.log rm -f $(objroot)config.status rm -f $(objroot)config.stamp diff --git a/config.guess b/config.guess index d622a44e..b79252d6 100755 --- a/config.guess +++ b/config.guess @@ -1,14 +1,12 @@ #! /bin/sh # Attempt to guess a canonical system name. -# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, -# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, -# 2011, 2012 Free Software Foundation, Inc. +# Copyright 1992-2013 Free Software Foundation, Inc. -timestamp='2012-02-10' +timestamp='2013-06-10' # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or +# the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but @@ -22,19 +20,17 @@ timestamp='2012-02-10' # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under -# the same distribution terms that you use for the rest of that program. - - -# Originally written by Per Bothner. Please send patches (context -# diff format) to <config-patches@gnu.org> and include a ChangeLog -# entry. +# the same distribution terms that you use for the rest of that +# program. This Exception is an additional permission under section 7 +# of the GNU General Public License, version 3 ("GPLv3"). # -# This script attempts to guess a canonical system name similar to -# config.sub. If it succeeds, it prints the system name on stdout, and -# exits with 0. Otherwise, it exits with 1. +# Originally written by Per Bothner. # # You can get the latest version of this script from: # http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD +# +# Please send patches with a ChangeLog entry to config-patches@gnu.org. + me=`echo "$0" | sed -e 's,.*/,,'` @@ -54,9 +50,7 @@ version="\ GNU config.guess ($timestamp) Originally written by Per Bothner. -Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, -2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 -Free Software Foundation, Inc. +Copyright 1992-2013 Free Software Foundation, Inc.
This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." @@ -138,6 +132,27 @@ UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown +case "${UNAME_SYSTEM}" in +Linux|GNU|GNU/*) + # If the system lacks a compiler, then just pick glibc. + # We could probably try harder. + LIBC=gnu + + eval $set_cc_for_build + cat <<-EOF > $dummy.c + #include + #if defined(__UCLIBC__) + LIBC=uclibc + #elif defined(__dietlibc__) + LIBC=dietlibc + #else + LIBC=gnu + #endif + EOF + eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC'` + ;; +esac + # Note: order is significant - the case branches are not exclusive. case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in @@ -200,6 +215,10 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used. echo "${machine}-${os}${release}" exit ;; + *:Bitrig:*:*) + UNAME_MACHINE_ARCH=`arch | sed 's/Bitrig.//'` + echo ${UNAME_MACHINE_ARCH}-unknown-bitrig${UNAME_RELEASE} + exit ;; *:OpenBSD:*:*) UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'` echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE} @@ -302,7 +321,7 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*) echo arm-acorn-riscix${UNAME_RELEASE} exit ;; - arm:riscos:*:*|arm:RISCOS:*:*) + arm*:riscos:*:*|arm*:RISCOS:*:*) echo arm-unknown-riscos exit ;; SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*) @@ -801,6 +820,9 @@ EOF i*:CYGWIN*:*) echo ${UNAME_MACHINE}-pc-cygwin exit ;; + *:MINGW64*:*) + echo ${UNAME_MACHINE}-pc-mingw64 + exit ;; *:MINGW*:*) echo ${UNAME_MACHINE}-pc-mingw32 exit ;; @@ -852,21 +874,21 @@ EOF exit ;; *:GNU:*:*) # the GNU system - echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-gnu`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'` + echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-${LIBC}`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'` exit ;; *:GNU/*:*:*) # other systems with GNU libc and userland - echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-gnu + echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-${LIBC} exit ;; i*86:Minix:*:*) echo ${UNAME_MACHINE}-pc-minix exit ;; aarch64:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; aarch64_be:Linux:*:*) UNAME_MACHINE=aarch64_be - echo ${UNAME_MACHINE}-unknown-linux-gnu + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; alpha:Linux:*:*) case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in @@ -879,59 +901,54 @@ EOF EV68*) UNAME_MACHINE=alphaev68 ;; esac objdump --private-headers /bin/sh | grep -q ld.so.1 - if test "$?" = 0 ; then LIBC="libc1" ; else LIBC="" ; fi - echo ${UNAME_MACHINE}-unknown-linux-gnu${LIBC} + if test "$?" 
= 0 ; then LIBC="gnulibc1" ; fi + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + arc:Linux:*:* | arceb:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; arm*:Linux:*:*) eval $set_cc_for_build if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ARM_EABI__ then - echo ${UNAME_MACHINE}-unknown-linux-gnu + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} else if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ARM_PCS_VFP then - echo ${UNAME_MACHINE}-unknown-linux-gnueabi + echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabi else - echo ${UNAME_MACHINE}-unknown-linux-gnueabihf + echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabihf fi fi exit ;; avr32*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; cris:Linux:*:*) - echo ${UNAME_MACHINE}-axis-linux-gnu + echo ${UNAME_MACHINE}-axis-linux-${LIBC} exit ;; crisv32:Linux:*:*) - echo ${UNAME_MACHINE}-axis-linux-gnu + echo ${UNAME_MACHINE}-axis-linux-${LIBC} exit ;; frv:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; hexagon:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; i*86:Linux:*:*) - LIBC=gnu - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c - #ifdef __dietlibc__ - LIBC=dietlibc - #endif -EOF - eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC'` - echo "${UNAME_MACHINE}-pc-linux-${LIBC}" + echo ${UNAME_MACHINE}-pc-linux-${LIBC} exit ;; ia64:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; m32r*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; m68*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; mips:Linux:*:* | mips64:Linux:*:*) eval $set_cc_for_build @@ -950,54 +967,63 @@ EOF #endif EOF eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'` - test x"${CPU}" != x && { echo "${CPU}-unknown-linux-gnu"; exit; } + test x"${CPU}" != x && { echo "${CPU}-unknown-linux-${LIBC}"; exit; } ;; + or1k:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; or32:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; padre:Linux:*:*) - echo sparc-unknown-linux-gnu + echo sparc-unknown-linux-${LIBC} exit ;; parisc64:Linux:*:* | hppa64:Linux:*:*) - echo hppa64-unknown-linux-gnu + echo hppa64-unknown-linux-${LIBC} exit ;; parisc:Linux:*:* | hppa:Linux:*:*) # Look for CPU level case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in - PA7*) echo hppa1.1-unknown-linux-gnu ;; - PA8*) echo hppa2.0-unknown-linux-gnu ;; - *) echo hppa-unknown-linux-gnu ;; + PA7*) echo hppa1.1-unknown-linux-${LIBC} ;; + PA8*) echo hppa2.0-unknown-linux-${LIBC} ;; + *) echo hppa-unknown-linux-${LIBC} ;; esac exit ;; ppc64:Linux:*:*) - echo powerpc64-unknown-linux-gnu + echo powerpc64-unknown-linux-${LIBC} exit ;; ppc:Linux:*:*) - echo powerpc-unknown-linux-gnu + echo powerpc-unknown-linux-${LIBC} + exit ;; + ppc64le:Linux:*:*) + echo powerpc64le-unknown-linux-${LIBC} + exit ;; + ppcle:Linux:*:*) + echo powerpcle-unknown-linux-${LIBC} exit ;; s390:Linux:*:* | s390x:Linux:*:*) - echo ${UNAME_MACHINE}-ibm-linux + echo ${UNAME_MACHINE}-ibm-linux-${LIBC} exit ;; sh64*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; sh*:Linux:*:*) - echo 
${UNAME_MACHINE}-unknown-linux-gnu + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; sparc:Linux:*:* | sparc64:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; tile*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; vax:Linux:*:*) - echo ${UNAME_MACHINE}-dec-linux-gnu + echo ${UNAME_MACHINE}-dec-linux-${LIBC} exit ;; x86_64:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; xtensa*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; i*86:DYNIX/ptx:4*:*) # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there. @@ -1201,6 +1227,9 @@ EOF BePC:Haiku:*:*) # Haiku running on Intel PC compatible. echo i586-pc-haiku exit ;; + x86_64:Haiku:*:*) + echo x86_64-unknown-haiku + exit ;; SX-4:SUPER-UX:*:*) echo sx4-nec-superux${UNAME_RELEASE} exit ;; @@ -1227,19 +1256,21 @@ EOF exit ;; *:Darwin:*:*) UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown - case $UNAME_PROCESSOR in - i386) - eval $set_cc_for_build - if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then - if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \ - (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \ - grep IS_64BIT_ARCH >/dev/null - then - UNAME_PROCESSOR="x86_64" - fi - fi ;; - unknown) UNAME_PROCESSOR=powerpc ;; - esac + eval $set_cc_for_build + if test "$UNAME_PROCESSOR" = unknown ; then + UNAME_PROCESSOR=powerpc + fi + if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then + if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \ + (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_64BIT_ARCH >/dev/null + then + case $UNAME_PROCESSOR in + i386) UNAME_PROCESSOR=x86_64 ;; + powerpc) UNAME_PROCESSOR=powerpc64 ;; + esac + fi + fi echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE} exit ;; *:procnto*:*:* | *:QNX:[0123456789]*:*) @@ -1256,7 +1287,7 @@ EOF NEO-?:NONSTOP_KERNEL:*:*) echo neo-tandem-nsk${UNAME_RELEASE} exit ;; - NSE-?:NONSTOP_KERNEL:*:*) + NSE-*:NONSTOP_KERNEL:*:*) echo nse-tandem-nsk${UNAME_RELEASE} exit ;; NSR-?:NONSTOP_KERNEL:*:*) @@ -1330,9 +1361,6 @@ EOF exit ;; esac -#echo '(No uname command or uname output not recognized.)' 1>&2 -#echo "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" 1>&2 - eval $set_cc_for_build cat >$dummy.c <. @@ -26,11 +20,12 @@ timestamp='2012-02-10' # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under -# the same distribution terms that you use for the rest of that program. +# the same distribution terms that you use for the rest of that +# program. This Exception is an additional permission under section 7 +# of the GNU General Public License, version 3 ("GPLv3"). -# Please send patches to . Submit a context -# diff and a properly formatted GNU ChangeLog entry. +# Please send patches with a ChangeLog entry to config-patches@gnu.org. # # Configuration subroutine to validate and canonicalize a configuration type. # Supply the specified configuration type as an argument. @@ -73,9 +68,7 @@ Report bugs and patches to ." version="\ GNU config.sub ($timestamp) -Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, -2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 -Free Software Foundation, Inc. +Copyright 1992-2013 Free Software Foundation, Inc. 
This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." @@ -123,7 +116,7 @@ esac maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'` case $maybe_os in nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \ - linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \ + linux-musl* | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \ knetbsd*-gnu* | netbsd*-gnu* | \ kopensolaris*-gnu* | \ storm-chaos* | os2-emx* | rtmk-nova*) @@ -156,7 +149,7 @@ case $os in -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\ -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \ -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \ - -apple | -axis | -knuth | -cray | -microblaze) + -apple | -axis | -knuth | -cray | -microblaze*) os= basic_machine=$1 ;; @@ -225,6 +218,12 @@ case $os in -isc*) basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; + -lynx*178) + os=-lynxos178 + ;; + -lynx*5) + os=-lynxos5 + ;; -lynx*) os=-lynxos ;; @@ -253,10 +252,12 @@ case $basic_machine in | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \ | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \ | am33_2.0 \ - | arc | arm | arm[bl]e | arme[lb] | armv[2345] | armv[345][lb] | avr | avr32 \ - | be32 | be64 \ + | arc | arceb \ + | arm | arm[bl]e | arme[lb] | armv[2-8] | armv[3-8][lb] | armv7[arm] \ + | avr | avr32 \ + | be32 | be64 \ | bfin \ - | c4x | clipper \ + | c4x | c8051 | clipper \ | d10v | d30v | dlx | dsp16xx \ | epiphany \ | fido | fr30 | frv \ @@ -264,10 +265,11 @@ case $basic_machine in | hexagon \ | i370 | i860 | i960 | ia64 \ | ip2k | iq2000 \ + | k1om \ | le32 | le64 \ | lm32 \ | m32c | m32r | m32rle | m68000 | m68k | m88k \ - | maxq | mb | microblaze | mcore | mep | metag \ + | maxq | mb | microblaze | microblazeel | mcore | mep | metag \ | mips | mipsbe | mipseb | mipsel | mipsle \ | mips16 \ | mips64 | mips64el \ @@ -285,16 +287,17 @@ case $basic_machine in | mipsisa64r2 | mipsisa64r2el \ | mipsisa64sb1 | mipsisa64sb1el \ | mipsisa64sr71k | mipsisa64sr71kel \ + | mipsr5900 | mipsr5900el \ | mipstx39 | mipstx39el \ | mn10200 | mn10300 \ | moxie \ | mt \ | msp430 \ | nds32 | nds32le | nds32be \ - | nios | nios2 \ + | nios | nios2 | nios2eb | nios2el \ | ns16k | ns32k \ | open8 \ - | or32 \ + | or1k | or32 \ | pdp10 | pdp11 | pj | pjl \ | powerpc | powerpc64 | powerpc64le | powerpcle \ | pyramid \ @@ -322,7 +325,7 @@ case $basic_machine in c6x) basic_machine=tic6x-unknown ;; - m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | picochip) + m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | nvptx | picochip) basic_machine=$basic_machine-unknown os=-none ;; @@ -364,13 +367,13 @@ case $basic_machine in | aarch64-* | aarch64_be-* \ | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \ | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \ - | alphapca5[67]-* | alpha64pca5[67]-* | arc-* \ + | alphapca5[67]-* | alpha64pca5[67]-* | arc-* | arceb-* \ | arm-* | armbe-* | armle-* | armeb-* | armv*-* \ | avr-* | avr32-* \ | be32-* | be64-* \ | bfin-* | bs2000-* \ | c[123]* | c30-* | [cjt]90-* | c4x-* \ - | clipper-* | craynv-* | cydra-* \ + | c8051-* | clipper-* | craynv-* | cydra-* \ | d10v-* | d30v-* | dlx-* \ | elxsi-* \ | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \ @@ -379,11 +382,13 @@ case $basic_machine in | hexagon-* \ | i*86-* | i860-* | i960-* | ia64-* \ | 
ip2k-* | iq2000-* \ + | k1om-* \ | le32-* | le64-* \ | lm32-* \ | m32c-* | m32r-* | m32rle-* \ | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \ - | m88110-* | m88k-* | maxq-* | mcore-* | metag-* | microblaze-* \ + | m88110-* | m88k-* | maxq-* | mcore-* | metag-* \ + | microblaze-* | microblazeel-* \ | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \ | mips16-* \ | mips64-* | mips64el-* \ @@ -401,12 +406,13 @@ case $basic_machine in | mipsisa64r2-* | mipsisa64r2el-* \ | mipsisa64sb1-* | mipsisa64sb1el-* \ | mipsisa64sr71k-* | mipsisa64sr71kel-* \ + | mipsr5900-* | mipsr5900el-* \ | mipstx39-* | mipstx39el-* \ | mmix-* \ | mt-* \ | msp430-* \ | nds32-* | nds32le-* | nds32be-* \ - | nios-* | nios2-* \ + | nios-* | nios2-* | nios2eb-* | nios2el-* \ | none-* | np1-* | ns16k-* | ns32k-* \ | open8-* \ | orion-* \ @@ -782,11 +788,15 @@ case $basic_machine in basic_machine=ns32k-utek os=-sysv ;; - microblaze) + microblaze*) basic_machine=microblaze-xilinx ;; + mingw64) + basic_machine=x86_64-pc + os=-mingw64 + ;; mingw32) - basic_machine=i386-pc + basic_machine=i686-pc os=-mingw32 ;; mingw32ce) @@ -822,7 +832,7 @@ case $basic_machine in basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'` ;; msys) - basic_machine=i386-pc + basic_machine=i686-pc os=-msys ;; mvs) @@ -1013,7 +1023,11 @@ case $basic_machine in basic_machine=i586-unknown os=-pw32 ;; - rdos) + rdos | rdos64) + basic_machine=x86_64-pc + os=-rdos + ;; + rdos32) basic_machine=i386-pc os=-rdos ;; @@ -1340,21 +1354,21 @@ case $os in -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \ | -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\ | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \ - | -sym* | -kopensolaris* \ + | -sym* | -kopensolaris* | -plan9* \ | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \ | -aos* | -aros* \ | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \ | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \ | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \ - | -openbsd* | -solidbsd* \ + | -bitrig* | -openbsd* | -solidbsd* \ | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \ | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \ | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \ | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \ | -chorusos* | -chorusrdb* | -cegcc* \ | -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \ - | -mingw32* | -linux-gnu* | -linux-android* \ - | -linux-newlib* | -linux-uclibc* \ + | -mingw32* | -mingw64* | -linux-gnu* | -linux-android* \ + | -linux-newlib* | -linux-musl* | -linux-uclibc* \ | -uxpv* | -beos* | -mpeix* | -udk* \ | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \ | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \ @@ -1486,9 +1500,6 @@ case $os in -aros*) os=-aros ;; - -kaos*) - os=-kaos - ;; -zvmoe) os=-zvmoe ;; @@ -1537,6 +1548,12 @@ case $basic_machine in c4x-* | tic4x-*) os=-coff ;; + c8051-*) + os=-elf + ;; + hexagon-*) + os=-elf + ;; tic54x-*) os=-coff ;; @@ -1577,6 +1594,9 @@ case $basic_machine in mips*-*) os=-elf ;; + or1k-*) + os=-elf + ;; or32-*) os=-coff ;; diff --git a/configure.ac b/configure.ac index 73d3f94f..61fd868b 100644 --- a/configure.ac +++ b/configure.ac @@ -199,6 +199,7 @@ case "${host_cpu}" in if test "x${je_cv_asm}" = "xyes" ; then CPU_SPINWAIT='__asm__ volatile("pause")' fi + AC_DEFINE_UNQUOTED([HAVE_SSE2], [ ]) ;; x86_64) JE_COMPILABLE([__asm__ syntax], [], @@ -206,6 +207,10 @@ case 
"${host_cpu}" in if test "x${je_cv_asm}" = "xyes" ; then CPU_SPINWAIT='__asm__ volatile("pause")' fi + AC_DEFINE_UNQUOTED([HAVE_SSE2], [ ]) + ;; + powerpc) + AC_DEFINE_UNQUOTED([HAVE_ALTIVEC], [ ]) ;; *) ;; @@ -256,6 +261,7 @@ case "${host}" in force_tls="0" DSO_LDFLAGS='-shared -Wl,-dylib_install_name,$(@F)' SOREV="${rev}.${so}" + sbrk_deprecated="1" ;; *-*-freebsd*) CFLAGS="$CFLAGS" @@ -355,11 +361,6 @@ AC_SUBST([ARFLAGS]) AC_SUBST([AROUT]) AC_SUBST([CC_MM]) -if test "x$abi" != "xpecoff"; then - dnl Heap profiling uses the log(3) function. - LIBS="$LIBS -lm" -fi - JE_COMPILABLE([__attribute__ syntax], [static __attribute__((unused)) void foo(void){}], [], @@ -416,7 +417,7 @@ AC_PROG_RANLIB AC_PATH_PROG([LD], [ld], [false], [$PATH]) AC_PATH_PROG([AUTOCONF], [autoconf], [false], [$PATH]) -public_syms="malloc_conf malloc_message malloc calloc posix_memalign aligned_alloc realloc free malloc_usable_size malloc_stats_print mallctl mallctlnametomib mallctlbymib" +public_syms="malloc_conf malloc_message malloc calloc posix_memalign aligned_alloc realloc free mallocx rallocx xallocx sallocx dallocx nallocx mallctl mallctlnametomib mallctlbymib malloc_stats_print malloc_usable_size" dnl Check for allocator-related functions that should be wrapped. AC_CHECK_FUNC([memalign], @@ -444,18 +445,35 @@ if test "x$enable_experimental" = "x1" ; then fi AC_SUBST([enable_experimental]) +dnl Do not compute test code coverage by default. +GCOV_FLAGS= +AC_ARG_ENABLE([code-coverage], + [AS_HELP_STRING([--enable-code-coverage], + [Enable code coverage])], +[if test "x$enable_code_coverage" = "xno" ; then + enable_code_coverage="0" +else + enable_code_coverage="1" +fi +], +[enable_code_coverage="0"] +) +if test "x$enable_code_coverage" = "x1" ; then + deoptimize="no" + echo "$CFLAGS $EXTRA_CFLAGS" | grep '\-O' >/dev/null || deoptimize="yes" + if test "x${deoptimize}" = "xyes" ; then + JE_CFLAGS_APPEND([-O0]) + fi + JE_CFLAGS_APPEND([-fprofile-arcs -ftest-coverage]) + EXTRA_LDFLAGS="$EXTRA_LDFLAGS -fprofile-arcs -ftest-coverage" + AC_DEFINE([JEMALLOC_CODE_COVERAGE], [ ]) +fi +AC_SUBST([enable_code_coverage]) + dnl Perform no name mangling by default. AC_ARG_WITH([mangling], [AS_HELP_STRING([--with-mangling=], [Mangle symbols in ])], [mangling_map="$with_mangling"], [mangling_map=""]) -for nm in `echo ${mangling_map} |tr ',' ' '` ; do - k="`echo ${nm} |tr ':' ' ' |awk '{print $1}'`" - n="je_${k}" - m=`echo ${nm} |tr ':' ' ' |awk '{print $2}'` - AC_DEFINE_UNQUOTED([${n}], [${m}]) - dnl Remove key from public_syms so that it isn't redefined later. - public_syms=`for sym in ${public_syms}; do echo "${sym}"; done |grep -v "^${k}\$" |tr '\n' ' '` -done dnl Do not prefix public APIs by default. AC_ARG_WITH([jemalloc_prefix], @@ -472,14 +490,6 @@ if test "x$JEMALLOC_PREFIX" != "x" ; then AC_DEFINE_UNQUOTED([JEMALLOC_PREFIX], ["$JEMALLOC_PREFIX"]) AC_DEFINE_UNQUOTED([JEMALLOC_CPREFIX], ["$JEMALLOC_CPREFIX"]) fi -dnl Generate macros to rename public symbols. All public symbols are prefixed -dnl with je_ in the source code, so these macro definitions are needed even if -dnl --with-jemalloc-prefix wasn't specified. -for stem in ${public_syms}; do - n="je_${stem}" - m="${JEMALLOC_PREFIX}${stem}" - AC_DEFINE_UNQUOTED([${n}], [${m}]) -done AC_ARG_WITH([export], [AS_HELP_STRING([--without-export], [disable exporting jemalloc public APIs])], @@ -488,18 +498,15 @@ AC_ARG_WITH([export], fi] ) -dnl Do not mangle library-private APIs by default. +dnl Mangle library-private APIs. 
AC_ARG_WITH([private_namespace], [AS_HELP_STRING([--with-private-namespace=<prefix>], [Prefix to prepend to all library-private APIs])], - [JEMALLOC_PRIVATE_NAMESPACE="$with_private_namespace"], - [JEMALLOC_PRIVATE_NAMESPACE=""] + [JEMALLOC_PRIVATE_NAMESPACE="${with_private_namespace}je_"], + [JEMALLOC_PRIVATE_NAMESPACE="je_"] ) -AC_DEFINE_UNQUOTED([JEMALLOC_PRIVATE_NAMESPACE], ["$JEMALLOC_PRIVATE_NAMESPACE"]) -if test "x$JEMALLOC_PRIVATE_NAMESPACE" != "x" ; then - AC_DEFINE_UNQUOTED([JEMALLOC_N(string_that_no_one_should_want_to_use_as_a_jemalloc_private_namespace_prefix)], [${JEMALLOC_PRIVATE_NAMESPACE}##string_that_no_one_should_want_to_use_as_a_jemalloc_private_namespace_prefix]) -else - AC_DEFINE_UNQUOTED([JEMALLOC_N(string_that_no_one_should_want_to_use_as_a_jemalloc_private_namespace_prefix)], [string_that_no_one_should_want_to_use_as_a_jemalloc_private_namespace_prefix]) -fi +AC_DEFINE_UNQUOTED([JEMALLOC_PRIVATE_NAMESPACE], [$JEMALLOC_PRIVATE_NAMESPACE]) +private_namespace="$JEMALLOC_PRIVATE_NAMESPACE" +AC_SUBST([private_namespace]) dnl Do not add suffix to installed files by default. AC_ARG_WITH([install_suffix], @@ -510,37 +517,72 @@ AC_ARG_WITH([install_suffix], install_suffix="$INSTALL_SUFFIX" AC_SUBST([install_suffix]) +dnl Substitute @je_@ in jemalloc_protos.h.in, primarily to make generation of +dnl jemalloc_protos_jet.h easy. +je_="je_" +AC_SUBST([je_]) + cfgoutputs_in="${srcroot}Makefile.in" cfgoutputs_in="${cfgoutputs_in} ${srcroot}doc/html.xsl.in" cfgoutputs_in="${cfgoutputs_in} ${srcroot}doc/manpages.xsl.in" cfgoutputs_in="${cfgoutputs_in} ${srcroot}doc/jemalloc.xml.in" -cfgoutputs_in="${cfgoutputs_in} ${srcroot}include/jemalloc/jemalloc.h.in" +cfgoutputs_in="${cfgoutputs_in} ${srcroot}include/jemalloc/jemalloc_macros.h.in" +cfgoutputs_in="${cfgoutputs_in} ${srcroot}include/jemalloc/jemalloc_protos.h.in" cfgoutputs_in="${cfgoutputs_in} ${srcroot}include/jemalloc/internal/jemalloc_internal.h.in" -cfgoutputs_in="${cfgoutputs_in} ${srcroot}test/jemalloc_test.h.in" +cfgoutputs_in="${cfgoutputs_in} ${srcroot}test/test.sh.in" +cfgoutputs_in="${cfgoutputs_in} ${srcroot}test/include/test/jemalloc_test.h.in" cfgoutputs_out="Makefile" cfgoutputs_out="${cfgoutputs_out} doc/html.xsl" cfgoutputs_out="${cfgoutputs_out} doc/manpages.xsl" -cfgoutputs_out="${cfgoutputs_out} doc/jemalloc${install_suffix}.xml" -cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc${install_suffix}.h" +cfgoutputs_out="${cfgoutputs_out} doc/jemalloc.xml" +cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_macros.h" +cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_protos.h" cfgoutputs_out="${cfgoutputs_out} include/jemalloc/internal/jemalloc_internal.h" -cfgoutputs_out="${cfgoutputs_out} test/jemalloc_test.h" +cfgoutputs_out="${cfgoutputs_out} test/test.sh" +cfgoutputs_out="${cfgoutputs_out} test/include/test/jemalloc_test.h" cfgoutputs_tup="Makefile" cfgoutputs_tup="${cfgoutputs_tup} doc/html.xsl:doc/html.xsl.in" cfgoutputs_tup="${cfgoutputs_tup} doc/manpages.xsl:doc/manpages.xsl.in" -cfgoutputs_tup="${cfgoutputs_tup} doc/jemalloc${install_suffix}.xml:doc/jemalloc.xml.in" -cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc${install_suffix}.h:include/jemalloc/jemalloc.h.in" +cfgoutputs_tup="${cfgoutputs_tup} doc/jemalloc.xml:doc/jemalloc.xml.in" +cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_macros.h:include/jemalloc/jemalloc_macros.h.in" +cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_protos.h:include/jemalloc/jemalloc_protos.h.in"
cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/internal/jemalloc_internal.h" -cfgoutputs_tup="${cfgoutputs_tup} test/jemalloc_test.h:test/jemalloc_test.h.in" +cfgoutputs_tup="${cfgoutputs_tup} test/test.sh:test/test.sh.in" +cfgoutputs_tup="${cfgoutputs_tup} test/include/test/jemalloc_test.h:test/include/test/jemalloc_test.h.in" cfghdrs_in="${srcroot}include/jemalloc/jemalloc_defs.h.in" +cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/internal/jemalloc_internal_defs.h.in" +cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/internal/private_namespace.sh" +cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/internal/private_unnamespace.sh" +cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/internal/private_symbols.txt" +cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/internal/public_namespace.sh" +cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/internal/public_unnamespace.sh" cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/internal/size_classes.sh" +cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/jemalloc_rename.sh" +cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/jemalloc_mangle.sh" +cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/jemalloc.sh" +cfghdrs_in="${cfghdrs_in} ${srcroot}test/include/test/jemalloc_test_defs.h.in" -cfghdrs_out="include/jemalloc/jemalloc_defs${install_suffix}.h" +cfghdrs_out="include/jemalloc/jemalloc_defs.h" +cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc${install_suffix}.h" +cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/private_namespace.h" +cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/private_unnamespace.h" +cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_symbols.txt" +cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_namespace.h" +cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_unnamespace.h" cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/size_classes.h" +cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_protos_jet.h" +cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_rename.h" +cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_mangle.h" +cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_mangle_jet.h" +cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/jemalloc_internal_defs.h" +cfghdrs_out="${cfghdrs_out} test/include/test/jemalloc_test_defs.h" -cfghdrs_tup="include/jemalloc/jemalloc_defs${install_suffix}.h:include/jemalloc/jemalloc_defs.h.in" +cfghdrs_tup="include/jemalloc/jemalloc_defs.h:include/jemalloc/jemalloc_defs.h.in" +cfghdrs_tup="${cfghdrs_tup} include/jemalloc/internal/jemalloc_internal_defs.h:${srcroot}include/jemalloc/internal/jemalloc_internal_defs.h.in" +cfghdrs_tup="${cfghdrs_tup} test/include/test/jemalloc_test_defs.h:${srcroot}test/include/test/jemalloc_test_defs.h.in" dnl Do not silence irrelevant compiler warnings by default, since enabling this dnl option incurs a performance penalty. @@ -595,7 +637,7 @@ dnl Only optimize if not debugging. if test "x$enable_debug" = "x0" -a "x$no_CFLAGS" = "xyes" ; then dnl Make sure that an optimization flag was not specified in EXTRA_CFLAGS. 
optimize="no" - echo "$EXTRA_CFLAGS" | grep "\-O" >/dev/null || optimize="yes" + echo "$CFLAGS $EXTRA_CFLAGS" | grep '\-O' >/dev/null || optimize="yes" if test "x${optimize}" = "xyes" ; then if test "x$GCC" = "xyes" ; then JE_CFLAGS_APPEND([-O3]) @@ -748,6 +790,12 @@ if test "x$enable_prof" = "x1" ; then AC_MSG_ERROR([Heap profiling requires TLS]); fi force_tls="1" + + if test "x$abi" != "xpecoff"; then + dnl Heap profiling uses the log(3) function. + LIBS="$LIBS -lm" + fi + AC_DEFINE([JEMALLOC_PROF], [ ]) fi AC_SUBST([enable_prof]) @@ -781,7 +829,7 @@ fi ) if test "x$enable_mremap" = "x1" ; then JE_COMPILABLE([mremap(...MREMAP_FIXED...)], [ -#define _GNU_SOURCE +#define _GNU_SOURCE #include ], [ void *p = mremap((void *)0, 0, 0, MREMAP_MAYMOVE|MREMAP_FIXED, (void *)0); @@ -825,7 +873,12 @@ fi dnl Check whether the BSD/SUSv1 sbrk() exists. If not, disable DSS support. AC_CHECK_FUNC([sbrk], [have_sbrk="1"], [have_sbrk="0"]) if test "x$have_sbrk" = "x1" ; then - AC_DEFINE([JEMALLOC_HAVE_SBRK], [ ]) + if test "x$sbrk_deprecated" == "x1" ; then + AC_MSG_RESULT([Disabling dss allocation because sbrk is deprecated]) + enable_dss="0" + else + AC_DEFINE([JEMALLOC_HAVE_SBRK], [ ]) + fi else enable_dss="0" fi @@ -1271,9 +1324,102 @@ dnl ============================================================================ dnl Check for typedefs, structures, and compiler characteristics. AC_HEADER_STDBOOL +dnl ============================================================================ +dnl Define commands that generate output files. + +AC_CONFIG_COMMANDS([include/jemalloc/internal/private_namespace.h], [ + mkdir -p "${objroot}include/jemalloc/internal" + "${srcdir}/include/jemalloc/internal/private_namespace.sh" "${srcdir}/include/jemalloc/internal/private_symbols.txt" > "${objroot}include/jemalloc/internal/private_namespace.h" +], [ + srcdir="${srcdir}" + objroot="${objroot}" +]) +AC_CONFIG_COMMANDS([include/jemalloc/internal/private_unnamespace.h], [ + mkdir -p "${objroot}include/jemalloc/internal" + "${srcdir}/include/jemalloc/internal/private_unnamespace.sh" "${srcdir}/include/jemalloc/internal/private_symbols.txt" > "${objroot}include/jemalloc/internal/private_unnamespace.h" +], [ + srcdir="${srcdir}" + objroot="${objroot}" +]) +AC_CONFIG_COMMANDS([include/jemalloc/internal/public_symbols.txt], [ + f="${objroot}include/jemalloc/internal/public_symbols.txt" + mkdir -p "${objroot}include/jemalloc/internal" + cp /dev/null "${f}" + for nm in `echo ${mangling_map} |tr ',' ' '` ; do + n=`echo ${nm} |tr ':' ' ' |awk '{print $[]1}'` + m=`echo ${nm} |tr ':' ' ' |awk '{print $[]2}'` + echo "${n}:${m}" >> "${f}" + dnl Remove name from public_syms so that it isn't redefined later. 
+ public_syms=`for sym in ${public_syms}; do echo "${sym}"; done |grep -v "^${n}\$" |tr '\n' ' '` + done + for sym in ${public_syms} ; do + n="${sym}" + m="${JEMALLOC_PREFIX}${sym}" + echo "${n}:${m}" >> "${f}" + done +], [ + srcdir="${srcdir}" + objroot="${objroot}" + mangling_map="${mangling_map}" + public_syms="${public_syms}" + JEMALLOC_PREFIX="${JEMALLOC_PREFIX}" +]) +AC_CONFIG_COMMANDS([include/jemalloc/internal/public_namespace.h], [ + mkdir -p "${objroot}include/jemalloc/internal" + "${srcdir}/include/jemalloc/internal/public_namespace.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/internal/public_namespace.h" +], [ + srcdir="${srcdir}" + objroot="${objroot}" +]) +AC_CONFIG_COMMANDS([include/jemalloc/internal/public_unnamespace.h], [ + mkdir -p "${objroot}include/jemalloc/internal" + "${srcdir}/include/jemalloc/internal/public_unnamespace.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/internal/public_unnamespace.h" +], [ + srcdir="${srcdir}" + objroot="${objroot}" +]) AC_CONFIG_COMMANDS([include/jemalloc/internal/size_classes.h], [ - mkdir -p "include/jemalloc/internal" + mkdir -p "${objroot}include/jemalloc/internal" "${srcdir}/include/jemalloc/internal/size_classes.sh" > "${objroot}include/jemalloc/internal/size_classes.h" +], [ + srcdir="${srcdir}" + objroot="${objroot}" +]) +AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_protos_jet.h], [ + mkdir -p "${objroot}include/jemalloc" + cat "${srcdir}/include/jemalloc/jemalloc_protos.h.in" | sed -e 's/@je_@/jet_/g' > "${objroot}include/jemalloc/jemalloc_protos_jet.h" +], [ + srcdir="${srcdir}" + objroot="${objroot}" +]) +AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_rename.h], [ + mkdir -p "${objroot}include/jemalloc" + "${srcdir}/include/jemalloc/jemalloc_rename.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/jemalloc_rename.h" +], [ + srcdir="${srcdir}" + objroot="${objroot}" +]) +AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_mangle.h], [ + mkdir -p "${objroot}include/jemalloc" + "${srcdir}/include/jemalloc/jemalloc_mangle.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" je_ > "${objroot}include/jemalloc/jemalloc_mangle.h" +], [ + srcdir="${srcdir}" + objroot="${objroot}" +]) +AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_mangle_jet.h], [ + mkdir -p "${objroot}include/jemalloc" + "${srcdir}/include/jemalloc/jemalloc_mangle.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" jet_ > "${objroot}include/jemalloc/jemalloc_mangle_jet.h" +], [ + srcdir="${srcdir}" + objroot="${objroot}" +]) +AC_CONFIG_COMMANDS([include/jemalloc/jemalloc.h], [ + mkdir -p "${objroot}include/jemalloc" + "${srcdir}/include/jemalloc/jemalloc.sh" "${objroot}" > "${objroot}include/jemalloc/jemalloc${install_suffix}.h" +], [ + srcdir="${srcdir}" + objroot="${objroot}" + install_suffix="${install_suffix}" ]) dnl Process .in files. @@ -1283,6 +1429,7 @@ AC_CONFIG_HEADERS([$cfghdrs_tup]) dnl ============================================================================ dnl Generate outputs. 
+ AC_CONFIG_FILES([$cfgoutputs_tup config.stamp bin/jemalloc.sh]) AC_SUBST([cfgoutputs_in]) AC_SUBST([cfgoutputs_out]) @@ -1298,6 +1445,7 @@ AC_MSG_RESULT([CC : ${CC}]) AC_MSG_RESULT([CPPFLAGS : ${CPPFLAGS}]) AC_MSG_RESULT([CFLAGS : ${CFLAGS}]) AC_MSG_RESULT([LDFLAGS : ${LDFLAGS}]) +AC_MSG_RESULT([EXTRA_LDFLAGS : ${EXTRA_LDFLAGS}]) AC_MSG_RESULT([LIBS : ${LIBS}]) AC_MSG_RESULT([RPATH_EXTRA : ${RPATH_EXTRA}]) AC_MSG_RESULT([]) @@ -1324,6 +1472,7 @@ AC_MSG_RESULT([autogen : ${enable_autogen}]) AC_MSG_RESULT([experimental : ${enable_experimental}]) AC_MSG_RESULT([cc-silence : ${enable_cc_silence}]) AC_MSG_RESULT([debug : ${enable_debug}]) +AC_MSG_RESULT([code-coverage : ${enable_code_coverage}]) AC_MSG_RESULT([stats : ${enable_stats}]) AC_MSG_RESULT([prof : ${enable_prof}]) AC_MSG_RESULT([prof-libunwind : ${enable_prof_libunwind}]) diff --git a/coverage.sh b/coverage.sh new file mode 100755 index 00000000..6d1362a8 --- /dev/null +++ b/coverage.sh @@ -0,0 +1,16 @@ +#!/bin/sh + +set -e + +objdir=$1 +suffix=$2 +shift 2 +objs=$@ + +gcov -b -p -f -o "${objdir}" ${objs} + +# Move gcov outputs so that subsequent gcov invocations won't clobber results +# for the same sources with different compilation flags. +for f in `find . -maxdepth 1 -type f -name '*.gcov'` ; do + mv "${f}" "${f}.${suffix}" +done diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in index abd5e6fc..c7e2e872 100644 --- a/doc/jemalloc.xml.in +++ b/doc/jemalloc.xml.in @@ -33,11 +33,17 @@ aligned_alloc realloc free - malloc_usable_size - malloc_stats_print + mallocx + rallocx + xallocx + sallocx + dallocx + nallocx mallctl mallctlnametomib mallctlbymib + malloc_stats_print + malloc_usable_size allocm rallocm sallocm @@ -92,16 +98,37 @@ Non-standard API - size_t malloc_usable_size - const void *ptr + void *mallocx + size_t size + int flags - void malloc_stats_print - void (*write_cb) - void *, const char * - - void *cbopaque - const char *opts + void *rallocx + void *ptr + size_t size + int flags + + + size_t xallocx + void *ptr + size_t size + size_t extra + int flags + + + size_t sallocx + void *ptr + int flags + + + void dallocx + void *ptr + int flags + + + size_t nallocx + size_t size + int flags int mallctl @@ -126,6 +153,18 @@ void *newp size_t newlen + + void malloc_stats_print + void (*write_cb) + void *, const char * + + void *cbopaque + const char *opts + + + size_t malloc_usable_size + const void *ptr + void (*malloc_message) void *cbopaque @@ -225,41 +264,102 @@ Non-standard API + The mallocx, + rallocx, + xallocx, + sallocx, + dallocx, and + nallocx functions all have a + flags argument that can be used to specify + options. The functions only check the options that are contextually + relevant. Use bitwise or (|) operations to + specify one or more of the following: + + + MALLOCX_LG_ALIGN(la) + - The malloc_usable_size function - returns the usable size of the allocation pointed to by - ptr. The return value may be larger than the size - that was requested during allocation. The - malloc_usable_size function is not a - mechanism for in-place realloc; rather - it is provided solely as a tool for introspection purposes. Any - discrepancy between the requested allocation size and the size reported - by malloc_usable_size should not be - depended on, since such behavior is entirely implementation-dependent. + Align the memory allocation to start at an address + that is a multiple of (1 << + la). This macro does not validate + that la is within the valid + range. 
+ + + MALLOCX_ALIGN(a) + + + Align the memory allocation to start at an address + that is a multiple of a, where + a is a power of two. This macro does not + validate that a is a power of 2. + + + + MALLOCX_ZERO + + Initialize newly allocated memory to contain zero + bytes. In the growing reallocation case, the real size prior to + reallocation defines the boundary between untouched bytes and those + that are initialized to contain zero bytes. If this macro is + absent, newly allocated memory is uninitialized. + + + MALLOCX_ARENA(a) + + + Use the arena specified by the index + a (and by necessity bypass the thread + cache). This macro has no effect for huge regions, nor for regions + that were allocated via an arena other than the one specified. + This macro does not validate that a + specifies an arena index in the valid range. + + - The malloc_stats_print function - writes human-readable summary statistics via the - write_cb callback function pointer and - cbopaque data passed to - write_cb, or - malloc_message if - write_cb is NULL. This - function can be called repeatedly. General information that never - changes during execution can be omitted by specifying "g" as a character - within the opts string. Note that - malloc_message uses the - mallctl* functions internally, so - inconsistent statistics can be reported if multiple threads use these - functions simultaneously. If is - specified during configuration, “m” and “a” can - be specified to omit merged arena and per arena statistics, respectively; - “b” and “l” can be specified to omit per size - class statistics for bins and large objects, respectively. Unrecognized - characters are silently ignored. Note that thread caching may prevent - some statistics from being completely up to date, since extra locking - would be required to merge counters that track thread cache operations. - + The mallocx function allocates at + least size bytes of memory, and returns a pointer + to the base address of the allocation. Behavior is undefined if + size is 0, or if request size + overflows due to size class and/or alignment constraints. + + The rallocx function resizes the + allocation at ptr to be at least + size bytes, and returns a pointer to the base + address of the resulting allocation, which may or may not have moved from + its original location. Behavior is undefined if + size is 0, or if request size + overflows due to size class and/or alignment constraints. + + The xallocx function resizes the + allocation at ptr in place to be at least + size bytes, and returns the real size of the + allocation. If extra is non-zero, an attempt is + made to resize the allocation to be at least (size + + extra) bytes, though inability to allocate + the extra byte(s) will not by itself result in failure to resize. + Behavior is undefined if size is + 0, or if (size + extra + > SIZE_T_MAX). + + The sallocx function returns the + real size of the allocation at ptr. + + The dallocx function causes the + memory referenced by ptr to be made available for + future allocations. + + The nallocx function allocates no + memory, but it performs the same size computation as the + mallocx function, and returns the real + size of the allocation that would result from the equivalent + mallocx function call. Behavior is + undefined if size is 0, or if + request size overflows due to size class and/or alignment + constraints. 
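A hedged usage sketch of the *allocx() functions documented above (not part of this patch; the sizes, alignment, and flags are illustrative):

	#include <assert.h>
	#include <jemalloc/jemalloc.h>

	int
	main(void)
	{
		int flags = MALLOCX_LG_ALIGN(12) | MALLOCX_ZERO;

		/* nallocx() performs mallocx()'s size computation without
		 * allocating. */
		size_t usize = nallocx(1000, flags);

		/* At least 1000 zeroed bytes, aligned to (1 << 12). */
		void *p = mallocx(1000, flags);
		if (p == NULL)
			return (1);
		/* Real size matches nallocx()'s prediction. */
		assert(sallocx(p, 0) == usize);

		/* Try to grow in place to 2000 bytes, plus up to 500 extra. */
		if (xallocx(p, 2000, 500, flags) < 2000) {
			/* Could not resize in place; rallocx() may move the
			 * object. */
			void *q = rallocx(p, 2000, flags);
			if (q == NULL) {
				dallocx(p, flags);
				return (1);
			}
			p = q;
		}
		dallocx(p, flags);
		return (0);
	}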
The mallctl function provides a general interface for introspecting the memory allocator, as well as @@ -297,15 +397,14 @@ it is legitimate to construct code like the following: + + The malloc_stats_print function + writes human-readable summary statistics via the + write_cb callback function pointer and + cbopaque data passed to + write_cb, or + malloc_message if + write_cb is NULL. This + function can be called repeatedly. General information that never + changes during execution can be omitted by specifying "g" as a character + within the opts string. Note that + malloc_message uses the + mallctl* functions internally, so + inconsistent statistics can be reported if multiple threads use these + functions simultaneously. If --enable-stats is + specified during configuration, “m” and “a” can + be specified to omit merged arena and per arena statistics, respectively; + “b” and “l” can be specified to omit per size + class statistics for bins and large objects, respectively. Unrecognized + characters are silently ignored. Note that thread caching may prevent + some statistics from being completely up to date, since extra locking + would be required to merge counters that track thread cache operations. + + + The malloc_usable_size function + returns the usable size of the allocation pointed to by + ptr. The return value may be larger than the size + that was requested during allocation. The + malloc_usable_size function is not a + mechanism for in-place realloc; rather + it is provided solely as a tool for introspection purposes. Any + discrepancy between the requested allocation size and the size reported + by malloc_usable_size should not be + depended on, since such behavior is entirely implementation-dependent. + Experimental API @@ -358,7 +492,7 @@ for (i = 0; i < nbins; i++) { Initialize newly allocated memory to contain zero bytes. In the growing reallocation case, the real size prior to reallocation defines the boundary between untouched bytes and those - that are initialized to contain zero bytes. If this option is + that are initialized to contain zero bytes. If this macro is absent, newly allocated memory is uninitialized. @@ -373,9 +507,11 @@ for (i = 0; i < nbins; i++) { Use the arena specified by the index - a. This macro does not validate that - a specifies an arena in the valid - range. + a (and by necessity bypass the thread + cache). This macro has no effect for huge regions, nor for regions + that were allocated via an arena other than the one specified. + This macro does not validate that a + specifies an arena index in the valid range. @@ -385,8 +521,9 @@ for (i = 0; i < nbins; i++) { *ptr to the base address of the allocation, and sets *rsize to the real size of the allocation if rsize is not NULL. Behavior - is undefined if size is - 0. + is undefined if size is 0, or + if request size overflows due to size class and/or alignment + constraints. The rallocm function resizes the allocation at *ptr to be at least @@ -396,11 +533,12 @@ for (i = 0; i < nbins; i++) { rsize is not NULL. If extra is non-zero, an attempt is made to resize the allocation to be at least size + + (size + extra) bytes, though inability to allocate the extra byte(s) will not by itself result in failure. Behavior is - undefined if size is 0, or if - (size + + undefined if size is 0, if + request size overflows due to size class and/or alignment constraints, or + if (size + extra > SIZE_T_MAX).
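Since the experimental rallocm() just described is superseded by the *allocx() functions, a hedged migration sketch may be useful here; resize_with_extra() is a hypothetical helper that approximates rallocm(&p, &rsize, size, extra, 0):

	#include <stddef.h>
	#include <jemalloc/jemalloc.h>

	/*
	 * Hypothetical helper: try an in-place resize toward
	 * (size + extra) first, then fall back to a possibly-moving
	 * rallocx(), mirroring rallocm()'s best-effort "extra" semantics.
	 */
	static void *
	resize_with_extra(void *p, size_t size, size_t extra, size_t *rsize)
	{
		*rsize = xallocx(p, size, extra, 0);
		if (*rsize >= size)
			return (p);		/* Resized in place. */
		p = rallocx(p, size, 0);	/* May move the allocation. */
		if (p != NULL)
			*rsize = sallocx(p, 0);
		return (p);
	}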
@@ -417,8 +555,9 @@ for (i = 0; i < nbins; i++) { rsize is not NULL it sets *rsize to the real size of the allocation that would result from the equivalent allocm - function call. Behavior is undefined if - size is 0. + function call. Behavior is undefined if size is + 0, or if request size overflows due to size class + and/or alignment constraints. @@ -605,7 +744,7 @@ for (i = 0; i < nbins; i++) { which controls refreshing of cached dynamic statistics. - + version (const char *) @@ -626,7 +765,7 @@ for (i = 0; i < nbins; i++) { detecting whether another thread caused a refresh. - + config.debug (bool) @@ -636,7 +775,7 @@ for (i = 0; i < nbins; i++) { build configuration. - + config.dss (bool) @@ -646,7 +785,7 @@ for (i = 0; i < nbins; i++) { build configuration. - + config.fill (bool) @@ -656,7 +795,7 @@ for (i = 0; i < nbins; i++) { build configuration. - + config.lazy_lock (bool) @@ -666,7 +805,7 @@ for (i = 0; i < nbins; i++) { during build configuration. - + config.mremap (bool) @@ -676,7 +815,7 @@ for (i = 0; i < nbins; i++) { build configuration. - + config.munmap (bool) @@ -686,7 +825,7 @@ for (i = 0; i < nbins; i++) { build configuration. - + config.prof (bool) @@ -696,7 +835,7 @@ for (i = 0; i < nbins; i++) { build configuration. - + config.prof_libgcc (bool) @@ -706,7 +845,7 @@ for (i = 0; i < nbins; i++) { specified during build configuration. - + config.prof_libunwind (bool) @@ -716,7 +855,7 @@ for (i = 0; i < nbins; i++) { during build configuration. - + config.stats (bool) @@ -726,7 +865,7 @@ for (i = 0; i < nbins; i++) { build configuration. - + config.tcache (bool) @@ -736,7 +875,7 @@ for (i = 0; i < nbins; i++) { during build configuration. - + config.tls (bool) @@ -746,7 +885,7 @@ for (i = 0; i < nbins; i++) { build configuration. - + config.utrace (bool) @@ -756,7 +895,7 @@ for (i = 0; i < nbins; i++) { build configuration. - + config.valgrind (bool) @@ -766,7 +905,7 @@ for (i = 0; i < nbins; i++) { build configuration. - + config.xmalloc (bool) @@ -791,19 +930,6 @@ for (i = 0; i < nbins; i++) { - - - opt.lg_chunk - (size_t) - r- - - Virtual memory chunk size (log base 2). If a chunk - size outside the supported size range is specified, the size is - silently clipped to the minimum/maximum supported size. The default - chunk size is 4 MiB (2^22). - - - opt.dss @@ -815,7 +941,23 @@ for (i = 0; i < nbins; i++) { related to mmap 2 allocation. The following settings are supported: “disabled”, “primary”, - and “secondary” (default). + and “secondary”. The default is “secondary” if + config.dss is + true, “disabled” otherwise. + + + + + + opt.lg_chunk + (size_t) + r- + + Virtual memory chunk size (log base 2). If a chunk + size outside the supported size range is specified, the size is + silently clipped to the minimum/maximum supported size. The default + chunk size is 4 MiB (2^22). + @@ -934,7 +1076,8 @@ for (i = 0; i < nbins; i++) { Zero filling enabled/disabled. If enabled, each byte of uninitialized allocated memory will be initialized to 0. Note that this initialization only happens once for each byte, so - realloc and + realloc, + rallocx and rallocm calls do not zero memory that was previously allocated. This is intended for debugging and will impact performance negatively. This option is disabled by default. @@ -1063,7 +1206,7 @@ malloc_conf = "xmalloc:true";]]> opt.prof_active (bool) - r- + rw [] Profiling activated/deactivated. This is a secondary @@ -1175,7 +1318,7 @@ malloc_conf = "xmalloc:true";]]> by default. 
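Most of the opt.* mallctls above are read-only; their values are chosen at startup, as in the document's own malloc_conf = "xmalloc:true" example. A hedged sketch using the options documented above (values are illustrative, and zero:true additionally requires --enable-fill):

	#include <jemalloc/jemalloc.h>

	/*
	 * Hedged sketch: equivalent to running the program with
	 * MALLOC_CONF="dss:primary,lg_chunk:24,zero:true" in the
	 * environment.
	 */
	const char *malloc_conf = "dss:primary,lg_chunk:24,zero:true";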
- + thread.arena (unsigned) @@ -1202,7 +1345,7 @@ malloc_conf = "xmalloc:true";]]> cases. - + thread.allocatedp (uint64_t *) @@ -1229,7 +1372,7 @@ malloc_conf = "xmalloc:true";]]> cases. - + thread.deallocatedp (uint64_t *) @@ -1243,7 +1386,7 @@ malloc_conf = "xmalloc:true";]]> mallctl* calls. - + thread.tcache.enabled (bool) @@ -1257,7 +1400,7 @@ malloc_conf = "xmalloc:true";]]> - + thread.tcache.flush (void) @@ -1323,7 +1466,7 @@ malloc_conf = "xmalloc:true";]]> initialized. - + arenas.quantum (size_t) @@ -1332,7 +1475,7 @@ malloc_conf = "xmalloc:true";]]> Quantum size. - + arenas.page (size_t) @@ -1341,7 +1484,7 @@ malloc_conf = "xmalloc:true";]]> Page size. - + arenas.tcache_max (size_t) @@ -1351,7 +1494,7 @@ malloc_conf = "xmalloc:true";]]> Maximum thread-cached size class. - + arenas.nbins (unsigned) @@ -1360,7 +1503,7 @@ malloc_conf = "xmalloc:true";]]> Number of bin size classes. - + arenas.nhbins (unsigned) @@ -1380,7 +1523,7 @@ malloc_conf = "xmalloc:true";]]> Maximum size supported by size class. - + arenas.bin.<i>.nregs (uint32_t) @@ -1389,7 +1532,7 @@ malloc_conf = "xmalloc:true";]]> Number of regions per page run. - + arenas.bin.<i>.run_size (size_t) @@ -1398,7 +1541,7 @@ malloc_conf = "xmalloc:true";]]> Number of bytes per page run. - + arenas.nlruns (size_t) @@ -1407,7 +1550,7 @@ malloc_conf = "xmalloc:true";]]> Total number of large size classes. - + arenas.lrun.<i>.size (size_t) @@ -1417,7 +1560,7 @@ malloc_conf = "xmalloc:true";]]> class. - + arenas.purge (unsigned) @@ -1427,7 +1570,7 @@ malloc_conf = "xmalloc:true";]]> for all arenas if none is specified. - + arenas.extend (unsigned) @@ -1451,7 +1594,7 @@ malloc_conf = "xmalloc:true";]]> - + prof.dump (const char *) @@ -1467,7 +1610,7 @@ malloc_conf = "xmalloc:true";]]> option. - + prof.interval (uint64_t) @@ -1527,7 +1670,7 @@ malloc_conf = "xmalloc:true";]]> entirely devoted to allocator metadata. - + stats.mapped (size_t) @@ -1541,7 +1684,7 @@ malloc_conf = "xmalloc:true";]]> does not include inactive chunks. - + stats.chunks.current (size_t) @@ -1553,7 +1696,7 @@ malloc_conf = "xmalloc:true";]]> - + stats.chunks.total (uint64_t) @@ -1563,7 +1706,7 @@ malloc_conf = "xmalloc:true";]]> Cumulative number of chunks allocated. - + stats.chunks.high (size_t) @@ -1574,7 +1717,7 @@ malloc_conf = "xmalloc:true";]]> - + stats.huge.allocated (size_t) @@ -1585,7 +1728,7 @@ malloc_conf = "xmalloc:true";]]> - + stats.huge.nmalloc (uint64_t) @@ -1596,7 +1739,7 @@ malloc_conf = "xmalloc:true";]]> - + stats.huge.ndalloc (uint64_t) @@ -1607,7 +1750,7 @@ malloc_conf = "xmalloc:true";]]> - + stats.arenas.<i>.dss (const char *) @@ -1621,7 +1764,7 @@ malloc_conf = "xmalloc:true";]]> - + stats.arenas.<i>.nthreads (unsigned) @@ -1631,7 +1774,7 @@ malloc_conf = "xmalloc:true";]]> arena. - + stats.arenas.<i>.pactive (size_t) @@ -1652,7 +1795,7 @@ malloc_conf = "xmalloc:true";]]> similar has not been called. - + stats.arenas.<i>.mapped (size_t) @@ -1662,7 +1805,7 @@ malloc_conf = "xmalloc:true";]]> Number of mapped bytes. - + stats.arenas.<i>.npurge (uint64_t) @@ -1673,7 +1816,7 @@ malloc_conf = "xmalloc:true";]]> - + stats.arenas.<i>.nmadvise (uint64_t) @@ -1685,9 +1828,9 @@ malloc_conf = "xmalloc:true";]]> similar calls made to purge dirty pages. - + - stats.arenas.<i>.npurged + stats.arenas.<i>.purged (uint64_t) r- [] @@ -1695,7 +1838,7 @@ malloc_conf = "xmalloc:true";]]> Number of pages purged. 
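A hedged sketch of the pointer-caching idiom behind the thread.allocatedp/thread.deallocatedp mallctls listed above: fetch the pointers once per thread, then read the counters directly to avoid repeated mallctl() overhead (requires --enable-stats):

	#include <inttypes.h>
	#include <stdio.h>
	#include <jemalloc/jemalloc.h>

	static void
	report_thread_counters(void)
	{
		uint64_t *allocatedp, *deallocatedp;
		size_t sz = sizeof(uint64_t *);

		/* The oldp result is the pointer itself. */
		if (mallctl("thread.allocatedp", &allocatedp, &sz, NULL,
		    0) != 0 || mallctl("thread.deallocatedp", &deallocatedp,
		    &sz, NULL, 0) != 0)
			return;

		/* The pointers remain valid for this thread's lifetime. */
		printf("allocated=%"PRIu64" deallocated=%"PRIu64"\n",
		    *allocatedp, *deallocatedp);
	}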
- + stats.arenas.<i>.small.allocated (size_t) @@ -1706,7 +1849,7 @@ malloc_conf = "xmalloc:true";]]> - + stats.arenas.<i>.small.nmalloc (uint64_t) @@ -1717,7 +1860,7 @@ malloc_conf = "xmalloc:true";]]> small bins. - + stats.arenas.<i>.small.ndalloc (uint64_t) @@ -1728,7 +1871,7 @@ malloc_conf = "xmalloc:true";]]> - + stats.arenas.<i>.small.nrequests (uint64_t) @@ -1739,7 +1882,7 @@ malloc_conf = "xmalloc:true";]]> - + stats.arenas.<i>.large.allocated (size_t) @@ -1750,7 +1893,7 @@ malloc_conf = "xmalloc:true";]]> - + stats.arenas.<i>.large.nmalloc (uint64_t) @@ -1761,7 +1904,7 @@ malloc_conf = "xmalloc:true";]]> directly by the arena. - + stats.arenas.<i>.large.ndalloc (uint64_t) @@ -1772,7 +1915,7 @@ malloc_conf = "xmalloc:true";]]> directly by the arena. - + stats.arenas.<i>.large.nrequests (uint64_t) @@ -1783,7 +1926,7 @@ malloc_conf = "xmalloc:true";]]> - + stats.arenas.<i>.bins.<j>.allocated (size_t) @@ -1794,7 +1937,7 @@ malloc_conf = "xmalloc:true";]]> bin. - + stats.arenas.<i>.bins.<j>.nmalloc (uint64_t) @@ -1805,7 +1948,7 @@ malloc_conf = "xmalloc:true";]]> - + stats.arenas.<i>.bins.<j>.ndalloc (uint64_t) @@ -1816,7 +1959,7 @@ malloc_conf = "xmalloc:true";]]> - + stats.arenas.<i>.bins.<j>.nrequests (uint64_t) @@ -1827,7 +1970,7 @@ malloc_conf = "xmalloc:true";]]> requests. - + stats.arenas.<i>.bins.<j>.nfills (uint64_t) @@ -1837,7 +1980,7 @@ malloc_conf = "xmalloc:true";]]> Cumulative number of tcache fills. - + stats.arenas.<i>.bins.<j>.nflushes (uint64_t) @@ -1847,7 +1990,7 @@ malloc_conf = "xmalloc:true";]]> Cumulative number of tcache flushes. - + stats.arenas.<i>.bins.<j>.nruns (uint64_t) @@ -1857,7 +2000,7 @@ malloc_conf = "xmalloc:true";]]> Cumulative number of runs created. - + stats.arenas.<i>.bins.<j>.nreruns (uint64_t) @@ -1868,7 +2011,7 @@ malloc_conf = "xmalloc:true";]]> to allocate changed. - + stats.arenas.<i>.bins.<j>.curruns (size_t) @@ -1878,7 +2021,7 @@ malloc_conf = "xmalloc:true";]]> Current number of runs. - + stats.arenas.<i>.lruns.<j>.nmalloc (uint64_t) @@ -1889,7 +2032,7 @@ malloc_conf = "xmalloc:true";]]> class served directly by the arena. - + stats.arenas.<i>.lruns.<j>.ndalloc (uint64_t) @@ -1900,7 +2043,7 @@ malloc_conf = "xmalloc:true";]]> size class served directly by the arena. - + stats.arenas.<i>.lruns.<j>.nrequests (uint64_t) @@ -1911,7 +2054,7 @@ malloc_conf = "xmalloc:true";]]> class. - + stats.arenas.<i>.lruns.<j>.curruns (size_t) @@ -2037,9 +2180,26 @@ malloc_conf = "xmalloc:true";]]> Non-standard API - The malloc_usable_size function - returns the usable size of the allocation pointed to by - ptr. + The mallocx and + rallocx functions return a pointer to + the allocated memory if successful; otherwise a NULL + pointer is returned to indicate insufficient contiguous memory was + available to service the allocation request. + + The xallocx function returns the + real size of the resulting resized allocation pointed to by + ptr, which is a value less than + size if the allocation could not be adequately + grown in place. + + The sallocx function returns the + real size of the allocation pointed to by ptr. + + + The nallocx returns the real size + that would result from a successful equivalent + mallocx function call, or zero if + insufficient memory is available to perform the size computation. The mallctl, mallctlnametomib, and @@ -2056,12 +2216,6 @@ malloc_conf = "xmalloc:true";]]> is too large or too small; in this case as much data as possible are read despite the error. - - ENOMEM - - *oldlenp is too short to - hold the requested value. 
- ENOENT @@ -2090,6 +2244,10 @@ malloc_conf = "xmalloc:true";]]> + + The malloc_usable_size function + returns the usable size of the allocation pointed to by + ptr. Experimental API diff --git a/include/jemalloc/internal/arena.h b/include/jemalloc/internal/arena.h index bbcfedac..9d000c03 100644 --- a/include/jemalloc/internal/arena.h +++ b/include/jemalloc/internal/arena.h @@ -158,6 +158,7 @@ struct arena_chunk_map_s { }; typedef rb_tree(arena_chunk_map_t) arena_avail_tree_t; typedef rb_tree(arena_chunk_map_t) arena_run_tree_t; +typedef ql_head(arena_chunk_map_t) arena_chunk_mapelms_t; /* Arena chunk header. */ struct arena_chunk_s { @@ -174,11 +175,12 @@ struct arena_chunk_s { size_t nruns_avail; /* - * Number of available run adjacencies. Clean and dirty available runs - * are not coalesced, which causes virtual memory fragmentation. The - * ratio of (nruns_avail-nruns_adjac):nruns_adjac is used for tracking - * this fragmentation. - * */ + * Number of available run adjacencies that purging could coalesce. + * Clean and dirty available runs are not coalesced, which causes + * virtual memory fragmentation. The ratio of + * (nruns_avail-nruns_adjac):nruns_adjac is used for tracking this + * fragmentation. + */ size_t nruns_adjac; /* @@ -404,7 +406,16 @@ void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind, uint64_t prof_accumbytes); void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero); +#ifdef JEMALLOC_JET +typedef void (arena_redzone_corruption_t)(void *, size_t, bool, size_t, + uint8_t); +extern arena_redzone_corruption_t *arena_redzone_corruption; +typedef void (arena_dalloc_junk_small_t)(void *, arena_bin_info_t *); +extern arena_dalloc_junk_small_t *arena_dalloc_junk_small; +#else void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info); +#endif +void arena_quarantine_junk_small(void *ptr, size_t usize); void *arena_malloc_small(arena_t *arena, size_t size, bool zero); void *arena_malloc_large(arena_t *arena, size_t size, bool zero); void *arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero); @@ -415,10 +426,18 @@ void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr, size_t pageind, arena_chunk_map_t *mapelm); void arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr, size_t pageind); +#ifdef JEMALLOC_JET +typedef void (arena_dalloc_junk_large_t)(void *, size_t); +extern arena_dalloc_junk_large_t *arena_dalloc_junk_large; +#endif void arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr); void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr); -void *arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, +#ifdef JEMALLOC_JET +typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t); +extern arena_ralloc_junk_large_t *arena_ralloc_junk_large; +#endif +bool arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra, bool zero); void *arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size, size_t extra, size_t alignment, bool zero, bool try_tcache_alloc, @@ -473,7 +492,7 @@ size_t arena_bin_index(arena_t *arena, arena_bin_t *bin); unsigned arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr); prof_ctx_t *arena_prof_ctx_get(const void *ptr); -void arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx); +void arena_prof_ctx_set(const void *ptr, size_t usize, prof_ctx_t *ctx); void *arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache); size_t 
arena_salloc(const void *ptr, bool demote); void arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr, @@ -885,10 +904,10 @@ arena_prof_ctx_get(const void *ptr) } JEMALLOC_INLINE void -arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx) +arena_prof_ctx_set(const void *ptr, size_t usize, prof_ctx_t *ctx) { arena_chunk_t *chunk; - size_t pageind, mapbits; + size_t pageind; cassert(config_prof); assert(ptr != NULL); @@ -896,10 +915,17 @@ arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx) chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - mapbits = arena_mapbits_get(chunk, pageind); - assert((mapbits & CHUNK_MAP_ALLOCATED) != 0); - if ((mapbits & CHUNK_MAP_LARGE) == 0) { + assert(arena_mapbits_allocated_get(chunk, pageind) != 0); + + if (usize > SMALL_MAXCLASS || (prof_promote && + ((uintptr_t)ctx != (uintptr_t)1U || arena_mapbits_large_get(chunk, + pageind) != 0))) { + assert(arena_mapbits_large_get(chunk, pageind) != 0); + arena_mapp_get(chunk, pageind)->prof_ctx = ctx; + } else { + assert(arena_mapbits_large_get(chunk, pageind) == 0); if (prof_promote == false) { + size_t mapbits = arena_mapbits_get(chunk, pageind); arena_run_t *run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind - (mapbits >> LG_PAGE)) << LG_PAGE)); @@ -911,12 +937,11 @@ arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx) bin_info = &arena_bin_info[binind]; regind = arena_run_regind(run, bin_info, ptr); - *((prof_ctx_t **)((uintptr_t)run + bin_info->ctx0_offset - + (regind * sizeof(prof_ctx_t *)))) = ctx; - } else - assert((uintptr_t)ctx == (uintptr_t)1U); - } else - arena_mapp_get(chunk, pageind)->prof_ctx = ctx; + *((prof_ctx_t **)((uintptr_t)run + + bin_info->ctx0_offset + (regind * sizeof(prof_ctx_t + *)))) = ctx; + } + } } JEMALLOC_ALWAYS_INLINE void * diff --git a/include/jemalloc/internal/chunk_dss.h b/include/jemalloc/internal/chunk_dss.h index 6585f071..4535ce09 100644 --- a/include/jemalloc/internal/chunk_dss.h +++ b/include/jemalloc/internal/chunk_dss.h @@ -7,7 +7,7 @@ typedef enum { dss_prec_secondary = 2, dss_prec_limit = 3 -} dss_prec_t ; +} dss_prec_t; #define DSS_PREC_DEFAULT dss_prec_secondary #define DSS_DEFAULT "secondary" diff --git a/include/jemalloc/internal/ckh.h b/include/jemalloc/internal/ckh.h index 50c39ed9..58712a6a 100644 --- a/include/jemalloc/internal/ckh.h +++ b/include/jemalloc/internal/ckh.h @@ -17,7 +17,7 @@ typedef bool ckh_keycomp_t (const void *, const void *); * There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket. Try to fit * one bucket per L1 cache line. 
*/ -#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1) +#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1) #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ diff --git a/include/jemalloc/internal/hash.h b/include/jemalloc/internal/hash.h index 56ecc793..09b69df5 100644 --- a/include/jemalloc/internal/hash.h +++ b/include/jemalloc/internal/hash.h @@ -19,6 +19,11 @@ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE +uint32_t hash_x86_32(const void *key, int len, uint32_t seed); +void hash_x86_128(const void *key, const int len, uint32_t seed, + uint64_t r_out[2]); +void hash_x64_128(const void *key, const int len, const uint32_t seed, + uint64_t r_out[2]); void hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2]); #endif @@ -43,14 +48,14 @@ JEMALLOC_INLINE uint32_t hash_get_block_32(const uint32_t *p, int i) { - return p[i]; + return (p[i]); } JEMALLOC_INLINE uint64_t hash_get_block_64(const uint64_t *p, int i) { - return p[i]; + return (p[i]); } JEMALLOC_INLINE uint32_t @@ -63,7 +68,7 @@ hash_fmix_32(uint32_t h) h *= 0xc2b2ae35; h ^= h >> 16; - return h; + return (h); } JEMALLOC_INLINE uint64_t @@ -76,7 +81,7 @@ hash_fmix_64(uint64_t k) k *= QU(0xc4ceb9fe1a85ec53LLU); k ^= k >> 33; - return k; + return (k); } JEMALLOC_INLINE uint32_t @@ -127,12 +132,12 @@ hash_x86_32(const void *key, int len, uint32_t seed) h1 = hash_fmix_32(h1); - return h1; + return (h1); } UNUSED JEMALLOC_INLINE void hash_x86_128(const void *key, const int len, uint32_t seed, - uint64_t r_out[2]) + uint64_t r_out[2]) { const uint8_t * data = (const uint8_t *) key; const int nblocks = len / 16; @@ -234,7 +239,7 @@ hash_x86_128(const void *key, const int len, uint32_t seed, UNUSED JEMALLOC_INLINE void hash_x64_128(const void *key, const int len, const uint32_t seed, - uint64_t r_out[2]) + uint64_t r_out[2]) { const uint8_t *data = (const uint8_t *) key; const int nblocks = len / 16; @@ -310,7 +315,6 @@ hash_x64_128(const void *key, const int len, const uint32_t seed, r_out[1] = h2; } - /******************************************************************************/ /* API. 
*/ JEMALLOC_INLINE void diff --git a/include/jemalloc/internal/huge.h b/include/jemalloc/internal/huge.h index d987d370..ddf13138 100644 --- a/include/jemalloc/internal/huge.h +++ b/include/jemalloc/internal/huge.h @@ -19,10 +19,14 @@ extern malloc_mutex_t huge_mtx; void *huge_malloc(size_t size, bool zero); void *huge_palloc(size_t size, size_t alignment, bool zero); -void *huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, +bool huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra); void *huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra, size_t alignment, bool zero, bool try_tcache_dalloc); +#ifdef JEMALLOC_JET +typedef void (huge_dalloc_junk_t)(void *, size_t); +extern huge_dalloc_junk_t *huge_dalloc_junk; +#endif void huge_dalloc(void *ptr, bool unmap); size_t huge_salloc(const void *ptr); prof_ctx_t *huge_prof_ctx_get(const void *ptr); diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in index 53c135c2..d24a1fe6 100644 --- a/include/jemalloc/internal/jemalloc_internal.h.in +++ b/include/jemalloc/internal/jemalloc_internal.h.in @@ -1,5 +1,5 @@ #ifndef JEMALLOC_INTERNAL_H -#define JEMALLOC_INTERNAL_H +#define JEMALLOC_INTERNAL_H #include #ifdef _WIN32 # include @@ -54,8 +54,7 @@ typedef intptr_t ssize_t; #endif #include -#define JEMALLOC_NO_DEMANGLE -#include "../jemalloc@install_suffix@.h" +#include "jemalloc_internal_defs.h" #ifdef JEMALLOC_UTRACE #include @@ -66,13 +65,18 @@ typedef intptr_t ssize_t; #include #endif -#include "jemalloc/internal/private_namespace.h" - -#ifdef JEMALLOC_CC_SILENCE -#define UNUSED JEMALLOC_ATTR(unused) +#define JEMALLOC_NO_DEMANGLE +#ifdef JEMALLOC_JET +# define JEMALLOC_N(n) jet_##n +# include "jemalloc/internal/public_namespace.h" +# define JEMALLOC_NO_RENAME +# include "../jemalloc@install_suffix@.h" +# undef JEMALLOC_NO_RENAME #else -#define UNUSED +# define JEMALLOC_N(n) @private_namespace@##n +# include "../jemalloc@install_suffix@.h" #endif +#include "jemalloc/internal/private_namespace.h" static const bool config_debug = #ifdef JEMALLOC_DEBUG @@ -221,48 +225,13 @@ static const bool config_ivsalloc = * JEMALLOC_H_INLINES : Inline functions. */ /******************************************************************************/ -#define JEMALLOC_H_TYPES +#define JEMALLOC_H_TYPES +#include "jemalloc/internal/jemalloc_internal_macros.h" + +#define MALLOCX_LG_ALIGN_MASK ((int)0x3f) #define ALLOCM_LG_ALIGN_MASK ((int)0x3f) -#define ZU(z) ((size_t)z) -#define QU(q) ((uint64_t)q) - -#ifndef __DECONST -# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var)) -#endif - -/* - * JEMALLOC_ALWAYS_INLINE is used within header files for functions that are - * static inline functions if inlining is enabled, and single-definition - * library-private functions if inlining is disabled. - * - * JEMALLOC_ALWAYS_INLINE_C is for use in .c files, in which case the denoted - * functions are always static, regardless of whether inlining is enabled. - */ -#ifdef JEMALLOC_DEBUG - /* Disable inlining to make debugging easier. 
*/ -# define JEMALLOC_ALWAYS_INLINE -# define JEMALLOC_ALWAYS_INLINE_C static -# define JEMALLOC_INLINE -# define inline -#else -# define JEMALLOC_ENABLE_INLINE -# ifdef JEMALLOC_HAVE_ATTR -# define JEMALLOC_ALWAYS_INLINE \ - static inline JEMALLOC_ATTR(unused) JEMALLOC_ATTR(always_inline) -# define JEMALLOC_ALWAYS_INLINE_C \ - static inline JEMALLOC_ATTR(always_inline) -# else -# define JEMALLOC_ALWAYS_INLINE static inline -# define JEMALLOC_ALWAYS_INLINE_C static inline -# endif -# define JEMALLOC_INLINE static inline -# ifdef _MSC_VER -# define inline _inline -# endif -#endif - /* Smallest size class to support. */ #define LG_TINY_MIN 3 #define TINY_MIN (1U << LG_TINY_MIN) @@ -493,7 +462,7 @@ static const bool config_ivsalloc = #undef JEMALLOC_H_TYPES /******************************************************************************/ -#define JEMALLOC_H_STRUCTS +#define JEMALLOC_H_STRUCTS #include "jemalloc/internal/util.h" #include "jemalloc/internal/atomic.h" @@ -522,14 +491,14 @@ typedef struct { uint64_t deallocated; } thread_allocated_t; /* - * The JEMALLOC_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro + * The JEMALLOC_ARG_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro * argument. */ -#define THREAD_ALLOCATED_INITIALIZER JEMALLOC_CONCAT({0, 0}) +#define THREAD_ALLOCATED_INITIALIZER JEMALLOC_ARG_CONCAT({0, 0}) #undef JEMALLOC_H_STRUCTS /******************************************************************************/ -#define JEMALLOC_H_EXTERNS +#define JEMALLOC_H_EXTERNS extern bool opt_abort; extern bool opt_junk; @@ -589,7 +558,7 @@ void jemalloc_postfork_child(void); #undef JEMALLOC_H_EXTERNS /******************************************************************************/ -#define JEMALLOC_H_INLINES +#define JEMALLOC_H_INLINES #include "jemalloc/internal/util.h" #include "jemalloc/internal/atomic.h" @@ -764,32 +733,36 @@ choose_arena(arena_t *arena) #include "jemalloc/internal/quarantine.h" #ifndef JEMALLOC_ENABLE_INLINE -void *imallocx(size_t size, bool try_tcache, arena_t *arena); +void *imalloct(size_t size, bool try_tcache, arena_t *arena); void *imalloc(size_t size); -void *icallocx(size_t size, bool try_tcache, arena_t *arena); +void *icalloct(size_t size, bool try_tcache, arena_t *arena); void *icalloc(size_t size); -void *ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache, +void *ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache, arena_t *arena); void *ipalloc(size_t usize, size_t alignment, bool zero); size_t isalloc(const void *ptr, bool demote); size_t ivsalloc(const void *ptr, bool demote); size_t u2rz(size_t usize); size_t p2rz(const void *ptr); -void idallocx(void *ptr, bool try_tcache); +void idalloct(void *ptr, bool try_tcache); void idalloc(void *ptr); -void iqallocx(void *ptr, bool try_tcache); +void iqalloct(void *ptr, bool try_tcache); void iqalloc(void *ptr); -void *irallocx(void *ptr, size_t size, size_t extra, size_t alignment, - bool zero, bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc, +void *iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra, + size_t alignment, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena); +void *iralloct(void *ptr, size_t size, size_t extra, size_t alignment, + bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena); void *iralloc(void *ptr, size_t size, size_t extra, size_t alignment, - bool zero, bool no_move); + bool zero); +bool ixalloc(void *ptr, size_t size, size_t extra, size_t alignment, + 
bool zero); malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t) #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_)) JEMALLOC_ALWAYS_INLINE void * -imallocx(size_t size, bool try_tcache, arena_t *arena) +imalloct(size_t size, bool try_tcache, arena_t *arena) { assert(size != 0); @@ -804,11 +777,11 @@ JEMALLOC_ALWAYS_INLINE void * imalloc(size_t size) { - return (imallocx(size, true, NULL)); + return (imalloct(size, true, NULL)); } JEMALLOC_ALWAYS_INLINE void * -icallocx(size_t size, bool try_tcache, arena_t *arena) +icalloct(size_t size, bool try_tcache, arena_t *arena) { if (size <= arena_maxclass) @@ -821,11 +794,11 @@ JEMALLOC_ALWAYS_INLINE void * icalloc(size_t size) { - return (icallocx(size, true, NULL)); + return (icalloct(size, true, NULL)); } JEMALLOC_ALWAYS_INLINE void * -ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache, +ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache, arena_t *arena) { void *ret; @@ -853,7 +826,7 @@ JEMALLOC_ALWAYS_INLINE void * ipalloc(size_t usize, size_t alignment, bool zero) { - return (ipallocx(usize, alignment, zero, true, NULL)); + return (ipalloct(usize, alignment, zero, true, NULL)); } /* @@ -885,7 +858,7 @@ ivsalloc(const void *ptr, bool demote) { /* Return 0 if ptr is not within a chunk managed by jemalloc. */ - if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == NULL) + if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == 0) return (0); return (isalloc(ptr, demote)); @@ -914,7 +887,7 @@ p2rz(const void *ptr) } JEMALLOC_ALWAYS_INLINE void -idallocx(void *ptr, bool try_tcache) +idalloct(void *ptr, bool try_tcache) { arena_chunk_t *chunk; @@ -931,31 +904,63 @@ JEMALLOC_ALWAYS_INLINE void idalloc(void *ptr) { - idallocx(ptr, true); + idalloct(ptr, true); } JEMALLOC_ALWAYS_INLINE void -iqallocx(void *ptr, bool try_tcache) +iqalloct(void *ptr, bool try_tcache) { if (config_fill && opt_quarantine) quarantine(ptr); else - idallocx(ptr, try_tcache); + idalloct(ptr, try_tcache); } JEMALLOC_ALWAYS_INLINE void iqalloc(void *ptr) { - iqallocx(ptr, true); + iqalloct(ptr, true); } JEMALLOC_ALWAYS_INLINE void * -irallocx(void *ptr, size_t size, size_t extra, size_t alignment, bool zero, - bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena) +iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra, + size_t alignment, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, + arena_t *arena) +{ + void *p; + size_t usize, copysize; + + usize = sa2u(size + extra, alignment); + if (usize == 0) + return (NULL); + p = ipalloct(usize, alignment, zero, try_tcache_alloc, arena); + if (p == NULL) { + if (extra == 0) + return (NULL); + /* Try again, without extra this time. */ + usize = sa2u(size, alignment); + if (usize == 0) + return (NULL); + p = ipalloct(usize, alignment, zero, try_tcache_alloc, arena); + if (p == NULL) + return (NULL); + } + /* + * Copy at most size bytes (not size+extra), since the caller has no + * expectation that the extra bytes will be reliably preserved. + */ + copysize = (size < oldsize) ? 
size : oldsize; + memcpy(p, ptr, copysize); + iqalloct(ptr, try_tcache_dalloc); + return (p); +} + +JEMALLOC_ALWAYS_INLINE void * +iralloct(void *ptr, size_t size, size_t extra, size_t alignment, bool zero, + bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena) { - void *ret; size_t oldsize; assert(ptr != NULL); @@ -965,68 +970,50 @@ irallocx(void *ptr, size_t size, size_t extra, size_t alignment, bool zero, if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1)) != 0) { - size_t usize, copysize; - /* * Existing object alignment is inadequate; allocate new space * and copy. */ - if (no_move) - return (NULL); - usize = sa2u(size + extra, alignment); - if (usize == 0) - return (NULL); - ret = ipallocx(usize, alignment, zero, try_tcache_alloc, arena); - if (ret == NULL) { - if (extra == 0) - return (NULL); - /* Try again, without extra this time. */ - usize = sa2u(size, alignment); - if (usize == 0) - return (NULL); - ret = ipallocx(usize, alignment, zero, try_tcache_alloc, - arena); - if (ret == NULL) - return (NULL); - } - /* - * Copy at most size bytes (not size+extra), since the caller - * has no expectation that the extra bytes will be reliably - * preserved. - */ - copysize = (size < oldsize) ? size : oldsize; - memcpy(ret, ptr, copysize); - iqallocx(ptr, try_tcache_dalloc); - return (ret); + return (iralloct_realign(ptr, oldsize, size, extra, alignment, + zero, try_tcache_alloc, try_tcache_dalloc, arena)); } - if (no_move) { - if (size <= arena_maxclass) { - return (arena_ralloc_no_move(ptr, oldsize, size, - extra, zero)); - } else { - return (huge_ralloc_no_move(ptr, oldsize, size, - extra)); - } + if (size + extra <= arena_maxclass) { + return (arena_ralloc(arena, ptr, oldsize, size, extra, + alignment, zero, try_tcache_alloc, + try_tcache_dalloc)); } else { - if (size + extra <= arena_maxclass) { - return (arena_ralloc(arena, ptr, oldsize, size, extra, - alignment, zero, try_tcache_alloc, - try_tcache_dalloc)); - } else { - return (huge_ralloc(ptr, oldsize, size, extra, - alignment, zero, try_tcache_dalloc)); - } + return (huge_ralloc(ptr, oldsize, size, extra, + alignment, zero, try_tcache_dalloc)); } } JEMALLOC_ALWAYS_INLINE void * -iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero, - bool no_move) +iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero) { - return (irallocx(ptr, size, extra, alignment, zero, no_move, true, true, - NULL)); + return (iralloct(ptr, size, extra, alignment, zero, true, true, NULL)); +} + +JEMALLOC_ALWAYS_INLINE bool +ixalloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero) +{ + size_t oldsize; + + assert(ptr != NULL); + assert(size != 0); + + oldsize = isalloc(ptr, config_prof); + if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1)) + != 0) { + /* Existing object alignment is inadequate. 
*/ + return (true); + } + + if (size <= arena_maxclass) + return (arena_ralloc_no_move(ptr, oldsize, size, extra, zero)); + else + return (huge_ralloc_no_move(ptr, oldsize, size, extra)); } malloc_tsd_externs(thread_allocated, thread_allocated_t) diff --git a/include/jemalloc/internal/jemalloc_internal_defs.h.in b/include/jemalloc/internal/jemalloc_internal_defs.h.in new file mode 100644 index 00000000..752bb103 --- /dev/null +++ b/include/jemalloc/internal/jemalloc_internal_defs.h.in @@ -0,0 +1,199 @@ +#ifndef JEMALLOC_INTERNAL_DEFS_H_ +#define JEMALLOC_INTERNAL_DEFS_H_ +/* + * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all + * public APIs to be prefixed. This makes it possible, with some care, to use + * multiple allocators simultaneously. + */ +#undef JEMALLOC_PREFIX +#undef JEMALLOC_CPREFIX + +/* + * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs. + * For shared libraries, symbol visibility mechanisms prevent these symbols + * from being exported, but for static libraries, naming collisions are a real + * possibility. + */ +#undef JEMALLOC_PRIVATE_NAMESPACE + +/* + * Hyper-threaded CPUs may need a special instruction inside spin loops in + * order to yield to another virtual CPU. + */ +#undef CPU_SPINWAIT + +/* Defined if the equivalent of FreeBSD's atomic(9) functions are available. */ +#undef JEMALLOC_ATOMIC9 + +/* + * Defined if OSAtomic*() functions are available, as provided by Darwin, and + * documented in the atomic(3) manual page. + */ +#undef JEMALLOC_OSATOMIC + +/* + * Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and + * __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite + * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the + * functions are defined in libgcc instead of being inlines) + */ +#undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4 + +/* + * Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and + * __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite + * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the + * functions are defined in libgcc instead of being inlines) + */ +#undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8 + +/* + * Defined if OSSpin*() functions are available, as provided by Darwin, and + * documented in the spinlock(3) manual page. + */ +#undef JEMALLOC_OSSPIN + +/* + * Defined if _malloc_thread_cleanup() exists. At least in the case of + * FreeBSD, pthread_key_create() allocates, which if used during malloc + * bootstrapping will cause recursion into the pthreads library. Therefore, if + * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in + * malloc_tsd. + */ +#undef JEMALLOC_MALLOC_THREAD_CLEANUP + +/* + * Defined if threaded initialization is known to be safe on this platform. + * Among other things, it must be possible to initialize a mutex without + * triggering allocation in order for threaded allocation to be safe. + */ +#undef JEMALLOC_THREADED_INIT + +/* + * Defined if the pthreads implementation defines + * _pthread_mutex_init_calloc_cb(), in which case the function is used in order + * to avoid recursive allocation during mutex initialization. + */ +#undef JEMALLOC_MUTEX_INIT_CB + +/* Defined if sbrk() is supported. */ +#undef JEMALLOC_HAVE_SBRK + +/* Non-empty if the tls_model attribute is supported. */ +#undef JEMALLOC_TLS_MODEL + +/* JEMALLOC_CC_SILENCE enables code that silences unuseful compiler warnings. 
*/ +#undef JEMALLOC_CC_SILENCE + +/* JEMALLOC_CODE_COVERAGE enables test code coverage analysis. */ +#undef JEMALLOC_CODE_COVERAGE + +/* + * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables + * inline functions. + */ +#undef JEMALLOC_DEBUG + +/* JEMALLOC_STATS enables statistics calculation. */ +#undef JEMALLOC_STATS + +/* JEMALLOC_PROF enables allocation profiling. */ +#undef JEMALLOC_PROF + +/* Use libunwind for profile backtracing if defined. */ +#undef JEMALLOC_PROF_LIBUNWIND + +/* Use libgcc for profile backtracing if defined. */ +#undef JEMALLOC_PROF_LIBGCC + +/* Use gcc intrinsics for profile backtracing if defined. */ +#undef JEMALLOC_PROF_GCC + +/* + * JEMALLOC_TCACHE enables a thread-specific caching layer for small objects. + * This makes it possible to allocate/deallocate objects without any locking + * when the cache is in the steady state. + */ +#undef JEMALLOC_TCACHE + +/* + * JEMALLOC_DSS enables use of sbrk(2) to allocate chunks from the data storage + * segment (DSS). + */ +#undef JEMALLOC_DSS + +/* Support memory filling (junk/zero/quarantine/redzone). */ +#undef JEMALLOC_FILL + +/* Support utrace(2)-based tracing. */ +#undef JEMALLOC_UTRACE + +/* Support Valgrind. */ +#undef JEMALLOC_VALGRIND + +/* Support optional abort() on OOM. */ +#undef JEMALLOC_XMALLOC + +/* Support lazy locking (avoid locking unless a second thread is launched). */ +#undef JEMALLOC_LAZY_LOCK + +/* One page is 2^STATIC_PAGE_SHIFT bytes. */ +#undef STATIC_PAGE_SHIFT + +/* + * If defined, use munmap() to unmap freed chunks, rather than storing them for + * later reuse. This is disabled by default on Linux because common sequences + * of mmap()/munmap() calls will cause virtual memory map holes. + */ +#undef JEMALLOC_MUNMAP + +/* + * If defined, use mremap(...MREMAP_FIXED...) for huge realloc(). This is + * disabled by default because it is Linux-specific and it will cause virtual + * memory map holes, much like munmap(2) does. + */ +#undef JEMALLOC_MREMAP + +/* TLS is used to map arenas and magazine caches to threads. */ +#undef JEMALLOC_TLS + +/* + * JEMALLOC_IVSALLOC enables ivsalloc(), which verifies that pointers reside + * within jemalloc-owned chunks before dereferencing them. + */ +#undef JEMALLOC_IVSALLOC + +/* + * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings. + */ +#undef JEMALLOC_ZONE +#undef JEMALLOC_ZONE_VERSION + +/* + * Methods for purging unused pages differ between operating systems. + * + * madvise(..., MADV_DONTNEED) : On Linux, this immediately discards pages, + * such that new pages will be demand-zeroed if + * the address region is later touched. + * madvise(..., MADV_FREE) : On FreeBSD and Darwin, this marks pages as being + * unused, such that they will be discarded rather + * than swapped out. + */ +#undef JEMALLOC_PURGE_MADVISE_DONTNEED +#undef JEMALLOC_PURGE_MADVISE_FREE + +/* + * Define if operating system has alloca.h header. + */ +#undef JEMALLOC_HAS_ALLOCA_H + +/* sizeof(int) == 2^LG_SIZEOF_INT. */ +#undef LG_SIZEOF_INT + +/* sizeof(long) == 2^LG_SIZEOF_LONG. */ +#undef LG_SIZEOF_LONG + +/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. 
*/ +#undef LG_SIZEOF_INTMAX_T + +#endif /* JEMALLOC_INTERNAL_DEFS_H_ */ diff --git a/include/jemalloc/internal/jemalloc_internal_macros.h b/include/jemalloc/internal/jemalloc_internal_macros.h new file mode 100644 index 00000000..70602ee8 --- /dev/null +++ b/include/jemalloc/internal/jemalloc_internal_macros.h @@ -0,0 +1,47 @@ +/* + * JEMALLOC_ALWAYS_INLINE and JEMALLOC_INLINE are used within header files for + * functions that are static inline functions if inlining is enabled, and + * single-definition library-private functions if inlining is disabled. + * + * JEMALLOC_ALWAYS_INLINE_C and JEMALLOC_INLINE_C are for use in .c files, in + * which case the denoted functions are always static, regardless of whether + * inlining is enabled. + */ +#if defined(JEMALLOC_DEBUG) || defined(JEMALLOC_CODE_COVERAGE) + /* Disable inlining to make debugging/profiling easier. */ +# define JEMALLOC_ALWAYS_INLINE +# define JEMALLOC_ALWAYS_INLINE_C static +# define JEMALLOC_INLINE +# define JEMALLOC_INLINE_C static +# define inline +#else +# define JEMALLOC_ENABLE_INLINE +# ifdef JEMALLOC_HAVE_ATTR +# define JEMALLOC_ALWAYS_INLINE \ + static inline JEMALLOC_ATTR(unused) JEMALLOC_ATTR(always_inline) +# define JEMALLOC_ALWAYS_INLINE_C \ + static inline JEMALLOC_ATTR(always_inline) +# else +# define JEMALLOC_ALWAYS_INLINE static inline +# define JEMALLOC_ALWAYS_INLINE_C static inline +# endif +# define JEMALLOC_INLINE static inline +# define JEMALLOC_INLINE_C static inline +# ifdef _MSC_VER +# define inline _inline +# endif +#endif + +#ifdef JEMALLOC_CC_SILENCE +# define UNUSED JEMALLOC_ATTR(unused) +#else +# define UNUSED +#endif + +#define ZU(z) ((size_t)z) +#define QU(q) ((uint64_t)q) +#define QI(q) ((int64_t)q) + +#ifndef __DECONST +# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var)) +#endif diff --git a/include/jemalloc/internal/private_namespace.h b/include/jemalloc/internal/private_namespace.h deleted file mode 100644 index cdb0b0eb..00000000 --- a/include/jemalloc/internal/private_namespace.h +++ /dev/null @@ -1,392 +0,0 @@ -#define a0calloc JEMALLOC_N(a0calloc) -#define a0free JEMALLOC_N(a0free) -#define a0malloc JEMALLOC_N(a0malloc) -#define arena_alloc_junk_small JEMALLOC_N(arena_alloc_junk_small) -#define arena_bin_index JEMALLOC_N(arena_bin_index) -#define arena_bin_info JEMALLOC_N(arena_bin_info) -#define arena_boot JEMALLOC_N(arena_boot) -#define arena_dalloc JEMALLOC_N(arena_dalloc) -#define arena_dalloc_bin JEMALLOC_N(arena_dalloc_bin) -#define arena_dalloc_bin_locked JEMALLOC_N(arena_dalloc_bin_locked) -#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small) -#define arena_dalloc_large JEMALLOC_N(arena_dalloc_large) -#define arena_dalloc_large_locked JEMALLOC_N(arena_dalloc_large_locked) -#define arena_dalloc_small JEMALLOC_N(arena_dalloc_small) -#define arena_dss_prec_get JEMALLOC_N(arena_dss_prec_get) -#define arena_dss_prec_set JEMALLOC_N(arena_dss_prec_set) -#define arena_malloc JEMALLOC_N(arena_malloc) -#define arena_malloc_large JEMALLOC_N(arena_malloc_large) -#define arena_malloc_small JEMALLOC_N(arena_malloc_small) -#define arena_mapbits_allocated_get JEMALLOC_N(arena_mapbits_allocated_get) -#define arena_mapbits_binind_get JEMALLOC_N(arena_mapbits_binind_get) -#define arena_mapbits_dirty_get JEMALLOC_N(arena_mapbits_dirty_get) -#define arena_mapbits_get JEMALLOC_N(arena_mapbits_get) -#define arena_mapbits_large_binind_set JEMALLOC_N(arena_mapbits_large_binind_set) -#define arena_mapbits_large_get JEMALLOC_N(arena_mapbits_large_get) -#define 
arena_mapbits_large_set JEMALLOC_N(arena_mapbits_large_set) -#define arena_mapbits_large_size_get JEMALLOC_N(arena_mapbits_large_size_get) -#define arena_mapbits_small_runind_get JEMALLOC_N(arena_mapbits_small_runind_get) -#define arena_mapbits_small_set JEMALLOC_N(arena_mapbits_small_set) -#define arena_mapbits_unallocated_set JEMALLOC_N(arena_mapbits_unallocated_set) -#define arena_mapbits_unallocated_size_get JEMALLOC_N(arena_mapbits_unallocated_size_get) -#define arena_mapbits_unallocated_size_set JEMALLOC_N(arena_mapbits_unallocated_size_set) -#define arena_mapbits_unzeroed_get JEMALLOC_N(arena_mapbits_unzeroed_get) -#define arena_mapbits_unzeroed_set JEMALLOC_N(arena_mapbits_unzeroed_set) -#define arena_mapbitsp_get JEMALLOC_N(arena_mapbitsp_get) -#define arena_mapbitsp_read JEMALLOC_N(arena_mapbitsp_read) -#define arena_mapbitsp_write JEMALLOC_N(arena_mapbitsp_write) -#define arena_mapp_get JEMALLOC_N(arena_mapp_get) -#define arena_maxclass JEMALLOC_N(arena_maxclass) -#define arena_new JEMALLOC_N(arena_new) -#define arena_palloc JEMALLOC_N(arena_palloc) -#define arena_postfork_child JEMALLOC_N(arena_postfork_child) -#define arena_postfork_parent JEMALLOC_N(arena_postfork_parent) -#define arena_prefork JEMALLOC_N(arena_prefork) -#define arena_prof_accum JEMALLOC_N(arena_prof_accum) -#define arena_prof_accum_impl JEMALLOC_N(arena_prof_accum_impl) -#define arena_prof_accum_locked JEMALLOC_N(arena_prof_accum_locked) -#define arena_prof_ctx_get JEMALLOC_N(arena_prof_ctx_get) -#define arena_prof_ctx_set JEMALLOC_N(arena_prof_ctx_set) -#define arena_prof_promoted JEMALLOC_N(arena_prof_promoted) -#define arena_ptr_small_binind_get JEMALLOC_N(arena_ptr_small_binind_get) -#define arena_purge_all JEMALLOC_N(arena_purge_all) -#define arena_ralloc JEMALLOC_N(arena_ralloc) -#define arena_ralloc_no_move JEMALLOC_N(arena_ralloc_no_move) -#define arena_run_regind JEMALLOC_N(arena_run_regind) -#define arena_salloc JEMALLOC_N(arena_salloc) -#define arena_stats_merge JEMALLOC_N(arena_stats_merge) -#define arena_tcache_fill_small JEMALLOC_N(arena_tcache_fill_small) -#define arenas JEMALLOC_N(arenas) -#define arenas_booted JEMALLOC_N(arenas_booted) -#define arenas_cleanup JEMALLOC_N(arenas_cleanup) -#define arenas_extend JEMALLOC_N(arenas_extend) -#define arenas_initialized JEMALLOC_N(arenas_initialized) -#define arenas_lock JEMALLOC_N(arenas_lock) -#define arenas_tls JEMALLOC_N(arenas_tls) -#define arenas_tsd JEMALLOC_N(arenas_tsd) -#define arenas_tsd_boot JEMALLOC_N(arenas_tsd_boot) -#define arenas_tsd_cleanup_wrapper JEMALLOC_N(arenas_tsd_cleanup_wrapper) -#define arenas_tsd_get JEMALLOC_N(arenas_tsd_get) -#define arenas_tsd_get_wrapper JEMALLOC_N(arenas_tsd_get_wrapper) -#define arenas_tsd_set JEMALLOC_N(arenas_tsd_set) -#define atomic_add_u JEMALLOC_N(atomic_add_u) -#define atomic_add_uint32 JEMALLOC_N(atomic_add_uint32) -#define atomic_add_uint64 JEMALLOC_N(atomic_add_uint64) -#define atomic_add_z JEMALLOC_N(atomic_add_z) -#define atomic_sub_u JEMALLOC_N(atomic_sub_u) -#define atomic_sub_uint32 JEMALLOC_N(atomic_sub_uint32) -#define atomic_sub_uint64 JEMALLOC_N(atomic_sub_uint64) -#define atomic_sub_z JEMALLOC_N(atomic_sub_z) -#define base_alloc JEMALLOC_N(base_alloc) -#define base_boot JEMALLOC_N(base_boot) -#define base_calloc JEMALLOC_N(base_calloc) -#define base_node_alloc JEMALLOC_N(base_node_alloc) -#define base_node_dealloc JEMALLOC_N(base_node_dealloc) -#define base_postfork_child JEMALLOC_N(base_postfork_child) -#define base_postfork_parent JEMALLOC_N(base_postfork_parent) -#define 
base_prefork JEMALLOC_N(base_prefork) -#define bitmap_full JEMALLOC_N(bitmap_full) -#define bitmap_get JEMALLOC_N(bitmap_get) -#define bitmap_info_init JEMALLOC_N(bitmap_info_init) -#define bitmap_info_ngroups JEMALLOC_N(bitmap_info_ngroups) -#define bitmap_init JEMALLOC_N(bitmap_init) -#define bitmap_set JEMALLOC_N(bitmap_set) -#define bitmap_sfu JEMALLOC_N(bitmap_sfu) -#define bitmap_size JEMALLOC_N(bitmap_size) -#define bitmap_unset JEMALLOC_N(bitmap_unset) -#define bt_init JEMALLOC_N(bt_init) -#define buferror JEMALLOC_N(buferror) -#define choose_arena JEMALLOC_N(choose_arena) -#define choose_arena_hard JEMALLOC_N(choose_arena_hard) -#define chunk_alloc JEMALLOC_N(chunk_alloc) -#define chunk_alloc_dss JEMALLOC_N(chunk_alloc_dss) -#define chunk_alloc_mmap JEMALLOC_N(chunk_alloc_mmap) -#define chunk_boot JEMALLOC_N(chunk_boot) -#define chunk_dealloc JEMALLOC_N(chunk_dealloc) -#define chunk_dealloc_mmap JEMALLOC_N(chunk_dealloc_mmap) -#define chunk_dss_boot JEMALLOC_N(chunk_dss_boot) -#define chunk_dss_postfork_child JEMALLOC_N(chunk_dss_postfork_child) -#define chunk_dss_postfork_parent JEMALLOC_N(chunk_dss_postfork_parent) -#define chunk_dss_prec_get JEMALLOC_N(chunk_dss_prec_get) -#define chunk_dss_prec_set JEMALLOC_N(chunk_dss_prec_set) -#define chunk_dss_prefork JEMALLOC_N(chunk_dss_prefork) -#define chunk_in_dss JEMALLOC_N(chunk_in_dss) -#define chunk_npages JEMALLOC_N(chunk_npages) -#define chunk_postfork_child JEMALLOC_N(chunk_postfork_child) -#define chunk_postfork_parent JEMALLOC_N(chunk_postfork_parent) -#define chunk_prefork JEMALLOC_N(chunk_prefork) -#define chunk_unmap JEMALLOC_N(chunk_unmap) -#define chunks_mtx JEMALLOC_N(chunks_mtx) -#define chunks_rtree JEMALLOC_N(chunks_rtree) -#define chunksize JEMALLOC_N(chunksize) -#define chunksize_mask JEMALLOC_N(chunksize_mask) -#define ckh_bucket_search JEMALLOC_N(ckh_bucket_search) -#define ckh_count JEMALLOC_N(ckh_count) -#define ckh_delete JEMALLOC_N(ckh_delete) -#define ckh_evict_reloc_insert JEMALLOC_N(ckh_evict_reloc_insert) -#define ckh_insert JEMALLOC_N(ckh_insert) -#define ckh_isearch JEMALLOC_N(ckh_isearch) -#define ckh_iter JEMALLOC_N(ckh_iter) -#define ckh_new JEMALLOC_N(ckh_new) -#define ckh_pointer_hash JEMALLOC_N(ckh_pointer_hash) -#define ckh_pointer_keycomp JEMALLOC_N(ckh_pointer_keycomp) -#define ckh_rebuild JEMALLOC_N(ckh_rebuild) -#define ckh_remove JEMALLOC_N(ckh_remove) -#define ckh_search JEMALLOC_N(ckh_search) -#define ckh_string_hash JEMALLOC_N(ckh_string_hash) -#define ckh_string_keycomp JEMALLOC_N(ckh_string_keycomp) -#define ckh_try_bucket_insert JEMALLOC_N(ckh_try_bucket_insert) -#define ckh_try_insert JEMALLOC_N(ckh_try_insert) -#define ctl_boot JEMALLOC_N(ctl_boot) -#define ctl_bymib JEMALLOC_N(ctl_bymib) -#define ctl_byname JEMALLOC_N(ctl_byname) -#define ctl_nametomib JEMALLOC_N(ctl_nametomib) -#define ctl_postfork_child JEMALLOC_N(ctl_postfork_child) -#define ctl_postfork_parent JEMALLOC_N(ctl_postfork_parent) -#define ctl_prefork JEMALLOC_N(ctl_prefork) -#define dss_prec_names JEMALLOC_N(dss_prec_names) -#define extent_tree_ad_first JEMALLOC_N(extent_tree_ad_first) -#define extent_tree_ad_insert JEMALLOC_N(extent_tree_ad_insert) -#define extent_tree_ad_iter JEMALLOC_N(extent_tree_ad_iter) -#define extent_tree_ad_iter_recurse JEMALLOC_N(extent_tree_ad_iter_recurse) -#define extent_tree_ad_iter_start JEMALLOC_N(extent_tree_ad_iter_start) -#define extent_tree_ad_last JEMALLOC_N(extent_tree_ad_last) -#define extent_tree_ad_new JEMALLOC_N(extent_tree_ad_new) -#define extent_tree_ad_next 
JEMALLOC_N(extent_tree_ad_next) -#define extent_tree_ad_nsearch JEMALLOC_N(extent_tree_ad_nsearch) -#define extent_tree_ad_prev JEMALLOC_N(extent_tree_ad_prev) -#define extent_tree_ad_psearch JEMALLOC_N(extent_tree_ad_psearch) -#define extent_tree_ad_remove JEMALLOC_N(extent_tree_ad_remove) -#define extent_tree_ad_reverse_iter JEMALLOC_N(extent_tree_ad_reverse_iter) -#define extent_tree_ad_reverse_iter_recurse JEMALLOC_N(extent_tree_ad_reverse_iter_recurse) -#define extent_tree_ad_reverse_iter_start JEMALLOC_N(extent_tree_ad_reverse_iter_start) -#define extent_tree_ad_search JEMALLOC_N(extent_tree_ad_search) -#define extent_tree_szad_first JEMALLOC_N(extent_tree_szad_first) -#define extent_tree_szad_insert JEMALLOC_N(extent_tree_szad_insert) -#define extent_tree_szad_iter JEMALLOC_N(extent_tree_szad_iter) -#define extent_tree_szad_iter_recurse JEMALLOC_N(extent_tree_szad_iter_recurse) -#define extent_tree_szad_iter_start JEMALLOC_N(extent_tree_szad_iter_start) -#define extent_tree_szad_last JEMALLOC_N(extent_tree_szad_last) -#define extent_tree_szad_new JEMALLOC_N(extent_tree_szad_new) -#define extent_tree_szad_next JEMALLOC_N(extent_tree_szad_next) -#define extent_tree_szad_nsearch JEMALLOC_N(extent_tree_szad_nsearch) -#define extent_tree_szad_prev JEMALLOC_N(extent_tree_szad_prev) -#define extent_tree_szad_psearch JEMALLOC_N(extent_tree_szad_psearch) -#define extent_tree_szad_remove JEMALLOC_N(extent_tree_szad_remove) -#define extent_tree_szad_reverse_iter JEMALLOC_N(extent_tree_szad_reverse_iter) -#define extent_tree_szad_reverse_iter_recurse JEMALLOC_N(extent_tree_szad_reverse_iter_recurse) -#define extent_tree_szad_reverse_iter_start JEMALLOC_N(extent_tree_szad_reverse_iter_start) -#define extent_tree_szad_search JEMALLOC_N(extent_tree_szad_search) -#define get_errno JEMALLOC_N(get_errno) -#define hash JEMALLOC_N(hash) -#define hash_fmix_32 JEMALLOC_N(hash_fmix_32) -#define hash_fmix_64 JEMALLOC_N(hash_fmix_64) -#define hash_get_block_32 JEMALLOC_N(hash_get_block_32) -#define hash_get_block_64 JEMALLOC_N(hash_get_block_64) -#define hash_rotl_32 JEMALLOC_N(hash_rotl_32) -#define hash_rotl_64 JEMALLOC_N(hash_rotl_64) -#define hash_x64_128 JEMALLOC_N(hash_x64_128) -#define hash_x86_128 JEMALLOC_N(hash_x86_128) -#define hash_x86_32 JEMALLOC_N(hash_x86_32) -#define huge_allocated JEMALLOC_N(huge_allocated) -#define huge_boot JEMALLOC_N(huge_boot) -#define huge_dalloc JEMALLOC_N(huge_dalloc) -#define huge_malloc JEMALLOC_N(huge_malloc) -#define huge_mtx JEMALLOC_N(huge_mtx) -#define huge_ndalloc JEMALLOC_N(huge_ndalloc) -#define huge_nmalloc JEMALLOC_N(huge_nmalloc) -#define huge_palloc JEMALLOC_N(huge_palloc) -#define huge_postfork_child JEMALLOC_N(huge_postfork_child) -#define huge_postfork_parent JEMALLOC_N(huge_postfork_parent) -#define huge_prefork JEMALLOC_N(huge_prefork) -#define huge_prof_ctx_get JEMALLOC_N(huge_prof_ctx_get) -#define huge_prof_ctx_set JEMALLOC_N(huge_prof_ctx_set) -#define huge_ralloc JEMALLOC_N(huge_ralloc) -#define huge_ralloc_no_move JEMALLOC_N(huge_ralloc_no_move) -#define huge_salloc JEMALLOC_N(huge_salloc) -#define iallocm JEMALLOC_N(iallocm) -#define icalloc JEMALLOC_N(icalloc) -#define icallocx JEMALLOC_N(icallocx) -#define idalloc JEMALLOC_N(idalloc) -#define idallocx JEMALLOC_N(idallocx) -#define imalloc JEMALLOC_N(imalloc) -#define imallocx JEMALLOC_N(imallocx) -#define ipalloc JEMALLOC_N(ipalloc) -#define ipallocx JEMALLOC_N(ipallocx) -#define iqalloc JEMALLOC_N(iqalloc) -#define iqallocx JEMALLOC_N(iqallocx) -#define iralloc JEMALLOC_N(iralloc) 
-#define irallocx JEMALLOC_N(irallocx) -#define isalloc JEMALLOC_N(isalloc) -#define isthreaded JEMALLOC_N(isthreaded) -#define ivsalloc JEMALLOC_N(ivsalloc) -#define jemalloc_postfork_child JEMALLOC_N(jemalloc_postfork_child) -#define jemalloc_postfork_parent JEMALLOC_N(jemalloc_postfork_parent) -#define jemalloc_prefork JEMALLOC_N(jemalloc_prefork) -#define malloc_cprintf JEMALLOC_N(malloc_cprintf) -#define malloc_mutex_init JEMALLOC_N(malloc_mutex_init) -#define malloc_mutex_lock JEMALLOC_N(malloc_mutex_lock) -#define malloc_mutex_postfork_child JEMALLOC_N(malloc_mutex_postfork_child) -#define malloc_mutex_postfork_parent JEMALLOC_N(malloc_mutex_postfork_parent) -#define malloc_mutex_prefork JEMALLOC_N(malloc_mutex_prefork) -#define malloc_mutex_unlock JEMALLOC_N(malloc_mutex_unlock) -#define malloc_printf JEMALLOC_N(malloc_printf) -#define malloc_snprintf JEMALLOC_N(malloc_snprintf) -#define malloc_strtoumax JEMALLOC_N(malloc_strtoumax) -#define malloc_tsd_boot JEMALLOC_N(malloc_tsd_boot) -#define malloc_tsd_cleanup_register JEMALLOC_N(malloc_tsd_cleanup_register) -#define malloc_tsd_dalloc JEMALLOC_N(malloc_tsd_dalloc) -#define malloc_tsd_malloc JEMALLOC_N(malloc_tsd_malloc) -#define malloc_tsd_no_cleanup JEMALLOC_N(malloc_tsd_no_cleanup) -#define malloc_vcprintf JEMALLOC_N(malloc_vcprintf) -#define malloc_vsnprintf JEMALLOC_N(malloc_vsnprintf) -#define malloc_write JEMALLOC_N(malloc_write) -#define map_bias JEMALLOC_N(map_bias) -#define mb_write JEMALLOC_N(mb_write) -#define mutex_boot JEMALLOC_N(mutex_boot) -#define narenas_auto JEMALLOC_N(narenas_auto) -#define narenas_total JEMALLOC_N(narenas_total) -#define narenas_total_get JEMALLOC_N(narenas_total_get) -#define ncpus JEMALLOC_N(ncpus) -#define nhbins JEMALLOC_N(nhbins) -#define opt_abort JEMALLOC_N(opt_abort) -#define opt_junk JEMALLOC_N(opt_junk) -#define opt_lg_chunk JEMALLOC_N(opt_lg_chunk) -#define opt_lg_dirty_mult JEMALLOC_N(opt_lg_dirty_mult) -#define opt_lg_prof_interval JEMALLOC_N(opt_lg_prof_interval) -#define opt_lg_prof_sample JEMALLOC_N(opt_lg_prof_sample) -#define opt_lg_tcache_max JEMALLOC_N(opt_lg_tcache_max) -#define opt_narenas JEMALLOC_N(opt_narenas) -#define opt_prof JEMALLOC_N(opt_prof) -#define opt_prof_accum JEMALLOC_N(opt_prof_accum) -#define opt_prof_active JEMALLOC_N(opt_prof_active) -#define opt_prof_final JEMALLOC_N(opt_prof_final) -#define opt_prof_gdump JEMALLOC_N(opt_prof_gdump) -#define opt_prof_leak JEMALLOC_N(opt_prof_leak) -#define opt_prof_prefix JEMALLOC_N(opt_prof_prefix) -#define opt_quarantine JEMALLOC_N(opt_quarantine) -#define opt_redzone JEMALLOC_N(opt_redzone) -#define opt_stats_print JEMALLOC_N(opt_stats_print) -#define opt_tcache JEMALLOC_N(opt_tcache) -#define opt_utrace JEMALLOC_N(opt_utrace) -#define opt_valgrind JEMALLOC_N(opt_valgrind) -#define opt_xmalloc JEMALLOC_N(opt_xmalloc) -#define opt_zero JEMALLOC_N(opt_zero) -#define p2rz JEMALLOC_N(p2rz) -#define pages_purge JEMALLOC_N(pages_purge) -#define pow2_ceil JEMALLOC_N(pow2_ceil) -#define prof_backtrace JEMALLOC_N(prof_backtrace) -#define prof_boot0 JEMALLOC_N(prof_boot0) -#define prof_boot1 JEMALLOC_N(prof_boot1) -#define prof_boot2 JEMALLOC_N(prof_boot2) -#define prof_ctx_get JEMALLOC_N(prof_ctx_get) -#define prof_ctx_set JEMALLOC_N(prof_ctx_set) -#define prof_free JEMALLOC_N(prof_free) -#define prof_gdump JEMALLOC_N(prof_gdump) -#define prof_idump JEMALLOC_N(prof_idump) -#define prof_interval JEMALLOC_N(prof_interval) -#define prof_lookup JEMALLOC_N(prof_lookup) -#define prof_malloc JEMALLOC_N(prof_malloc) -#define 
prof_mdump JEMALLOC_N(prof_mdump) -#define prof_postfork_child JEMALLOC_N(prof_postfork_child) -#define prof_postfork_parent JEMALLOC_N(prof_postfork_parent) -#define prof_prefork JEMALLOC_N(prof_prefork) -#define prof_promote JEMALLOC_N(prof_promote) -#define prof_realloc JEMALLOC_N(prof_realloc) -#define prof_sample_accum_update JEMALLOC_N(prof_sample_accum_update) -#define prof_sample_threshold_update JEMALLOC_N(prof_sample_threshold_update) -#define prof_tdata_booted JEMALLOC_N(prof_tdata_booted) -#define prof_tdata_cleanup JEMALLOC_N(prof_tdata_cleanup) -#define prof_tdata_get JEMALLOC_N(prof_tdata_get) -#define prof_tdata_init JEMALLOC_N(prof_tdata_init) -#define prof_tdata_initialized JEMALLOC_N(prof_tdata_initialized) -#define prof_tdata_tls JEMALLOC_N(prof_tdata_tls) -#define prof_tdata_tsd JEMALLOC_N(prof_tdata_tsd) -#define prof_tdata_tsd_boot JEMALLOC_N(prof_tdata_tsd_boot) -#define prof_tdata_tsd_cleanup_wrapper JEMALLOC_N(prof_tdata_tsd_cleanup_wrapper) -#define prof_tdata_tsd_get JEMALLOC_N(prof_tdata_tsd_get) -#define prof_tdata_tsd_get_wrapper JEMALLOC_N(prof_tdata_tsd_get_wrapper) -#define prof_tdata_tsd_set JEMALLOC_N(prof_tdata_tsd_set) -#define quarantine JEMALLOC_N(quarantine) -#define quarantine_alloc_hook JEMALLOC_N(quarantine_alloc_hook) -#define quarantine_boot JEMALLOC_N(quarantine_boot) -#define quarantine_booted JEMALLOC_N(quarantine_booted) -#define quarantine_cleanup JEMALLOC_N(quarantine_cleanup) -#define quarantine_init JEMALLOC_N(quarantine_init) -#define quarantine_tls JEMALLOC_N(quarantine_tls) -#define quarantine_tsd JEMALLOC_N(quarantine_tsd) -#define quarantine_tsd_boot JEMALLOC_N(quarantine_tsd_boot) -#define quarantine_tsd_cleanup_wrapper JEMALLOC_N(quarantine_tsd_cleanup_wrapper) -#define quarantine_tsd_get JEMALLOC_N(quarantine_tsd_get) -#define quarantine_tsd_get_wrapper JEMALLOC_N(quarantine_tsd_get_wrapper) -#define quarantine_tsd_set JEMALLOC_N(quarantine_tsd_set) -#define register_zone JEMALLOC_N(register_zone) -#define rtree_get JEMALLOC_N(rtree_get) -#define rtree_get_locked JEMALLOC_N(rtree_get_locked) -#define rtree_new JEMALLOC_N(rtree_new) -#define rtree_postfork_child JEMALLOC_N(rtree_postfork_child) -#define rtree_postfork_parent JEMALLOC_N(rtree_postfork_parent) -#define rtree_prefork JEMALLOC_N(rtree_prefork) -#define rtree_set JEMALLOC_N(rtree_set) -#define s2u JEMALLOC_N(s2u) -#define sa2u JEMALLOC_N(sa2u) -#define set_errno JEMALLOC_N(set_errno) -#define stats_cactive JEMALLOC_N(stats_cactive) -#define stats_cactive_add JEMALLOC_N(stats_cactive_add) -#define stats_cactive_get JEMALLOC_N(stats_cactive_get) -#define stats_cactive_sub JEMALLOC_N(stats_cactive_sub) -#define stats_chunks JEMALLOC_N(stats_chunks) -#define stats_print JEMALLOC_N(stats_print) -#define tcache_alloc_easy JEMALLOC_N(tcache_alloc_easy) -#define tcache_alloc_large JEMALLOC_N(tcache_alloc_large) -#define tcache_alloc_small JEMALLOC_N(tcache_alloc_small) -#define tcache_alloc_small_hard JEMALLOC_N(tcache_alloc_small_hard) -#define tcache_arena_associate JEMALLOC_N(tcache_arena_associate) -#define tcache_arena_dissociate JEMALLOC_N(tcache_arena_dissociate) -#define tcache_bin_flush_large JEMALLOC_N(tcache_bin_flush_large) -#define tcache_bin_flush_small JEMALLOC_N(tcache_bin_flush_small) -#define tcache_bin_info JEMALLOC_N(tcache_bin_info) -#define tcache_boot0 JEMALLOC_N(tcache_boot0) -#define tcache_boot1 JEMALLOC_N(tcache_boot1) -#define tcache_booted JEMALLOC_N(tcache_booted) -#define tcache_create JEMALLOC_N(tcache_create) -#define tcache_dalloc_large 
JEMALLOC_N(tcache_dalloc_large) -#define tcache_dalloc_small JEMALLOC_N(tcache_dalloc_small) -#define tcache_destroy JEMALLOC_N(tcache_destroy) -#define tcache_enabled_booted JEMALLOC_N(tcache_enabled_booted) -#define tcache_enabled_get JEMALLOC_N(tcache_enabled_get) -#define tcache_enabled_initialized JEMALLOC_N(tcache_enabled_initialized) -#define tcache_enabled_set JEMALLOC_N(tcache_enabled_set) -#define tcache_enabled_tls JEMALLOC_N(tcache_enabled_tls) -#define tcache_enabled_tsd JEMALLOC_N(tcache_enabled_tsd) -#define tcache_enabled_tsd_boot JEMALLOC_N(tcache_enabled_tsd_boot) -#define tcache_enabled_tsd_cleanup_wrapper JEMALLOC_N(tcache_enabled_tsd_cleanup_wrapper) -#define tcache_enabled_tsd_get JEMALLOC_N(tcache_enabled_tsd_get) -#define tcache_enabled_tsd_get_wrapper JEMALLOC_N(tcache_enabled_tsd_get_wrapper) -#define tcache_enabled_tsd_set JEMALLOC_N(tcache_enabled_tsd_set) -#define tcache_event JEMALLOC_N(tcache_event) -#define tcache_event_hard JEMALLOC_N(tcache_event_hard) -#define tcache_flush JEMALLOC_N(tcache_flush) -#define tcache_get JEMALLOC_N(tcache_get) -#define tcache_initialized JEMALLOC_N(tcache_initialized) -#define tcache_maxclass JEMALLOC_N(tcache_maxclass) -#define tcache_salloc JEMALLOC_N(tcache_salloc) -#define tcache_stats_merge JEMALLOC_N(tcache_stats_merge) -#define tcache_thread_cleanup JEMALLOC_N(tcache_thread_cleanup) -#define tcache_tls JEMALLOC_N(tcache_tls) -#define tcache_tsd JEMALLOC_N(tcache_tsd) -#define tcache_tsd_boot JEMALLOC_N(tcache_tsd_boot) -#define tcache_tsd_cleanup_wrapper JEMALLOC_N(tcache_tsd_cleanup_wrapper) -#define tcache_tsd_get JEMALLOC_N(tcache_tsd_get) -#define tcache_tsd_get_wrapper JEMALLOC_N(tcache_tsd_get_wrapper) -#define tcache_tsd_set JEMALLOC_N(tcache_tsd_set) -#define thread_allocated_booted JEMALLOC_N(thread_allocated_booted) -#define thread_allocated_initialized JEMALLOC_N(thread_allocated_initialized) -#define thread_allocated_tls JEMALLOC_N(thread_allocated_tls) -#define thread_allocated_tsd JEMALLOC_N(thread_allocated_tsd) -#define thread_allocated_tsd_boot JEMALLOC_N(thread_allocated_tsd_boot) -#define thread_allocated_tsd_cleanup_wrapper JEMALLOC_N(thread_allocated_tsd_cleanup_wrapper) -#define thread_allocated_tsd_get JEMALLOC_N(thread_allocated_tsd_get) -#define thread_allocated_tsd_get_wrapper JEMALLOC_N(thread_allocated_tsd_get_wrapper) -#define thread_allocated_tsd_set JEMALLOC_N(thread_allocated_tsd_set) -#define u2rz JEMALLOC_N(u2rz) diff --git a/include/jemalloc/internal/private_namespace.sh b/include/jemalloc/internal/private_namespace.sh new file mode 100755 index 00000000..cd25eb30 --- /dev/null +++ b/include/jemalloc/internal/private_namespace.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +for symbol in `cat $1` ; do + echo "#define ${symbol} JEMALLOC_N(${symbol})" +done diff --git a/include/jemalloc/internal/private_symbols.txt b/include/jemalloc/internal/private_symbols.txt new file mode 100644 index 00000000..1e64ed57 --- /dev/null +++ b/include/jemalloc/internal/private_symbols.txt @@ -0,0 +1,412 @@ +a0calloc +a0free +a0malloc +arena_alloc_junk_small +arena_bin_index +arena_bin_info +arena_boot +arena_dalloc +arena_dalloc_bin +arena_dalloc_bin_locked +arena_dalloc_junk_large +arena_dalloc_junk_small +arena_dalloc_large +arena_dalloc_large_locked +arena_dalloc_small +arena_dss_prec_get +arena_dss_prec_set +arena_malloc +arena_malloc_large +arena_malloc_small +arena_mapbits_allocated_get +arena_mapbits_binind_get +arena_mapbits_dirty_get +arena_mapbits_get +arena_mapbits_large_binind_set +arena_mapbits_large_get 
+arena_mapbits_large_set +arena_mapbits_large_size_get +arena_mapbits_small_runind_get +arena_mapbits_small_set +arena_mapbits_unallocated_set +arena_mapbits_unallocated_size_get +arena_mapbits_unallocated_size_set +arena_mapbits_unzeroed_get +arena_mapbits_unzeroed_set +arena_mapbitsp_get +arena_mapbitsp_read +arena_mapbitsp_write +arena_mapp_get +arena_maxclass +arena_new +arena_palloc +arena_postfork_child +arena_postfork_parent +arena_prefork +arena_prof_accum +arena_prof_accum_impl +arena_prof_accum_locked +arena_prof_ctx_get +arena_prof_ctx_set +arena_prof_promoted +arena_ptr_small_binind_get +arena_purge_all +arena_quarantine_junk_small +arena_ralloc +arena_ralloc_junk_large +arena_ralloc_no_move +arena_redzone_corruption +arena_run_regind +arena_salloc +arena_stats_merge +arena_tcache_fill_small +arenas +arenas_booted +arenas_cleanup +arenas_extend +arenas_initialized +arenas_lock +arenas_tls +arenas_tsd +arenas_tsd_boot +arenas_tsd_cleanup_wrapper +arenas_tsd_get +arenas_tsd_get_wrapper +arenas_tsd_init_head +arenas_tsd_set +atomic_add_u +atomic_add_uint32 +atomic_add_uint64 +atomic_add_z +atomic_sub_u +atomic_sub_uint32 +atomic_sub_uint64 +atomic_sub_z +base_alloc +base_boot +base_calloc +base_node_alloc +base_node_dealloc +base_postfork_child +base_postfork_parent +base_prefork +bitmap_full +bitmap_get +bitmap_info_init +bitmap_info_ngroups +bitmap_init +bitmap_set +bitmap_sfu +bitmap_size +bitmap_unset +bt_init +buferror +choose_arena +choose_arena_hard +chunk_alloc +chunk_alloc_dss +chunk_alloc_mmap +chunk_boot +chunk_dealloc +chunk_dealloc_mmap +chunk_dss_boot +chunk_dss_postfork_child +chunk_dss_postfork_parent +chunk_dss_prec_get +chunk_dss_prec_set +chunk_dss_prefork +chunk_in_dss +chunk_npages +chunk_postfork_child +chunk_postfork_parent +chunk_prefork +chunk_unmap +chunks_mtx +chunks_rtree +chunksize +chunksize_mask +ckh_bucket_search +ckh_count +ckh_delete +ckh_evict_reloc_insert +ckh_insert +ckh_isearch +ckh_iter +ckh_new +ckh_pointer_hash +ckh_pointer_keycomp +ckh_rebuild +ckh_remove +ckh_search +ckh_string_hash +ckh_string_keycomp +ckh_try_bucket_insert +ckh_try_insert +ctl_boot +ctl_bymib +ctl_byname +ctl_nametomib +ctl_postfork_child +ctl_postfork_parent +ctl_prefork +dss_prec_names +extent_tree_ad_first +extent_tree_ad_insert +extent_tree_ad_iter +extent_tree_ad_iter_recurse +extent_tree_ad_iter_start +extent_tree_ad_last +extent_tree_ad_new +extent_tree_ad_next +extent_tree_ad_nsearch +extent_tree_ad_prev +extent_tree_ad_psearch +extent_tree_ad_remove +extent_tree_ad_reverse_iter +extent_tree_ad_reverse_iter_recurse +extent_tree_ad_reverse_iter_start +extent_tree_ad_search +extent_tree_szad_first +extent_tree_szad_insert +extent_tree_szad_iter +extent_tree_szad_iter_recurse +extent_tree_szad_iter_start +extent_tree_szad_last +extent_tree_szad_new +extent_tree_szad_next +extent_tree_szad_nsearch +extent_tree_szad_prev +extent_tree_szad_psearch +extent_tree_szad_remove +extent_tree_szad_reverse_iter +extent_tree_szad_reverse_iter_recurse +extent_tree_szad_reverse_iter_start +extent_tree_szad_search +get_errno +hash +hash_fmix_32 +hash_fmix_64 +hash_get_block_32 +hash_get_block_64 +hash_rotl_32 +hash_rotl_64 +hash_x64_128 +hash_x86_128 +hash_x86_32 +huge_allocated +huge_boot +huge_dalloc +huge_dalloc_junk +huge_malloc +huge_mtx +huge_ndalloc +huge_nmalloc +huge_palloc +huge_postfork_child +huge_postfork_parent +huge_prefork +huge_prof_ctx_get +huge_prof_ctx_set +huge_ralloc +huge_ralloc_no_move +huge_salloc +iallocm +icalloc +icalloct +idalloc +idalloct +imalloc 
+imalloct +ipalloc +ipalloct +iqalloc +iqalloct +iralloc +iralloct +iralloct_realign +isalloc +isthreaded +ivsalloc +ixalloc +jemalloc_postfork_child +jemalloc_postfork_parent +jemalloc_prefork +malloc_cprintf +malloc_mutex_init +malloc_mutex_lock +malloc_mutex_postfork_child +malloc_mutex_postfork_parent +malloc_mutex_prefork +malloc_mutex_unlock +malloc_printf +malloc_snprintf +malloc_strtoumax +malloc_tsd_boot +malloc_tsd_cleanup_register +malloc_tsd_dalloc +malloc_tsd_malloc +malloc_tsd_no_cleanup +malloc_vcprintf +malloc_vsnprintf +malloc_write +map_bias +mb_write +mutex_boot +narenas_auto +narenas_total +narenas_total_get +ncpus +nhbins +opt_abort +opt_dss +opt_junk +opt_lg_chunk +opt_lg_dirty_mult +opt_lg_prof_interval +opt_lg_prof_sample +opt_lg_tcache_max +opt_narenas +opt_prof +opt_prof_accum +opt_prof_active +opt_prof_final +opt_prof_gdump +opt_prof_leak +opt_prof_prefix +opt_quarantine +opt_redzone +opt_stats_print +opt_tcache +opt_utrace +opt_valgrind +opt_xmalloc +opt_zero +p2rz +pages_purge +pow2_ceil +prof_backtrace +prof_boot0 +prof_boot1 +prof_boot2 +prof_bt_count +prof_ctx_get +prof_ctx_set +prof_dump_open +prof_free +prof_gdump +prof_idump +prof_interval +prof_lookup +prof_malloc +prof_mdump +prof_postfork_child +prof_postfork_parent +prof_prefork +prof_promote +prof_realloc +prof_sample_accum_update +prof_sample_threshold_update +prof_tdata_booted +prof_tdata_cleanup +prof_tdata_get +prof_tdata_init +prof_tdata_initialized +prof_tdata_tls +prof_tdata_tsd +prof_tdata_tsd_boot +prof_tdata_tsd_cleanup_wrapper +prof_tdata_tsd_get +prof_tdata_tsd_get_wrapper +prof_tdata_tsd_init_head +prof_tdata_tsd_set +quarantine +quarantine_alloc_hook +quarantine_boot +quarantine_booted +quarantine_cleanup +quarantine_init +quarantine_tls +quarantine_tsd +quarantine_tsd_boot +quarantine_tsd_cleanup_wrapper +quarantine_tsd_get +quarantine_tsd_get_wrapper +quarantine_tsd_init_head +quarantine_tsd_set +register_zone +rtree_delete +rtree_get +rtree_get_locked +rtree_new +rtree_postfork_child +rtree_postfork_parent +rtree_prefork +rtree_set +s2u +sa2u +set_errno +small_size2bin +stats_cactive +stats_cactive_add +stats_cactive_get +stats_cactive_sub +stats_chunks +stats_print +tcache_alloc_easy +tcache_alloc_large +tcache_alloc_small +tcache_alloc_small_hard +tcache_arena_associate +tcache_arena_dissociate +tcache_bin_flush_large +tcache_bin_flush_small +tcache_bin_info +tcache_boot0 +tcache_boot1 +tcache_booted +tcache_create +tcache_dalloc_large +tcache_dalloc_small +tcache_destroy +tcache_enabled_booted +tcache_enabled_get +tcache_enabled_initialized +tcache_enabled_set +tcache_enabled_tls +tcache_enabled_tsd +tcache_enabled_tsd_boot +tcache_enabled_tsd_cleanup_wrapper +tcache_enabled_tsd_get +tcache_enabled_tsd_get_wrapper +tcache_enabled_tsd_init_head +tcache_enabled_tsd_set +tcache_event +tcache_event_hard +tcache_flush +tcache_get +tcache_initialized +tcache_maxclass +tcache_salloc +tcache_stats_merge +tcache_thread_cleanup +tcache_tls +tcache_tsd +tcache_tsd_boot +tcache_tsd_cleanup_wrapper +tcache_tsd_get +tcache_tsd_get_wrapper +tcache_tsd_init_head +tcache_tsd_set +thread_allocated_booted +thread_allocated_initialized +thread_allocated_tls +thread_allocated_tsd +thread_allocated_tsd_boot +thread_allocated_tsd_cleanup_wrapper +thread_allocated_tsd_get +thread_allocated_tsd_get_wrapper +thread_allocated_tsd_init_head +thread_allocated_tsd_set +tsd_init_check_recursion +tsd_init_finish +u2rz diff --git a/include/jemalloc/internal/private_unnamespace.sh 
b/include/jemalloc/internal/private_unnamespace.sh new file mode 100755 index 00000000..23fed8e8 --- /dev/null +++ b/include/jemalloc/internal/private_unnamespace.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +for symbol in `cat $1` ; do + echo "#undef ${symbol}" +done diff --git a/include/jemalloc/internal/prng.h b/include/jemalloc/internal/prng.h index 83a5462b..7b2b0651 100644 --- a/include/jemalloc/internal/prng.h +++ b/include/jemalloc/internal/prng.h @@ -25,7 +25,7 @@ * uint32_t state : Seed value. * const uint32_t a, c : See above discussion. */ -#define prng32(r, lg_range, state, a, c) do { \ +#define prng32(r, lg_range, state, a, c) do { \ assert(lg_range > 0); \ assert(lg_range <= 32); \ \ @@ -35,7 +35,7 @@ } while (false) /* Same as prng32(), but 64 bits of pseudo-randomness, using uint64_t. */ -#define prng64(r, lg_range, state, a, c) do { \ +#define prng64(r, lg_range, state, a, c) do { \ assert(lg_range > 0); \ assert(lg_range <= 64); \ \ diff --git a/include/jemalloc/internal/prof.h b/include/jemalloc/internal/prof.h index 119a5b1b..db056fc4 100644 --- a/include/jemalloc/internal/prof.h +++ b/include/jemalloc/internal/prof.h @@ -129,6 +129,7 @@ struct prof_ctx_s { * limbo due to one of: * - Initializing per thread counters associated with this ctx. * - Preparing to destroy this ctx. + * - Dumping a heap profile that includes this ctx. * nlimbo must be 1 (single destroyer) in order to safely destroy the * ctx. */ @@ -145,7 +146,11 @@ struct prof_ctx_s { * this context. */ ql_head(prof_thr_cnt_t) cnts_ql; + + /* Linkage for list of contexts to be dumped. */ + ql_elm(prof_ctx_t) dump_link; }; +typedef ql_head(prof_ctx_t) prof_ctx_list_t; struct prof_tdata_s { /* @@ -195,7 +200,12 @@ extern bool opt_prof_gdump; /* High-water memory dumping. */ extern bool opt_prof_final; /* Final profile dumping. */ extern bool opt_prof_leak; /* Dump leak summary at exit. */ extern bool opt_prof_accum; /* Report cumulative bytes. */ -extern char opt_prof_prefix[PATH_MAX + 1]; +extern char opt_prof_prefix[ + /* Minimize memory bloat for non-prof builds. */ +#ifdef JEMALLOC_PROF + PATH_MAX + +#endif + 1]; /* * Profile dump interval, measured in bytes allocated. 
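For orientation, the generator pair above is deliberately tiny: private_namespace.sh turns each symbol listed in private_symbols.txt into a JEMALLOC_N() mangling #define, and private_unnamespace.sh emits the matching #undef. A round trip using the first two entries of the private_symbols.txt added above:

    $ sh private_namespace.sh private_symbols.txt | head -n 2
    #define a0calloc JEMALLOC_N(a0calloc)
    #define a0free JEMALLOC_N(a0free)
    $ sh private_unnamespace.sh private_symbols.txt | head -n 2
    #undef a0calloc
    #undef a0free

This replaces the hand-maintained block of #define lines deleted above; the headers are presumably regenerated at build time, so a new private symbol now needs to be listed in exactly one place.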
Each arena triggers a @@ -215,6 +225,11 @@ extern bool prof_promote; void bt_init(prof_bt_t *bt, void **vec); void prof_backtrace(prof_bt_t *bt, unsigned nignore); prof_thr_cnt_t *prof_lookup(prof_bt_t *bt); +#ifdef JEMALLOC_JET +size_t prof_bt_count(void); +typedef int (prof_dump_open_t)(bool, const char *); +extern prof_dump_open_t *prof_dump_open; +#endif void prof_idump(void); bool prof_mdump(const char *filename); void prof_gdump(void); @@ -289,11 +304,11 @@ malloc_tsd_protos(JEMALLOC_ATTR(unused), prof_tdata, prof_tdata_t *) prof_tdata_t *prof_tdata_get(bool create); void prof_sample_threshold_update(prof_tdata_t *prof_tdata); prof_ctx_t *prof_ctx_get(const void *ptr); -void prof_ctx_set(const void *ptr, prof_ctx_t *ctx); +void prof_ctx_set(const void *ptr, size_t usize, prof_ctx_t *ctx); bool prof_sample_accum_update(size_t size); -void prof_malloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt); -void prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt, - size_t old_size, prof_ctx_t *old_ctx); +void prof_malloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt); +void prof_realloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt, + size_t old_usize, prof_ctx_t *old_ctx); void prof_free(const void *ptr, size_t size); #endif @@ -320,6 +335,20 @@ prof_tdata_get(bool create) JEMALLOC_INLINE void prof_sample_threshold_update(prof_tdata_t *prof_tdata) { + /* + * The body of this function is compiled out unless heap profiling is + * enabled, so that it is possible to compile jemalloc with floating + * point support completely disabled. Avoiding floating point code is + * important on memory-constrained systems, but it also enables a + * workaround for versions of glibc that don't properly save/restore + * floating point registers during dynamic lazy symbol loading (which + * internally calls into whatever malloc implementation happens to be + * integrated into the application). Note that some compilers (e.g. + * gcc 4.8) may use floating point registers for fast memory moves, so + * jemalloc must be compiled with such optimizations disabled (e.g. + * -mno-sse) in order for the workaround to be complete. + */ +#ifdef JEMALLOC_PROF uint64_t r; double u; @@ -341,7 +370,7 @@ prof_sample_threshold_update(prof_tdata_t *prof_tdata) * Luc Devroye * Springer-Verlag, New York, 1986 * pp 500 - * (http://cg.scs.carleton.ca/~luc/rnbookindex.html) + * (http://luc.devroye.org/rnbookindex.html) */ prng64(r, 53, prof_tdata->prng_state, UINT64_C(6364136223846793005), UINT64_C(1442695040888963407)); @@ -349,6 +378,7 @@ prof_sample_threshold_update(prof_tdata_t *prof_tdata) prof_tdata->threshold = (uint64_t)(log(u) / log(1.0 - (1.0 / (double)((uint64_t)1U << opt_lg_prof_sample)))) + (uint64_t)1U; +#endif } JEMALLOC_INLINE prof_ctx_t * @@ -371,7 +401,7 @@ prof_ctx_get(const void *ptr) } JEMALLOC_INLINE void -prof_ctx_set(const void *ptr, prof_ctx_t *ctx) +prof_ctx_set(const void *ptr, size_t usize, prof_ctx_t *ctx) { arena_chunk_t *chunk; @@ -381,7 +411,7 @@ prof_ctx_set(const void *ptr, prof_ctx_t *ctx) chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); if (chunk != ptr) { /* Region. 
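The prof_sample_threshold_update() hunks above are the heart of the sampling logic: 53 bits from the prng64() linear congruential macro are scaled into a uniform deviate u, and inverting the geometric CDF yields the number of bytes to skip before the next sample, so that on average one allocation per 2^opt_lg_prof_sample allocated bytes is sampled. A self-contained sketch of the same arithmetic (function names and the exact scaling of u are illustrative; the LCG constants are the ones passed to prng64() above):

    #include <math.h>
    #include <stdint.h>

    /* One prng64() step in function form: advance the LCG state and return
     * the high lg_range bits (a and c as at the call site above). */
    static uint64_t
    lcg64(uint64_t *state, unsigned lg_range, uint64_t a, uint64_t c)
    {
        *state = (*state * a) + c;
        return (*state >> (64 - lg_range));
    }

    /* Bytes until the next sampled allocation, geometrically distributed
     * with mean 2^lg_prof_sample (inverse-CDF method, per the Devroye
     * reference cited above). */
    static uint64_t
    sample_threshold(uint64_t *prng_state, unsigned lg_prof_sample)
    {
        uint64_t r = lcg64(prng_state, 53, UINT64_C(6364136223846793005),
            UINT64_C(1442695040888963407));
        /* Scale into (0, 1]; the +1 sidesteps log(0) in this sketch. */
        double u = (double)(r + 1) / 9007199254740992.0;    /* 2^53 */
        double p = 1.0 / (double)((uint64_t)1U << lg_prof_sample);
        return ((uint64_t)(log(u) / log(1.0 - p)) + 1);
    }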
*/ - arena_prof_ctx_set(ptr, ctx); + arena_prof_ctx_set(ptr, usize, ctx); } else huge_prof_ctx_set(ptr, ctx); } @@ -416,20 +446,20 @@ prof_sample_accum_update(size_t size) } JEMALLOC_INLINE void -prof_malloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt) +prof_malloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt) { cassert(config_prof); assert(ptr != NULL); - assert(size == isalloc(ptr, true)); + assert(usize == isalloc(ptr, true)); if (opt_lg_prof_sample != 0) { - if (prof_sample_accum_update(size)) { + if (prof_sample_accum_update(usize)) { /* * Don't sample. For malloc()-like allocation, it is * always possible to tell in advance how large an * object's usable size will be, so there should never - * be a difference between the size passed to + * be a difference between the usize passed to * PROF_ALLOC_PREP() and prof_malloc(). */ assert((uintptr_t)cnt == (uintptr_t)1U); @@ -437,17 +467,17 @@ prof_malloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt) } if ((uintptr_t)cnt > (uintptr_t)1U) { - prof_ctx_set(ptr, cnt->ctx); + prof_ctx_set(ptr, usize, cnt->ctx); cnt->epoch++; /*********/ mb_write(); /*********/ cnt->cnts.curobjs++; - cnt->cnts.curbytes += size; + cnt->cnts.curbytes += usize; if (opt_prof_accum) { cnt->cnts.accumobjs++; - cnt->cnts.accumbytes += size; + cnt->cnts.accumbytes += usize; } /*********/ mb_write(); @@ -457,12 +487,12 @@ prof_malloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt) mb_write(); /*********/ } else - prof_ctx_set(ptr, (prof_ctx_t *)(uintptr_t)1U); + prof_ctx_set(ptr, usize, (prof_ctx_t *)(uintptr_t)1U); } JEMALLOC_INLINE void -prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt, - size_t old_size, prof_ctx_t *old_ctx) +prof_realloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt, + size_t old_usize, prof_ctx_t *old_ctx) { prof_thr_cnt_t *told_cnt; @@ -470,15 +500,15 @@ prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt, assert(ptr != NULL || (uintptr_t)cnt <= (uintptr_t)1U); if (ptr != NULL) { - assert(size == isalloc(ptr, true)); + assert(usize == isalloc(ptr, true)); if (opt_lg_prof_sample != 0) { - if (prof_sample_accum_update(size)) { + if (prof_sample_accum_update(usize)) { /* - * Don't sample. The size passed to + * Don't sample. The usize passed to * PROF_ALLOC_PREP() was larger than what * actually got allocated, so a backtrace was * captured for this allocation, even though - * its actual size was insufficient to cross + * its actual usize was insufficient to cross * the sample threshold. 
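A note on the cnt->epoch++ increments bracketed by mb_write() in prof_malloc() and prof_realloc() above: this is a seqlock-style publication protocol. The writer leaves the epoch odd while the counters are in flux and even once they are consistent, with write barriers enforcing that order; the dump-side reader copies the counters and retries if the epoch was odd or moved underneath it. In outline (a shape sketch, not the literal dump code, which also reads the epoch through a volatile pointer):

    /* Writer, as in prof_malloc() above: */
    cnt->epoch++;           /* odd: update in progress */
    mb_write();
    cnt->cnts.curobjs++;    /* ... counter updates ... */
    mb_write();
    cnt->epoch++;           /* even: consistent again */
    mb_write();

    /* Reader (dump side), sketched: */
    while (true) {
        unsigned epoch0 = cnt->epoch;
        if (epoch0 & 1U)
            continue;       /* writer mid-update */
        tcnt = cnt->cnts;   /* snapshot the counters */
        if (cnt->epoch == epoch0)
            break;          /* snapshot is consistent */
    }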
*/ cnt = (prof_thr_cnt_t *)(uintptr_t)1U; @@ -495,7 +525,7 @@ prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt, */ malloc_mutex_lock(old_ctx->lock); old_ctx->cnt_merged.curobjs--; - old_ctx->cnt_merged.curbytes -= old_size; + old_ctx->cnt_merged.curbytes -= old_usize; malloc_mutex_unlock(old_ctx->lock); told_cnt = (prof_thr_cnt_t *)(uintptr_t)1U; } @@ -505,23 +535,23 @@ prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt, if ((uintptr_t)told_cnt > (uintptr_t)1U) told_cnt->epoch++; if ((uintptr_t)cnt > (uintptr_t)1U) { - prof_ctx_set(ptr, cnt->ctx); + prof_ctx_set(ptr, usize, cnt->ctx); cnt->epoch++; } else if (ptr != NULL) - prof_ctx_set(ptr, (prof_ctx_t *)(uintptr_t)1U); + prof_ctx_set(ptr, usize, (prof_ctx_t *)(uintptr_t)1U); /*********/ mb_write(); /*********/ if ((uintptr_t)told_cnt > (uintptr_t)1U) { told_cnt->cnts.curobjs--; - told_cnt->cnts.curbytes -= old_size; + told_cnt->cnts.curbytes -= old_usize; } if ((uintptr_t)cnt > (uintptr_t)1U) { cnt->cnts.curobjs++; - cnt->cnts.curbytes += size; + cnt->cnts.curbytes += usize; if (opt_prof_accum) { cnt->cnts.accumobjs++; - cnt->cnts.accumbytes += size; + cnt->cnts.accumbytes += usize; } } /*********/ diff --git a/include/jemalloc/internal/public_namespace.sh b/include/jemalloc/internal/public_namespace.sh new file mode 100755 index 00000000..362109f7 --- /dev/null +++ b/include/jemalloc/internal/public_namespace.sh @@ -0,0 +1,6 @@ +#!/bin/sh + +for nm in `cat $1` ; do + n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'` + echo "#define je_${n} JEMALLOC_N(${n})" +done diff --git a/include/jemalloc/internal/public_unnamespace.sh b/include/jemalloc/internal/public_unnamespace.sh new file mode 100755 index 00000000..4239d177 --- /dev/null +++ b/include/jemalloc/internal/public_unnamespace.sh @@ -0,0 +1,6 @@ +#!/bin/sh + +for nm in `cat $1` ; do + n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'` + echo "#undef je_${n}" +done diff --git a/include/jemalloc/internal/ql.h b/include/jemalloc/internal/ql.h index a9ed2393..f70c5f6f 100644 --- a/include/jemalloc/internal/ql.h +++ b/include/jemalloc/internal/ql.h @@ -1,61 +1,61 @@ /* * List definitions. */ -#define ql_head(a_type) \ +#define ql_head(a_type) \ struct { \ a_type *qlh_first; \ } -#define ql_head_initializer(a_head) {NULL} +#define ql_head_initializer(a_head) {NULL} -#define ql_elm(a_type) qr(a_type) +#define ql_elm(a_type) qr(a_type) /* List functions. */ -#define ql_new(a_head) do { \ +#define ql_new(a_head) do { \ (a_head)->qlh_first = NULL; \ } while (0) -#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field) +#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field) -#define ql_first(a_head) ((a_head)->qlh_first) +#define ql_first(a_head) ((a_head)->qlh_first) -#define ql_last(a_head, a_field) \ +#define ql_last(a_head, a_field) \ ((ql_first(a_head) != NULL) \ ? qr_prev(ql_first(a_head), a_field) : NULL) -#define ql_next(a_head, a_elm, a_field) \ +#define ql_next(a_head, a_elm, a_field) \ ((ql_last(a_head, a_field) != (a_elm)) \ ? qr_next((a_elm), a_field) : NULL) -#define ql_prev(a_head, a_elm, a_field) \ +#define ql_prev(a_head, a_elm, a_field) \ ((ql_first(a_head) != (a_elm)) ? 
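The public_namespace.sh/public_unnamespace.sh pair above mirrors the private-symbol scripts, but reads colon-separated entries and keeps only the first field (that is what the tr/awk pipeline does). Assuming an illustrative public_symbols.txt entry of the form malloc:malloc, the round trip looks like:

    $ printf 'malloc:malloc\n' > public_symbols.txt
    $ sh public_namespace.sh public_symbols.txt
    #define je_malloc JEMALLOC_N(malloc)
    $ sh public_unnamespace.sh public_symbols.txt
    #undef je_malloc

The second field is unused by these two scripts; public_symbols.txt itself is produced by configure, outside this excerpt, so its exact format is not shown here.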
qr_prev((a_elm), a_field) \ : NULL) -#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \ +#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \ qr_before_insert((a_qlelm), (a_elm), a_field); \ if (ql_first(a_head) == (a_qlelm)) { \ ql_first(a_head) = (a_elm); \ } \ } while (0) -#define ql_after_insert(a_qlelm, a_elm, a_field) \ +#define ql_after_insert(a_qlelm, a_elm, a_field) \ qr_after_insert((a_qlelm), (a_elm), a_field) -#define ql_head_insert(a_head, a_elm, a_field) do { \ +#define ql_head_insert(a_head, a_elm, a_field) do { \ if (ql_first(a_head) != NULL) { \ qr_before_insert(ql_first(a_head), (a_elm), a_field); \ } \ ql_first(a_head) = (a_elm); \ } while (0) -#define ql_tail_insert(a_head, a_elm, a_field) do { \ +#define ql_tail_insert(a_head, a_elm, a_field) do { \ if (ql_first(a_head) != NULL) { \ qr_before_insert(ql_first(a_head), (a_elm), a_field); \ } \ ql_first(a_head) = qr_next((a_elm), a_field); \ } while (0) -#define ql_remove(a_head, a_elm, a_field) do { \ +#define ql_remove(a_head, a_elm, a_field) do { \ if (ql_first(a_head) == (a_elm)) { \ ql_first(a_head) = qr_next(ql_first(a_head), a_field); \ } \ @@ -66,18 +66,18 @@ struct { \ } \ } while (0) -#define ql_head_remove(a_head, a_type, a_field) do { \ +#define ql_head_remove(a_head, a_type, a_field) do { \ a_type *t = ql_first(a_head); \ ql_remove((a_head), t, a_field); \ } while (0) -#define ql_tail_remove(a_head, a_type, a_field) do { \ +#define ql_tail_remove(a_head, a_type, a_field) do { \ a_type *t = ql_last(a_head, a_field); \ ql_remove((a_head), t, a_field); \ } while (0) -#define ql_foreach(a_var, a_head, a_field) \ +#define ql_foreach(a_var, a_head, a_field) \ qr_foreach((a_var), ql_first(a_head), a_field) -#define ql_reverse_foreach(a_var, a_head, a_field) \ +#define ql_reverse_foreach(a_var, a_head, a_field) \ qr_reverse_foreach((a_var), ql_first(a_head), a_field) diff --git a/include/jemalloc/internal/qr.h b/include/jemalloc/internal/qr.h index fe22352f..602944b9 100644 --- a/include/jemalloc/internal/qr.h +++ b/include/jemalloc/internal/qr.h @@ -1,28 +1,28 @@ /* Ring definitions. */ -#define qr(a_type) \ +#define qr(a_type) \ struct { \ a_type *qre_next; \ a_type *qre_prev; \ } /* Ring functions. 
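The ql.h churn above is purely cosmetic (whitespace after the macro names), which makes this a convenient place to recall how these intrusive-list macros compose, since both the new prof dump list and the tsd init list in this patch rely on them. A minimal usage sketch that compiles against ql.h/qr.h (type and variable names are illustrative):

    typedef struct widget_s widget_t;
    struct widget_s {
        int id;
        ql_elm(widget_t) link;  /* intrusive linkage: a qr(widget_t) */
    };

    static void
    widget_list_demo(void)
    {
        /* ql_head() expands to an anonymous struct with a qlh_first. */
        ql_head(widget_t) widgets;
        widget_t a, b, *w;

        ql_new(&widgets);
        a.id = 1; ql_elm_new(&a, link);
        b.id = 2; ql_elm_new(&b, link);
        ql_tail_insert(&widgets, &a, link);
        ql_tail_insert(&widgets, &b, link);
        ql_foreach(w, &widgets, link) {
            /* visits a (id 1), then b (id 2) */
        }
        ql_remove(&widgets, &a, link);
    }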
*/ -#define qr_new(a_qr, a_field) do { \ +#define qr_new(a_qr, a_field) do { \ (a_qr)->a_field.qre_next = (a_qr); \ (a_qr)->a_field.qre_prev = (a_qr); \ } while (0) -#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next) +#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next) -#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev) +#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev) -#define qr_before_insert(a_qrelm, a_qr, a_field) do { \ +#define qr_before_insert(a_qrelm, a_qr, a_field) do { \ (a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \ (a_qr)->a_field.qre_next = (a_qrelm); \ (a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \ (a_qrelm)->a_field.qre_prev = (a_qr); \ } while (0) -#define qr_after_insert(a_qrelm, a_qr, a_field) \ +#define qr_after_insert(a_qrelm, a_qr, a_field) \ do \ { \ (a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \ @@ -31,7 +31,7 @@ struct { \ (a_qrelm)->a_field.qre_next = (a_qr); \ } while (0) -#define qr_meld(a_qr_a, a_qr_b, a_field) do { \ +#define qr_meld(a_qr_a, a_qr_b, a_field) do { \ void *t; \ (a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \ (a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \ @@ -42,10 +42,10 @@ struct { \ /* qr_meld() and qr_split() are functionally equivalent, so there's no need to * have two copies of the code. */ -#define qr_split(a_qr_a, a_qr_b, a_field) \ +#define qr_split(a_qr_a, a_qr_b, a_field) \ qr_meld((a_qr_a), (a_qr_b), a_field) -#define qr_remove(a_qr, a_field) do { \ +#define qr_remove(a_qr, a_field) do { \ (a_qr)->a_field.qre_prev->a_field.qre_next \ = (a_qr)->a_field.qre_next; \ (a_qr)->a_field.qre_next->a_field.qre_prev \ @@ -54,13 +54,13 @@ struct { \ (a_qr)->a_field.qre_prev = (a_qr); \ } while (0) -#define qr_foreach(var, a_qr, a_field) \ +#define qr_foreach(var, a_qr, a_field) \ for ((var) = (a_qr); \ (var) != NULL; \ (var) = (((var)->a_field.qre_next != (a_qr)) \ ? (var)->a_field.qre_next : NULL)) -#define qr_reverse_foreach(var, a_qr, a_field) \ +#define qr_reverse_foreach(var, a_qr, a_field) \ for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \ (var) != NULL; \ (var) = (((var) != (a_qr)) \ diff --git a/include/jemalloc/internal/rb.h b/include/jemalloc/internal/rb.h index 7b675f09..423802eb 100644 --- a/include/jemalloc/internal/rb.h +++ b/include/jemalloc/internal/rb.h @@ -22,10 +22,6 @@ #ifndef RB_H_ #define RB_H_ -#if 0 -__FBSDID("$FreeBSD: head/lib/libc/stdlib/rb.h 204493 2010-02-28 22:57:13Z jasone $"); -#endif - #ifdef RB_COMPACT /* Node structure. */ #define rb_node(a_type) \ diff --git a/include/jemalloc/internal/rtree.h b/include/jemalloc/internal/rtree.h index 9bd98548..bc74769f 100644 --- a/include/jemalloc/internal/rtree.h +++ b/include/jemalloc/internal/rtree.h @@ -14,17 +14,18 @@ typedef struct rtree_s rtree_t; * Size of each radix tree node (must be a power of 2). This impacts tree * depth. 
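The qr_meld()/qr_split() equivalence preserved above rewards a closer look: the five-assignment pointer swap is an involution. Applied to elements of two distinct rings it splices them into one; applied to two elements of the same ring, the identical swap cuts it back apart, which is why qr_split() can simply expand to qr_meld(). Schematically:

    /* Given rings {a1, a2} and {b1, b2}: */
    qr_meld(a1, b1, link);  /* one ring: a1, a2, b1, b2 */
    qr_meld(a1, b1, link);  /* same call splits it: {a1, a2} and {b1, b2} */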
*/ -#if (LG_SIZEOF_PTR == 2) -# define RTREE_NODESIZE (1U << 14) -#else -# define RTREE_NODESIZE CACHELINE -#endif +#define RTREE_NODESIZE (1U << 16) + +typedef void *(rtree_alloc_t)(size_t); +typedef void (rtree_dalloc_t)(void *); #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS struct rtree_s { + rtree_alloc_t *alloc; + rtree_dalloc_t *dalloc; malloc_mutex_t mutex; void **root; unsigned height; @@ -35,7 +36,8 @@ struct rtree_s { /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS -rtree_t *rtree_new(unsigned bits); +rtree_t *rtree_new(unsigned bits, rtree_alloc_t *alloc, rtree_dalloc_t *dalloc); +void rtree_delete(rtree_t *rtree); void rtree_prefork(rtree_t *rtree); void rtree_postfork_parent(rtree_t *rtree); void rtree_postfork_child(rtree_t *rtree); @@ -45,20 +47,20 @@ void rtree_postfork_child(rtree_t *rtree); #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE -#ifndef JEMALLOC_DEBUG -void *rtree_get_locked(rtree_t *rtree, uintptr_t key); +#ifdef JEMALLOC_DEBUG +uint8_t rtree_get_locked(rtree_t *rtree, uintptr_t key); #endif -void *rtree_get(rtree_t *rtree, uintptr_t key); -bool rtree_set(rtree_t *rtree, uintptr_t key, void *val); +uint8_t rtree_get(rtree_t *rtree, uintptr_t key); +bool rtree_set(rtree_t *rtree, uintptr_t key, uint8_t val); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_)) #define RTREE_GET_GENERATE(f) \ /* The least significant bits of the key are ignored. */ \ -JEMALLOC_INLINE void * \ +JEMALLOC_INLINE uint8_t \ f(rtree_t *rtree, uintptr_t key) \ { \ - void *ret; \ + uint8_t ret; \ uintptr_t subkey; \ unsigned i, lshift, height, bits; \ void **node, **child; \ @@ -68,12 +70,12 @@ f(rtree_t *rtree, uintptr_t key) \ i < height - 1; \ i++, lshift += bits, node = child) { \ bits = rtree->level2bits[i]; \ - subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR + \ + subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR + \ 3)) - bits); \ child = (void**)node[subkey]; \ if (child == NULL) { \ RTREE_UNLOCK(&rtree->mutex); \ - return (NULL); \ + return (0); \ } \ } \ \ @@ -84,7 +86,10 @@ f(rtree_t *rtree, uintptr_t key) \ bits = rtree->level2bits[i]; \ subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - \ bits); \ - ret = node[subkey]; \ + { \ + uint8_t *leaf = (uint8_t *)node; \ + ret = leaf[subkey]; \ + } \ RTREE_UNLOCK(&rtree->mutex); \ \ RTREE_GET_VALIDATE \ @@ -123,7 +128,7 @@ RTREE_GET_GENERATE(rtree_get) #undef RTREE_GET_VALIDATE JEMALLOC_INLINE bool -rtree_set(rtree_t *rtree, uintptr_t key, void *val) +rtree_set(rtree_t *rtree, uintptr_t key, uint8_t val) { uintptr_t subkey; unsigned i, lshift, height, bits; @@ -138,14 +143,14 @@ rtree_set(rtree_t *rtree, uintptr_t key, void *val) bits); child = (void**)node[subkey]; if (child == NULL) { - child = (void**)base_alloc(sizeof(void *) << - rtree->level2bits[i+1]); + size_t size = ((i + 1 < height - 1) ? sizeof(void *) + : (sizeof(uint8_t))) << rtree->level2bits[i+1]; + child = (void**)rtree->alloc(size); if (child == NULL) { malloc_mutex_unlock(&rtree->mutex); return (true); } - memset(child, 0, sizeof(void *) << - rtree->level2bits[i+1]); + memset(child, 0, size); node[subkey] = child; } } @@ -153,7 +158,10 @@ rtree_set(rtree_t *rtree, uintptr_t key, void *val) /* node is a leaf, so it contains values rather than node pointers. 
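Three rtree changes land together above: stored values shrink from void * to uint8_t (leaf levels now hold one byte per key, 8x denser on 64-bit, which suits the tree's role as a chunk-membership map), the node size becomes a flat 1U << 16 instead of varying with pointer width, and allocation is routed through injectable rtree_alloc_t/rtree_dalloc_t hooks, which together with the new rtree_delete() presumably serves the unit tests. The per-level key decomposition is unchanged; in function form it is simply (names here are illustrative):

    #include <stdint.h>

    /* Subkey for one radix-tree level: drop the lshift bits consumed by
     * higher levels, then keep this level's top `bits` bits. */
    static uintptr_t
    rtree_subkey(uintptr_t key, unsigned lshift, unsigned bits)
    {
        unsigned ptr_bits = sizeof(uintptr_t) * 8;
        return ((key << lshift) >> (ptr_bits - bits));
    }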
*/ bits = rtree->level2bits[i]; subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - bits); - node[subkey] = val; + { + uint8_t *leaf = (uint8_t *)node; + leaf[subkey] = val; + } malloc_mutex_unlock(&rtree->mutex); return (false); diff --git a/include/jemalloc/internal/tcache.h b/include/jemalloc/internal/tcache.h index d4eecdee..c3d4b58d 100644 --- a/include/jemalloc/internal/tcache.h +++ b/include/jemalloc/internal/tcache.h @@ -297,6 +297,7 @@ tcache_alloc_small(tcache_t *tcache, size_t size, bool zero) binind = SMALL_SIZE2BIN(size); assert(binind < NBINS); tbin = &tcache->tbins[binind]; + size = arena_bin_info[binind].reg_size; ret = tcache_alloc_easy(tbin); if (ret == NULL) { ret = tcache_alloc_small_hard(tcache, tbin, binind); diff --git a/include/jemalloc/internal/tsd.h b/include/jemalloc/internal/tsd.h index 0037cf35..9fb4a23e 100644 --- a/include/jemalloc/internal/tsd.h +++ b/include/jemalloc/internal/tsd.h @@ -6,6 +6,12 @@ typedef bool (*malloc_tsd_cleanup_t)(void); +#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ + !defined(_WIN32)) +typedef struct tsd_init_block_s tsd_init_block_t; +typedef struct tsd_init_head_s tsd_init_head_t; +#endif + /* * TLS/TSD-agnostic macro-based implementation of thread-specific data. There * are four macros that support (at least) three use cases: file-private, @@ -75,12 +81,13 @@ extern __thread a_type a_name##_tls; \ extern pthread_key_t a_name##_tsd; \ extern bool a_name##_booted; #elif (defined(_WIN32)) -#define malloc_tsd_externs(a_name, a_type) \ +#define malloc_tsd_externs(a_name, a_type) \ extern DWORD a_name##_tsd; \ extern bool a_name##_booted; #else #define malloc_tsd_externs(a_name, a_type) \ extern pthread_key_t a_name##_tsd; \ +extern tsd_init_head_t a_name##_tsd_init_head; \ extern bool a_name##_booted; #endif @@ -105,6 +112,10 @@ a_attr bool a_name##_booted = false; #else #define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ a_attr pthread_key_t a_name##_tsd; \ +a_attr tsd_init_head_t a_name##_tsd_init_head = { \ + ql_head_initializer(blocks), \ + MALLOC_MUTEX_INITIALIZER \ +}; \ a_attr bool a_name##_booted = false; #endif @@ -333,8 +344,14 @@ a_name##_tsd_get_wrapper(void) \ pthread_getspecific(a_name##_tsd); \ \ if (wrapper == NULL) { \ + tsd_init_block_t block; \ + wrapper = tsd_init_check_recursion( \ + &a_name##_tsd_init_head, &block); \ + if (wrapper) \ + return (wrapper); \ wrapper = (a_name##_tsd_wrapper_t *) \ malloc_tsd_malloc(sizeof(a_name##_tsd_wrapper_t)); \ + block.data = wrapper; \ if (wrapper == NULL) { \ malloc_write(": Error allocating" \ " TSD for "#a_name"\n"); \ @@ -350,6 +367,7 @@ a_name##_tsd_get_wrapper(void) \ " TSD for "#a_name"\n"); \ abort(); \ } \ + tsd_init_finish(&a_name##_tsd_init_head, &block); \ } \ return (wrapper); \ } \ @@ -379,6 +397,19 @@ a_name##_tsd_set(a_type *val) \ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS +#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ + !defined(_WIN32)) +struct tsd_init_block_s { + ql_elm(tsd_init_block_t) link; + pthread_t thread; + void *data; +}; +struct tsd_init_head_s { + ql_head(tsd_init_block_t) blocks; + malloc_mutex_t lock; +}; +#endif + #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS @@ -388,6 +419,12 @@ void malloc_tsd_dalloc(void *wrapper); void malloc_tsd_no_cleanup(void *); void malloc_tsd_cleanup_register(bool (*f)(void)); void 
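The one-line tcache_alloc_small() change above (size = arena_bin_info[binind].reg_size;) is easy to miss: from that point on the function operates on the bin's real region size rather than the caller's request, so later uses of size in the function (e.g. the zero-fill memset) cover the full region. A 9-byte request against a hypothetical 16-byte bin illustrates the difference:

    size_t request = 9;
    size_t binind = SMALL_SIZE2BIN(request);        /* selects the 16-byte bin */
    size_t size = arena_bin_info[binind].reg_size;  /* 16, not 9 */
    /* Fills and accounting keyed on size now span all 16 bytes. */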
malloc_tsd_boot(void); +#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ + !defined(_WIN32)) +void *tsd_init_check_recursion(tsd_init_head_t *head, + tsd_init_block_t *block); +void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block); +#endif #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ diff --git a/include/jemalloc/internal/util.h b/include/jemalloc/internal/util.h index 84796936..6b938f74 100644 --- a/include/jemalloc/internal/util.h +++ b/include/jemalloc/internal/util.h @@ -14,7 +14,7 @@ * Wrap a cpp argument that contains commas such that it isn't broken up into * multiple arguments. */ -#define JEMALLOC_CONCAT(...) __VA_ARGS__ +#define JEMALLOC_ARG_CONCAT(...) __VA_ARGS__ /* * Silence compiler warnings due to uninitialized values. This is used @@ -42,12 +42,6 @@ } while (0) #endif -/* Use to assert a particular configuration, e.g., cassert(config_debug). */ -#define cassert(c) do { \ - if ((c) == false) \ - assert(false); \ -} while (0) - #ifndef not_reached #define not_reached() do { \ if (config_debug) { \ @@ -69,10 +63,18 @@ } while (0) #endif +#ifndef assert_not_implemented #define assert_not_implemented(e) do { \ if (config_debug && !(e)) \ not_implemented(); \ } while (0) +#endif + +/* Use to assert a particular configuration, e.g., cassert(config_debug). */ +#define cassert(c) do { \ + if ((c) == false) \ + not_reached(); \ +} while (0) #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ @@ -82,8 +84,9 @@ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS -int buferror(char *buf, size_t buflen); -uintmax_t malloc_strtoumax(const char *nptr, char **endptr, int base); +int buferror(int err, char *buf, size_t buflen); +uintmax_t malloc_strtoumax(const char *restrict nptr, + char **restrict endptr, int base); void malloc_write(const char *s); /* @@ -107,7 +110,6 @@ void malloc_printf(const char *format, ...) #ifndef JEMALLOC_ENABLE_INLINE size_t pow2_ceil(size_t x); -void malloc_write(const char *s); void set_errno(int errnum); int get_errno(void); #endif diff --git a/include/jemalloc/jemalloc.h.in b/include/jemalloc/jemalloc.h.in deleted file mode 100644 index 31b1304a..00000000 --- a/include/jemalloc/jemalloc.h.in +++ /dev/null @@ -1,157 +0,0 @@ -#ifndef JEMALLOC_H_ -#define JEMALLOC_H_ -#ifdef __cplusplus -extern "C" { -#endif - -#include -#include - -#define JEMALLOC_VERSION "@jemalloc_version@" -#define JEMALLOC_VERSION_MAJOR @jemalloc_version_major@ -#define JEMALLOC_VERSION_MINOR @jemalloc_version_minor@ -#define JEMALLOC_VERSION_BUGFIX @jemalloc_version_bugfix@ -#define JEMALLOC_VERSION_NREV @jemalloc_version_nrev@ -#define JEMALLOC_VERSION_GID "@jemalloc_version_gid@" - -#include "jemalloc_defs@install_suffix@.h" - -#ifdef JEMALLOC_EXPERIMENTAL -#define ALLOCM_LG_ALIGN(la) (la) -#if LG_SIZEOF_PTR == 2 -#define ALLOCM_ALIGN(a) (ffs(a)-1) -#else -#define ALLOCM_ALIGN(a) ((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31) -#endif -#define ALLOCM_ZERO ((int)0x40) -#define ALLOCM_NO_MOVE ((int)0x80) -/* Bias arena index bits so that 0 encodes "ALLOCM_ARENA() unspecified". 
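The tsd_init_head/tsd_init_block machinery above guards against a subtle reentrancy on platforms that fall back to pthreads TSD: allocating the wrapper calls back into malloc, which can recurse into the same wrapper-initialization path and, without a guard, never terminate. Only the prototypes appear in these hunks; the definitions live in tsd.c, outside this excerpt. A plausible implementation consistent with the structs above (list-walk details inferred, not taken from the patch):

    void *
    tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block)
    {
        pthread_t self = pthread_self();
        tsd_init_block_t *iter;

        /* Already initializing on this thread? Hand back the
         * in-progress data instead of recursing. */
        malloc_mutex_lock(&head->lock);
        ql_foreach(iter, &head->blocks, link) {
            if (pthread_equal(iter->thread, self)) {
                malloc_mutex_unlock(&head->lock);
                return (iter->data);
            }
        }
        /* First entry on this thread: register the in-progress block. */
        ql_elm_new(block, link);
        block->thread = self;
        ql_tail_insert(&head->blocks, block, link);
        malloc_mutex_unlock(&head->lock);
        return (NULL);
    }

    void
    tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block)
    {
        malloc_mutex_lock(&head->lock);
        ql_remove(&head->blocks, block, link);
        malloc_mutex_unlock(&head->lock);
    }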
*/ -#define ALLOCM_ARENA(a) ((int)(((a)+1) << 8)) - -#define ALLOCM_SUCCESS 0 -#define ALLOCM_ERR_OOM 1 -#define ALLOCM_ERR_NOT_MOVED 2 -#endif - -/* - * The je_ prefix on the following public symbol declarations is an artifact of - * namespace management, and should be omitted in application code unless - * JEMALLOC_NO_DEMANGLE is defined (see below). - */ -extern JEMALLOC_EXPORT const char *je_malloc_conf; -extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque, - const char *s); - -JEMALLOC_EXPORT void *je_malloc(size_t size) JEMALLOC_ATTR(malloc); -JEMALLOC_EXPORT void *je_calloc(size_t num, size_t size) - JEMALLOC_ATTR(malloc); -JEMALLOC_EXPORT int je_posix_memalign(void **memptr, size_t alignment, - size_t size) JEMALLOC_ATTR(nonnull(1)); -JEMALLOC_EXPORT void *je_aligned_alloc(size_t alignment, size_t size) - JEMALLOC_ATTR(malloc); -JEMALLOC_EXPORT void *je_realloc(void *ptr, size_t size); -JEMALLOC_EXPORT void je_free(void *ptr); - -#ifdef JEMALLOC_OVERRIDE_MEMALIGN -JEMALLOC_EXPORT void * je_memalign(size_t alignment, size_t size) - JEMALLOC_ATTR(malloc); -#endif - -#ifdef JEMALLOC_OVERRIDE_VALLOC -JEMALLOC_EXPORT void * je_valloc(size_t size) JEMALLOC_ATTR(malloc); -#endif - -JEMALLOC_EXPORT size_t je_malloc_usable_size( - JEMALLOC_USABLE_SIZE_CONST void *ptr); -JEMALLOC_EXPORT void je_malloc_stats_print(void (*write_cb)(void *, - const char *), void *je_cbopaque, const char *opts); -JEMALLOC_EXPORT int je_mallctl(const char *name, void *oldp, - size_t *oldlenp, void *newp, size_t newlen); -JEMALLOC_EXPORT int je_mallctlnametomib(const char *name, size_t *mibp, - size_t *miblenp); -JEMALLOC_EXPORT int je_mallctlbymib(const size_t *mib, size_t miblen, - void *oldp, size_t *oldlenp, void *newp, size_t newlen); - -#ifdef JEMALLOC_EXPERIMENTAL -JEMALLOC_EXPORT int je_allocm(void **ptr, size_t *rsize, size_t size, - int flags) JEMALLOC_ATTR(nonnull(1)); -JEMALLOC_EXPORT int je_rallocm(void **ptr, size_t *rsize, size_t size, - size_t extra, int flags) JEMALLOC_ATTR(nonnull(1)); -JEMALLOC_EXPORT int je_sallocm(const void *ptr, size_t *rsize, int flags) - JEMALLOC_ATTR(nonnull(1)); -JEMALLOC_EXPORT int je_dallocm(void *ptr, int flags) - JEMALLOC_ATTR(nonnull(1)); -JEMALLOC_EXPORT int je_nallocm(size_t *rsize, size_t size, int flags); -#endif - -/* - * By default application code must explicitly refer to mangled symbol names, - * so that it is possible to use jemalloc in conjunction with another allocator - * in the same application. Define JEMALLOC_MANGLE in order to cause automatic - * name mangling that matches the API prefixing that happened as a result of - * --with-mangling and/or --with-jemalloc-prefix configuration settings. 
- */ -#ifdef JEMALLOC_MANGLE -#ifndef JEMALLOC_NO_DEMANGLE -#define JEMALLOC_NO_DEMANGLE -#endif -#define malloc_conf je_malloc_conf -#define malloc_message je_malloc_message -#define malloc je_malloc -#define calloc je_calloc -#define posix_memalign je_posix_memalign -#define aligned_alloc je_aligned_alloc -#define realloc je_realloc -#define free je_free -#define malloc_usable_size je_malloc_usable_size -#define malloc_stats_print je_malloc_stats_print -#define mallctl je_mallctl -#define mallctlnametomib je_mallctlnametomib -#define mallctlbymib je_mallctlbymib -#define memalign je_memalign -#define valloc je_valloc -#ifdef JEMALLOC_EXPERIMENTAL -#define allocm je_allocm -#define rallocm je_rallocm -#define sallocm je_sallocm -#define dallocm je_dallocm -#define nallocm je_nallocm -#endif -#endif - -/* - * The je_* macros can be used as stable alternative names for the public - * jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily meant - * for use in jemalloc itself, but it can be used by application code to - * provide isolation from the name mangling specified via --with-mangling - * and/or --with-jemalloc-prefix. - */ -#ifndef JEMALLOC_NO_DEMANGLE -#undef je_malloc_conf -#undef je_malloc_message -#undef je_malloc -#undef je_calloc -#undef je_posix_memalign -#undef je_aligned_alloc -#undef je_realloc -#undef je_free -#undef je_malloc_usable_size -#undef je_malloc_stats_print -#undef je_mallctl -#undef je_mallctlnametomib -#undef je_mallctlbymib -#undef je_memalign -#undef je_valloc -#ifdef JEMALLOC_EXPERIMENTAL -#undef je_allocm -#undef je_rallocm -#undef je_sallocm -#undef je_dallocm -#undef je_nallocm -#endif -#endif - -#ifdef __cplusplus -}; -#endif -#endif /* JEMALLOC_H_ */ diff --git a/include/jemalloc/jemalloc.sh b/include/jemalloc/jemalloc.sh new file mode 100755 index 00000000..e4738eba --- /dev/null +++ b/include/jemalloc/jemalloc.sh @@ -0,0 +1,28 @@ +#!/bin/sh + +objroot=$1 + +cat < +#include + +#define JEMALLOC_VERSION "@jemalloc_version@" +#define JEMALLOC_VERSION_MAJOR @jemalloc_version_major@ +#define JEMALLOC_VERSION_MINOR @jemalloc_version_minor@ +#define JEMALLOC_VERSION_BUGFIX @jemalloc_version_bugfix@ +#define JEMALLOC_VERSION_NREV @jemalloc_version_nrev@ +#define JEMALLOC_VERSION_GID "@jemalloc_version_gid@" + +# define MALLOCX_LG_ALIGN(la) (la) +# if LG_SIZEOF_PTR == 2 +# define MALLOCX_ALIGN(a) (ffs(a)-1) +# else +# define MALLOCX_ALIGN(a) \ + ((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31) +# endif +# define MALLOCX_ZERO ((int)0x40) +/* Bias arena index bits so that 0 encodes "MALLOCX_ARENA() unspecified". */ +# define MALLOCX_ARENA(a) ((int)(((a)+1) << 8)) + +#ifdef JEMALLOC_EXPERIMENTAL +# define ALLOCM_LG_ALIGN(la) (la) +# if LG_SIZEOF_PTR == 2 +# define ALLOCM_ALIGN(a) (ffs(a)-1) +# else +# define ALLOCM_ALIGN(a) \ + ((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31) +# endif +# define ALLOCM_ZERO ((int)0x40) +# define ALLOCM_NO_MOVE ((int)0x80) +/* Bias arena index bits so that 0 encodes "ALLOCM_ARENA() unspecified". 
*/ +# define ALLOCM_ARENA(a) ((int)(((a)+1) << 8)) +# define ALLOCM_SUCCESS 0 +# define ALLOCM_ERR_OOM 1 +# define ALLOCM_ERR_NOT_MOVED 2 +#endif + +#ifdef JEMALLOC_HAVE_ATTR +# define JEMALLOC_ATTR(s) __attribute__((s)) +# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default")) +# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s)) +# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s)) +# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline) +#elif _MSC_VER +# define JEMALLOC_ATTR(s) +# ifdef DLLEXPORT +# define JEMALLOC_EXPORT __declspec(dllexport) +# else +# define JEMALLOC_EXPORT __declspec(dllimport) +# endif +# define JEMALLOC_ALIGNED(s) __declspec(align(s)) +# define JEMALLOC_SECTION(s) __declspec(allocate(s)) +# define JEMALLOC_NOINLINE __declspec(noinline) +#else +# define JEMALLOC_ATTR(s) +# define JEMALLOC_EXPORT +# define JEMALLOC_ALIGNED(s) +# define JEMALLOC_SECTION(s) +# define JEMALLOC_NOINLINE +#endif diff --git a/include/jemalloc/jemalloc_mangle.sh b/include/jemalloc/jemalloc_mangle.sh new file mode 100755 index 00000000..df328b78 --- /dev/null +++ b/include/jemalloc/jemalloc_mangle.sh @@ -0,0 +1,45 @@ +#!/bin/sh + +public_symbols_txt=$1 +symbol_prefix=$2 + +cat <nactive + + add_pages) << LG_PAGE) - CHUNK_CEILING((arena->nactive - + sub_pages) << LG_PAGE); + if (cactive_diff != 0) + stats_cactive_add(cactive_diff); + } +} + +static void +arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind, + size_t flag_dirty, size_t need_pages) +{ + size_t total_pages, rem_pages; - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE); - flag_dirty = arena_mapbits_dirty_get(chunk, run_ind); total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >> LG_PAGE; assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) == flag_dirty); - need_pages = (size >> LG_PAGE); - assert(need_pages > 0); assert(need_pages <= total_pages); rem_pages = total_pages - need_pages; arena_avail_remove(arena, chunk, run_ind, total_pages, true, true); - if (config_stats) { - /* - * Update stats_cactive if nactive is crossing a chunk - * multiple. - */ - size_t cactive_diff = CHUNK_CEILING((arena->nactive + - need_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive << - LG_PAGE); - if (cactive_diff != 0) - stats_cactive_add(cactive_diff); - } + arena_cactive_update(arena, need_pages, 0); arena->nactive += need_pages; /* Keep track of trailing unused pages for later use. */ if (rem_pages > 0) { if (flag_dirty != 0) { - arena_mapbits_unallocated_set(chunk, run_ind+need_pages, - (rem_pages << LG_PAGE), CHUNK_MAP_DIRTY); + arena_mapbits_unallocated_set(chunk, + run_ind+need_pages, (rem_pages << LG_PAGE), + flag_dirty); arena_mapbits_unallocated_set(chunk, run_ind+total_pages-1, (rem_pages << LG_PAGE), - CHUNK_MAP_DIRTY); + flag_dirty); } else { arena_mapbits_unallocated_set(chunk, run_ind+need_pages, (rem_pages << LG_PAGE), @@ -445,166 +405,217 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large, arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages, false, true); } +} - /* - * Update the page map separately for large vs. small runs, since it is - * possible to avoid iteration for large mallocs. - */ - if (large) { - if (zero) { - if (flag_dirty == 0) { - /* - * The run is clean, so some pages may be - * zeroed (i.e. never before touched). 
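The arena_cactive_update() helper factored out above keeps the stats_cactive counter in chunk multiples, so it only moves when arena->nactive crosses a chunk boundary, and the atomic update is skipped otherwise. Concretely, with 4 KiB pages (LG_PAGE == 12) and the then-default 4 MiB chunks, i.e. 1024 pages per chunk, used here purely for illustration:

    /* nactive = 1000, add_pages = 100:
     *   CHUNK_CEILING(1100 << LG_PAGE) = 8 MiB
     *   CHUNK_CEILING(1000 << LG_PAGE) = 4 MiB
     *   cactive_diff = 4 MiB -> stats_cactive_add(4 MiB)
     *
     * nactive = 1000, add_pages = 10:
     *   both ceilings equal 4 MiB -> cactive_diff == 0, and the
     *   stats update is skipped entirely. */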
- */ - for (i = 0; i < need_pages; i++) { - if (arena_mapbits_unzeroed_get(chunk, - run_ind+i) != 0) { - arena_run_zero(chunk, run_ind+i, - 1); - } else if (config_debug) { - arena_run_page_validate_zeroed( - chunk, run_ind+i); - } else { - arena_run_page_mark_zeroed( - chunk, run_ind+i); - } +static void +arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size, + bool remove, bool zero) +{ + arena_chunk_t *chunk; + size_t flag_dirty, run_ind, need_pages, i; + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); + run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE); + flag_dirty = arena_mapbits_dirty_get(chunk, run_ind); + need_pages = (size >> LG_PAGE); + assert(need_pages > 0); + + if (remove) { + arena_run_split_remove(arena, chunk, run_ind, flag_dirty, + need_pages); + } + + if (zero) { + if (flag_dirty == 0) { + /* + * The run is clean, so some pages may be zeroed (i.e. + * never before touched). + */ + for (i = 0; i < need_pages; i++) { + if (arena_mapbits_unzeroed_get(chunk, run_ind+i) + != 0) + arena_run_zero(chunk, run_ind+i, 1); + else if (config_debug) { + arena_run_page_validate_zeroed(chunk, + run_ind+i); + } else { + arena_run_page_mark_zeroed(chunk, + run_ind+i); } - } else { - /* - * The run is dirty, so all pages must be - * zeroed. - */ - arena_run_zero(chunk, run_ind, need_pages); } } else { - VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + - (run_ind << LG_PAGE)), (need_pages << LG_PAGE)); + /* The run is dirty, so all pages must be zeroed. */ + arena_run_zero(chunk, run_ind, need_pages); } - - /* - * Set the last element first, in case the run only contains one - * page (i.e. both statements set the same element). - */ - arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, - flag_dirty); - arena_mapbits_large_set(chunk, run_ind, size, flag_dirty); } else { - assert(zero == false); - /* - * Propagate the dirty and unzeroed flags to the allocated - * small run, so that arena_dalloc_bin_run() has the ability to - * conditionally trim clean pages. - */ - arena_mapbits_small_set(chunk, run_ind, 0, binind, flag_dirty); - /* - * The first page will always be dirtied during small run - * initialization, so a validation failure here would not - * actually cause an observable failure. - */ - if (config_debug && flag_dirty == 0 && - arena_mapbits_unzeroed_get(chunk, run_ind) == 0) - arena_run_page_validate_zeroed(chunk, run_ind); - for (i = 1; i < need_pages - 1; i++) { - arena_mapbits_small_set(chunk, run_ind+i, i, binind, 0); - if (config_debug && flag_dirty == 0 && - arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0) { - arena_run_page_validate_zeroed(chunk, - run_ind+i); - } - } - arena_mapbits_small_set(chunk, run_ind+need_pages-1, - need_pages-1, binind, flag_dirty); - if (config_debug && flag_dirty == 0 && - arena_mapbits_unzeroed_get(chunk, run_ind+need_pages-1) == - 0) { - arena_run_page_validate_zeroed(chunk, - run_ind+need_pages-1); - } VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), (need_pages << LG_PAGE)); } + + /* + * Set the last element first, in case the run only contains one page + * (i.e. both statements set the same element). 
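The restructuring here replaces the single arena_run_split(arena, run, size, large, binind, zero) with intent-revealing variants: arena_run_split_large() and arena_run_init_large() share a helper distinguished only by whether the run is removed from runs_avail, while arena_run_split_small() takes the binind that the large path never needed. The zeroing policy carried into the large helper is compact enough to tabulate:

    /* arena_run_split_large_helper() zeroing decisions:
     *   zero && run clean : zero only pages still flagged CHUNK_MAP_UNZEROED
     *                       (debug builds instead validate already-zeroed
     *                       pages)
     *   zero && run dirty : zero the entire run unconditionally
     *   !zero             : no zeroing; only VALGRIND_MAKE_MEM_UNDEFINED() */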
+ */ + arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty); + arena_mapbits_large_set(chunk, run_ind, size, flag_dirty); +} + +static void +arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero) +{ + + arena_run_split_large_helper(arena, run, size, true, zero); +} + +static void +arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero) +{ + + arena_run_split_large_helper(arena, run, size, false, zero); +} + +static void +arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size, + size_t binind) +{ + arena_chunk_t *chunk; + size_t flag_dirty, run_ind, need_pages, i; + + assert(binind != BININD_INVALID); + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); + run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE); + flag_dirty = arena_mapbits_dirty_get(chunk, run_ind); + need_pages = (size >> LG_PAGE); + assert(need_pages > 0); + + arena_run_split_remove(arena, chunk, run_ind, flag_dirty, need_pages); + + /* + * Propagate the dirty and unzeroed flags to the allocated small run, + * so that arena_dalloc_bin_run() has the ability to conditionally trim + * clean pages. + */ + arena_mapbits_small_set(chunk, run_ind, 0, binind, flag_dirty); + /* + * The first page will always be dirtied during small run + * initialization, so a validation failure here would not actually + * cause an observable failure. + */ + if (config_debug && flag_dirty == 0 && arena_mapbits_unzeroed_get(chunk, + run_ind) == 0) + arena_run_page_validate_zeroed(chunk, run_ind); + for (i = 1; i < need_pages - 1; i++) { + arena_mapbits_small_set(chunk, run_ind+i, i, binind, 0); + if (config_debug && flag_dirty == 0 && + arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0) + arena_run_page_validate_zeroed(chunk, run_ind+i); + } + arena_mapbits_small_set(chunk, run_ind+need_pages-1, need_pages-1, + binind, flag_dirty); + if (config_debug && flag_dirty == 0 && arena_mapbits_unzeroed_get(chunk, + run_ind+need_pages-1) == 0) + arena_run_page_validate_zeroed(chunk, run_ind+need_pages-1); + VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + + (run_ind << LG_PAGE)), (need_pages << LG_PAGE)); +} + +static arena_chunk_t * +arena_chunk_init_spare(arena_t *arena) +{ + arena_chunk_t *chunk; + + assert(arena->spare != NULL); + + chunk = arena->spare; + arena->spare = NULL; + + assert(arena_mapbits_allocated_get(chunk, map_bias) == 0); + assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0); + assert(arena_mapbits_unallocated_size_get(chunk, map_bias) == + arena_maxclass); + assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) == + arena_maxclass); + assert(arena_mapbits_dirty_get(chunk, map_bias) == + arena_mapbits_dirty_get(chunk, chunk_npages-1)); + + return (chunk); +} + +static arena_chunk_t * +arena_chunk_init_hard(arena_t *arena) +{ + arena_chunk_t *chunk; + bool zero; + size_t unzeroed, i; + + assert(arena->spare == NULL); + + zero = false; + malloc_mutex_unlock(&arena->lock); + chunk = (arena_chunk_t *)chunk_alloc(chunksize, chunksize, false, + &zero, arena->dss_prec); + malloc_mutex_lock(&arena->lock); + if (chunk == NULL) + return (NULL); + if (config_stats) + arena->stats.mapped += chunksize; + + chunk->arena = arena; + + /* + * Claim that no pages are in use, since the header is merely overhead. + */ + chunk->ndirty = 0; + + chunk->nruns_avail = 0; + chunk->nruns_adjac = 0; + + /* + * Initialize the map to contain one maximal free untouched run. Mark + * the pages as zeroed iff chunk_alloc() returned a zeroed chunk. 
+ */ + unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED; + arena_mapbits_unallocated_set(chunk, map_bias, arena_maxclass, + unzeroed); + /* + * There is no need to initialize the internal page map entries unless + * the chunk is not zeroed. + */ + if (zero == false) { + VALGRIND_MAKE_MEM_UNDEFINED((void *)arena_mapp_get(chunk, + map_bias+1), (size_t)((uintptr_t) arena_mapp_get(chunk, + chunk_npages-1) - (uintptr_t)arena_mapp_get(chunk, + map_bias+1))); + for (i = map_bias+1; i < chunk_npages-1; i++) + arena_mapbits_unzeroed_set(chunk, i, unzeroed); + } else { + VALGRIND_MAKE_MEM_DEFINED((void *)arena_mapp_get(chunk, + map_bias+1), (size_t)((uintptr_t) arena_mapp_get(chunk, + chunk_npages-1) - (uintptr_t)arena_mapp_get(chunk, + map_bias+1))); + if (config_debug) { + for (i = map_bias+1; i < chunk_npages-1; i++) { + assert(arena_mapbits_unzeroed_get(chunk, i) == + unzeroed); + } + } + } + arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxclass, + unzeroed); + + return (chunk); } static arena_chunk_t * arena_chunk_alloc(arena_t *arena) { arena_chunk_t *chunk; - size_t i; - if (arena->spare != NULL) { - chunk = arena->spare; - arena->spare = NULL; - - assert(arena_mapbits_allocated_get(chunk, map_bias) == 0); - assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0); - assert(arena_mapbits_unallocated_size_get(chunk, map_bias) == - arena_maxclass); - assert(arena_mapbits_unallocated_size_get(chunk, - chunk_npages-1) == arena_maxclass); - assert(arena_mapbits_dirty_get(chunk, map_bias) == - arena_mapbits_dirty_get(chunk, chunk_npages-1)); - } else { - bool zero; - size_t unzeroed; - - zero = false; - malloc_mutex_unlock(&arena->lock); - chunk = (arena_chunk_t *)chunk_alloc(chunksize, chunksize, - false, &zero, arena->dss_prec); - malloc_mutex_lock(&arena->lock); - if (chunk == NULL) - return (NULL); - if (config_stats) - arena->stats.mapped += chunksize; - - chunk->arena = arena; - - /* - * Claim that no pages are in use, since the header is merely - * overhead. - */ - chunk->ndirty = 0; - - chunk->nruns_avail = 0; - chunk->nruns_adjac = 0; - - /* - * Initialize the map to contain one maximal free untouched run. - * Mark the pages as zeroed iff chunk_alloc() returned a zeroed - * chunk. - */ - unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED; - arena_mapbits_unallocated_set(chunk, map_bias, arena_maxclass, - unzeroed); - /* - * There is no need to initialize the internal page map entries - * unless the chunk is not zeroed. - */ - if (zero == false) { - VALGRIND_MAKE_MEM_UNDEFINED( - (void *)arena_mapp_get(chunk, map_bias+1), - (size_t)((uintptr_t) arena_mapp_get(chunk, - chunk_npages-1) - (uintptr_t)arena_mapp_get(chunk, - map_bias+1))); - for (i = map_bias+1; i < chunk_npages-1; i++) - arena_mapbits_unzeroed_set(chunk, i, unzeroed); - } else { - VALGRIND_MAKE_MEM_DEFINED( - (void *)arena_mapp_get(chunk, map_bias+1), - (size_t)((uintptr_t) arena_mapp_get(chunk, - chunk_npages-1) - (uintptr_t)arena_mapp_get(chunk, - map_bias+1))); - if (config_debug) { - for (i = map_bias+1; i < chunk_npages-1; i++) { - assert(arena_mapbits_unzeroed_get(chunk, - i) == unzeroed); - } - } - } - arena_mapbits_unallocated_set(chunk, chunk_npages-1, - arena_maxclass, unzeroed); - } + if (arena->spare != NULL) + chunk = arena_chunk_init_spare(arena); + else + chunk = arena_chunk_init_hard(arena); /* Insert the run into the runs_avail tree. 
*/ arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias, @@ -646,8 +657,7 @@ arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk) } static arena_run_t * -arena_run_alloc_helper(arena_t *arena, size_t size, bool large, size_t binind, - bool zero) +arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero) { arena_run_t *run; arena_chunk_map_t *mapelm, key; @@ -662,7 +672,7 @@ arena_run_alloc_helper(arena_t *arena, size_t size, bool large, size_t binind, run = (arena_run_t *)((uintptr_t)run_chunk + (pageind << LG_PAGE)); - arena_run_split(arena, run, size, large, binind, zero); + arena_run_split_large(arena, run, size, zero); return (run); } @@ -670,19 +680,16 @@ arena_run_alloc_helper(arena_t *arena, size_t size, bool large, size_t binind, } static arena_run_t * -arena_run_alloc(arena_t *arena, size_t size, bool large, size_t binind, - bool zero) +arena_run_alloc_large(arena_t *arena, size_t size, bool zero) { arena_chunk_t *chunk; arena_run_t *run; assert(size <= arena_maxclass); assert((size & PAGE_MASK) == 0); - assert((large && binind == BININD_INVALID) || (large == false && binind - != BININD_INVALID)); /* Search the arena's chunks for the lowest best fit. */ - run = arena_run_alloc_helper(arena, size, large, binind, zero); + run = arena_run_alloc_large_helper(arena, size, zero); if (run != NULL) return (run); @@ -692,7 +699,7 @@ arena_run_alloc(arena_t *arena, size_t size, bool large, size_t binind, chunk = arena_chunk_alloc(arena); if (chunk != NULL) { run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE)); - arena_run_split(arena, run, size, large, binind, zero); + arena_run_split_large(arena, run, size, zero); return (run); } @@ -701,7 +708,63 @@ arena_run_alloc(arena_t *arena, size_t size, bool large, size_t binind, * sufficient memory available while this one dropped arena->lock in * arena_chunk_alloc(), so search one more time. */ - return (arena_run_alloc_helper(arena, size, large, binind, zero)); + return (arena_run_alloc_large_helper(arena, size, zero)); +} + +static arena_run_t * +arena_run_alloc_small_helper(arena_t *arena, size_t size, size_t binind) +{ + arena_run_t *run; + arena_chunk_map_t *mapelm, key; + + key.bits = size | CHUNK_MAP_KEY; + mapelm = arena_avail_tree_nsearch(&arena->runs_avail, &key); + if (mapelm != NULL) { + arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm); + size_t pageind = (((uintptr_t)mapelm - + (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t)) + + map_bias; + + run = (arena_run_t *)((uintptr_t)run_chunk + (pageind << + LG_PAGE)); + arena_run_split_small(arena, run, size, binind); + return (run); + } + + return (NULL); +} + +static arena_run_t * +arena_run_alloc_small(arena_t *arena, size_t size, size_t binind) +{ + arena_chunk_t *chunk; + arena_run_t *run; + + assert(size <= arena_maxclass); + assert((size & PAGE_MASK) == 0); + assert(binind != BININD_INVALID); + + /* Search the arena's chunks for the lowest best fit. */ + run = arena_run_alloc_small_helper(arena, size, binind); + if (run != NULL) + return (run); + + /* + * No usable runs. Create a new chunk from which to allocate the run. + */ + chunk = arena_chunk_alloc(arena); + if (chunk != NULL) { + run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE)); + arena_run_split_small(arena, run, size, binind); + return (run); + } + + /* + * arena_chunk_alloc() failed, but another thread may have made + * sufficient memory available while this one dropped arena->lock in + * arena_chunk_alloc(), so search one more time. 
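+	 * (This retry mirrors the one in arena_run_alloc_large() above.)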
+ */ + return (arena_run_alloc_small_helper(arena, size, binind)); } static inline void @@ -727,14 +790,169 @@ arena_maybe_purge(arena_t *arena) arena_purge(arena, false); } +static arena_chunk_t * +chunks_dirty_iter_cb(arena_chunk_tree_t *tree, arena_chunk_t *chunk, void *arg) +{ + size_t *ndirty = (size_t *)arg; + + assert(chunk->ndirty != 0); + *ndirty += chunk->ndirty; + return (NULL); +} + +static size_t +arena_compute_npurgatory(arena_t *arena, bool all) +{ + size_t npurgatory, npurgeable; + + /* + * Compute the minimum number of pages that this thread should try to + * purge. + */ + npurgeable = arena->ndirty - arena->npurgatory; + + if (all == false) { + size_t threshold = (arena->nactive >> opt_lg_dirty_mult); + + npurgatory = npurgeable - threshold; + } else + npurgatory = npurgeable; + + return (npurgatory); +} + +static void +arena_chunk_stash_dirty(arena_t *arena, arena_chunk_t *chunk, bool all, + arena_chunk_mapelms_t *mapelms) +{ + size_t pageind, npages; + + /* + * Temporarily allocate free dirty runs within chunk. If all is false, + * only operate on dirty runs that are fragments; otherwise operate on + * all dirty runs. + */ + for (pageind = map_bias; pageind < chunk_npages; pageind += npages) { + arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind); + if (arena_mapbits_allocated_get(chunk, pageind) == 0) { + size_t run_size = + arena_mapbits_unallocated_size_get(chunk, pageind); + + npages = run_size >> LG_PAGE; + assert(pageind + npages <= chunk_npages); + assert(arena_mapbits_dirty_get(chunk, pageind) == + arena_mapbits_dirty_get(chunk, pageind+npages-1)); + + if (arena_mapbits_dirty_get(chunk, pageind) != 0 && + (all || arena_avail_adjac(chunk, pageind, + npages))) { + arena_run_t *run = (arena_run_t *)((uintptr_t) + chunk + (uintptr_t)(pageind << LG_PAGE)); + + arena_run_split_large(arena, run, run_size, + false); + /* Append to list for later processing. */ + ql_elm_new(mapelm, u.ql_link); + ql_tail_insert(mapelms, mapelm, u.ql_link); + } + } else { + /* Skip run. */ + if (arena_mapbits_large_get(chunk, pageind) != 0) { + npages = arena_mapbits_large_size_get(chunk, + pageind) >> LG_PAGE; + } else { + size_t binind; + arena_bin_info_t *bin_info; + arena_run_t *run = (arena_run_t *)((uintptr_t) + chunk + (uintptr_t)(pageind << LG_PAGE)); + + assert(arena_mapbits_small_runind_get(chunk, + pageind) == 0); + binind = arena_bin_index(arena, run->bin); + bin_info = &arena_bin_info[binind]; + npages = bin_info->run_size >> LG_PAGE; + } + } + } + assert(pageind == chunk_npages); + assert(chunk->ndirty == 0 || all == false); + assert(chunk->nruns_adjac == 0); +} + +static size_t +arena_chunk_purge_stashed(arena_t *arena, arena_chunk_t *chunk, + arena_chunk_mapelms_t *mapelms) +{ + size_t npurged, pageind, npages, nmadvise; + arena_chunk_map_t *mapelm; + + malloc_mutex_unlock(&arena->lock); + if (config_stats) + nmadvise = 0; + npurged = 0; + ql_foreach(mapelm, mapelms, u.ql_link) { + bool unzeroed; + size_t flag_unzeroed, i; + + pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) / + sizeof(arena_chunk_map_t)) + map_bias; + npages = arena_mapbits_large_size_get(chunk, pageind) >> + LG_PAGE; + assert(pageind + npages <= chunk_npages); + unzeroed = pages_purge((void *)((uintptr_t)chunk + (pageind << + LG_PAGE)), (npages << LG_PAGE)); + flag_unzeroed = unzeroed ? CHUNK_MAP_UNZEROED : 0; + /* + * Set the unzeroed flag for all pages, now that pages_purge() + * has returned whether the pages were zeroed as a side effect + * of purging. 
This chunk map modification is safe even though + * the arena mutex isn't currently owned by this thread, + * because the run is marked as allocated, thus protecting it + * from being modified by any other thread. As long as these + * writes don't perturb the first and last elements' + * CHUNK_MAP_ALLOCATED bits, behavior is well defined. + */ + for (i = 0; i < npages; i++) { + arena_mapbits_unzeroed_set(chunk, pageind+i, + flag_unzeroed); + } + npurged += npages; + if (config_stats) + nmadvise++; + } + malloc_mutex_lock(&arena->lock); + if (config_stats) + arena->stats.nmadvise += nmadvise; + + return (npurged); +} + +static void +arena_chunk_unstash_purged(arena_t *arena, arena_chunk_t *chunk, + arena_chunk_mapelms_t *mapelms) +{ + arena_chunk_map_t *mapelm; + size_t pageind; + + /* Deallocate runs. */ + for (mapelm = ql_first(mapelms); mapelm != NULL; + mapelm = ql_first(mapelms)) { + arena_run_t *run; + + pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) / + sizeof(arena_chunk_map_t)) + map_bias; + run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)(pageind << + LG_PAGE)); + ql_remove(mapelms, mapelm, u.ql_link); + arena_run_dalloc(arena, run, false, true); + } +} + static inline size_t arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all) { size_t npurged; - ql_head(arena_chunk_map_t) mapelms; - arena_chunk_map_t *mapelm; - size_t pageind, npages; - size_t nmadvise; + arena_chunk_mapelms_t mapelms; ql_new(&mapelms); @@ -770,121 +988,13 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all) if (chunk->nruns_adjac == 0) all = true; - /* - * Temporarily allocate free dirty runs within chunk. If all is false, - * only operate on dirty runs that are fragments; otherwise operate on - * all dirty runs. - */ - for (pageind = map_bias; pageind < chunk_npages; pageind += npages) { - mapelm = arena_mapp_get(chunk, pageind); - if (arena_mapbits_allocated_get(chunk, pageind) == 0) { - size_t run_size = - arena_mapbits_unallocated_size_get(chunk, pageind); - - npages = run_size >> LG_PAGE; - assert(pageind + npages <= chunk_npages); - assert(arena_mapbits_dirty_get(chunk, pageind) == - arena_mapbits_dirty_get(chunk, pageind+npages-1)); - - if (arena_mapbits_dirty_get(chunk, pageind) != 0 && - (all || arena_avail_adjac(chunk, pageind, - npages))) { - arena_run_t *run = (arena_run_t *)((uintptr_t) - chunk + (uintptr_t)(pageind << LG_PAGE)); - - arena_run_split(arena, run, run_size, true, - BININD_INVALID, false); - /* Append to list for later processing. */ - ql_elm_new(mapelm, u.ql_link); - ql_tail_insert(&mapelms, mapelm, u.ql_link); - } - } else { - /* Skip run. 
*/ - if (arena_mapbits_large_get(chunk, pageind) != 0) { - npages = arena_mapbits_large_size_get(chunk, - pageind) >> LG_PAGE; - } else { - size_t binind; - arena_bin_info_t *bin_info; - arena_run_t *run = (arena_run_t *)((uintptr_t) - chunk + (uintptr_t)(pageind << LG_PAGE)); - - assert(arena_mapbits_small_runind_get(chunk, - pageind) == 0); - binind = arena_bin_index(arena, run->bin); - bin_info = &arena_bin_info[binind]; - npages = bin_info->run_size >> LG_PAGE; - } - } - } - assert(pageind == chunk_npages); - assert(chunk->ndirty == 0 || all == false); - assert(chunk->nruns_adjac == 0); - - malloc_mutex_unlock(&arena->lock); - if (config_stats) - nmadvise = 0; - npurged = 0; - ql_foreach(mapelm, &mapelms, u.ql_link) { - bool unzeroed; - size_t flag_unzeroed, i; - - pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) / - sizeof(arena_chunk_map_t)) + map_bias; - npages = arena_mapbits_large_size_get(chunk, pageind) >> - LG_PAGE; - assert(pageind + npages <= chunk_npages); - unzeroed = pages_purge((void *)((uintptr_t)chunk + (pageind << - LG_PAGE)), (npages << LG_PAGE)); - flag_unzeroed = unzeroed ? CHUNK_MAP_UNZEROED : 0; - /* - * Set the unzeroed flag for all pages, now that pages_purge() - * has returned whether the pages were zeroed as a side effect - * of purging. This chunk map modification is safe even though - * the arena mutex isn't currently owned by this thread, - * because the run is marked as allocated, thus protecting it - * from being modified by any other thread. As long as these - * writes don't perturb the first and last elements' - * CHUNK_MAP_ALLOCATED bits, behavior is well defined. - */ - for (i = 0; i < npages; i++) { - arena_mapbits_unzeroed_set(chunk, pageind+i, - flag_unzeroed); - } - npurged += npages; - if (config_stats) - nmadvise++; - } - malloc_mutex_lock(&arena->lock); - if (config_stats) - arena->stats.nmadvise += nmadvise; - - /* Deallocate runs. */ - for (mapelm = ql_first(&mapelms); mapelm != NULL; - mapelm = ql_first(&mapelms)) { - arena_run_t *run; - - pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) / - sizeof(arena_chunk_map_t)) + map_bias; - run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)(pageind << - LG_PAGE)); - ql_remove(&mapelms, mapelm, u.ql_link); - arena_run_dalloc(arena, run, false, true); - } + arena_chunk_stash_dirty(arena, chunk, all, &mapelms); + npurged = arena_chunk_purge_stashed(arena, chunk, &mapelms); + arena_chunk_unstash_purged(arena, chunk, &mapelms); return (npurged); } -static arena_chunk_t * -chunks_dirty_iter_cb(arena_chunk_tree_t *tree, arena_chunk_t *chunk, void *arg) -{ - size_t *ndirty = (size_t *)arg; - - assert(chunk->ndirty != 0); - *ndirty += chunk->ndirty; - return (NULL); -} - static void arena_purge(arena_t *arena, bool all) { @@ -905,21 +1015,11 @@ arena_purge(arena_t *arena, bool all) arena->stats.npurge++; /* - * Compute the minimum number of pages that this thread should try to - * purge, and add the result to arena->npurgatory. This will keep - * multiple threads from racing to reduce ndirty below the threshold. + * Add the minimum number of pages this thread should try to purge to + * arena->npurgatory. This will keep multiple threads from racing to + * reduce ndirty below the threshold. 
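+	 * (The amount to purge is computed by arena_compute_npurgatory().)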
*/ - { - size_t npurgeable = arena->ndirty - arena->npurgatory; - - if (all == false) { - size_t threshold = (arena->nactive >> - opt_lg_dirty_mult); - - npurgatory = npurgeable - threshold; - } else - npurgatory = npurgeable; - } + npurgatory = arena_compute_npurgatory(arena, all); arena->npurgatory += npurgatory; while (npurgatory > 0) { @@ -986,61 +1086,12 @@ arena_purge_all(arena_t *arena) } static void -arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned) +arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size, + size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty) { - arena_chunk_t *chunk; - size_t size, run_ind, run_pages, flag_dirty; - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE); - assert(run_ind >= map_bias); - assert(run_ind < chunk_npages); - if (arena_mapbits_large_get(chunk, run_ind) != 0) { - size = arena_mapbits_large_size_get(chunk, run_ind); - assert(size == PAGE || - arena_mapbits_large_size_get(chunk, - run_ind+(size>>LG_PAGE)-1) == 0); - } else { - size_t binind = arena_bin_index(arena, run->bin); - arena_bin_info_t *bin_info = &arena_bin_info[binind]; - size = bin_info->run_size; - } - run_pages = (size >> LG_PAGE); - if (config_stats) { - /* - * Update stats_cactive if nactive is crossing a chunk - * multiple. - */ - size_t cactive_diff = CHUNK_CEILING(arena->nactive << LG_PAGE) - - CHUNK_CEILING((arena->nactive - run_pages) << LG_PAGE); - if (cactive_diff != 0) - stats_cactive_sub(cactive_diff); - } - arena->nactive -= run_pages; - - /* - * The run is dirty if the caller claims to have dirtied it, as well as - * if it was already dirty before being allocated and the caller - * doesn't claim to have cleaned it. - */ - assert(arena_mapbits_dirty_get(chunk, run_ind) == - arena_mapbits_dirty_get(chunk, run_ind+run_pages-1)); - if (cleaned == false && arena_mapbits_dirty_get(chunk, run_ind) != 0) - dirty = true; - flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0; - - /* Mark pages as unallocated in the chunk map. */ - if (dirty) { - arena_mapbits_unallocated_set(chunk, run_ind, size, - CHUNK_MAP_DIRTY); - arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size, - CHUNK_MAP_DIRTY); - } else { - arena_mapbits_unallocated_set(chunk, run_ind, size, - arena_mapbits_unzeroed_get(chunk, run_ind)); - arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size, - arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1)); - } + size_t size = *p_size; + size_t run_ind = *p_run_ind; + size_t run_pages = *p_run_pages; /* Try to coalesce forward. */ if (run_ind + run_pages < chunk_npages && @@ -1070,8 +1121,9 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned) } /* Try to coalesce backward. 
*/ - if (run_ind > map_bias && arena_mapbits_allocated_get(chunk, run_ind-1) - == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) == flag_dirty) { + if (run_ind > map_bias && arena_mapbits_allocated_get(chunk, + run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) == + flag_dirty) { size_t prun_size = arena_mapbits_unallocated_size_get(chunk, run_ind-1); size_t prun_pages = prun_size >> LG_PAGE; @@ -1096,6 +1148,62 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned) size); } + *p_size = size; + *p_run_ind = run_ind; + *p_run_pages = run_pages; +} + +static void +arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned) +{ + arena_chunk_t *chunk; + size_t size, run_ind, run_pages, flag_dirty; + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); + run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE); + assert(run_ind >= map_bias); + assert(run_ind < chunk_npages); + if (arena_mapbits_large_get(chunk, run_ind) != 0) { + size = arena_mapbits_large_size_get(chunk, run_ind); + assert(size == PAGE || + arena_mapbits_large_size_get(chunk, + run_ind+(size>>LG_PAGE)-1) == 0); + } else { + size_t binind = arena_bin_index(arena, run->bin); + arena_bin_info_t *bin_info = &arena_bin_info[binind]; + size = bin_info->run_size; + } + run_pages = (size >> LG_PAGE); + arena_cactive_update(arena, 0, run_pages); + arena->nactive -= run_pages; + + /* + * The run is dirty if the caller claims to have dirtied it, as well as + * if it was already dirty before being allocated and the caller + * doesn't claim to have cleaned it. + */ + assert(arena_mapbits_dirty_get(chunk, run_ind) == + arena_mapbits_dirty_get(chunk, run_ind+run_pages-1)); + if (cleaned == false && arena_mapbits_dirty_get(chunk, run_ind) != 0) + dirty = true; + flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0; + + /* Mark pages as unallocated in the chunk map. */ + if (dirty) { + arena_mapbits_unallocated_set(chunk, run_ind, size, + CHUNK_MAP_DIRTY); + arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size, + CHUNK_MAP_DIRTY); + } else { + arena_mapbits_unallocated_set(chunk, run_ind, size, + arena_mapbits_unzeroed_get(chunk, run_ind)); + arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size, + arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1)); + } + + arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages, + flag_dirty); + /* Insert into runs_avail, now that coalescing is complete. */ assert(arena_mapbits_unallocated_size_get(chunk, run_ind) == arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1)); @@ -1263,7 +1371,7 @@ arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin) malloc_mutex_unlock(&bin->lock); /******************************/ malloc_mutex_lock(&arena->lock); - run = arena_run_alloc(arena, bin_info->run_size, false, binind, false); + run = arena_run_alloc_small(arena, bin_info->run_size, binind); if (run != NULL) { bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run + (uintptr_t)bin_info->bitmap_offset); @@ -1286,7 +1394,7 @@ arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin) } /* - * arena_run_alloc() failed, but another thread may have made + * arena_run_alloc_small() failed, but another thread may have made * sufficient memory available while this one dropped bin->lock above, * so search one more time. */ @@ -1321,12 +1429,12 @@ arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin) arena_chunk_t *chunk; /* - * arena_run_alloc() may have allocated run, or it may - * have pulled run from the bin's run tree. 
Therefore - * it is unsafe to make any assumptions about how run - * has previously been used, and arena_bin_lower_run() - * must be called, as if a region were just deallocated - * from the run. + * arena_run_alloc_small() may have allocated run, or + * it may have pulled run from the bin's run tree. + * Therefore it is unsafe to make any assumptions about + * how run has previously been used, and + * arena_bin_lower_run() must be called, as if a region + * were just deallocated from the run. */ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); if (run->nfree == bin_info->nregs) @@ -1404,8 +1512,28 @@ arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero) } } -void -arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info) +#ifdef JEMALLOC_JET +#undef arena_redzone_corruption +#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption_impl) +#endif +static void +arena_redzone_corruption(void *ptr, size_t usize, bool after, + size_t offset, uint8_t byte) +{ + + malloc_printf(": Corrupt redzone %zu byte%s %s %p " + "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s", + after ? "after" : "before", ptr, usize, byte); +} +#ifdef JEMALLOC_JET +#undef arena_redzone_corruption +#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption) +arena_redzone_corruption_t *arena_redzone_corruption = + JEMALLOC_N(arena_redzone_corruption_impl); +#endif + +static void +arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset) { size_t size = bin_info->reg_size; size_t redzone_size = bin_info->redzone_size; @@ -1413,29 +1541,61 @@ arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info) bool error = false; for (i = 1; i <= redzone_size; i++) { - unsigned byte; - if ((byte = *(uint8_t *)((uintptr_t)ptr - i)) != 0xa5) { + uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i); + if (*byte != 0xa5) { error = true; - malloc_printf(": Corrupt redzone " - "%zu byte%s before %p (size %zu), byte=%#x\n", i, - (i == 1) ? "" : "s", ptr, size, byte); + arena_redzone_corruption(ptr, size, false, i, *byte); + if (reset) + *byte = 0xa5; } } for (i = 0; i < redzone_size; i++) { - unsigned byte; - if ((byte = *(uint8_t *)((uintptr_t)ptr + size + i)) != 0xa5) { + uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i); + if (*byte != 0xa5) { error = true; - malloc_printf(": Corrupt redzone " - "%zu byte%s after end of %p (size %zu), byte=%#x\n", - i, (i == 1) ? 
"" : "s", ptr, size, byte); + arena_redzone_corruption(ptr, size, true, i, *byte); + if (reset) + *byte = 0xa5; } } if (opt_abort && error) abort(); +} +#ifdef JEMALLOC_JET +#undef arena_dalloc_junk_small +#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small_impl) +#endif +void +arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info) +{ + size_t redzone_size = bin_info->redzone_size; + + arena_redzones_validate(ptr, bin_info, false); memset((void *)((uintptr_t)ptr - redzone_size), 0x5a, bin_info->reg_interval); } +#ifdef JEMALLOC_JET +#undef arena_dalloc_junk_small +#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small) +arena_dalloc_junk_small_t *arena_dalloc_junk_small = + JEMALLOC_N(arena_dalloc_junk_small_impl); +#endif + +void +arena_quarantine_junk_small(void *ptr, size_t usize) +{ + size_t binind; + arena_bin_info_t *bin_info; + cassert(config_fill); + assert(opt_junk); + assert(opt_quarantine); + assert(usize <= SMALL_MAXCLASS); + + binind = SMALL_SIZE2BIN(usize); + bin_info = &arena_bin_info[binind]; + arena_redzones_validate(ptr, bin_info, true); +} void * arena_malloc_small(arena_t *arena, size_t size, bool zero) @@ -1500,7 +1660,7 @@ arena_malloc_large(arena_t *arena, size_t size, bool zero) /* Large allocation. */ size = PAGE_CEILING(size); malloc_mutex_lock(&arena->lock); - ret = (void *)arena_run_alloc(arena, size, true, BININD_INVALID, zero); + ret = (void *)arena_run_alloc_large(arena, size, zero); if (ret == NULL) { malloc_mutex_unlock(&arena->lock); return (NULL); @@ -1546,7 +1706,7 @@ arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero) alloc_size = size + alignment - PAGE; malloc_mutex_lock(&arena->lock); - run = arena_run_alloc(arena, alloc_size, true, BININD_INVALID, zero); + run = arena_run_alloc_large(arena, alloc_size, false); if (run == NULL) { malloc_mutex_unlock(&arena->lock); return (NULL); @@ -1566,6 +1726,7 @@ arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero) arena_run_trim_tail(arena, chunk, ret, size + trailsize, size, false); } + arena_run_init_large(arena, (arena_run_t *)ret, size, zero); if (config_stats) { arena->stats.nmalloc_large++; @@ -1769,21 +1930,38 @@ arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr, arena_dalloc_bin(arena, chunk, ptr, pageind, mapelm); } +#ifdef JEMALLOC_JET +#undef arena_dalloc_junk_large +#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large_impl) +#endif +static void +arena_dalloc_junk_large(void *ptr, size_t usize) +{ + + if (config_fill && opt_junk) + memset(ptr, 0x5a, usize); +} +#ifdef JEMALLOC_JET +#undef arena_dalloc_junk_large +#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large) +arena_dalloc_junk_large_t *arena_dalloc_junk_large = + JEMALLOC_N(arena_dalloc_junk_large_impl); +#endif + void arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr) { if (config_fill || config_stats) { size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - size_t size = arena_mapbits_large_size_get(chunk, pageind); + size_t usize = arena_mapbits_large_size_get(chunk, pageind); - if (config_fill && config_stats && opt_junk) - memset(ptr, 0x5a, size); + arena_dalloc_junk_large(ptr, usize); if (config_stats) { arena->stats.ndalloc_large++; - arena->stats.allocated_large -= size; - arena->stats.lstats[(size >> LG_PAGE) - 1].ndalloc++; - arena->stats.lstats[(size >> LG_PAGE) - 1].curruns--; + arena->stats.allocated_large -= usize; + arena->stats.lstats[(usize >> LG_PAGE) - 1].ndalloc++; + 
arena->stats.lstats[(usize >> LG_PAGE) - 1].curruns--; } } @@ -1854,9 +2032,8 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr, size_t flag_dirty; size_t splitsize = (oldsize + followsize <= size + extra) ? followsize : size + extra - oldsize; - arena_run_split(arena, (arena_run_t *)((uintptr_t)chunk + - ((pageind+npages) << LG_PAGE)), splitsize, true, - BININD_INVALID, zero); + arena_run_split_large(arena, (arena_run_t *)((uintptr_t)chunk + + ((pageind+npages) << LG_PAGE)), splitsize, zero); size = oldsize + splitsize; npages = size >> LG_PAGE; @@ -1895,6 +2072,26 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr, return (true); } +#ifdef JEMALLOC_JET +#undef arena_ralloc_junk_large +#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large_impl) +#endif +static void +arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize) +{ + + if (config_fill && opt_junk) { + memset((void *)((uintptr_t)ptr + usize), 0x5a, + old_usize - usize); + } +} +#ifdef JEMALLOC_JET +#undef arena_ralloc_junk_large +#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large) +arena_ralloc_junk_large_t *arena_ralloc_junk_large = + JEMALLOC_N(arena_ralloc_junk_large_impl); +#endif + /* * Try to resize a large allocation, in order to avoid copying. This will * always fail if growing an object, and the following run is already in use. @@ -1908,10 +2105,6 @@ arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra, psize = PAGE_CEILING(size + extra); if (psize == oldsize) { /* Same size class. */ - if (config_fill && opt_junk && size < oldsize) { - memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize - - size); - } return (false); } else { arena_chunk_t *chunk; @@ -1922,10 +2115,7 @@ arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra, if (psize < oldsize) { /* Fill before shrinking in order avoid a race. 
*/ - if (config_fill && opt_junk) { - memset((void *)((uintptr_t)ptr + size), 0x5a, - oldsize - size); - } + arena_ralloc_junk_large(ptr, oldsize, psize); arena_ralloc_large_shrink(arena, chunk, ptr, oldsize, psize); return (false); @@ -1933,17 +2123,23 @@ arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra, bool ret = arena_ralloc_large_grow(arena, chunk, ptr, oldsize, PAGE_CEILING(size), psize - PAGE_CEILING(size), zero); - if (config_fill && ret == false && zero == false && - opt_zero) { - memset((void *)((uintptr_t)ptr + oldsize), 0, - size - oldsize); + if (config_fill && ret == false && zero == false) { + if (opt_junk) { + memset((void *)((uintptr_t)ptr + + oldsize), 0xa5, isalloc(ptr, + config_prof) - oldsize); + } else if (opt_zero) { + memset((void *)((uintptr_t)ptr + + oldsize), 0, isalloc(ptr, + config_prof) - oldsize); + } } return (ret); } } } -void * +bool arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra, bool zero) { @@ -1958,25 +2154,20 @@ arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra, if ((size + extra <= SMALL_MAXCLASS && SMALL_SIZE2BIN(size + extra) == SMALL_SIZE2BIN(oldsize)) || (size <= oldsize && - size + extra >= oldsize)) { - if (config_fill && opt_junk && size < oldsize) { - memset((void *)((uintptr_t)ptr + size), - 0x5a, oldsize - size); - } - return (ptr); - } + size + extra >= oldsize)) + return (false); } else { assert(size <= arena_maxclass); if (size + extra > SMALL_MAXCLASS) { if (arena_ralloc_large(ptr, oldsize, size, extra, zero) == false) - return (ptr); + return (false); } } } /* Reallocation would require a move. */ - return (NULL); + return (true); } void * @@ -1988,9 +2179,8 @@ arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size, size_t copysize; /* Try to avoid moving the allocation. */ - ret = arena_ralloc_no_move(ptr, oldsize, size, extra, zero); - if (ret != NULL) - return (ret); + if (arena_ralloc_no_move(ptr, oldsize, size, extra, zero) == false) + return (ptr); /* * size and oldsize are different enough that we need to move the @@ -2001,7 +2191,7 @@ arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size, size_t usize = sa2u(size + extra, alignment); if (usize == 0) return (NULL); - ret = ipallocx(usize, alignment, zero, try_tcache_alloc, arena); + ret = ipalloct(usize, alignment, zero, try_tcache_alloc, arena); } else ret = arena_malloc(arena, size + extra, zero, try_tcache_alloc); @@ -2013,7 +2203,7 @@ arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size, size_t usize = sa2u(size, alignment); if (usize == 0) return (NULL); - ret = ipallocx(usize, alignment, zero, try_tcache_alloc, + ret = ipalloct(usize, alignment, zero, try_tcache_alloc, arena); } else ret = arena_malloc(arena, size, zero, try_tcache_alloc); @@ -2031,7 +2221,7 @@ arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size, copysize = (size < oldsize) ? 
size : oldsize; VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize); memcpy(ret, ptr, copysize); - iqallocx(ptr, try_tcache_dalloc); + iqalloct(ptr, try_tcache_dalloc); return (ret); } diff --git a/src/bitmap.c b/src/bitmap.c index b47e2629..e2bd907d 100644 --- a/src/bitmap.c +++ b/src/bitmap.c @@ -1,4 +1,4 @@ -#define JEMALLOC_BITMAP_C_ +#define JEMALLOC_BITMAP_C_ #include "jemalloc/internal/jemalloc_internal.h" /******************************************************************************/ diff --git a/src/chunk.c b/src/chunk.c index b17f43f0..90ab116a 100644 --- a/src/chunk.c +++ b/src/chunk.c @@ -180,7 +180,7 @@ chunk_alloc(size_t size, size_t alignment, bool base, bool *zero, label_return: if (ret != NULL) { if (config_ivsalloc && base == false) { - if (rtree_set(chunks_rtree, (uintptr_t)ret, ret)) { + if (rtree_set(chunks_rtree, (uintptr_t)ret, 1)) { chunk_dealloc(ret, size, true); return (NULL); } @@ -321,7 +321,7 @@ chunk_dealloc(void *chunk, size_t size, bool unmap) assert((size & chunksize_mask) == 0); if (config_ivsalloc) - rtree_set(chunks_rtree, (uintptr_t)chunk, NULL); + rtree_set(chunks_rtree, (uintptr_t)chunk, 0); if (config_stats || config_prof) { malloc_mutex_lock(&chunks_mtx); assert(stats_chunks.curchunks >= (size / chunksize)); @@ -356,7 +356,7 @@ chunk_boot(void) extent_tree_ad_new(&chunks_ad_dss); if (config_ivsalloc) { chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) - - opt_lg_chunk); + opt_lg_chunk, base_alloc, NULL); if (chunks_rtree == NULL) return (true); } @@ -368,7 +368,7 @@ void chunk_prefork(void) { - malloc_mutex_lock(&chunks_mtx); + malloc_mutex_prefork(&chunks_mtx); if (config_ivsalloc) rtree_prefork(chunks_rtree); chunk_dss_prefork(); diff --git a/src/chunk_dss.c b/src/chunk_dss.c index 24781cc5..510bb8be 100644 --- a/src/chunk_dss.c +++ b/src/chunk_dss.c @@ -28,16 +28,17 @@ static void *dss_max; /******************************************************************************/ -#ifndef JEMALLOC_HAVE_SBRK static void * -sbrk(intptr_t increment) +chunk_dss_sbrk(intptr_t increment) { +#ifdef JEMALLOC_HAVE_SBRK + return (sbrk(increment)); +#else not_implemented(); - return (NULL); -} #endif +} dss_prec_t chunk_dss_prec_get(void) @@ -93,7 +94,7 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero) */ do { /* Get the current end of the DSS. */ - dss_max = sbrk(0); + dss_max = chunk_dss_sbrk(0); /* * Calculate how much padding is necessary to * chunk-align the end of the DSS. @@ -117,7 +118,7 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero) return (NULL); } incr = gap_size + cpad_size + size; - dss_prev = sbrk(incr); + dss_prev = chunk_dss_sbrk(incr); if (dss_prev == dss_max) { /* Success. */ dss_max = dss_next; @@ -163,7 +164,7 @@ chunk_dss_boot(void) if (malloc_mutex_init(&dss_mtx)) return (true); - dss_base = sbrk(0); + dss_base = chunk_dss_sbrk(0); dss_prev = dss_base; dss_max = dss_base; diff --git a/src/chunk_mmap.c b/src/chunk_mmap.c index 8a42e759..2056d793 100644 --- a/src/chunk_mmap.c +++ b/src/chunk_mmap.c @@ -43,7 +43,7 @@ pages_map(void *addr, size_t size) if (munmap(ret, size) == -1) { char buf[BUFERROR_BUF]; - buferror(buf, sizeof(buf)); + buferror(get_errno(), buf, sizeof(buf)); malloc_printf(": Error in " #ifdef _WIN32 "VirtualFree" diff --git a/src/ckh.c b/src/ckh.c index 2f38348b..04c52966 100644 --- a/src/ckh.c +++ b/src/ckh.c @@ -49,7 +49,7 @@ static void ckh_shrink(ckh_t *ckh); * Search bucket for key and return the cell number if found; SIZE_T_MAX * otherwise. 
*/ -JEMALLOC_INLINE size_t +JEMALLOC_INLINE_C size_t ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key) { ckhc_t *cell; @@ -67,7 +67,7 @@ ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key) /* * Search table for key and return cell number if found; SIZE_T_MAX otherwise. */ -JEMALLOC_INLINE size_t +JEMALLOC_INLINE_C size_t ckh_isearch(ckh_t *ckh, const void *key) { size_t hashes[2], bucket, cell; @@ -88,7 +88,7 @@ ckh_isearch(ckh_t *ckh, const void *key) return (cell); } -JEMALLOC_INLINE bool +JEMALLOC_INLINE_C bool ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key, const void *data) { @@ -120,7 +120,7 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key, * eviction/relocation procedure until either success or detection of an * eviction/relocation bucket cycle. */ -JEMALLOC_INLINE bool +JEMALLOC_INLINE_C bool ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey, void const **argdata) { @@ -190,7 +190,7 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey, } } -JEMALLOC_INLINE bool +JEMALLOC_INLINE_C bool ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) { size_t hashes[2], bucket; @@ -219,7 +219,7 @@ ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) * Try to rebuild the hash table from scratch by inserting all items from the * old table into the new. */ -JEMALLOC_INLINE bool +JEMALLOC_INLINE_C bool ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) { size_t count, i, nins; diff --git a/src/ctl.c b/src/ctl.c index ebba7c25..cc2c5aef 100644 --- a/src/ctl.c +++ b/src/ctl.c @@ -929,7 +929,7 @@ void ctl_prefork(void) { - malloc_mutex_lock(&ctl_mtx); + malloc_mutex_prefork(&ctl_mtx); } void @@ -1110,6 +1110,8 @@ label_return: \ return (ret); \ } +/******************************************************************************/ + CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *) static int @@ -1131,49 +1133,52 @@ label_return: return (ret); } -static int -thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ - int ret; - bool oldval; +/******************************************************************************/ - if (config_tcache == false) - return (ENOENT); +CTL_RO_BOOL_CONFIG_GEN(config_debug) +CTL_RO_BOOL_CONFIG_GEN(config_dss) +CTL_RO_BOOL_CONFIG_GEN(config_fill) +CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock) +CTL_RO_BOOL_CONFIG_GEN(config_mremap) +CTL_RO_BOOL_CONFIG_GEN(config_munmap) +CTL_RO_BOOL_CONFIG_GEN(config_prof) +CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc) +CTL_RO_BOOL_CONFIG_GEN(config_prof_libunwind) +CTL_RO_BOOL_CONFIG_GEN(config_stats) +CTL_RO_BOOL_CONFIG_GEN(config_tcache) +CTL_RO_BOOL_CONFIG_GEN(config_tls) +CTL_RO_BOOL_CONFIG_GEN(config_utrace) +CTL_RO_BOOL_CONFIG_GEN(config_valgrind) +CTL_RO_BOOL_CONFIG_GEN(config_xmalloc) - oldval = tcache_enabled_get(); - if (newp != NULL) { - if (newlen != sizeof(bool)) { - ret = EINVAL; - goto label_return; - } - tcache_enabled_set(*(bool *)newp); - } - READ(oldval, bool); +/******************************************************************************/ - ret = 0; -label_return: - return (ret); -} +CTL_RO_NL_GEN(opt_abort, opt_abort, bool) +CTL_RO_NL_GEN(opt_dss, opt_dss, const char *) +CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t) +CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t) +CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t) +CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool) +CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, bool) 
+CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t) +CTL_RO_NL_CGEN(config_fill, opt_redzone, opt_redzone, bool) +CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool) +CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool) +CTL_RO_NL_CGEN(config_valgrind, opt_valgrind, opt_valgrind, bool) +CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool) +CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool) +CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_max, opt_lg_tcache_max, ssize_t) +CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool) +CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *) +CTL_RO_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) /* Mutable. */ +CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t) +CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool) +CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t) +CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool) +CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool) +CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool) -static int -thread_tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ - int ret; - - if (config_tcache == false) - return (ENOENT); - - READONLY(); - WRITEONLY(); - - tcache_flush(); - - ret = 0; -label_return: - return (ret); -} +/******************************************************************************/ static int thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, @@ -1235,50 +1240,49 @@ CTL_RO_NL_CGEN(config_stats, thread_deallocated, CTL_RO_NL_CGEN(config_stats, thread_deallocatedp, &thread_allocated_tsd_get()->deallocated, uint64_t *) -/******************************************************************************/ +static int +thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + bool oldval; -CTL_RO_BOOL_CONFIG_GEN(config_debug) -CTL_RO_BOOL_CONFIG_GEN(config_dss) -CTL_RO_BOOL_CONFIG_GEN(config_fill) -CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock) -CTL_RO_BOOL_CONFIG_GEN(config_mremap) -CTL_RO_BOOL_CONFIG_GEN(config_munmap) -CTL_RO_BOOL_CONFIG_GEN(config_prof) -CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc) -CTL_RO_BOOL_CONFIG_GEN(config_prof_libunwind) -CTL_RO_BOOL_CONFIG_GEN(config_stats) -CTL_RO_BOOL_CONFIG_GEN(config_tcache) -CTL_RO_BOOL_CONFIG_GEN(config_tls) -CTL_RO_BOOL_CONFIG_GEN(config_utrace) -CTL_RO_BOOL_CONFIG_GEN(config_valgrind) -CTL_RO_BOOL_CONFIG_GEN(config_xmalloc) + if (config_tcache == false) + return (ENOENT); -/******************************************************************************/ + oldval = tcache_enabled_get(); + if (newp != NULL) { + if (newlen != sizeof(bool)) { + ret = EINVAL; + goto label_return; + } + tcache_enabled_set(*(bool *)newp); + } + READ(oldval, bool); -CTL_RO_NL_GEN(opt_abort, opt_abort, bool) -CTL_RO_NL_GEN(opt_dss, opt_dss, const char *) -CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t) -CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t) -CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t) -CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool) -CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, bool) -CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool) -CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t) -CTL_RO_NL_CGEN(config_fill, opt_redzone, opt_redzone, bool) -CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, 
bool) -CTL_RO_NL_CGEN(config_valgrind, opt_valgrind, opt_valgrind, bool) -CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool) -CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool) -CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_max, opt_lg_tcache_max, ssize_t) -CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool) -CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *) -CTL_RO_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) /* Mutable. */ -CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t) -CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t) -CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool) -CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool) -CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool) -CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool) + ret = 0; +label_return: + return (ret); +} + +static int +thread_tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + + if (config_tcache == false) + return (ENOENT); + + READONLY(); + WRITEONLY(); + + tcache_flush(); + + ret = 0; +label_return: + return (ret); +} /******************************************************************************/ @@ -1390,31 +1394,8 @@ label_return: return (ret); } - /******************************************************************************/ -CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t) -CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t) -CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t) -static const ctl_named_node_t * -arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i) -{ - - if (i > NBINS) - return (NULL); - return (super_arenas_bin_i_node); -} - -CTL_RO_NL_GEN(arenas_lrun_i_size, ((mib[2]+1) << LG_PAGE), size_t) -static const ctl_named_node_t * -arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i) -{ - - if (i > nlclasses) - return (NULL); - return (super_arenas_lrun_i_node); -} - static int arenas_narenas_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) @@ -1468,7 +1449,28 @@ CTL_RO_NL_GEN(arenas_page, PAGE, size_t) CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t) CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned) CTL_RO_NL_CGEN(config_tcache, arenas_nhbins, nhbins, unsigned) +CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t) +CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t) +CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t) +static const ctl_named_node_t * +arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i) +{ + + if (i > NBINS) + return (NULL); + return (super_arenas_bin_i_node); +} + CTL_RO_NL_GEN(arenas_nlruns, nlclasses, size_t) +CTL_RO_NL_GEN(arenas_lrun_i_size, ((mib[2]+1) << LG_PAGE), size_t) +static const ctl_named_node_t * +arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i) +{ + + if (i > nlclasses) + return (NULL); + return (super_arenas_lrun_i_node); +} static int arenas_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, @@ -1575,6 +1577,11 @@ CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t) /******************************************************************************/ +CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *) +CTL_RO_CGEN(config_stats, 
stats_allocated, ctl_stats.allocated, size_t) +CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t) +CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t) + CTL_RO_CGEN(config_stats, stats_chunks_current, ctl_stats.chunks.current, size_t) CTL_RO_CGEN(config_stats, stats_chunks_total, ctl_stats.chunks.total, uint64_t) @@ -1582,6 +1589,20 @@ CTL_RO_CGEN(config_stats, stats_chunks_high, ctl_stats.chunks.high, size_t) CTL_RO_CGEN(config_stats, stats_huge_allocated, huge_allocated, size_t) CTL_RO_CGEN(config_stats, stats_huge_nmalloc, huge_nmalloc, uint64_t) CTL_RO_CGEN(config_stats, stats_huge_ndalloc, huge_ndalloc, uint64_t) + +CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *) +CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned) +CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t) +CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_mapped, + ctl_stats.arenas[mib[2]].astats.mapped, size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_npurge, + ctl_stats.arenas[mib[2]].astats.npurge, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise, + ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_purged, + ctl_stats.arenas[mib[2]].astats.purged, uint64_t) + CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated, ctl_stats.arenas[mib[2]].allocated_small, size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc, @@ -1645,19 +1666,6 @@ stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j) return (super_stats_arenas_i_lruns_j_node); } -CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned) -CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *) -CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t) -CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_mapped, - ctl_stats.arenas[mib[2]].astats.mapped, size_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_npurge, - ctl_stats.arenas[mib[2]].astats.npurge, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise, - ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_purged, - ctl_stats.arenas[mib[2]].astats.purged, uint64_t) - static const ctl_named_node_t * stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i) { @@ -1674,8 +1682,3 @@ label_return: malloc_mutex_unlock(&ctl_mtx); return (ret); } - -CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *) -CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t) -CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t) -CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t) diff --git a/src/huge.c b/src/huge.c index aa08d43d..cecaf2df 100644 --- a/src/huge.c +++ b/src/huge.c @@ -78,7 +78,7 @@ huge_palloc(size_t size, size_t alignment, bool zero) return (ret); } -void * +bool huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra) { @@ -89,15 +89,11 @@ huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra) && CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size) && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) { assert(CHUNK_CEILING(oldsize) == oldsize); - if (config_fill && opt_junk && size < oldsize) { - memset((void *)((uintptr_t)ptr + size), 0x5a, - oldsize - size); - } - return (ptr); + return (false); } 
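+	/*
+	 * No junk filling is needed above: when this path succeeds, the
+	 * usable size of the allocation is unchanged.
+	 */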
/* Reallocation would require a move. */ - return (NULL); + return (true); } void * @@ -108,9 +104,8 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra, size_t copysize; /* Try to avoid moving the allocation. */ - ret = huge_ralloc_no_move(ptr, oldsize, size, extra); - if (ret != NULL) - return (ret); + if (huge_ralloc_no_move(ptr, oldsize, size, extra) == false) + return (ptr); /* * size and oldsize are different enough that we need to use a @@ -169,7 +164,7 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra, */ char buf[BUFERROR_BUF]; - buferror(buf, sizeof(buf)); + buferror(get_errno(), buf, sizeof(buf)); malloc_printf(": Error in mremap(): %s\n", buf); if (opt_abort) @@ -181,11 +176,34 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra, #endif { memcpy(ret, ptr, copysize); - iqallocx(ptr, try_tcache_dalloc); + iqalloct(ptr, try_tcache_dalloc); } return (ret); } +#ifdef JEMALLOC_JET +#undef huge_dalloc_junk +#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl) +#endif +static void +huge_dalloc_junk(void *ptr, size_t usize) +{ + + if (config_fill && config_dss && opt_junk) { + /* + * Only bother junk filling if the chunk isn't about to be + * unmapped. + */ + if (config_munmap == false || (config_dss && chunk_in_dss(ptr))) + memset(ptr, 0x5a, usize); + } +} +#ifdef JEMALLOC_JET +#undef huge_dalloc_junk +#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk) +huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl); +#endif + void huge_dalloc(void *ptr, bool unmap) { @@ -208,8 +226,8 @@ huge_dalloc(void *ptr, bool unmap) malloc_mutex_unlock(&huge_mtx); - if (unmap && config_fill && config_dss && opt_junk) - memset(node->addr, 0x5a, node->size); + if (unmap) + huge_dalloc_junk(node->addr, node->size); chunk_dealloc(node->addr, node->size, unmap); diff --git a/src/jemalloc.c b/src/jemalloc.c index ae56db6b..563d99f8 100644 --- a/src/jemalloc.c +++ b/src/jemalloc.c @@ -100,18 +100,12 @@ typedef struct { #endif /******************************************************************************/ -/* Function prototypes for non-inline static functions. */ +/* + * Function prototypes for static functions that are referenced prior to + * definition. + */ -static void stats_print_atexit(void); -static unsigned malloc_ncpus(void); -static bool malloc_conf_next(char const **opts_p, char const **k_p, - size_t *klen_p, char const **v_p, size_t *vlen_p); -static void malloc_conf_error(const char *msg, const char *k, size_t klen, - const char *v, size_t vlen); -static void malloc_conf_init(void); static bool malloc_init_hard(void); -static int imemalign(void **memptr, size_t alignment, size_t size, - size_t min_alignment); /******************************************************************************/ /* @@ -252,7 +246,6 @@ stats_print_atexit(void) static unsigned malloc_ncpus(void) { - unsigned ret; long result; #ifdef _WIN32 @@ -262,14 +255,7 @@ malloc_ncpus(void) #else result = sysconf(_SC_NPROCESSORS_ONLN); #endif - if (result == -1) { - /* Error. */ - ret = 1; - } else { - ret = (unsigned)result; - } - - return (ret); + return ((result == -1) ? 
1 : (unsigned)result); } void @@ -484,8 +470,7 @@ malloc_conf_init(void) } break; } default: - /* NOTREACHED */ - assert(false); + not_reached(); buf[0] = '\0'; opts = buf; } @@ -522,14 +507,15 @@ malloc_conf_init(void) "Invalid conf value", \ k, klen, v, vlen); \ } else if (clip) { \ - if (um < min) \ + if (min != 0 && um < min) \ o = min; \ else if (um > max) \ o = max; \ else \ o = um; \ } else { \ - if (um < min || um > max) { \ + if ((min != 0 && um < min) || \ + um > max) { \ malloc_conf_error( \ "Out-of-range " \ "conf value", \ @@ -695,17 +681,6 @@ malloc_init_hard(void) malloc_conf_init(); -#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \ - && !defined(_WIN32)) - /* Register fork handlers. */ - if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent, - jemalloc_postfork_child) != 0) { - malloc_write(": Error in pthread_atfork()\n"); - if (opt_abort) - abort(); - } -#endif - if (opt_stats_print) { /* Print statistics at exit. */ if (atexit(stats_print_atexit) != 0) { @@ -745,8 +720,10 @@ malloc_init_hard(void) return (true); } - if (malloc_mutex_init(&arenas_lock)) + if (malloc_mutex_init(&arenas_lock)) { + malloc_mutex_unlock(&init_lock); return (true); + } /* * Create enough scaffolding to allow recursive allocation in @@ -792,9 +769,25 @@ malloc_init_hard(void) return (true); } - /* Get number of CPUs. */ malloc_mutex_unlock(&init_lock); + /**********************************************************************/ + /* Recursive allocation may follow. */ + ncpus = malloc_ncpus(); + +#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \ + && !defined(_WIN32)) + /* LinuxThreads's pthread_atfork() allocates. */ + if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent, + jemalloc_postfork_child) != 0) { + malloc_write(": Error in pthread_atfork()\n"); + if (opt_abort) + abort(); + } +#endif + + /* Done recursively allocating. */ + /**********************************************************************/ malloc_mutex_lock(&init_lock); if (mutex_boot()) { @@ -841,6 +834,7 @@ malloc_init_hard(void) malloc_initialized = true; malloc_mutex_unlock(&init_lock); + return (false); } @@ -852,42 +846,88 @@ malloc_init_hard(void) * Begin malloc(3)-compatible functions. */ +static void * +imalloc_prof_sample(size_t usize, prof_thr_cnt_t *cnt) +{ + void *p; + + if (cnt == NULL) + return (NULL); + if (prof_promote && usize <= SMALL_MAXCLASS) { + p = imalloc(SMALL_MAXCLASS+1); + if (p == NULL) + return (NULL); + arena_prof_promoted(p, usize); + } else + p = imalloc(usize); + + return (p); +} + +JEMALLOC_ALWAYS_INLINE_C void * +imalloc_prof(size_t usize, prof_thr_cnt_t *cnt) +{ + void *p; + + if ((uintptr_t)cnt != (uintptr_t)1U) + p = imalloc_prof_sample(usize, cnt); + else + p = imalloc(usize); + if (p == NULL) + return (NULL); + prof_malloc(p, usize, cnt); + + return (p); +} + +/* + * MALLOC_BODY() is a macro rather than a function because its contents are in + * the fast path, but inlining would cause reliability issues when determining + * how many frames to discard from heap profiling backtraces. + */ +#define MALLOC_BODY(ret, size, usize) do { \ + if (malloc_init()) \ + ret = NULL; \ + else { \ + if (config_prof && opt_prof) { \ + prof_thr_cnt_t *cnt; \ + \ + usize = s2u(size); \ + /* \ + * Call PROF_ALLOC_PREP() here rather than in \ + * imalloc_prof() so that imalloc_prof() can be \ + * inlined without introducing uncertainty \ + * about the number of backtrace frames to \ + * ignore. 
+			 * ignore.  imalloc_prof() is in the fast path	\
+			 * when heap profiling is enabled, so inlining	\
+			 * is critical to performance.  (For		\
+			 * consistency all callers of PROF_ALLOC_PREP()	\
+			 * are structured similarly, even though e.g.	\
+			 * realloc() isn't called enough for inlining	\
+			 * to be critical.)				\
+			 */						\
+			PROF_ALLOC_PREP(1, usize, cnt);			\
+			ret = imalloc_prof(usize, cnt);			\
+		} else {						\
+			if (config_stats || (config_valgrind &&	\
+			    opt_valgrind))				\
+				usize = s2u(size);			\
+			ret = imalloc(size);				\
+		}							\
+	}								\
+} while (0)
+
 void *
 je_malloc(size_t size)
 {
 	void *ret;
 	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
-	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
-
-	if (malloc_init()) {
-		ret = NULL;
-		goto label_oom;
-	}
 
 	if (size == 0)
 		size = 1;
 
-	if (config_prof && opt_prof) {
-		usize = s2u(size);
-		PROF_ALLOC_PREP(1, usize, cnt);
-		if (cnt == NULL) {
-			ret = NULL;
-			goto label_oom;
-		}
-		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
-		    SMALL_MAXCLASS) {
-			ret = imalloc(SMALL_MAXCLASS+1);
-			if (ret != NULL)
-				arena_prof_promoted(ret, usize);
-		} else
-			ret = imalloc(size);
-	} else {
-		if (config_stats || (config_valgrind && opt_valgrind))
-			usize = s2u(size);
-		ret = imalloc(size);
-	}
+	MALLOC_BODY(ret, size, usize);
 
-label_oom:
 	if (ret == NULL) {
 		if (config_xmalloc && opt_xmalloc) {
 			malloc_write("<jemalloc>: Error in malloc(): "
@@ -896,8 +936,6 @@ label_oom:
 		}
 		set_errno(ENOMEM);
 	}
-	if (config_prof && opt_prof && ret != NULL)
-		prof_malloc(ret, usize, cnt);
 	if (config_stats && ret != NULL) {
 		assert(usize == isalloc(ret, config_prof));
 		thread_allocated_tsd_get()->allocated += usize;
@@ -907,6 +945,42 @@ label_oom:
 	return (ret);
 }
 
+static void *
+imemalign_prof_sample(size_t alignment, size_t usize, prof_thr_cnt_t *cnt)
+{
+	void *p;
+
+	if (cnt == NULL)
+		return (NULL);
+	if (prof_promote && usize <= SMALL_MAXCLASS) {
+		assert(sa2u(SMALL_MAXCLASS+1, alignment) != 0);
+		p = ipalloc(sa2u(SMALL_MAXCLASS+1, alignment), alignment,
+		    false);
+		if (p == NULL)
+			return (NULL);
+		arena_prof_promoted(p, usize);
+	} else
+		p = ipalloc(usize, alignment, false);
+
+	return (p);
+}
+
+JEMALLOC_ALWAYS_INLINE_C void *
+imemalign_prof(size_t alignment, size_t usize, prof_thr_cnt_t *cnt)
+{
+	void *p;
+
+	if ((uintptr_t)cnt != (uintptr_t)1U)
+		p = imemalign_prof_sample(alignment, usize, cnt);
+	else
+		p = ipalloc(usize, alignment, false);
+	if (p == NULL)
+		return (NULL);
+	prof_malloc(p, usize, cnt);
+
+	return (p);
+}
+
 JEMALLOC_ATTR(nonnull(1))
 #ifdef JEMALLOC_PROF
 /*
@@ -916,19 +990,18 @@ JEMALLOC_ATTR(nonnull(1))
 JEMALLOC_NOINLINE
 #endif
 static int
-imemalign(void **memptr, size_t alignment, size_t size,
-    size_t min_alignment)
+imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
 {
 	int ret;
 	size_t usize;
 	void *result;
-	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
 
 	assert(min_alignment != 0);
 
-	if (malloc_init())
+	if (malloc_init()) {
 		result = NULL;
-	else {
+		goto label_oom;
+	} else {
 		if (size == 0)
 			size = 1;
@@ -948,57 +1021,38 @@ imemalign(void **memptr, size_t alignment, size_t size,
 		usize = sa2u(size, alignment);
 		if (usize == 0) {
 			result = NULL;
-			ret = ENOMEM;
-			goto label_return;
+			goto label_oom;
 		}
 
 		if (config_prof && opt_prof) {
+			prof_thr_cnt_t *cnt;
+
 			PROF_ALLOC_PREP(2, usize, cnt);
-			if (cnt == NULL) {
-				result = NULL;
-				ret = EINVAL;
-			} else {
-				if (prof_promote && (uintptr_t)cnt !=
-				    (uintptr_t)1U && usize <= SMALL_MAXCLASS) {
-					assert(sa2u(SMALL_MAXCLASS+1,
-					    alignment) != 0);
-					result = ipalloc(sa2u(SMALL_MAXCLASS+1,
-					    alignment), alignment, false);
-					if (result != NULL) {
-						arena_prof_promoted(result,
-						    usize);
-					}
-				} else {
-					result = ipalloc(usize, alignment,
-					    false);
-				}
-			}
+			result = imemalign_prof(alignment, usize, cnt);
 		} else
 			result = ipalloc(usize, alignment, false);
-	}
-
-	if (result == NULL) {
-		if (config_xmalloc && opt_xmalloc) {
-			malloc_write("<jemalloc>: Error allocating aligned "
-			    "memory: out of memory\n");
-			abort();
-		}
-		ret = ENOMEM;
-		goto label_return;
+		if (result == NULL)
+			goto label_oom;
 	}
 
 	*memptr = result;
 	ret = 0;
-
 label_return:
 	if (config_stats && result != NULL) {
 		assert(usize == isalloc(result, config_prof));
 		thread_allocated_tsd_get()->allocated += usize;
 	}
-	if (config_prof && opt_prof && result != NULL)
-		prof_malloc(result, usize, cnt);
 	UTRACE(0, size, result);
 	return (ret);
+label_oom:
+	assert(result == NULL);
+	if (config_xmalloc && opt_xmalloc) {
+		malloc_write("<jemalloc>: Error allocating aligned memory: "
+		    "out of memory\n");
+		abort();
+	}
+	ret = ENOMEM;
+	goto label_return;
 }
 
 int
@@ -1025,13 +1079,46 @@ je_aligned_alloc(size_t alignment, size_t size)
 	return (ret);
 }
 
+static void *
+icalloc_prof_sample(size_t usize, prof_thr_cnt_t *cnt)
+{
+	void *p;
+
+	if (cnt == NULL)
+		return (NULL);
+	if (prof_promote && usize <= SMALL_MAXCLASS) {
+		p = icalloc(SMALL_MAXCLASS+1);
+		if (p == NULL)
+			return (NULL);
+		arena_prof_promoted(p, usize);
+	} else
+		p = icalloc(usize);
+
+	return (p);
+}
+
+JEMALLOC_ALWAYS_INLINE_C void *
+icalloc_prof(size_t usize, prof_thr_cnt_t *cnt)
+{
+	void *p;
+
+	if ((uintptr_t)cnt != (uintptr_t)1U)
+		p = icalloc_prof_sample(usize, cnt);
+	else
+		p = icalloc(usize);
+	if (p == NULL)
+		return (NULL);
+	prof_malloc(p, usize, cnt);
+
+	return (p);
+}
+
 void *
 je_calloc(size_t num, size_t size)
 {
 	void *ret;
 	size_t num_size;
 	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
-	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
 
 	if (malloc_init()) {
 		num_size = 0;
@@ -1060,19 +1147,11 @@ je_calloc(size_t num, size_t size)
 	}
 
 	if (config_prof && opt_prof) {
+		prof_thr_cnt_t *cnt;
+
 		usize = s2u(num_size);
 		PROF_ALLOC_PREP(1, usize, cnt);
-		if (cnt == NULL) {
-			ret = NULL;
-			goto label_return;
-		}
-		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
-		    <= SMALL_MAXCLASS) {
-			ret = icalloc(SMALL_MAXCLASS+1);
-			if (ret != NULL)
-				arena_prof_promoted(ret, usize);
-		} else
-			ret = icalloc(num_size);
+		ret = icalloc_prof(usize, cnt);
 	} else {
 		if (config_stats || (config_valgrind && opt_valgrind))
 			usize = s2u(num_size);
@@ -1088,9 +1167,6 @@ label_return:
 		}
 		set_errno(ENOMEM);
 	}
-
-	if (config_prof && opt_prof && ret != NULL)
-		prof_malloc(ret, usize, cnt);
 	if (config_stats && ret != NULL) {
 		assert(usize == isalloc(ret, config_prof));
 		thread_allocated_tsd_get()->allocated += usize;
@@ -1100,152 +1176,126 @@ label_return:
 	return (ret);
 }
 
+static void *
+irealloc_prof_sample(void *oldptr, size_t usize, prof_thr_cnt_t *cnt)
+{
+	void *p;
+
+	if (cnt == NULL)
+		return (NULL);
+	if (prof_promote && usize <= SMALL_MAXCLASS) {
+		p = iralloc(oldptr, SMALL_MAXCLASS+1, 0, 0, false);
+		if (p == NULL)
+			return (NULL);
+		arena_prof_promoted(p, usize);
+	} else
+		p = iralloc(oldptr, usize, 0, 0, false);
+
+	return (p);
+}
+
+JEMALLOC_ALWAYS_INLINE_C void *
+irealloc_prof(void *oldptr, size_t old_usize, size_t usize, prof_thr_cnt_t *cnt)
+{
+	void *p;
+	prof_ctx_t *old_ctx;
+
+	old_ctx = prof_ctx_get(oldptr);
+	if ((uintptr_t)cnt != (uintptr_t)1U)
+		p = irealloc_prof_sample(oldptr, usize, cnt);
+	else
+		p = iralloc(oldptr, usize, 0, 0, false);
+	if (p == NULL)
+		return (NULL);
+	prof_realloc(p, usize, cnt, old_usize, old_ctx);
+
+	return (p);
+}
+
+JEMALLOC_INLINE_C void
+ifree(void *ptr)
+{
+	size_t usize;
+	UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
+
+	assert(ptr != NULL);
+	assert(malloc_initialized || IS_INITIALIZER);
+
+	if (config_prof && opt_prof) {
+		usize = isalloc(ptr, config_prof);
+		prof_free(ptr, usize);
+	} else if (config_stats || config_valgrind)
+		usize = isalloc(ptr, config_prof);
+	if (config_stats)
+		thread_allocated_tsd_get()->deallocated += usize;
+	if (config_valgrind && opt_valgrind)
+		rzsize = p2rz(ptr);
+	iqalloc(ptr);
+	JEMALLOC_VALGRIND_FREE(ptr, rzsize);
+}
+
 void *
 je_realloc(void *ptr, size_t size)
 {
 	void *ret;
 	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
-	size_t old_size = 0;
-	size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
-	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
-	prof_ctx_t *old_ctx JEMALLOC_CC_SILENCE_INIT(NULL);
+	size_t old_usize = 0;
+	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
 
 	if (size == 0) {
 		if (ptr != NULL) {
-			/* realloc(ptr, 0) is equivalent to free(p). */
-			assert(malloc_initialized || IS_INITIALIZER);
-			if (config_prof) {
-				old_size = isalloc(ptr, true);
-				if (config_valgrind && opt_valgrind)
-					old_rzsize = p2rz(ptr);
-			} else if (config_stats) {
-				old_size = isalloc(ptr, false);
-				if (config_valgrind && opt_valgrind)
-					old_rzsize = u2rz(old_size);
-			} else if (config_valgrind && opt_valgrind) {
-				old_size = isalloc(ptr, false);
-				old_rzsize = u2rz(old_size);
-			}
-			if (config_prof && opt_prof) {
-				old_ctx = prof_ctx_get(ptr);
-				cnt = NULL;
-			}
-			iqalloc(ptr);
-			ret = NULL;
-			goto label_return;
-		} else
-			size = 1;
+			/* realloc(ptr, 0) is equivalent to free(ptr). */
+			UTRACE(ptr, 0, 0);
+			ifree(ptr);
+			return (NULL);
+		}
+		size = 1;
 	}
 
 	if (ptr != NULL) {
 		assert(malloc_initialized || IS_INITIALIZER);
 		malloc_thread_init();
 
-		if (config_prof) {
-			old_size = isalloc(ptr, true);
-			if (config_valgrind && opt_valgrind)
-				old_rzsize = p2rz(ptr);
-		} else if (config_stats) {
-			old_size = isalloc(ptr, false);
-			if (config_valgrind && opt_valgrind)
-				old_rzsize = u2rz(old_size);
-		} else if (config_valgrind && opt_valgrind) {
-			old_size = isalloc(ptr, false);
-			old_rzsize = u2rz(old_size);
-		}
+		if ((config_prof && opt_prof) || config_stats ||
+		    (config_valgrind && opt_valgrind))
+			old_usize = isalloc(ptr, config_prof);
+		if (config_valgrind && opt_valgrind)
+			old_rzsize = config_prof ? p2rz(ptr) : u2rz(old_usize);
+
 		if (config_prof && opt_prof) {
+			prof_thr_cnt_t *cnt;
+
 			usize = s2u(size);
-			old_ctx = prof_ctx_get(ptr);
 			PROF_ALLOC_PREP(1, usize, cnt);
-			if (cnt == NULL) {
-				old_ctx = NULL;
-				ret = NULL;
-				goto label_oom;
-			}
-			if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
-			    usize <= SMALL_MAXCLASS) {
-				ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
-				    false, false);
-				if (ret != NULL)
-					arena_prof_promoted(ret, usize);
-				else
-					old_ctx = NULL;
-			} else {
-				ret = iralloc(ptr, size, 0, 0, false, false);
-				if (ret == NULL)
-					old_ctx = NULL;
-			}
+			ret = irealloc_prof(ptr, old_usize, usize, cnt);
 		} else {
 			if (config_stats || (config_valgrind && opt_valgrind))
 				usize = s2u(size);
-			ret = iralloc(ptr, size, 0, 0, false, false);
-		}
-
-label_oom:
-		if (ret == NULL) {
-			if (config_xmalloc && opt_xmalloc) {
-				malloc_write("<jemalloc>: Error in realloc(): "
-				    "out of memory\n");
-				abort();
-			}
-			set_errno(ENOMEM);
+			ret = iralloc(ptr, size, 0, 0, false);
 		}
 	} else {
 		/* realloc(NULL, size) is equivalent to malloc(size). */
-		if (config_prof && opt_prof)
-			old_ctx = NULL;
-		if (malloc_init()) {
-			if (config_prof && opt_prof)
-				cnt = NULL;
-			ret = NULL;
-		} else {
-			if (config_prof && opt_prof) {
-				usize = s2u(size);
-				PROF_ALLOC_PREP(1, usize, cnt);
-				if (cnt == NULL)
-					ret = NULL;
-				else {
-					if (prof_promote && (uintptr_t)cnt !=
-					    (uintptr_t)1U && usize <=
-					    SMALL_MAXCLASS) {
-						ret = imalloc(SMALL_MAXCLASS+1);
-						if (ret != NULL) {
-							arena_prof_promoted(ret,
-							    usize);
-						}
-					} else
-						ret = imalloc(size);
-				}
-			} else {
-				if (config_stats || (config_valgrind &&
-				    opt_valgrind))
-					usize = s2u(size);
-				ret = imalloc(size);
-			}
-		}
-
-		if (ret == NULL) {
-			if (config_xmalloc && opt_xmalloc) {
-				malloc_write("<jemalloc>: Error in realloc(): "
-				    "out of memory\n");
-				abort();
-			}
-			set_errno(ENOMEM);
-		}
+		MALLOC_BODY(ret, size, usize);
 	}
 
-label_return:
-	if (config_prof && opt_prof)
-		prof_realloc(ret, usize, cnt, old_size, old_ctx);
+	if (ret == NULL) {
+		if (config_xmalloc && opt_xmalloc) {
+			malloc_write("<jemalloc>: Error in realloc(): "
+			    "out of memory\n");
+			abort();
+		}
+		set_errno(ENOMEM);
+	}
 	if (config_stats && ret != NULL) {
 		thread_allocated_t *ta;
 		assert(usize == isalloc(ret, config_prof));
 		ta = thread_allocated_tsd_get();
 		ta->allocated += usize;
-		ta->deallocated += old_size;
+		ta->deallocated += old_usize;
 	}
 	UTRACE(ptr, size, ret);
-	JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_size, old_rzsize, false);
+	JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_usize, old_rzsize,
+	    false);
 	return (ret);
 }
 
@@ -1254,24 +1304,8 @@ je_free(void *ptr)
 {
 
 	UTRACE(ptr, 0, 0);
-	if (ptr != NULL) {
-		size_t usize;
-		size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
-
-		assert(malloc_initialized || IS_INITIALIZER);
-
-		if (config_prof && opt_prof) {
-			usize = isalloc(ptr, config_prof);
-			prof_free(ptr, usize);
-		} else if (config_stats || config_valgrind)
-			usize = isalloc(ptr, config_prof);
-		if (config_stats)
-			thread_allocated_tsd_get()->deallocated += usize;
-		if (config_valgrind && opt_valgrind)
-			rzsize = p2rz(ptr);
-		iqalloc(ptr);
-		JEMALLOC_VALGRIND_FREE(ptr, rzsize);
-	}
+	if (ptr != NULL)
+		ifree(ptr);
 }
 
 /*
@@ -1337,28 +1371,440 @@ JEMALLOC_EXPORT void *(* __memalign_hook)(size_t alignment, size_t size) =
  * Begin non-standard functions.
  */
 
-size_t
-je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
+JEMALLOC_ALWAYS_INLINE_C void *
+imallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
+    arena_t *arena)
 {
-	size_t ret;
+
+	assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize,
+	    alignment)));
+
+	if (alignment != 0)
+		return (ipalloct(usize, alignment, zero, try_tcache, arena));
+	else if (zero)
+		return (icalloct(usize, try_tcache, arena));
+	else
+		return (imalloct(usize, try_tcache, arena));
+}
+
+static void *
+imallocx_prof_sample(size_t usize, size_t alignment, bool zero, bool try_tcache,
+    arena_t *arena, prof_thr_cnt_t *cnt)
+{
+	void *p;
+
+	if (cnt == NULL)
+		return (NULL);
+	if (prof_promote && usize <= SMALL_MAXCLASS) {
+		size_t usize_promoted = (alignment == 0) ?
+		    s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1, alignment);
+		assert(usize_promoted != 0);
+		p = imallocx(usize_promoted, alignment, zero, try_tcache,
+		    arena);
+		if (p == NULL)
+			return (NULL);
+		arena_prof_promoted(p, usize);
+	} else
+		p = imallocx(usize, alignment, zero, try_tcache, arena);
+
+	return (p);
+}
+
+JEMALLOC_ALWAYS_INLINE_C void *
+imallocx_prof(size_t usize, size_t alignment, bool zero, bool try_tcache,
+    arena_t *arena, prof_thr_cnt_t *cnt)
+{
+	void *p;
+
+	if ((uintptr_t)cnt != (uintptr_t)1U) {
+		p = imallocx_prof_sample(usize, alignment, zero, try_tcache,
+		    arena, cnt);
+	} else
+		p = imallocx(usize, alignment, zero, try_tcache, arena);
+	if (p == NULL)
+		return (NULL);
+	prof_malloc(p, usize, cnt);
+
+	return (p);
+}
+
+void *
+je_mallocx(size_t size, int flags)
+{
+	void *p;
+	size_t usize;
+	size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
+	    & (SIZE_T_MAX-1));
+	bool zero = flags & MALLOCX_ZERO;
+	unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
+	arena_t *arena;
+	bool try_tcache;
+
+	assert(size != 0);
+
+	if (malloc_init())
+		goto label_oom;
+
+	if (arena_ind != UINT_MAX) {
+		arena = arenas[arena_ind];
+		try_tcache = false;
+	} else {
+		arena = NULL;
+		try_tcache = true;
+	}
+
+	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
+	assert(usize != 0);
+
+	if (config_prof && opt_prof) {
+		prof_thr_cnt_t *cnt;
+
+		PROF_ALLOC_PREP(1, usize, cnt);
+		p = imallocx_prof(usize, alignment, zero, try_tcache, arena,
+		    cnt);
+	} else
+		p = imallocx(usize, alignment, zero, try_tcache, arena);
+	if (p == NULL)
+		goto label_oom;
+
+	if (config_stats) {
+		assert(usize == isalloc(p, config_prof));
+		thread_allocated_tsd_get()->allocated += usize;
+	}
+	UTRACE(0, size, p);
+	JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero);
+	return (p);
+label_oom:
+	if (config_xmalloc && opt_xmalloc) {
+		malloc_write("<jemalloc>: Error in mallocx(): out of memory\n");
+		abort();
+	}
+	UTRACE(0, size, 0);
+	return (NULL);
+}
+
+static void *
+irallocx_prof_sample(void *oldptr, size_t size, size_t alignment, size_t usize,
+    bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena,
+    prof_thr_cnt_t *cnt)
+{
+	void *p;
+
+	if (cnt == NULL)
+		return (NULL);
+	if (prof_promote && usize <= SMALL_MAXCLASS) {
+		p = iralloct(oldptr, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
+		    size) ? 0 : size - (SMALL_MAXCLASS+1), alignment, zero,
+		    try_tcache_alloc, try_tcache_dalloc, arena);
+		if (p == NULL)
+			return (NULL);
+		arena_prof_promoted(p, usize);
+	} else {
+		p = iralloct(oldptr, size, 0, alignment, zero,
+		    try_tcache_alloc, try_tcache_dalloc, arena);
+	}
+
+	return (p);
+}
+
+JEMALLOC_ALWAYS_INLINE_C void *
+irallocx_prof(void *oldptr, size_t old_usize, size_t size, size_t alignment,
+    size_t *usize, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
+    arena_t *arena, prof_thr_cnt_t *cnt)
+{
+	void *p;
+	prof_ctx_t *old_ctx;
+
+	old_ctx = prof_ctx_get(oldptr);
+	if ((uintptr_t)cnt != (uintptr_t)1U)
+		p = irallocx_prof_sample(oldptr, size, alignment, *usize, zero,
+		    try_tcache_alloc, try_tcache_dalloc, arena, cnt);
+	else {
+		p = iralloct(oldptr, size, 0, alignment, zero,
+		    try_tcache_alloc, try_tcache_dalloc, arena);
+	}
+	if (p == NULL)
+		return (NULL);
+
+	if (p == oldptr && alignment != 0) {
+		/*
+		 * The allocation did not move, so it is possible that the size
+		 * class is smaller than would guarantee the requested
+		 * alignment, and that the alignment constraint was
+		 * serendipitously satisfied.  Additionally, old_usize may not
+		 * be the same as the current usize because of in-place large
+		 * reallocation.  Therefore, query the actual value of usize.
+		 */
+		*usize = isalloc(p, config_prof);
+	}
+	prof_realloc(p, *usize, cnt, old_usize, old_ctx);
+
+	return (p);
+}
+
+void *
+je_rallocx(void *ptr, size_t size, int flags)
+{
+	void *p;
+	size_t usize, old_usize;
+	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
+	size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
+	    & (SIZE_T_MAX-1));
+	bool zero = flags & MALLOCX_ZERO;
+	unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
+	bool try_tcache_alloc, try_tcache_dalloc;
+	arena_t *arena;
+
+	assert(ptr != NULL);
+	assert(size != 0);
+	assert(malloc_initialized || IS_INITIALIZER);
+	malloc_thread_init();
+
+	if (arena_ind != UINT_MAX) {
+		arena_chunk_t *chunk;
+		try_tcache_alloc = false;
+		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+		try_tcache_dalloc = (chunk == ptr || chunk->arena !=
+		    arenas[arena_ind]);
+		arena = arenas[arena_ind];
+	} else {
+		try_tcache_alloc = true;
+		try_tcache_dalloc = true;
+		arena = NULL;
+	}
+
+	if ((config_prof && opt_prof) || config_stats ||
+	    (config_valgrind && opt_valgrind))
+		old_usize = isalloc(ptr, config_prof);
+	if (config_valgrind && opt_valgrind)
+		old_rzsize = u2rz(old_usize);
+
+	if (config_prof && opt_prof) {
+		prof_thr_cnt_t *cnt;
+
+		usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
+		assert(usize != 0);
+		PROF_ALLOC_PREP(1, usize, cnt);
+		p = irallocx_prof(ptr, old_usize, size, alignment, &usize, zero,
+		    try_tcache_alloc, try_tcache_dalloc, arena, cnt);
+		if (p == NULL)
+			goto label_oom;
+	} else {
+		p = iralloct(ptr, size, 0, alignment, zero, try_tcache_alloc,
+		    try_tcache_dalloc, arena);
+		if (p == NULL)
+			goto label_oom;
+		if (config_stats || (config_valgrind && opt_valgrind))
+			usize = isalloc(p, config_prof);
+	}
+
+	if (config_stats) {
+		thread_allocated_t *ta;
+		ta = thread_allocated_tsd_get();
+		ta->allocated += usize;
+		ta->deallocated += old_usize;
+	}
+	UTRACE(ptr, size, p);
+	JEMALLOC_VALGRIND_REALLOC(p, usize, ptr, old_usize, old_rzsize, zero);
+	return (p);
+label_oom:
+	if (config_xmalloc && opt_xmalloc) {
+		malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
+		abort();
+	}
+	UTRACE(ptr, size, 0);
+	return (NULL);
+}
+
+JEMALLOC_ALWAYS_INLINE_C size_t
+ixallocx_helper(void *ptr, size_t old_usize, size_t size, size_t extra,
+    size_t alignment, bool zero, arena_t *arena)
+{
+	size_t usize;
+
+	if (ixalloc(ptr, size, extra, alignment, zero))
+		return (old_usize);
+	usize = isalloc(ptr, config_prof);
+
+	return (usize);
+}
+
+static size_t
+ixallocx_prof_sample(void *ptr, size_t old_usize, size_t size, size_t extra,
+    size_t alignment, size_t max_usize, bool zero, arena_t *arena,
+    prof_thr_cnt_t *cnt)
+{
+	size_t usize;
+
+	if (cnt == NULL)
+		return (old_usize);
+	/* Use minimum usize to determine whether promotion may happen. */
+	if (prof_promote && ((alignment == 0) ? s2u(size) : sa2u(size,
+	    alignment)) <= SMALL_MAXCLASS) {
+		if (ixalloc(ptr, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
+		    size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
+		    alignment, zero))
+			return (old_usize);
+		usize = isalloc(ptr, config_prof);
+		if (max_usize < PAGE)
+			arena_prof_promoted(ptr, usize);
+	} else {
+		usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
+		    zero, arena);
+	}
+
+	return (usize);
+}
+
+JEMALLOC_ALWAYS_INLINE_C size_t
+ixallocx_prof(void *ptr, size_t old_usize, size_t size, size_t extra,
+    size_t alignment, size_t max_usize, bool zero, arena_t *arena,
+    prof_thr_cnt_t *cnt)
+{
+	size_t usize;
+	prof_ctx_t *old_ctx;
+
+	old_ctx = prof_ctx_get(ptr);
+	if ((uintptr_t)cnt != (uintptr_t)1U) {
+		usize = ixallocx_prof_sample(ptr, old_usize, size, extra,
+		    alignment, max_usize, zero, arena, cnt);
+	} else {
+		usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
+		    zero, arena);
+	}
+	if (usize == old_usize)
+		return (usize);
+	prof_realloc(ptr, usize, cnt, old_usize, old_ctx);
+
+	return (usize);
+}
+
+size_t
+je_xallocx(void *ptr, size_t size, size_t extra, int flags)
+{
+	size_t usize, old_usize;
+	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
+	size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
+	    & (SIZE_T_MAX-1));
+	bool zero = flags & MALLOCX_ZERO;
+	unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
+	arena_t *arena;
+
+	assert(ptr != NULL);
+	assert(size != 0);
+	assert(SIZE_T_MAX - size >= extra);
+	assert(malloc_initialized || IS_INITIALIZER);
+	malloc_thread_init();
+
+	if (arena_ind != UINT_MAX)
+		arena = arenas[arena_ind];
+	else
+		arena = NULL;
+
+	old_usize = isalloc(ptr, config_prof);
+	if (config_valgrind && opt_valgrind)
+		old_rzsize = u2rz(old_usize);
+
+	if (config_prof && opt_prof) {
+		prof_thr_cnt_t *cnt;
+		/*
+		 * usize isn't knowable before ixalloc() returns when extra is
+		 * non-zero.  Therefore, compute its maximum possible value and
+		 * use that in PROF_ALLOC_PREP() to decide whether to capture a
+		 * backtrace.  prof_realloc() will use the actual usize to
+		 * decide whether to sample.
+		 */
+		size_t max_usize = (alignment == 0) ? s2u(size+extra) :
+		    sa2u(size+extra, alignment);
+		PROF_ALLOC_PREP(1, max_usize, cnt);
+		usize = ixallocx_prof(ptr, old_usize, size, extra, alignment,
+		    max_usize, zero, arena, cnt);
+	} else {
+		usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
+		    zero, arena);
+	}
+	if (usize == old_usize)
+		goto label_not_resized;
+
+	if (config_stats) {
+		thread_allocated_t *ta;
+		ta = thread_allocated_tsd_get();
+		ta->allocated += usize;
+		ta->deallocated += old_usize;
+	}
+	JEMALLOC_VALGRIND_REALLOC(ptr, usize, ptr, old_usize, old_rzsize, zero);
+label_not_resized:
+	UTRACE(ptr, size, ptr);
+	return (usize);
+}
+
+size_t
+je_sallocx(const void *ptr, int flags)
+{
+	size_t usize;
 
 	assert(malloc_initialized || IS_INITIALIZER);
 	malloc_thread_init();
 
 	if (config_ivsalloc)
-		ret = ivsalloc(ptr, config_prof);
-	else
-		ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;
+		usize = ivsalloc(ptr, config_prof);
+	else {
+		assert(ptr != NULL);
+		usize = isalloc(ptr, config_prof);
+	}
 
-	return (ret);
+	return (usize);
 }
 
 void
-je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
-    const char *opts)
+je_dallocx(void *ptr, int flags)
 {
+	size_t usize;
+	UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
+	unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
+	bool try_tcache;
 
-	stats_print(write_cb, cbopaque, opts);
+	assert(ptr != NULL);
+	assert(malloc_initialized || IS_INITIALIZER);
+
+	if (arena_ind != UINT_MAX) {
+		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+		try_tcache = (chunk == ptr || chunk->arena !=
+		    arenas[arena_ind]);
+	} else
+		try_tcache = true;
+
+	UTRACE(ptr, 0, 0);
+	if (config_stats || config_valgrind)
+		usize = isalloc(ptr, config_prof);
+	if (config_prof && opt_prof) {
+		if (config_stats == false && config_valgrind == false)
+			usize = isalloc(ptr, config_prof);
+		prof_free(ptr, usize);
+	}
+	if (config_stats)
+		thread_allocated_tsd_get()->deallocated += usize;
+	if (config_valgrind && opt_valgrind)
+		rzsize = p2rz(ptr);
+	iqalloct(ptr, try_tcache);
+	JEMALLOC_VALGRIND_FREE(ptr, rzsize);
+}
+
+size_t
+je_nallocx(size_t size, int flags)
+{
+	size_t usize;
+	size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
+	    & (SIZE_T_MAX-1));
+
+	assert(size != 0);
+
+	if (malloc_init())
+		return (0);
+
+	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
+	assert(usize != 0);
+	return (usize);
 }
 
 int
@@ -1393,6 +1839,30 @@ je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
 	return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
 }
 
+void
+je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
+    const char *opts)
+{
+
+	stats_print(write_cb, cbopaque, opts);
+}
+
+size_t
+je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
+{
+	size_t ret;
+
+	assert(malloc_initialized || IS_INITIALIZER);
+	malloc_thread_init();
+
+	if (config_ivsalloc)
+		ret = ivsalloc(ptr, config_prof);
+	else
+		ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;
+
+	return (ret);
+}
+
 /*
  * End non-standard functions.
  */
@@ -1402,284 +1872,65 @@ je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
  */
 
 #ifdef JEMALLOC_EXPERIMENTAL
 
-JEMALLOC_ALWAYS_INLINE_C void *
-iallocm(size_t usize, size_t alignment, bool zero, bool try_tcache,
-    arena_t *arena)
-{
-
-	assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize,
-	    alignment)));
-
-	if (alignment != 0)
-		return (ipallocx(usize, alignment, zero, try_tcache, arena));
-	else if (zero)
-		return (icallocx(usize, try_tcache, arena));
-	else
-		return (imallocx(usize, try_tcache, arena));
-}
-
 int
 je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
 {
 	void *p;
-	size_t usize;
-	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
-	    & (SIZE_T_MAX-1));
-	bool zero = flags & ALLOCM_ZERO;
-	unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
-	arena_t *arena;
-	bool try_tcache;
 
 	assert(ptr != NULL);
-	assert(size != 0);
-
-	if (malloc_init())
-		goto label_oom;
-
-	if (arena_ind != UINT_MAX) {
-		arena = arenas[arena_ind];
-		try_tcache = false;
-	} else {
-		arena = NULL;
-		try_tcache = true;
-	}
-
-	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
-	if (usize == 0)
-		goto label_oom;
-
-	if (config_prof && opt_prof) {
-		prof_thr_cnt_t *cnt;
-
-		PROF_ALLOC_PREP(1, usize, cnt);
-		if (cnt == NULL)
-			goto label_oom;
-		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
-		    SMALL_MAXCLASS) {
-			size_t usize_promoted = (alignment == 0) ?
-			    s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
-			    alignment);
-			assert(usize_promoted != 0);
-			p = iallocm(usize_promoted, alignment, zero,
-			    try_tcache, arena);
-			if (p == NULL)
-				goto label_oom;
-			arena_prof_promoted(p, usize);
-		} else {
-			p = iallocm(usize, alignment, zero, try_tcache, arena);
-			if (p == NULL)
-				goto label_oom;
-		}
-		prof_malloc(p, usize, cnt);
-	} else {
-		p = iallocm(usize, alignment, zero, try_tcache, arena);
-		if (p == NULL)
-			goto label_oom;
-	}
+	p = je_mallocx(size, flags);
+	if (p == NULL)
+		return (ALLOCM_ERR_OOM);
 	if (rsize != NULL)
-		*rsize = usize;
-
+		*rsize = isalloc(p, config_prof);
 	*ptr = p;
-	if (config_stats) {
-		assert(usize == isalloc(p, config_prof));
-		thread_allocated_tsd_get()->allocated += usize;
-	}
-	UTRACE(0, size, p);
-	JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero);
 	return (ALLOCM_SUCCESS);
-label_oom:
-	if (config_xmalloc && opt_xmalloc) {
-		malloc_write("<jemalloc>: Error in allocm(): "
-		    "out of memory\n");
-		abort();
-	}
-	*ptr = NULL;
-	UTRACE(0, size, 0);
-	return (ALLOCM_ERR_OOM);
 }
 
 int
 je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
 {
-	void *p, *q;
-	size_t usize;
-	size_t old_size;
-	size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
-	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
-	    & (SIZE_T_MAX-1));
-	bool zero = flags & ALLOCM_ZERO;
+	int ret;
 	bool no_move = flags & ALLOCM_NO_MOVE;
-	unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
-	bool try_tcache_alloc, try_tcache_dalloc;
-	arena_t *arena;
 
 	assert(ptr != NULL);
 	assert(*ptr != NULL);
 	assert(size != 0);
 	assert(SIZE_T_MAX - size >= extra);
-	assert(malloc_initialized || IS_INITIALIZER);
-	malloc_thread_init();
 
-	if (arena_ind != UINT_MAX) {
-		arena_chunk_t *chunk;
-		try_tcache_alloc = true;
-		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(*ptr);
-		try_tcache_dalloc = (chunk == *ptr || chunk->arena !=
-		    arenas[arena_ind]);
-		arena = arenas[arena_ind];
-	} else {
-		try_tcache_alloc = true;
-		try_tcache_dalloc = true;
-		arena = NULL;
-	}
-
-	p = *ptr;
-	if (config_prof && opt_prof) {
-		prof_thr_cnt_t *cnt;
-
-		/*
-		 * usize isn't knowable before iralloc() returns when extra is
-		 * non-zero.  Therefore, compute its maximum possible value and
-		 * use that in PROF_ALLOC_PREP() to decide whether to capture a
-		 * backtrace.  prof_realloc() will use the actual usize to
-		 * decide whether to sample.
-		 */
-		size_t max_usize = (alignment == 0) ? s2u(size+extra) :
-		    sa2u(size+extra, alignment);
-		prof_ctx_t *old_ctx = prof_ctx_get(p);
-		old_size = isalloc(p, true);
-		if (config_valgrind && opt_valgrind)
-			old_rzsize = p2rz(p);
-		PROF_ALLOC_PREP(1, max_usize, cnt);
-		if (cnt == NULL)
-			goto label_oom;
-		/*
-		 * Use minimum usize to determine whether promotion may happen.
-		 */
-		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
-		    && ((alignment == 0) ? s2u(size) : sa2u(size, alignment))
-		    <= SMALL_MAXCLASS) {
-			q = irallocx(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
-			    size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
-			    alignment, zero, no_move, try_tcache_alloc,
-			    try_tcache_dalloc, arena);
-			if (q == NULL)
-				goto label_err;
-			if (max_usize < PAGE) {
-				usize = max_usize;
-				arena_prof_promoted(q, usize);
-			} else
-				usize = isalloc(q, config_prof);
-		} else {
-			q = irallocx(p, size, extra, alignment, zero, no_move,
-			    try_tcache_alloc, try_tcache_dalloc, arena);
-			if (q == NULL)
-				goto label_err;
-			usize = isalloc(q, config_prof);
-		}
-		prof_realloc(q, usize, cnt, old_size, old_ctx);
+	if (no_move) {
+		size_t usize = je_xallocx(*ptr, size, extra, flags);
+		ret = (usize >= size) ? ALLOCM_SUCCESS : ALLOCM_ERR_NOT_MOVED;
 		if (rsize != NULL)
 			*rsize = usize;
 	} else {
-		if (config_stats) {
-			old_size = isalloc(p, false);
-			if (config_valgrind && opt_valgrind)
-				old_rzsize = u2rz(old_size);
-		} else if (config_valgrind && opt_valgrind) {
-			old_size = isalloc(p, false);
-			old_rzsize = u2rz(old_size);
-		}
-		q = irallocx(p, size, extra, alignment, zero, no_move,
-		    try_tcache_alloc, try_tcache_dalloc, arena);
-		if (q == NULL)
-			goto label_err;
-		if (config_stats)
-			usize = isalloc(q, config_prof);
-		if (rsize != NULL) {
-			if (config_stats == false)
-				usize = isalloc(q, config_prof);
-			*rsize = usize;
-		}
+		void *p = je_rallocx(*ptr, size+extra, flags);
+		if (p != NULL) {
+			*ptr = p;
+			ret = ALLOCM_SUCCESS;
+		} else
+			ret = ALLOCM_ERR_OOM;
+		if (rsize != NULL)
+			*rsize = isalloc(*ptr, config_prof);
 	}
-
-	*ptr = q;
-	if (config_stats) {
-		thread_allocated_t *ta;
-		ta = thread_allocated_tsd_get();
-		ta->allocated += usize;
-		ta->deallocated += old_size;
-	}
-	UTRACE(p, size, q);
-	JEMALLOC_VALGRIND_REALLOC(q, usize, p, old_size, old_rzsize, zero);
-	return (ALLOCM_SUCCESS);
-label_err:
-	if (no_move) {
-		UTRACE(p, size, q);
-		return (ALLOCM_ERR_NOT_MOVED);
-	}
-label_oom:
-	if (config_xmalloc && opt_xmalloc) {
-		malloc_write("<jemalloc>: Error in rallocm(): "
-		    "out of memory\n");
-		abort();
-	}
-	UTRACE(p, size, 0);
-	return (ALLOCM_ERR_OOM);
+	return (ret);
 }
 
 int
 je_sallocm(const void *ptr, size_t *rsize, int flags)
 {
-	size_t sz;
 
-	assert(malloc_initialized || IS_INITIALIZER);
-	malloc_thread_init();
-
-	if (config_ivsalloc)
-		sz = ivsalloc(ptr, config_prof);
-	else {
-		assert(ptr != NULL);
-		sz = isalloc(ptr, config_prof);
-	}
 	assert(rsize != NULL);
-	*rsize = sz;
-
+	*rsize = je_sallocx(ptr, flags);
 	return (ALLOCM_SUCCESS);
 }
 
 int
 je_dallocm(void *ptr, int flags)
 {
-	size_t usize;
-	size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
-	unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
-	bool try_tcache;
-
-	assert(ptr != NULL);
-	assert(malloc_initialized || IS_INITIALIZER);
-
-	if (arena_ind != UINT_MAX) {
-		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-		try_tcache = (chunk == ptr || chunk->arena !=
-		    arenas[arena_ind]);
-	} else
-		try_tcache = true;
-
-	UTRACE(ptr, 0, 0);
-	if (config_stats || config_valgrind)
-		usize = isalloc(ptr, config_prof);
-	if (config_prof && opt_prof) {
-		if (config_stats == false && config_valgrind == false)
-			usize = isalloc(ptr, config_prof);
-		prof_free(ptr, usize);
-	}
-	if (config_stats)
-		thread_allocated_tsd_get()->deallocated += usize;
-	if (config_valgrind && opt_valgrind)
-		rzsize = p2rz(ptr);
-	iqallocx(ptr, try_tcache);
-	JEMALLOC_VALGRIND_FREE(ptr, rzsize);
 
+	je_dallocx(ptr, flags);
 	return (ALLOCM_SUCCESS);
 }
 
@@ -1687,18 +1938,10 @@ int
 je_nallocm(size_t *rsize, size_t size, int flags)
 {
 	size_t usize;
-	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
-	    & (SIZE_T_MAX-1));
 
 	assert(size != 0);
-
-	if (malloc_init())
-		return (ALLOCM_ERR_OOM);
-
-	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
+	usize = je_nallocx(size, flags);
 	if (usize == 0)
 		return (ALLOCM_ERR_OOM);
-
 	if (rsize != NULL)
 		*rsize = usize;
 	return (ALLOCM_SUCCESS);
diff --git a/src/mutex.c b/src/mutex.c
index 55e18c23..788eca38 100644
--- a/src/mutex.c
+++ b/src/mutex.c
@@ -6,7 +6,7 @@
 #endif
 
 #ifndef _CRT_SPINCOUNT
-#define _CRT_SPINCOUNT 4000
+#define	_CRT_SPINCOUNT	4000
 #endif
 
 /******************************************************************************/
diff --git a/src/prof.c b/src/prof.c
index c133b95c..1d8ccbd6 100644
--- a/src/prof.c
+++ b/src/prof.c
@@ -24,7 +24,12 @@ bool		opt_prof_gdump = false;
 bool		opt_prof_final = true;
 bool		opt_prof_leak = false;
 bool		opt_prof_accum = false;
-char		opt_prof_prefix[PATH_MAX + 1];
+char		opt_prof_prefix[
+    /* Minimize memory bloat for non-prof builds. */
+#ifdef JEMALLOC_PROF
+    PATH_MAX +
+#endif
+    1];
 
 uint64_t	prof_interval = 0;
 bool		prof_promote;
@@ -54,46 +59,23 @@ static uint64_t		prof_dump_useq;
 
 /*
  * This buffer is rather large for stack allocation, so use a single buffer for
- * all profile dumps.  The buffer is implicitly protected by bt2ctx_mtx, since
- * it must be locked anyway during dumping.
+ * all profile dumps.
 */
-static char		prof_dump_buf[PROF_DUMP_BUFSIZE];
+static malloc_mutex_t	prof_dump_mtx;
+static char		prof_dump_buf[
+    /* Minimize memory bloat for non-prof builds. */
+#ifdef JEMALLOC_PROF
+    PROF_DUMP_BUFSIZE
+#else
+    1
+#endif
+];
 static unsigned		prof_dump_buf_end;
 static int		prof_dump_fd;
 
 /* Do not dump any profiles until bootstrapping is complete. */
 static bool		prof_booted = false;
 
-/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
-
-static prof_bt_t	*bt_dup(prof_bt_t *bt);
-static void	bt_destroy(prof_bt_t *bt);
-#ifdef JEMALLOC_PROF_LIBGCC
-static _Unwind_Reason_Code	prof_unwind_init_callback(
-    struct _Unwind_Context *context, void *arg);
-static _Unwind_Reason_Code	prof_unwind_callback(
-    struct _Unwind_Context *context, void *arg);
-#endif
-static bool	prof_flush(bool propagate_err);
-static bool	prof_write(bool propagate_err, const char *s);
-static bool	prof_printf(bool propagate_err, const char *format, ...)
-    JEMALLOC_ATTR(format(printf, 2, 3));
-static void	prof_ctx_sum(prof_ctx_t *ctx, prof_cnt_t *cnt_all,
-    size_t *leak_nctx);
-static void	prof_ctx_destroy(prof_ctx_t *ctx);
-static void	prof_ctx_merge(prof_ctx_t *ctx, prof_thr_cnt_t *cnt);
-static bool	prof_dump_ctx(bool propagate_err, prof_ctx_t *ctx,
-    prof_bt_t *bt);
-static bool	prof_dump_maps(bool propagate_err);
-static bool	prof_dump(bool propagate_err, const char *filename,
-    bool leakcheck);
-static void	prof_dump_filename(char *filename, char v, int64_t vseq);
-static void	prof_fdump(void);
-static void	prof_bt_hash(const void *key, size_t r_hash[2]);
-static bool	prof_bt_keycomp(const void *k1, const void *k2);
-static malloc_mutex_t	*prof_ctx_mutex_choose(void);
-
 /******************************************************************************/
 
 void
@@ -423,251 +405,32 @@ prof_backtrace(prof_bt_t *bt, unsigned nignore)
 {
 
 	cassert(config_prof);
-	assert(false);
+	not_reached();
 }
 #endif
 
-prof_thr_cnt_t *
-prof_lookup(prof_bt_t *bt)
+static malloc_mutex_t *
+prof_ctx_mutex_choose(void)
 {
-	union {
-		prof_thr_cnt_t	*p;
-		void		*v;
-	} ret;
-	prof_tdata_t	*prof_tdata;
+	unsigned nctxs = atomic_add_u(&cum_ctxs, 1);
 
-	cassert(config_prof);
-
-	prof_tdata = prof_tdata_get(false);
-	if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
-		return (NULL);
-
-	if (ckh_search(&prof_tdata->bt2cnt, bt, NULL, &ret.v)) {
-		union {
-			prof_bt_t	*p;
-			void		*v;
-		} btkey;
-		union {
-			prof_ctx_t	*p;
-			void		*v;
-		} ctx;
-		bool new_ctx;
-
-		/*
-		 * This thread's cache lacks bt.  Look for it in the global
-		 * cache.
-		 */
-		prof_enter(prof_tdata);
-		if (ckh_search(&bt2ctx, bt, &btkey.v, &ctx.v)) {
-			/* bt has never been seen before.  Insert it. */
-			ctx.v = imalloc(sizeof(prof_ctx_t));
-			if (ctx.v == NULL) {
-				prof_leave(prof_tdata);
-				return (NULL);
-			}
-			btkey.p = bt_dup(bt);
-			if (btkey.v == NULL) {
-				prof_leave(prof_tdata);
-				idalloc(ctx.v);
-				return (NULL);
-			}
-			ctx.p->bt = btkey.p;
-			ctx.p->lock = prof_ctx_mutex_choose();
-			/*
-			 * Set nlimbo to 1, in order to avoid a race condition
-			 * with prof_ctx_merge()/prof_ctx_destroy().
-			 */
-			ctx.p->nlimbo = 1;
-			memset(&ctx.p->cnt_merged, 0, sizeof(prof_cnt_t));
-			ql_new(&ctx.p->cnts_ql);
-			if (ckh_insert(&bt2ctx, btkey.v, ctx.v)) {
-				/* OOM. */
-				prof_leave(prof_tdata);
-				idalloc(btkey.v);
-				idalloc(ctx.v);
-				return (NULL);
-			}
-			new_ctx = true;
-		} else {
-			/*
-			 * Increment nlimbo, in order to avoid a race condition
-			 * with prof_ctx_merge()/prof_ctx_destroy().
-			 */
-			malloc_mutex_lock(ctx.p->lock);
-			ctx.p->nlimbo++;
-			malloc_mutex_unlock(ctx.p->lock);
-			new_ctx = false;
-		}
-		prof_leave(prof_tdata);
-
-		/* Link a prof_thd_cnt_t into ctx for this thread. */
-		if (ckh_count(&prof_tdata->bt2cnt) == PROF_TCMAX) {
-			assert(ckh_count(&prof_tdata->bt2cnt) > 0);
-			/*
-			 * Flush the least recently used cnt in order to keep
-			 * bt2cnt from becoming too large.
-			 */
-			ret.p = ql_last(&prof_tdata->lru_ql, lru_link);
-			assert(ret.v != NULL);
-			if (ckh_remove(&prof_tdata->bt2cnt, ret.p->ctx->bt,
-			    NULL, NULL))
-				assert(false);
-			ql_remove(&prof_tdata->lru_ql, ret.p, lru_link);
-			prof_ctx_merge(ret.p->ctx, ret.p);
-			/* ret can now be re-used. */
-		} else {
-			assert(ckh_count(&prof_tdata->bt2cnt) < PROF_TCMAX);
-			/* Allocate and partially initialize a new cnt. */
-			ret.v = imalloc(sizeof(prof_thr_cnt_t));
-			if (ret.p == NULL) {
-				if (new_ctx)
-					prof_ctx_destroy(ctx.p);
-				return (NULL);
-			}
-			ql_elm_new(ret.p, cnts_link);
-			ql_elm_new(ret.p, lru_link);
-		}
-		/* Finish initializing ret. */
-		ret.p->ctx = ctx.p;
-		ret.p->epoch = 0;
-		memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
-		if (ckh_insert(&prof_tdata->bt2cnt, btkey.v, ret.v)) {
-			if (new_ctx)
-				prof_ctx_destroy(ctx.p);
-			idalloc(ret.v);
-			return (NULL);
-		}
-		ql_head_insert(&prof_tdata->lru_ql, ret.p, lru_link);
-		malloc_mutex_lock(ctx.p->lock);
-		ql_tail_insert(&ctx.p->cnts_ql, ret.p, cnts_link);
-		ctx.p->nlimbo--;
-		malloc_mutex_unlock(ctx.p->lock);
-	} else {
-		/* Move ret to the front of the LRU. */
-		ql_remove(&prof_tdata->lru_ql, ret.p, lru_link);
-		ql_head_insert(&prof_tdata->lru_ql, ret.p, lru_link);
-	}
-
-	return (ret.p);
-}
-
-static bool
-prof_flush(bool propagate_err)
-{
-	bool ret = false;
-	ssize_t err;
-
-	cassert(config_prof);
-
-	err = write(prof_dump_fd, prof_dump_buf, prof_dump_buf_end);
-	if (err == -1) {
-		if (propagate_err == false) {
-			malloc_write("<jemalloc>: write() failed during heap "
-			    "profile flush\n");
-			if (opt_abort)
-				abort();
-		}
-		ret = true;
-	}
-	prof_dump_buf_end = 0;
-
-	return (ret);
-}
-
-static bool
-prof_write(bool propagate_err, const char *s)
-{
-	unsigned i, slen, n;
-
-	cassert(config_prof);
-
-	i = 0;
-	slen = strlen(s);
-	while (i < slen) {
-		/* Flush the buffer if it is full. */
-		if (prof_dump_buf_end == PROF_DUMP_BUFSIZE)
-			if (prof_flush(propagate_err) && propagate_err)
-				return (true);
-
-		if (prof_dump_buf_end + slen <= PROF_DUMP_BUFSIZE) {
-			/* Finish writing. */
-			n = slen - i;
-		} else {
-			/* Write as much of s as will fit. */
-			n = PROF_DUMP_BUFSIZE - prof_dump_buf_end;
-		}
-		memcpy(&prof_dump_buf[prof_dump_buf_end], &s[i], n);
-		prof_dump_buf_end += n;
-		i += n;
-	}
-
-	return (false);
-}
-
-JEMALLOC_ATTR(format(printf, 2, 3))
-static bool
-prof_printf(bool propagate_err, const char *format, ...)
-{
-	bool ret;
-	va_list ap;
-	char buf[PROF_PRINTF_BUFSIZE];
-
-	va_start(ap, format);
-	malloc_vsnprintf(buf, sizeof(buf), format, ap);
-	va_end(ap);
-	ret = prof_write(propagate_err, buf);
-
-	return (ret);
+	return (&ctx_locks[(nctxs - 1) % PROF_NCTX_LOCKS]);
 }
 
 static void
-prof_ctx_sum(prof_ctx_t *ctx, prof_cnt_t *cnt_all, size_t *leak_nctx)
+prof_ctx_init(prof_ctx_t *ctx, prof_bt_t *bt)
 {
-	prof_thr_cnt_t *thr_cnt;
-	prof_cnt_t tcnt;
 
-	cassert(config_prof);
-
-	malloc_mutex_lock(ctx->lock);
-
-	memcpy(&ctx->cnt_summed, &ctx->cnt_merged, sizeof(prof_cnt_t));
-	ql_foreach(thr_cnt, &ctx->cnts_ql, cnts_link) {
-		volatile unsigned *epoch = &thr_cnt->epoch;
-
-		while (true) {
-			unsigned epoch0 = *epoch;
-
-			/* Make sure epoch is even. */
-			if (epoch0 & 1U)
-				continue;
-
-			memcpy(&tcnt, &thr_cnt->cnts, sizeof(prof_cnt_t));
-
-			/* Terminate if epoch didn't change while reading. */
-			if (*epoch == epoch0)
-				break;
-		}
-
-		ctx->cnt_summed.curobjs += tcnt.curobjs;
-		ctx->cnt_summed.curbytes += tcnt.curbytes;
-		if (opt_prof_accum) {
-			ctx->cnt_summed.accumobjs += tcnt.accumobjs;
-			ctx->cnt_summed.accumbytes += tcnt.accumbytes;
-		}
-	}
-
-	if (ctx->cnt_summed.curobjs != 0)
-		(*leak_nctx)++;
-
-	/* Add to cnt_all. */
-	cnt_all->curobjs += ctx->cnt_summed.curobjs;
-	cnt_all->curbytes += ctx->cnt_summed.curbytes;
-	if (opt_prof_accum) {
-		cnt_all->accumobjs += ctx->cnt_summed.accumobjs;
-		cnt_all->accumbytes += ctx->cnt_summed.accumbytes;
-	}
-
-	malloc_mutex_unlock(ctx->lock);
+	ctx->bt = bt;
+	ctx->lock = prof_ctx_mutex_choose();
+	/*
+	 * Set nlimbo to 1, in order to avoid a race condition with
+	 * prof_ctx_merge()/prof_ctx_destroy().
+	 */
+	ctx->nlimbo = 1;
+	ql_elm_new(ctx, dump_link);
+	memset(&ctx->cnt_merged, 0, sizeof(prof_cnt_t));
+	ql_new(&ctx->cnts_ql);
 }
 
 static void
@@ -695,7 +458,7 @@ prof_ctx_destroy(prof_ctx_t *ctx)
 		assert(ctx->cnt_merged.accumbytes == 0);
 		/* Remove ctx from bt2ctx. */
 		if (ckh_remove(&bt2ctx, ctx->bt, NULL, NULL))
-			assert(false);
+			not_reached();
 		prof_leave(prof_tdata);
 		/* Destroy ctx. */
 		malloc_mutex_unlock(ctx->lock);
@@ -751,8 +514,369 @@ prof_ctx_merge(prof_ctx_t *ctx, prof_thr_cnt_t *cnt)
 }
 
 static bool
-prof_dump_ctx(bool propagate_err, prof_ctx_t *ctx, prof_bt_t *bt)
+prof_lookup_global(prof_bt_t *bt, prof_tdata_t *prof_tdata, void **p_btkey,
+    prof_ctx_t **p_ctx, bool *p_new_ctx)
 {
+	union {
+		prof_ctx_t	*p;
+		void		*v;
+	} ctx;
+	union {
+		prof_bt_t	*p;
+		void		*v;
+	} btkey;
+	bool new_ctx;
+
+	prof_enter(prof_tdata);
+	if (ckh_search(&bt2ctx, bt, &btkey.v, &ctx.v)) {
+		/* bt has never been seen before.  Insert it. */
+		ctx.v = imalloc(sizeof(prof_ctx_t));
+		if (ctx.v == NULL) {
+			prof_leave(prof_tdata);
+			return (true);
+		}
+		btkey.p = bt_dup(bt);
+		if (btkey.v == NULL) {
+			prof_leave(prof_tdata);
+			idalloc(ctx.v);
+			return (true);
+		}
+		prof_ctx_init(ctx.p, btkey.p);
+		if (ckh_insert(&bt2ctx, btkey.v, ctx.v)) {
+			/* OOM. */
+			prof_leave(prof_tdata);
+			idalloc(btkey.v);
+			idalloc(ctx.v);
+			return (true);
+		}
+		new_ctx = true;
+	} else {
+		/*
+		 * Increment nlimbo, in order to avoid a race condition with
+		 * prof_ctx_merge()/prof_ctx_destroy().
+		 */
+		malloc_mutex_lock(ctx.p->lock);
+		ctx.p->nlimbo++;
+		malloc_mutex_unlock(ctx.p->lock);
+		new_ctx = false;
+	}
+	prof_leave(prof_tdata);
+
+	*p_btkey = btkey.v;
+	*p_ctx = ctx.p;
+	*p_new_ctx = new_ctx;
+	return (false);
+}
+
+prof_thr_cnt_t *
+prof_lookup(prof_bt_t *bt)
+{
+	union {
+		prof_thr_cnt_t	*p;
+		void		*v;
+	} ret;
+	prof_tdata_t	*prof_tdata;
+
+	cassert(config_prof);
+
+	prof_tdata = prof_tdata_get(false);
+	if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
+		return (NULL);
+
+	if (ckh_search(&prof_tdata->bt2cnt, bt, NULL, &ret.v)) {
+		void *btkey;
+		prof_ctx_t *ctx;
+		bool new_ctx;
+
+		/*
+		 * This thread's cache lacks bt.  Look for it in the global
+		 * cache.
+		 */
+		if (prof_lookup_global(bt, prof_tdata, &btkey, &ctx, &new_ctx))
+			return (NULL);
+
+		/* Link a prof_thd_cnt_t into ctx for this thread. */
+		if (ckh_count(&prof_tdata->bt2cnt) == PROF_TCMAX) {
+			assert(ckh_count(&prof_tdata->bt2cnt) > 0);
+			/*
+			 * Flush the least recently used cnt in order to keep
+			 * bt2cnt from becoming too large.
+			 */
+			ret.p = ql_last(&prof_tdata->lru_ql, lru_link);
+			assert(ret.v != NULL);
+			if (ckh_remove(&prof_tdata->bt2cnt, ret.p->ctx->bt,
+			    NULL, NULL))
+				not_reached();
+			ql_remove(&prof_tdata->lru_ql, ret.p, lru_link);
+			prof_ctx_merge(ret.p->ctx, ret.p);
+			/* ret can now be re-used. */
+		} else {
+			assert(ckh_count(&prof_tdata->bt2cnt) < PROF_TCMAX);
+			/* Allocate and partially initialize a new cnt. */
+			ret.v = imalloc(sizeof(prof_thr_cnt_t));
+			if (ret.p == NULL) {
+				if (new_ctx)
+					prof_ctx_destroy(ctx);
+				return (NULL);
+			}
+			ql_elm_new(ret.p, cnts_link);
+			ql_elm_new(ret.p, lru_link);
+		}
+		/* Finish initializing ret. */
+		ret.p->ctx = ctx;
+		ret.p->epoch = 0;
+		memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
+		if (ckh_insert(&prof_tdata->bt2cnt, btkey, ret.v)) {
+			if (new_ctx)
+				prof_ctx_destroy(ctx);
+			idalloc(ret.v);
+			return (NULL);
+		}
+		ql_head_insert(&prof_tdata->lru_ql, ret.p, lru_link);
+		malloc_mutex_lock(ctx->lock);
+		ql_tail_insert(&ctx->cnts_ql, ret.p, cnts_link);
+		ctx->nlimbo--;
+		malloc_mutex_unlock(ctx->lock);
+	} else {
+		/* Move ret to the front of the LRU. */
+		ql_remove(&prof_tdata->lru_ql, ret.p, lru_link);
+		ql_head_insert(&prof_tdata->lru_ql, ret.p, lru_link);
+	}
+
+	return (ret.p);
+}
+
+#ifdef JEMALLOC_JET
+size_t
+prof_bt_count(void)
+{
+	size_t bt_count;
+	prof_tdata_t *prof_tdata;
+
+	prof_tdata = prof_tdata_get(false);
+	if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
+		return (0);
+
+	prof_enter(prof_tdata);
+	bt_count = ckh_count(&bt2ctx);
+	prof_leave(prof_tdata);
+
+	return (bt_count);
+}
+#endif
+
+#ifdef JEMALLOC_JET
+#undef prof_dump_open
+#define prof_dump_open JEMALLOC_N(prof_dump_open_impl)
+#endif
+static int
+prof_dump_open(bool propagate_err, const char *filename)
+{
+	int fd;
+
+	fd = creat(filename, 0644);
+	if (fd == -1 && propagate_err == false) {
+		malloc_printf("<jemalloc>: creat(\"%s\", 0644) failed\n",
+		    filename);
+		if (opt_abort)
+			abort();
+	}
+
+	return (fd);
+}
+#ifdef JEMALLOC_JET
+#undef prof_dump_open
+#define prof_dump_open JEMALLOC_N(prof_dump_open)
+prof_dump_open_t *prof_dump_open = JEMALLOC_N(prof_dump_open_impl);
+#endif
+
+static bool
+prof_dump_flush(bool propagate_err)
+{
+	bool ret = false;
+	ssize_t err;
+
+	cassert(config_prof);
+
+	err = write(prof_dump_fd, prof_dump_buf, prof_dump_buf_end);
+	if (err == -1) {
+		if (propagate_err == false) {
+			malloc_write("<jemalloc>: write() failed during heap "
+			    "profile flush\n");
+			if (opt_abort)
+				abort();
+		}
+		ret = true;
+	}
+	prof_dump_buf_end = 0;
+
+	return (ret);
+}
+
+static bool
+prof_dump_close(bool propagate_err)
+{
+	bool ret;
+
+	assert(prof_dump_fd != -1);
+	ret = prof_dump_flush(propagate_err);
+	close(prof_dump_fd);
+	prof_dump_fd = -1;
+
+	return (ret);
+}
+
+static bool
+prof_dump_write(bool propagate_err, const char *s)
+{
+	unsigned i, slen, n;
+
+	cassert(config_prof);
+
+	i = 0;
+	slen = strlen(s);
+	while (i < slen) {
+		/* Flush the buffer if it is full. */
+		if (prof_dump_buf_end == PROF_DUMP_BUFSIZE)
+			if (prof_dump_flush(propagate_err) && propagate_err)
+				return (true);
+
+		if (prof_dump_buf_end + slen <= PROF_DUMP_BUFSIZE) {
+			/* Finish writing. */
+			n = slen - i;
+		} else {
+			/* Write as much of s as will fit. */
+			n = PROF_DUMP_BUFSIZE - prof_dump_buf_end;
+		}
+		memcpy(&prof_dump_buf[prof_dump_buf_end], &s[i], n);
+		prof_dump_buf_end += n;
+		i += n;
+	}
+
+	return (false);
+}
+
+JEMALLOC_ATTR(format(printf, 2, 3))
+static bool
+prof_dump_printf(bool propagate_err, const char *format, ...)
+{
+	bool ret;
+	va_list ap;
+	char buf[PROF_PRINTF_BUFSIZE];
+
+	va_start(ap, format);
+	malloc_vsnprintf(buf, sizeof(buf), format, ap);
+	va_end(ap);
+	ret = prof_dump_write(propagate_err, buf);
+
+	return (ret);
+}
+
+static void
+prof_dump_ctx_prep(prof_ctx_t *ctx, prof_cnt_t *cnt_all, size_t *leak_nctx,
+    prof_ctx_list_t *ctx_ql)
+{
+	prof_thr_cnt_t *thr_cnt;
+	prof_cnt_t tcnt;
+
+	cassert(config_prof);
+
+	malloc_mutex_lock(ctx->lock);
+
+	/*
+	 * Increment nlimbo so that ctx won't go away before dump.
+	 * Additionally, link ctx into the dump list so that it is included in
+	 * prof_dump()'s second pass.
+	 */
+	ctx->nlimbo++;
+	ql_tail_insert(ctx_ql, ctx, dump_link);
+
+	memcpy(&ctx->cnt_summed, &ctx->cnt_merged, sizeof(prof_cnt_t));
+	ql_foreach(thr_cnt, &ctx->cnts_ql, cnts_link) {
+		volatile unsigned *epoch = &thr_cnt->epoch;
+
+		while (true) {
+			unsigned epoch0 = *epoch;
+
+			/* Make sure epoch is even. */
+			if (epoch0 & 1U)
+				continue;
+
+			memcpy(&tcnt, &thr_cnt->cnts, sizeof(prof_cnt_t));
+
+			/* Terminate if epoch didn't change while reading. */
+			if (*epoch == epoch0)
+				break;
+		}
+
+		ctx->cnt_summed.curobjs += tcnt.curobjs;
+		ctx->cnt_summed.curbytes += tcnt.curbytes;
+		if (opt_prof_accum) {
+			ctx->cnt_summed.accumobjs += tcnt.accumobjs;
+			ctx->cnt_summed.accumbytes += tcnt.accumbytes;
+		}
+	}
+
+	if (ctx->cnt_summed.curobjs != 0)
+		(*leak_nctx)++;
+
+	/* Add to cnt_all. */
+	cnt_all->curobjs += ctx->cnt_summed.curobjs;
+	cnt_all->curbytes += ctx->cnt_summed.curbytes;
+	if (opt_prof_accum) {
+		cnt_all->accumobjs += ctx->cnt_summed.accumobjs;
+		cnt_all->accumbytes += ctx->cnt_summed.accumbytes;
+	}
+
+	malloc_mutex_unlock(ctx->lock);
+}
+
+static bool
+prof_dump_header(bool propagate_err, const prof_cnt_t *cnt_all)
+{
+
+	if (opt_lg_prof_sample == 0) {
+		if (prof_dump_printf(propagate_err,
+		    "heap profile: %"PRId64": %"PRId64
+		    " [%"PRIu64": %"PRIu64"] @ heapprofile\n",
+		    cnt_all->curobjs, cnt_all->curbytes,
+		    cnt_all->accumobjs, cnt_all->accumbytes))
+			return (true);
+	} else {
+		if (prof_dump_printf(propagate_err,
+		    "heap profile: %"PRId64": %"PRId64
+		    " [%"PRIu64": %"PRIu64"] @ heap_v2/%"PRIu64"\n",
+		    cnt_all->curobjs, cnt_all->curbytes,
+		    cnt_all->accumobjs, cnt_all->accumbytes,
+		    ((uint64_t)1U << opt_lg_prof_sample)))
+			return (true);
+	}
+
+	return (false);
+}
+
+static void
+prof_dump_ctx_cleanup_locked(prof_ctx_t *ctx, prof_ctx_list_t *ctx_ql)
+{
+
+	ctx->nlimbo--;
+	ql_remove(ctx_ql, ctx, dump_link);
+}
+
+static void
+prof_dump_ctx_cleanup(prof_ctx_t *ctx, prof_ctx_list_t *ctx_ql)
+{
+
+	malloc_mutex_lock(ctx->lock);
+	prof_dump_ctx_cleanup_locked(ctx, ctx_ql);
+	malloc_mutex_unlock(ctx->lock);
+}
+
+static bool
+prof_dump_ctx(bool propagate_err, prof_ctx_t *ctx, const prof_bt_t *bt,
+    prof_ctx_list_t *ctx_ql)
+{
+	bool ret;
 	unsigned i;
 
 	cassert(config_prof);
@@ -764,36 +888,49 @@ prof_dump_ctx(bool propagate_err, prof_ctx_t *ctx, prof_bt_t *bt)
 	 * filled in.  Avoid dumping any ctx that is an artifact of either
 	 * implementation detail.
 	 */
+	malloc_mutex_lock(ctx->lock);
 	if ((opt_prof_accum == false && ctx->cnt_summed.curobjs == 0) ||
 	    (opt_prof_accum && ctx->cnt_summed.accumobjs == 0)) {
 		assert(ctx->cnt_summed.curobjs == 0);
 		assert(ctx->cnt_summed.curbytes == 0);
 		assert(ctx->cnt_summed.accumobjs == 0);
 		assert(ctx->cnt_summed.accumbytes == 0);
-		return (false);
+		ret = false;
+		goto label_return;
 	}
 
-	if (prof_printf(propagate_err, "%"PRId64": %"PRId64
+	if (prof_dump_printf(propagate_err, "%"PRId64": %"PRId64
 	    " [%"PRIu64": %"PRIu64"] @",
 	    ctx->cnt_summed.curobjs, ctx->cnt_summed.curbytes,
-	    ctx->cnt_summed.accumobjs, ctx->cnt_summed.accumbytes))
-		return (true);
-
-	for (i = 0; i < bt->len; i++) {
-		if (prof_printf(propagate_err, " %#"PRIxPTR,
-		    (uintptr_t)bt->vec[i]))
-			return (true);
+	    ctx->cnt_summed.accumobjs, ctx->cnt_summed.accumbytes)) {
+		ret = true;
+		goto label_return;
 	}
 
-	if (prof_write(propagate_err, "\n"))
-		return (true);
+	for (i = 0; i < bt->len; i++) {
+		if (prof_dump_printf(propagate_err, " %#"PRIxPTR,
+		    (uintptr_t)bt->vec[i])) {
+			ret = true;
+			goto label_return;
+		}
+	}
 
-	return (false);
+	if (prof_dump_write(propagate_err, "\n")) {
+		ret = true;
+		goto label_return;
+	}
+
+	ret = false;
label_return:
+	prof_dump_ctx_cleanup_locked(ctx, ctx_ql);
+	malloc_mutex_unlock(ctx->lock);
+	return (ret);
 }
 
 static bool
 prof_dump_maps(bool propagate_err)
 {
+	bool ret;
 	int mfd;
 	char filename[PATH_MAX + 1];
 
@@ -805,25 +942,52 @@ prof_dump_maps(bool propagate_err)
 	if (mfd != -1) {
 		ssize_t nread;
 
-		if (prof_write(propagate_err, "\nMAPPED_LIBRARIES:\n") &&
-		    propagate_err)
-			return (true);
+		if (prof_dump_write(propagate_err, "\nMAPPED_LIBRARIES:\n") &&
+		    propagate_err) {
+			ret = true;
+			goto label_return;
+		}
 		nread = 0;
 		do {
 			prof_dump_buf_end += nread;
 			if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) {
 				/* Make space in prof_dump_buf before read(). */
-				if (prof_flush(propagate_err) && propagate_err)
-					return (true);
+				if (prof_dump_flush(propagate_err) &&
+				    propagate_err) {
+					ret = true;
+					goto label_return;
+				}
 			}
 			nread = read(mfd, &prof_dump_buf[prof_dump_buf_end],
 			    PROF_DUMP_BUFSIZE - prof_dump_buf_end);
 		} while (nread > 0);
-		close(mfd);
-	} else
-		return (true);
+	} else {
+		ret = true;
+		goto label_return;
+	}
 
-	return (false);
+	ret = false;
label_return:
+	if (mfd != -1)
+		close(mfd);
+	return (ret);
+}
+
+static void
+prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_nctx,
+    const char *filename)
+{
+
+	if (cnt_all->curbytes != 0) {
+		malloc_printf("<jemalloc>: Leak summary: %"PRId64" byte%s, %"
+		    PRId64" object%s, %zu context%s\n",
+		    cnt_all->curbytes, (cnt_all->curbytes != 1) ? "s" : "",
+		    cnt_all->curobjs, (cnt_all->curobjs != 1) ? "s" : "",
"s" : ""); + malloc_printf( + ": Run pprof on \"%s\" for leak detail\n", + filename); + } } static bool @@ -832,99 +996,75 @@ prof_dump(bool propagate_err, const char *filename, bool leakcheck) prof_tdata_t *prof_tdata; prof_cnt_t cnt_all; size_t tabind; - union { - prof_bt_t *p; - void *v; - } bt; union { prof_ctx_t *p; void *v; } ctx; size_t leak_nctx; + prof_ctx_list_t ctx_ql; cassert(config_prof); prof_tdata = prof_tdata_get(false); if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) return (true); - prof_enter(prof_tdata); - prof_dump_fd = creat(filename, 0644); - if (prof_dump_fd == -1) { - if (propagate_err == false) { - malloc_printf( - ": creat(\"%s\"), 0644) failed\n", - filename); - if (opt_abort) - abort(); - } - goto label_error; - } + + malloc_mutex_lock(&prof_dump_mtx); /* Merge per thread profile stats, and sum them in cnt_all. */ memset(&cnt_all, 0, sizeof(prof_cnt_t)); leak_nctx = 0; + ql_new(&ctx_ql); + prof_enter(prof_tdata); for (tabind = 0; ckh_iter(&bt2ctx, &tabind, NULL, &ctx.v) == false;) - prof_ctx_sum(ctx.p, &cnt_all, &leak_nctx); + prof_dump_ctx_prep(ctx.p, &cnt_all, &leak_nctx, &ctx_ql); + prof_leave(prof_tdata); + + /* Create dump file. */ + if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1) + goto label_open_close_error; /* Dump profile header. */ - if (opt_lg_prof_sample == 0) { - if (prof_printf(propagate_err, - "heap profile: %"PRId64": %"PRId64 - " [%"PRIu64": %"PRIu64"] @ heapprofile\n", - cnt_all.curobjs, cnt_all.curbytes, - cnt_all.accumobjs, cnt_all.accumbytes)) - goto label_error; - } else { - if (prof_printf(propagate_err, - "heap profile: %"PRId64": %"PRId64 - " [%"PRIu64": %"PRIu64"] @ heap_v2/%"PRIu64"\n", - cnt_all.curobjs, cnt_all.curbytes, - cnt_all.accumobjs, cnt_all.accumbytes, - ((uint64_t)1U << opt_lg_prof_sample))) - goto label_error; - } + if (prof_dump_header(propagate_err, &cnt_all)) + goto label_write_error; - /* Dump per ctx profile stats. */ - for (tabind = 0; ckh_iter(&bt2ctx, &tabind, &bt.v, &ctx.v) - == false;) { - if (prof_dump_ctx(propagate_err, ctx.p, bt.p)) - goto label_error; + /* Dump per ctx profile stats. */ + while ((ctx.p = ql_first(&ctx_ql)) != NULL) { + if (prof_dump_ctx(propagate_err, ctx.p, ctx.p->bt, &ctx_ql)) + goto label_write_error; } /* Dump /proc//maps if possible. */ if (prof_dump_maps(propagate_err)) - goto label_error; + goto label_write_error; - if (prof_flush(propagate_err)) - goto label_error; - close(prof_dump_fd); - prof_leave(prof_tdata); + if (prof_dump_close(propagate_err)) + goto label_open_close_error; - if (leakcheck && cnt_all.curbytes != 0) { - malloc_printf(": Leak summary: %"PRId64" byte%s, %" - PRId64" object%s, %zu context%s\n", - cnt_all.curbytes, (cnt_all.curbytes != 1) ? "s" : "", - cnt_all.curobjs, (cnt_all.curobjs != 1) ? "s" : "", - leak_nctx, (leak_nctx != 1) ? 
"s" : ""); - malloc_printf( - ": Run pprof on \"%s\" for leak detail\n", - filename); - } + malloc_mutex_unlock(&prof_dump_mtx); + + if (leakcheck) + prof_leakcheck(&cnt_all, leak_nctx, filename); return (false); -label_error: - prof_leave(prof_tdata); +label_write_error: + prof_dump_close(propagate_err); +label_open_close_error: + while ((ctx.p = ql_first(&ctx_ql)) != NULL) + prof_dump_ctx_cleanup(ctx.p, &ctx_ql); + malloc_mutex_unlock(&prof_dump_mtx); return (true); } #define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1) +#define VSEQ_INVALID UINT64_C(0xffffffffffffffff) static void prof_dump_filename(char *filename, char v, int64_t vseq) { cassert(config_prof); - if (vseq != UINT64_C(0xffffffffffffffff)) { + if (vseq != VSEQ_INVALID) { /* "...v.heap" */ malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, "%s.%d.%"PRIu64".%c%"PRId64".heap", @@ -950,7 +1090,7 @@ prof_fdump(void) if (opt_prof_final && opt_prof_prefix[0] != '\0') { malloc_mutex_lock(&prof_dump_seq_mtx); - prof_dump_filename(filename, 'f', UINT64_C(0xffffffffffffffff)); + prof_dump_filename(filename, 'f', VSEQ_INVALID); malloc_mutex_unlock(&prof_dump_seq_mtx); prof_dump(false, filename, opt_prof_leak); } @@ -1056,14 +1196,6 @@ prof_bt_keycomp(const void *k1, const void *k2) return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0); } -static malloc_mutex_t * -prof_ctx_mutex_choose(void) -{ - unsigned nctxs = atomic_add_u(&cum_ctxs, 1); - - return (&ctx_locks[(nctxs - 1) % PROF_NCTX_LOCKS]); -} - prof_tdata_t * prof_tdata_init(void) { @@ -1208,6 +1340,8 @@ prof_boot2(void) if (malloc_mutex_init(&prof_dump_seq_mtx)) return (true); + if (malloc_mutex_init(&prof_dump_mtx)) + return (true); if (atexit(prof_fdump) != 0) { malloc_write(": Error in atexit()\n"); @@ -1245,10 +1379,10 @@ prof_prefork(void) if (opt_prof) { unsigned i; - malloc_mutex_lock(&bt2ctx_mtx); - malloc_mutex_lock(&prof_dump_seq_mtx); + malloc_mutex_prefork(&bt2ctx_mtx); + malloc_mutex_prefork(&prof_dump_seq_mtx); for (i = 0; i < PROF_NCTX_LOCKS; i++) - malloc_mutex_lock(&ctx_locks[i]); + malloc_mutex_prefork(&ctx_locks[i]); } } diff --git a/src/quarantine.c b/src/quarantine.c index f96a948d..54315116 100644 --- a/src/quarantine.c +++ b/src/quarantine.c @@ -141,8 +141,17 @@ quarantine(void *ptr) obj->usize = usize; quarantine->curbytes += usize; quarantine->curobjs++; - if (opt_junk) - memset(ptr, 0x5a, usize); + if (config_fill && opt_junk) { + /* + * Only do redzone validation if Valgrind isn't in + * operation. 
+ */ + if ((config_valgrind == false || opt_valgrind == false) + && usize <= SMALL_MAXCLASS) + arena_quarantine_junk_small(ptr, usize); + else + memset(ptr, 0x5a, usize); + } } else { assert(quarantine->curbytes == 0); idalloc(ptr); diff --git a/src/rtree.c b/src/rtree.c index 90c6935a..205957ac 100644 --- a/src/rtree.c +++ b/src/rtree.c @@ -2,42 +2,55 @@ #include "jemalloc/internal/jemalloc_internal.h" rtree_t * -rtree_new(unsigned bits) +rtree_new(unsigned bits, rtree_alloc_t *alloc, rtree_dalloc_t *dalloc) { rtree_t *ret; - unsigned bits_per_level, height, i; + unsigned bits_per_level, bits_in_leaf, height, i; + + assert(bits > 0 && bits <= (sizeof(uintptr_t) << 3)); bits_per_level = ffs(pow2_ceil((RTREE_NODESIZE / sizeof(void *)))) - 1; - height = bits / bits_per_level; - if (height * bits_per_level != bits) - height++; - assert(height * bits_per_level >= bits); + bits_in_leaf = ffs(pow2_ceil((RTREE_NODESIZE / sizeof(uint8_t)))) - 1; + if (bits > bits_in_leaf) { + height = 1 + (bits - bits_in_leaf) / bits_per_level; + if ((height-1) * bits_per_level + bits_in_leaf != bits) + height++; + } else { + height = 1; + } + assert((height-1) * bits_per_level + bits_in_leaf >= bits); - ret = (rtree_t*)base_alloc(offsetof(rtree_t, level2bits) + + ret = (rtree_t*)alloc(offsetof(rtree_t, level2bits) + (sizeof(unsigned) * height)); if (ret == NULL) return (NULL); memset(ret, 0, offsetof(rtree_t, level2bits) + (sizeof(unsigned) * height)); + ret->alloc = alloc; + ret->dalloc = dalloc; if (malloc_mutex_init(&ret->mutex)) { - /* Leak the rtree. */ + if (dalloc != NULL) + dalloc(ret); return (NULL); } ret->height = height; - if (bits_per_level * height > bits) - ret->level2bits[0] = bits % bits_per_level; - else - ret->level2bits[0] = bits_per_level; - for (i = 1; i < height; i++) - ret->level2bits[i] = bits_per_level; + if (height > 1) { + if ((height-1) * bits_per_level + bits_in_leaf > bits) { + ret->level2bits[0] = (bits - bits_in_leaf) % + bits_per_level; + } else + ret->level2bits[0] = bits_per_level; + for (i = 1; i < height-1; i++) + ret->level2bits[i] = bits_per_level; + ret->level2bits[height-1] = bits_in_leaf; + } else + ret->level2bits[0] = bits; - ret->root = (void**)base_alloc(sizeof(void *) << ret->level2bits[0]); + ret->root = (void**)alloc(sizeof(void *) << ret->level2bits[0]); if (ret->root == NULL) { - /* - * We leak the rtree here, since there's no generic base - * deallocation. - */ + if (dalloc != NULL) + dalloc(ret); return (NULL); } memset(ret->root, 0, sizeof(void *) << ret->level2bits[0]); @@ -45,6 +58,31 @@ rtree_new(unsigned bits) return (ret); } +static void +rtree_delete_subtree(rtree_t *rtree, void **node, unsigned level) +{ + + if (level < rtree->height - 1) { + size_t nchildren, i; + + nchildren = ZU(1) << rtree->level2bits[level]; + for (i = 0; i < nchildren; i++) { + void **child = (void **)node[i]; + if (child != NULL) + rtree_delete_subtree(rtree, child, level + 1); + } + } + rtree->dalloc(node); +} + +void +rtree_delete(rtree_t *rtree) +{ + + rtree_delete_subtree(rtree, rtree->root, 0); + rtree->dalloc(rtree); +} + void rtree_prefork(rtree_t *rtree) { diff --git a/src/stats.c b/src/stats.c index 43f87af6..bef2ab33 100644 --- a/src/stats.c +++ b/src/stats.c @@ -345,25 +345,25 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque, malloc_cprintf(write_cb, cbopaque, "Assertions %s\n", bv ? 
"enabled" : "disabled"); -#define OPT_WRITE_BOOL(n) \ +#define OPT_WRITE_BOOL(n) \ if ((err = je_mallctl("opt."#n, &bv, &bsz, NULL, 0)) \ == 0) { \ malloc_cprintf(write_cb, cbopaque, \ " opt."#n": %s\n", bv ? "true" : "false"); \ } -#define OPT_WRITE_SIZE_T(n) \ +#define OPT_WRITE_SIZE_T(n) \ if ((err = je_mallctl("opt."#n, &sv, &ssz, NULL, 0)) \ == 0) { \ malloc_cprintf(write_cb, cbopaque, \ " opt."#n": %zu\n", sv); \ } -#define OPT_WRITE_SSIZE_T(n) \ +#define OPT_WRITE_SSIZE_T(n) \ if ((err = je_mallctl("opt."#n, &ssv, &sssz, NULL, 0)) \ == 0) { \ malloc_cprintf(write_cb, cbopaque, \ " opt."#n": %zd\n", ssv); \ } -#define OPT_WRITE_CHAR_P(n) \ +#define OPT_WRITE_CHAR_P(n) \ if ((err = je_mallctl("opt."#n, &cpv, &cpsz, NULL, 0)) \ == 0) { \ malloc_cprintf(write_cb, cbopaque, \ diff --git a/src/tcache.c b/src/tcache.c index 98ed19ed..6de92960 100644 --- a/src/tcache.c +++ b/src/tcache.c @@ -260,8 +260,8 @@ tcache_arena_dissociate(tcache_t *tcache) /* Unlink from list of extant tcaches. */ malloc_mutex_lock(&tcache->arena->lock); ql_remove(&tcache->arena->tcache_ql, tcache, link); - malloc_mutex_unlock(&tcache->arena->lock); tcache_stats_merge(tcache, tcache->arena); + malloc_mutex_unlock(&tcache->arena->lock); } } @@ -292,7 +292,7 @@ tcache_create(arena_t *arena) else if (size <= tcache_maxclass) tcache = (tcache_t *)arena_malloc_large(arena, size, true); else - tcache = (tcache_t *)icallocx(size, false, arena); + tcache = (tcache_t *)icalloct(size, false, arena); if (tcache == NULL) return (NULL); @@ -366,7 +366,7 @@ tcache_destroy(tcache_t *tcache) arena_dalloc_large(arena, chunk, tcache); } else - idallocx(tcache, false); + idalloct(tcache, false); } void @@ -399,11 +399,14 @@ tcache_thread_cleanup(void *arg) } } +/* Caller must own arena->lock. */ void tcache_stats_merge(tcache_t *tcache, arena_t *arena) { unsigned i; + cassert(config_stats); + /* Merge and reset tcache stats. */ for (i = 0; i < NBINS; i++) { arena_bin_t *bin = &arena->bins[i]; diff --git a/src/tsd.c b/src/tsd.c index 961a5463..700caabf 100644 --- a/src/tsd.c +++ b/src/tsd.c @@ -21,7 +21,7 @@ void malloc_tsd_dalloc(void *wrapper) { - idalloc(wrapper); + idalloct(wrapper, false); } void @@ -105,3 +105,37 @@ JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used) static const BOOL (WINAPI *tls_callback)(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) = _tls_callback; #endif + +#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ + !defined(_WIN32)) +void * +tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block) +{ + pthread_t self = pthread_self(); + tsd_init_block_t *iter; + + /* Check whether this thread has already inserted into the list. */ + malloc_mutex_lock(&head->lock); + ql_foreach(iter, &head->blocks, link) { + if (iter->thread == self) { + malloc_mutex_unlock(&head->lock); + return (iter->data); + } + } + /* Insert block into list. */ + ql_elm_new(block, link); + block->thread = self; + ql_tail_insert(&head->blocks, block, link); + malloc_mutex_unlock(&head->lock); + return (NULL); +} + +void +tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block) +{ + + malloc_mutex_lock(&head->lock); + ql_remove(&head->blocks, block, link); + malloc_mutex_unlock(&head->lock); +} +#endif diff --git a/src/util.c b/src/util.c index b3a01143..93a19fd1 100644 --- a/src/util.c +++ b/src/util.c @@ -77,7 +77,7 @@ malloc_write(const char *s) * provide a wrapper. 
*/ int -buferror(char *buf, size_t buflen) +buferror(int err, char *buf, size_t buflen) { #ifdef _WIN32 @@ -85,34 +85,36 @@ buferror(char *buf, size_t buflen) (LPSTR)buf, buflen, NULL); return (0); #elif defined(_GNU_SOURCE) - char *b = strerror_r(errno, buf, buflen); + char *b = strerror_r(err, buf, buflen); if (b != buf) { strncpy(buf, b, buflen); buf[buflen-1] = '\0'; } return (0); #else - return (strerror_r(errno, buf, buflen)); + return (strerror_r(err, buf, buflen)); #endif } uintmax_t -malloc_strtoumax(const char *nptr, char **endptr, int base) +malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) { uintmax_t ret, digit; int b; bool neg; const char *p, *ns; + p = nptr; if (base < 0 || base == 1 || base > 36) { + ns = p; set_errno(EINVAL); - return (UINTMAX_MAX); + ret = UINTMAX_MAX; + goto label_return; } b = base; /* Swallow leading whitespace and get sign, if any. */ neg = false; - p = nptr; while (true) { switch (*p) { case '\t': case '\n': case '\v': case '\f': case '\r': case ' ': @@ -146,7 +148,7 @@ malloc_strtoumax(const char *nptr, char **endptr, int base) if (b == 8) p++; break; - case 'x': + case 'X': case 'x': switch (p[2]) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': @@ -164,7 +166,9 @@ malloc_strtoumax(const char *nptr, char **endptr, int base) } break; default: - break; + p++; + ret = 0; + goto label_return; } } if (b == 0) @@ -181,13 +185,22 @@ malloc_strtoumax(const char *nptr, char **endptr, int base) if (ret < pret) { /* Overflow. */ set_errno(ERANGE); - return (UINTMAX_MAX); + ret = UINTMAX_MAX; + goto label_return; } p++; } if (neg) ret = -ret; + if (p == ns) { + /* No conversion performed. */ + set_errno(EINVAL); + ret = UINTMAX_MAX; + goto label_return; + } + +label_return: if (endptr != NULL) { if (p == ns) { /* No characters were converted. */ @@ -195,7 +208,6 @@ malloc_strtoumax(const char *nptr, char **endptr, int base) } else *endptr = (char *)p; } - return (ret); } @@ -331,7 +343,7 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) APPEND_C(' '); \ } \ } while (0) -#define GET_ARG_NUMERIC(val, len) do { \ +#define GET_ARG_NUMERIC(val, len) do { \ switch (len) { \ case '?': \ val = va_arg(ap, int); \ @@ -354,6 +366,9 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) case 'j': \ val = va_arg(ap, intmax_t); \ break; \ + case 'j' | 0x80: \ + val = va_arg(ap, uintmax_t); \ + break; \ case 't': \ val = va_arg(ap, ptrdiff_t); \ break; \ @@ -385,11 +400,6 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) unsigned char len = '?'; f++; - if (*f == '%') { - /* %% */ - APPEND_C(*f); - break; - } /* Flags. */ while (true) { switch (*f) { @@ -419,6 +429,10 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) case '*': width = va_arg(ap, int); f++; + if (width < 0) { + left_justify = true; + width = -width; + } break; case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': { @@ -428,19 +442,16 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) assert(uwidth != UINTMAX_MAX || get_errno() != ERANGE); width = (int)uwidth; - if (*f == '.') { - f++; - goto label_precision; - } else - goto label_length; break; - } case '.': - f++; - goto label_precision; - default: goto label_length; + } default: + break; } + /* Width/precision separator. */ + if (*f == '.') + f++; + else + goto label_length; /* Precision. 
*/ - label_precision: switch (*f) { case '*': prec = va_arg(ap, int); @@ -469,16 +480,8 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) } else len = 'l'; break; - case 'j': - len = 'j'; - f++; - break; - case 't': - len = 't'; - f++; - break; - case 'z': - len = 'z'; + case 'q': case 'j': case 't': case 'z': + len = *f; f++; break; default: break; @@ -487,6 +490,11 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) switch (*f) { char *s; size_t slen; + case '%': + /* %% */ + APPEND_C(*f); + f++; + break; case 'd': case 'i': { intmax_t val JEMALLOC_CC_SILENCE_INIT(0); char buf[D2S_BUFSIZE]; @@ -540,7 +548,7 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) assert(len == '?' || len == 'l'); assert_not_implemented(len != 'l'); s = va_arg(ap, char *); - slen = (prec == -1) ? strlen(s) : prec; + slen = (prec < 0) ? strlen(s) : prec; APPEND_PADDED_S(s, slen, width, left_justify); f++; break; @@ -553,8 +561,7 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) APPEND_PADDED_S(s, slen, width, left_justify); f++; break; - } - default: not_implemented(); + } default: not_reached(); } break; } default: { diff --git a/src/zone.c b/src/zone.c index c62c183f..e0302ef4 100644 --- a/src/zone.c +++ b/src/zone.c @@ -137,7 +137,7 @@ zone_destroy(malloc_zone_t *zone) { /* This function should never be called. */ - assert(false); + not_reached(); return (NULL); } diff --git a/test/ALLOCM_ARENA.c b/test/ALLOCM_ARENA.c deleted file mode 100644 index ca91b621..00000000 --- a/test/ALLOCM_ARENA.c +++ /dev/null @@ -1,68 +0,0 @@ -#define JEMALLOC_MANGLE -#include "jemalloc_test.h" - -#define NTHREADS 10 - -void * -je_thread_start(void *arg) -{ - unsigned thread_ind = (unsigned)(uintptr_t)arg; - unsigned arena_ind; - int r; - void *p; - size_t rsz, sz; - - sz = sizeof(arena_ind); - if (mallctl("arenas.extend", &arena_ind, &sz, NULL, 0) - != 0) { - malloc_printf("Error in arenas.extend\n"); - abort(); - } - - if (thread_ind % 4 != 3) { - size_t mib[3]; - size_t miblen = sizeof(mib) / sizeof(size_t); - const char *dss_precs[] = {"disabled", "primary", "secondary"}; - const char *dss = dss_precs[thread_ind % - (sizeof(dss_precs)/sizeof(char*))]; - if (mallctlnametomib("arena.0.dss", mib, &miblen) != 0) { - malloc_printf("Error in mallctlnametomib()\n"); - abort(); - } - mib[1] = arena_ind; - if (mallctlbymib(mib, miblen, NULL, NULL, (void *)&dss, - sizeof(const char *))) { - malloc_printf("Error in mallctlbymib()\n"); - abort(); - } - } - - r = allocm(&p, &rsz, 1, ALLOCM_ARENA(arena_ind)); - if (r != ALLOCM_SUCCESS) { - malloc_printf("Unexpected allocm() error\n"); - abort(); - } - dallocm(p, 0); - - return (NULL); -} - -int -main(void) -{ - je_thread_t threads[NTHREADS]; - unsigned i; - - malloc_printf("Test begin\n"); - - for (i = 0; i < NTHREADS; i++) { - je_thread_create(&threads[i], je_thread_start, - (void *)(uintptr_t)i); - } - - for (i = 0; i < NTHREADS; i++) - je_thread_join(threads[i], NULL); - - malloc_printf("Test end\n"); - return (0); -} diff --git a/test/ALLOCM_ARENA.exp b/test/ALLOCM_ARENA.exp deleted file mode 100644 index 369a88dd..00000000 --- a/test/ALLOCM_ARENA.exp +++ /dev/null @@ -1,2 +0,0 @@ -Test begin -Test end diff --git a/test/aligned_alloc.exp b/test/aligned_alloc.exp deleted file mode 100644 index b5061c72..00000000 --- a/test/aligned_alloc.exp +++ /dev/null @@ -1,25 +0,0 @@ -Test begin -Alignment: 8 -Alignment: 16 -Alignment: 32 -Alignment: 64 -Alignment: 128 -Alignment: 256 -Alignment: 
512 -Alignment: 1024 -Alignment: 2048 -Alignment: 4096 -Alignment: 8192 -Alignment: 16384 -Alignment: 32768 -Alignment: 65536 -Alignment: 131072 -Alignment: 262144 -Alignment: 524288 -Alignment: 1048576 -Alignment: 2097152 -Alignment: 4194304 -Alignment: 8388608 -Alignment: 16777216 -Alignment: 33554432 -Test end diff --git a/test/allocated.c b/test/allocated.c deleted file mode 100644 index b1a9cfd9..00000000 --- a/test/allocated.c +++ /dev/null @@ -1,118 +0,0 @@ -#define JEMALLOC_MANGLE -#include "jemalloc_test.h" - -void * -je_thread_start(void *arg) -{ - int err; - void *p; - uint64_t a0, a1, d0, d1; - uint64_t *ap0, *ap1, *dp0, *dp1; - size_t sz, usize; - - sz = sizeof(a0); - if ((err = mallctl("thread.allocated", &a0, &sz, NULL, 0))) { - if (err == ENOENT) { -#ifdef JEMALLOC_STATS - assert(false); -#endif - goto label_return; - } - malloc_printf("%s(): Error in mallctl(): %s\n", __func__, - strerror(err)); - exit(1); - } - sz = sizeof(ap0); - if ((err = mallctl("thread.allocatedp", &ap0, &sz, NULL, 0))) { - if (err == ENOENT) { -#ifdef JEMALLOC_STATS - assert(false); -#endif - goto label_return; - } - malloc_printf("%s(): Error in mallctl(): %s\n", __func__, - strerror(err)); - exit(1); - } - assert(*ap0 == a0); - - sz = sizeof(d0); - if ((err = mallctl("thread.deallocated", &d0, &sz, NULL, 0))) { - if (err == ENOENT) { -#ifdef JEMALLOC_STATS - assert(false); -#endif - goto label_return; - } - malloc_printf("%s(): Error in mallctl(): %s\n", __func__, - strerror(err)); - exit(1); - } - sz = sizeof(dp0); - if ((err = mallctl("thread.deallocatedp", &dp0, &sz, NULL, 0))) { - if (err == ENOENT) { -#ifdef JEMALLOC_STATS - assert(false); -#endif - goto label_return; - } - malloc_printf("%s(): Error in mallctl(): %s\n", __func__, - strerror(err)); - exit(1); - } - assert(*dp0 == d0); - - p = malloc(1); - if (p == NULL) { - malloc_printf("%s(): Error in malloc()\n", __func__); - exit(1); - } - - sz = sizeof(a1); - mallctl("thread.allocated", &a1, &sz, NULL, 0); - sz = sizeof(ap1); - mallctl("thread.allocatedp", &ap1, &sz, NULL, 0); - assert(*ap1 == a1); - assert(ap0 == ap1); - - usize = malloc_usable_size(p); - assert(a0 + usize <= a1); - - free(p); - - sz = sizeof(d1); - mallctl("thread.deallocated", &d1, &sz, NULL, 0); - sz = sizeof(dp1); - mallctl("thread.deallocatedp", &dp1, &sz, NULL, 0); - assert(*dp1 == d1); - assert(dp0 == dp1); - - assert(d0 + usize <= d1); - -label_return: - return (NULL); -} - -int -main(void) -{ - int ret = 0; - je_thread_t thread; - - malloc_printf("Test begin\n"); - - je_thread_start(NULL); - - je_thread_create(&thread, je_thread_start, NULL); - je_thread_join(thread, NULL); - - je_thread_start(NULL); - - je_thread_create(&thread, je_thread_start, NULL); - je_thread_join(thread, NULL); - - je_thread_start(NULL); - - malloc_printf("Test end\n"); - return (ret); -} diff --git a/test/allocated.exp b/test/allocated.exp deleted file mode 100644 index 369a88dd..00000000 --- a/test/allocated.exp +++ /dev/null @@ -1,2 +0,0 @@ -Test begin -Test end diff --git a/test/allocm.c b/test/allocm.c deleted file mode 100644 index 80be673b..00000000 --- a/test/allocm.c +++ /dev/null @@ -1,194 +0,0 @@ -#define JEMALLOC_MANGLE -#include "jemalloc_test.h" - -#define CHUNK 0x400000 -/* #define MAXALIGN ((size_t)UINT64_C(0x80000000000)) */ -#define MAXALIGN ((size_t)0x2000000LU) -#define NITER 4 - -int -main(void) -{ - int r; - void *p; - size_t nsz, rsz, sz, alignment, total; - unsigned i; - void *ps[NITER]; - - malloc_printf("Test begin\n"); - - sz = 42; - nsz = 0; - r = 
nallocm(&nsz, sz, 0); - if (r != ALLOCM_SUCCESS) { - malloc_printf("Unexpected nallocm() error\n"); - abort(); - } - rsz = 0; - r = allocm(&p, &rsz, sz, 0); - if (r != ALLOCM_SUCCESS) { - malloc_printf("Unexpected allocm() error\n"); - abort(); - } - if (rsz < sz) - malloc_printf("Real size smaller than expected\n"); - if (nsz != rsz) - malloc_printf("nallocm()/allocm() rsize mismatch\n"); - if (dallocm(p, 0) != ALLOCM_SUCCESS) - malloc_printf("Unexpected dallocm() error\n"); - - r = allocm(&p, NULL, sz, 0); - if (r != ALLOCM_SUCCESS) { - malloc_printf("Unexpected allocm() error\n"); - abort(); - } - if (dallocm(p, 0) != ALLOCM_SUCCESS) - malloc_printf("Unexpected dallocm() error\n"); - - nsz = 0; - r = nallocm(&nsz, sz, ALLOCM_ZERO); - if (r != ALLOCM_SUCCESS) { - malloc_printf("Unexpected nallocm() error\n"); - abort(); - } - rsz = 0; - r = allocm(&p, &rsz, sz, ALLOCM_ZERO); - if (r != ALLOCM_SUCCESS) { - malloc_printf("Unexpected allocm() error\n"); - abort(); - } - if (nsz != rsz) - malloc_printf("nallocm()/allocm() rsize mismatch\n"); - if (dallocm(p, 0) != ALLOCM_SUCCESS) - malloc_printf("Unexpected dallocm() error\n"); - -#if LG_SIZEOF_PTR == 3 - alignment = UINT64_C(0x8000000000000000); - sz = UINT64_C(0x8000000000000000); -#else - alignment = 0x80000000LU; - sz = 0x80000000LU; -#endif - nsz = 0; - r = nallocm(&nsz, sz, ALLOCM_ALIGN(alignment)); - if (r == ALLOCM_SUCCESS) { - malloc_printf( - "Expected error for nallocm(&nsz, %zu, %#x)\n", - sz, ALLOCM_ALIGN(alignment)); - } - rsz = 0; - r = allocm(&p, &rsz, sz, ALLOCM_ALIGN(alignment)); - if (r == ALLOCM_SUCCESS) { - malloc_printf( - "Expected error for allocm(&p, %zu, %#x)\n", - sz, ALLOCM_ALIGN(alignment)); - } - if (nsz != rsz) - malloc_printf("nallocm()/allocm() rsize mismatch\n"); - -#if LG_SIZEOF_PTR == 3 - alignment = UINT64_C(0x4000000000000000); - sz = UINT64_C(0x8400000000000001); -#else - alignment = 0x40000000LU; - sz = 0x84000001LU; -#endif - nsz = 0; - r = nallocm(&nsz, sz, ALLOCM_ALIGN(alignment)); - if (r != ALLOCM_SUCCESS) - malloc_printf("Unexpected nallocm() error\n"); - rsz = 0; - r = allocm(&p, &rsz, sz, ALLOCM_ALIGN(alignment)); - if (r == ALLOCM_SUCCESS) { - malloc_printf( - "Expected error for allocm(&p, %zu, %#x)\n", - sz, ALLOCM_ALIGN(alignment)); - } - - alignment = 0x10LU; -#if LG_SIZEOF_PTR == 3 - sz = UINT64_C(0xfffffffffffffff0); -#else - sz = 0xfffffff0LU; -#endif - nsz = 0; - r = nallocm(&nsz, sz, ALLOCM_ALIGN(alignment)); - if (r == ALLOCM_SUCCESS) { - malloc_printf( - "Expected error for nallocm(&nsz, %zu, %#x)\n", - sz, ALLOCM_ALIGN(alignment)); - } - rsz = 0; - r = allocm(&p, &rsz, sz, ALLOCM_ALIGN(alignment)); - if (r == ALLOCM_SUCCESS) { - malloc_printf( - "Expected error for allocm(&p, %zu, %#x)\n", - sz, ALLOCM_ALIGN(alignment)); - } - if (nsz != rsz) - malloc_printf("nallocm()/allocm() rsize mismatch\n"); - - for (i = 0; i < NITER; i++) - ps[i] = NULL; - - for (alignment = 8; - alignment <= MAXALIGN; - alignment <<= 1) { - total = 0; - malloc_printf("Alignment: %zu\n", alignment); - for (sz = 1; - sz < 3 * alignment && sz < (1U << 31); - sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { - for (i = 0; i < NITER; i++) { - nsz = 0; - r = nallocm(&nsz, sz, - ALLOCM_ALIGN(alignment) | ALLOCM_ZERO); - if (r != ALLOCM_SUCCESS) { - malloc_printf( - "nallocm() error for size %zu" - " (%#zx): %d\n", - sz, sz, r); - exit(1); - } - rsz = 0; - r = allocm(&ps[i], &rsz, sz, - ALLOCM_ALIGN(alignment) | ALLOCM_ZERO); - if (r != ALLOCM_SUCCESS) { - malloc_printf( - "allocm() error for size %zu" - " (%#zx): 
%d\n", - sz, sz, r); - exit(1); - } - if (rsz < sz) { - malloc_printf( - "Real size smaller than" - " expected\n"); - } - if (nsz != rsz) { - malloc_printf( - "nallocm()/allocm() rsize" - " mismatch\n"); - } - if ((uintptr_t)p & (alignment-1)) { - malloc_printf( - "%p inadequately aligned for" - " alignment: %zu\n", p, alignment); - } - sallocm(ps[i], &rsz, 0); - total += rsz; - if (total >= (MAXALIGN << 1)) - break; - } - for (i = 0; i < NITER; i++) { - if (ps[i] != NULL) { - dallocm(ps[i], 0); - ps[i] = NULL; - } - } - } - } - - malloc_printf("Test end\n"); - return (0); -} diff --git a/test/allocm.exp b/test/allocm.exp deleted file mode 100644 index b5061c72..00000000 --- a/test/allocm.exp +++ /dev/null @@ -1,25 +0,0 @@ -Test begin -Alignment: 8 -Alignment: 16 -Alignment: 32 -Alignment: 64 -Alignment: 128 -Alignment: 256 -Alignment: 512 -Alignment: 1024 -Alignment: 2048 -Alignment: 4096 -Alignment: 8192 -Alignment: 16384 -Alignment: 32768 -Alignment: 65536 -Alignment: 131072 -Alignment: 262144 -Alignment: 524288 -Alignment: 1048576 -Alignment: 2097152 -Alignment: 4194304 -Alignment: 8388608 -Alignment: 16777216 -Alignment: 33554432 -Test end diff --git a/test/bitmap.exp b/test/bitmap.exp deleted file mode 100644 index 369a88dd..00000000 --- a/test/bitmap.exp +++ /dev/null @@ -1,2 +0,0 @@ -Test begin -Test end diff --git a/test/include/test/SFMT-alti.h b/test/include/test/SFMT-alti.h new file mode 100644 index 00000000..2f86f67d --- /dev/null +++ b/test/include/test/SFMT-alti.h @@ -0,0 +1,186 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+/**
+ * @file SFMT-alti.h
+ *
+ * @brief SIMD oriented Fast Mersenne Twister (SFMT)
+ * pseudorandom number generator
+ *
+ * @author Mutsuo Saito (Hiroshima University)
+ * @author Makoto Matsumoto (Hiroshima University)
+ *
+ * Copyright (C) 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * The new BSD License is applied to this software.
+ * see LICENSE.txt
+ */
+
+#ifndef SFMT_ALTI_H
+#define SFMT_ALTI_H
+
+/**
+ * This function represents the recursion formula in AltiVec and BIG ENDIAN.
+ * @param a a 128-bit part of the internal state array
+ * @param b a 128-bit part of the internal state array
+ * @param c a 128-bit part of the internal state array
+ * @param d a 128-bit part of the internal state array
+ * @return output
+ */
+JEMALLOC_ALWAYS_INLINE
+static vector unsigned int vec_recursion(vector unsigned int a,
+                                         vector unsigned int b,
+                                         vector unsigned int c,
+                                         vector unsigned int d) {
+
+    const vector unsigned int sl1 = ALTI_SL1;
+    const vector unsigned int sr1 = ALTI_SR1;
+#ifdef ONLY64
+    const vector unsigned int mask = ALTI_MSK64;
+    const vector unsigned char perm_sl = ALTI_SL2_PERM64;
+    const vector unsigned char perm_sr = ALTI_SR2_PERM64;
+#else
+    const vector unsigned int mask = ALTI_MSK;
+    const vector unsigned char perm_sl = ALTI_SL2_PERM;
+    const vector unsigned char perm_sr = ALTI_SR2_PERM;
+#endif
+    vector unsigned int v, w, x, y, z;
+    x = vec_perm(a, (vector unsigned int)perm_sl, perm_sl);
+    v = a;
+    y = vec_sr(b, sr1);
+    z = vec_perm(c, (vector unsigned int)perm_sr, perm_sr);
+    w = vec_sl(d, sl1);
+    z = vec_xor(z, w);
+    y = vec_and(y, mask);
+    v = vec_xor(v, x);
+    z = vec_xor(z, y);
+    z = vec_xor(z, v);
+    return z;
+}
+
+/**
+ * This function fills the internal state array with pseudorandom
+ * integers.
+ */
+JEMALLOC_INLINE void gen_rand_all(sfmt_t *ctx) {
+    int i;
+    vector unsigned int r, r1, r2;
+
+    r1 = ctx->sfmt[N - 2].s;
+    r2 = ctx->sfmt[N - 1].s;
+    for (i = 0; i < N - POS1; i++) {
+        r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1].s, r1, r2);
+        ctx->sfmt[i].s = r;
+        r1 = r2;
+        r2 = r;
+    }
+    for (; i < N; i++) {
+        r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1 - N].s, r1, r2);
+        ctx->sfmt[i].s = r;
+        r1 = r2;
+        r2 = r;
+    }
+}
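
As an aside, a hedged sketch of how a test might draw values through these generators, assuming the context-based wrapper API that jemalloc's copy of SFMT declares in test/include/test/SFMT.h (init_gen_rand(), gen_rand32(), and fini_gen_rand() are names taken on that assumption, and the seed is illustrative):

#include "test/jemalloc_test.h"	/* assumed to pull in the SFMT wrappers */

static void
rng_smoke_test(void)
{
	sfmt_t *ctx = init_gen_rand(42);	/* 42: arbitrary seed */
	uint32_t r = gen_rand32(ctx);	/* state refilled via gen_rand_all() */

	(void)r;
	fini_gen_rand(ctx);	/* Release the generator context. */
}
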
+
+/**
+ * This function fills the user-specified array with pseudorandom
+ * integers.
+ *
+ * @param array a 128-bit array to be filled by pseudorandom numbers.
+ * @param size number of 128-bit pseudorandom numbers to be generated.
+ */
+JEMALLOC_INLINE void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) {
+    int i, j;
+    vector unsigned int r, r1, r2;
+
+    r1 = ctx->sfmt[N - 2].s;
+    r2 = ctx->sfmt[N - 1].s;
+    for (i = 0; i < N - POS1; i++) {
+        r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1].s, r1, r2);
+        array[i].s = r;
+        r1 = r2;
+        r2 = r;
+    }
+    for (; i < N; i++) {
+        r = vec_recursion(ctx->sfmt[i].s, array[i + POS1 - N].s, r1, r2);
+        array[i].s = r;
+        r1 = r2;
+        r2 = r;
+    }
+    /* main loop */
+    for (; i < size - N; i++) {
+        r = vec_recursion(array[i - N].s, array[i + POS1 - N].s, r1, r2);
+        array[i].s = r;
+        r1 = r2;
+        r2 = r;
+    }
+    for (j = 0; j < 2 * N - size; j++) {
+        ctx->sfmt[j].s = array[j + size - N].s;
+    }
+    for (; i < size; i++) {
+        r = vec_recursion(array[i - N].s, array[i + POS1 - N].s, r1, r2);
+        array[i].s = r;
+        ctx->sfmt[j++].s = r;
+        r1 = r2;
+        r2 = r;
+    }
+}
+
+#ifndef ONLY64
+#if defined(__APPLE__)
+#define ALTI_SWAP (vector unsigned char) \
+	(4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11)
+#else
+#define ALTI_SWAP {4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11}
+#endif
+/**
+ * This function swaps the high and low 32-bit halves of the 64-bit
+ * integers in the user-specified array.
+ *
+ * @param array a 128-bit array to be swapped.
+ * @param size size of the 128-bit array.
+ */
+JEMALLOC_INLINE void swap(w128_t *array, int size) {
+    int i;
+    const vector unsigned char perm = ALTI_SWAP;
+
+    for (i = 0; i < size; i++) {
+        array[i].s = vec_perm(array[i].s, (vector unsigned int)perm, perm);
+    }
+}
+#endif
+
+#endif
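
The loop bounds above (N, POS1) are not defined in this file; they come from SFMT-params.h and its per-MEXP includes, added next. The arithmetic is small enough to check by hand; a self-contained sketch for the default MEXP of 19937, with POS1 taken from SFMT-params19937.h below:

#include <assert.h>

#define MEXP 19937
#define N (MEXP / 128 + 1)	/* 155 + 1 == 156 128-bit blocks */
#define N32 (N * 4)	/* 624 32-bit words, the familiar MT19937 state size */
#define POS1 122	/* pick-up distance; must be less than N */

int
main(void)
{
	assert(N == 156 && N32 == 624 && POS1 < N);
	return (0);
}
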
diff --git a/test/include/test/SFMT-params.h b/test/include/test/SFMT-params.h
new file mode 100644
index 00000000..ade66222
--- /dev/null
+++ b/test/include/test/SFMT-params.h
@@ -0,0 +1,132 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of the Hiroshima University nor the names of
+ *       its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written
+ *       permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef SFMT_PARAMS_H
+#define SFMT_PARAMS_H
+
+#if !defined(MEXP)
+#ifdef __GNUC__
+  #warning "MEXP is not defined. I assume MEXP is 19937."
+#endif
+  #define MEXP 19937
+#endif
+/*-----------------
+  BASIC DEFINITIONS
+  -----------------*/
+/** Mersenne Exponent. The period of the sequence
+ * is a multiple of 2^MEXP-1.
+ * #define MEXP 19937 */
+/** SFMT generator has an internal state array of 128-bit integers,
+ * and N is its size. */
+#define N (MEXP / 128 + 1)
+/** N32 is the size of internal state array when regarded as an array
+ * of 32-bit integers.*/
+#define N32 (N * 4)
+/** N64 is the size of internal state array when regarded as an array
+ * of 64-bit integers.*/
+#define N64 (N * 2)
+
+/*----------------------
+  the parameters of SFMT
+  following definitions are in paramsXXXX.h file.
+  ----------------------*/
+/** the pick up position of the array.
+#define POS1 122
+*/
+
+/** the parameter of shift left as four 32-bit registers.
+#define SL1 18
+ */
+
+/** the parameter of shift left as one 128-bit register.
+ * The 128-bit integer is shifted by (SL2 * 8) bits.
+#define SL2 1
+*/
+
+/** the parameter of shift right as four 32-bit registers.
+#define SR1 11
+*/
+
+/** the parameter of shift right as one 128-bit register.
+ * The 128-bit integer is shifted by (SR2 * 8) bits.
+#define SR2 1
+*/
+
+/** A bitmask, used in the recursion. These parameters are introduced
+ * to break symmetry of SIMD.
+#define MSK1 0xdfffffefU
+#define MSK2 0xddfecb7fU
+#define MSK3 0xbffaffffU
+#define MSK4 0xbffffff6U
+*/
+
+/** These definitions are part of a 128-bit period certification vector.
+#define PARITY1 0x00000001U
+#define PARITY2 0x00000000U
+#define PARITY3 0x00000000U
+#define PARITY4 0xc98e126aU
+*/
+
+#if MEXP == 607
+  #include "test/SFMT-params607.h"
+#elif MEXP == 1279
+  #include "test/SFMT-params1279.h"
+#elif MEXP == 2281
+  #include "test/SFMT-params2281.h"
+#elif MEXP == 4253
+  #include "test/SFMT-params4253.h"
+#elif MEXP == 11213
+  #include "test/SFMT-params11213.h"
+#elif MEXP == 19937
+  #include "test/SFMT-params19937.h"
+#elif MEXP == 44497
+  #include "test/SFMT-params44497.h"
+#elif MEXP == 86243
+  #include "test/SFMT-params86243.h"
+#elif MEXP == 132049
+  #include "test/SFMT-params132049.h"
+#elif MEXP == 216091
+  #include "test/SFMT-params216091.h"
+#else
+#ifdef __GNUC__
+  #error "MEXP is not valid."
+  #undef MEXP
+#else
+  #undef MEXP
+#endif
+
+#endif
+
+#endif /* SFMT_PARAMS_H */
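
Each of the SFMT-params*.h headers that follow pins one parameter set for the dispatch above; per the comment in SFMT-params.h, the period of the resulting generator is a multiple of 2^MEXP-1. A hedged build-time illustration (the hard-coded define below stands in for a -DMEXP=607 compiler flag; leaving MEXP unset defaults to 19937):

#include <stdio.h>

#define MEXP 607	/* normally supplied on the command line, e.g. -DMEXP=607 */
#define N (MEXP / 128 + 1)

int
main(void)
{
	/* 607/128 + 1 == 5 128-bit blocks of internal state. */
	printf("MEXP=%d -> N=%d\n", MEXP, N);
	return (0);
}
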
diff --git a/test/include/test/SFMT-params11213.h b/test/include/test/SFMT-params11213.h
new file mode 100644
index 00000000..2994bd21
--- /dev/null
+++ b/test/include/test/SFMT-params11213.h
@@ -0,0 +1,81 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of the Hiroshima University nor the names of
+ *       its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written
+ *       permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef SFMT_PARAMS11213_H
+#define SFMT_PARAMS11213_H
+
+#define POS1 68
+#define SL1 14
+#define SL2 3
+#define SR1 7
+#define SR2 3
+#define MSK1 0xeffff7fbU
+#define MSK2 0xffffffefU
+#define MSK3 0xdfdfbfffU
+#define MSK4 0x7fffdbfdU
+#define PARITY1 0x00000001U
+#define PARITY2 0x00000000U
+#define PARITY3 0xe8148000U
+#define PARITY4 0xd0c7afa3U
+
+
+/* PARAMETERS FOR ALTIVEC */
+#if defined(__APPLE__)  /* For OSX */
+  #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
+  #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
+  #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
+  #define ALTI_MSK64 \
+	(vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
+  #define ALTI_SL2_PERM \
+	(vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10)
+  #define ALTI_SL2_PERM64 \
+	(vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2)
+  #define ALTI_SR2_PERM \
+	(vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12)
+  #define ALTI_SR2_PERM64 \
+	(vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12)
+#else	/* For OTHER OSs(Linux?) */
+  #define ALTI_SL1 {SL1, SL1, SL1, SL1}
+  #define ALTI_SR1 {SR1, SR1, SR1, SR1}
+  #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
+  #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
+  #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10}
+  #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2}
+  #define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12}
+  #define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12}
+#endif	/* For OSX */
+#define IDSTR "SFMT-11213:68-14-3-7-3:effff7fb-ffffffef-dfdfbfff-7fffdbfd"
+
+#endif /* SFMT_PARAMS11213_H */
diff --git a/test/include/test/SFMT-params1279.h b/test/include/test/SFMT-params1279.h
new file mode 100644
index 00000000..d7959f98
--- /dev/null
+++ b/test/include/test/SFMT-params1279.h
@@ -0,0 +1,81 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef SFMT_PARAMS1279_H +#define SFMT_PARAMS1279_H + +#define POS1 7 +#define SL1 14 +#define SL2 3 +#define SR1 5 +#define SR2 1 +#define MSK1 0xf7fefffdU +#define MSK2 0x7fefcfffU +#define MSK3 0xaff3ef3fU +#define MSK4 0xb5ffff7fU +#define PARITY1 0x00000001U +#define PARITY2 0x00000000U +#define PARITY3 0x00000000U +#define PARITY4 0x20000000U + + +/* PARAMETERS FOR ALTIVEC */ +#if defined(__APPLE__) /* For OSX */ + #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) + #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) + #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) + #define ALTI_MSK64 \ + (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) + #define ALTI_SL2_PERM \ + (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) + #define ALTI_SL2_PERM64 \ + (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) + #define ALTI_SR2_PERM \ + (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) + #define ALTI_SR2_PERM64 \ + (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) +#else /* For OTHER OSs(Linux?) */ + #define ALTI_SL1 {SL1, SL1, SL1, SL1} + #define ALTI_SR1 {SR1, SR1, SR1, SR1} + #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} + #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} + #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} + #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} + #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} + #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} +#endif /* For OSX */ +#define IDSTR "SFMT-1279:7-14-3-5-1:f7fefffd-7fefcfff-aff3ef3f-b5ffff7f" + +#endif /* SFMT_PARAMS1279_H */ diff --git a/test/include/test/SFMT-params132049.h b/test/include/test/SFMT-params132049.h new file mode 100644 index 00000000..a1dcec39 --- /dev/null +++ b/test/include/test/SFMT-params132049.h @@ -0,0 +1,81 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef SFMT_PARAMS132049_H +#define SFMT_PARAMS132049_H + +#define POS1 110 +#define SL1 19 +#define SL2 1 +#define SR1 21 +#define SR2 1 +#define MSK1 0xffffbb5fU +#define MSK2 0xfb6ebf95U +#define MSK3 0xfffefffaU +#define MSK4 0xcff77fffU +#define PARITY1 0x00000001U +#define PARITY2 0x00000000U +#define PARITY3 0xcb520000U +#define PARITY4 0xc7e91c7dU + + +/* PARAMETERS FOR ALTIVEC */ +#if defined(__APPLE__) /* For OSX */ + #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) + #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) + #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) + #define ALTI_MSK64 \ + (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) + #define ALTI_SL2_PERM \ + (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8) + #define ALTI_SL2_PERM64 \ + (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0) + #define ALTI_SR2_PERM \ + (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) + #define ALTI_SR2_PERM64 \ + (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) +#else /* For OTHER OSs(Linux?) 
*/ + #define ALTI_SL1 {SL1, SL1, SL1, SL1} + #define ALTI_SR1 {SR1, SR1, SR1, SR1} + #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} + #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} + #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8} + #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0} + #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} + #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} +#endif /* For OSX */ +#define IDSTR "SFMT-132049:110-19-1-21-1:ffffbb5f-fb6ebf95-fffefffa-cff77fff" + +#endif /* SFMT_PARAMS132049_H */ diff --git a/test/include/test/SFMT-params19937.h b/test/include/test/SFMT-params19937.h new file mode 100644 index 00000000..fb92b4c9 --- /dev/null +++ b/test/include/test/SFMT-params19937.h @@ -0,0 +1,81 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#ifndef SFMT_PARAMS19937_H +#define SFMT_PARAMS19937_H + +#define POS1 122 +#define SL1 18 +#define SL2 1 +#define SR1 11 +#define SR2 1 +#define MSK1 0xdfffffefU +#define MSK2 0xddfecb7fU +#define MSK3 0xbffaffffU +#define MSK4 0xbffffff6U +#define PARITY1 0x00000001U +#define PARITY2 0x00000000U +#define PARITY3 0x00000000U +#define PARITY4 0x13c9e684U + + +/* PARAMETERS FOR ALTIVEC */ +#if defined(__APPLE__) /* For OSX */ + #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) + #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) + #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) + #define ALTI_MSK64 \ + (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) + #define ALTI_SL2_PERM \ + (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8) + #define ALTI_SL2_PERM64 \ + (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0) + #define ALTI_SR2_PERM \ + (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) + #define ALTI_SR2_PERM64 \ + (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) +#else /* For OTHER OSs(Linux?) */ + #define ALTI_SL1 {SL1, SL1, SL1, SL1} + #define ALTI_SR1 {SR1, SR1, SR1, SR1} + #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} + #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} + #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8} + #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0} + #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} + #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} +#endif /* For OSX */ +#define IDSTR "SFMT-19937:122-18-1-11-1:dfffffef-ddfecb7f-bffaffff-bffffff6" + +#endif /* SFMT_PARAMS19937_H */ diff --git a/test/include/test/SFMT-params216091.h b/test/include/test/SFMT-params216091.h new file mode 100644 index 00000000..125ce282 --- /dev/null +++ b/test/include/test/SFMT-params216091.h @@ -0,0 +1,81 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef SFMT_PARAMS216091_H +#define SFMT_PARAMS216091_H + +#define POS1 627 +#define SL1 11 +#define SL2 3 +#define SR1 10 +#define SR2 1 +#define MSK1 0xbff7bff7U +#define MSK2 0xbfffffffU +#define MSK3 0xbffffa7fU +#define MSK4 0xffddfbfbU +#define PARITY1 0xf8000001U +#define PARITY2 0x89e80709U +#define PARITY3 0x3bd2b64bU +#define PARITY4 0x0c64b1e4U + + +/* PARAMETERS FOR ALTIVEC */ +#if defined(__APPLE__) /* For OSX */ + #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) + #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) + #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) + #define ALTI_MSK64 \ + (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) + #define ALTI_SL2_PERM \ + (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) + #define ALTI_SL2_PERM64 \ + (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) + #define ALTI_SR2_PERM \ + (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) + #define ALTI_SR2_PERM64 \ + (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) +#else /* For OTHER OSs(Linux?) */ + #define ALTI_SL1 {SL1, SL1, SL1, SL1} + #define ALTI_SR1 {SR1, SR1, SR1, SR1} + #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} + #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} + #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} + #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} + #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} + #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} +#endif /* For OSX */ +#define IDSTR "SFMT-216091:627-11-3-10-1:bff7bff7-bfffffff-bffffa7f-ffddfbfb" + +#endif /* SFMT_PARAMS216091_H */ diff --git a/test/include/test/SFMT-params2281.h b/test/include/test/SFMT-params2281.h new file mode 100644 index 00000000..0ef85c40 --- /dev/null +++ b/test/include/test/SFMT-params2281.h @@ -0,0 +1,81 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef SFMT_PARAMS2281_H +#define SFMT_PARAMS2281_H + +#define POS1 12 +#define SL1 19 +#define SL2 1 +#define SR1 5 +#define SR2 1 +#define MSK1 0xbff7ffbfU +#define MSK2 0xfdfffffeU +#define MSK3 0xf7ffef7fU +#define MSK4 0xf2f7cbbfU +#define PARITY1 0x00000001U +#define PARITY2 0x00000000U +#define PARITY3 0x00000000U +#define PARITY4 0x41dfa600U + + +/* PARAMETERS FOR ALTIVEC */ +#if defined(__APPLE__) /* For OSX */ + #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) + #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) + #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) + #define ALTI_MSK64 \ + (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) + #define ALTI_SL2_PERM \ + (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8) + #define ALTI_SL2_PERM64 \ + (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0) + #define ALTI_SR2_PERM \ + (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) + #define ALTI_SR2_PERM64 \ + (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) +#else /* For OTHER OSs(Linux?) */ + #define ALTI_SL1 {SL1, SL1, SL1, SL1} + #define ALTI_SR1 {SR1, SR1, SR1, SR1} + #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} + #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} + #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8} + #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0} + #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} + #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} +#endif /* For OSX */ +#define IDSTR "SFMT-2281:12-19-1-5-1:bff7ffbf-fdfffffe-f7ffef7f-f2f7cbbf" + +#endif /* SFMT_PARAMS2281_H */ diff --git a/test/include/test/SFMT-params4253.h b/test/include/test/SFMT-params4253.h new file mode 100644 index 00000000..9f07bc67 --- /dev/null +++ b/test/include/test/SFMT-params4253.h @@ -0,0 +1,81 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. 
+ * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef SFMT_PARAMS4253_H +#define SFMT_PARAMS4253_H + +#define POS1 17 +#define SL1 20 +#define SL2 1 +#define SR1 7 +#define SR2 1 +#define MSK1 0x9f7bffffU +#define MSK2 0x9fffff5fU +#define MSK3 0x3efffffbU +#define MSK4 0xfffff7bbU +#define PARITY1 0xa8000001U +#define PARITY2 0xaf5390a3U +#define PARITY3 0xb740b3f8U +#define PARITY4 0x6c11486dU + + +/* PARAMETERS FOR ALTIVEC */ +#if defined(__APPLE__) /* For OSX */ + #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) + #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) + #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) + #define ALTI_MSK64 \ + (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) + #define ALTI_SL2_PERM \ + (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8) + #define ALTI_SL2_PERM64 \ + (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0) + #define ALTI_SR2_PERM \ + (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) + #define ALTI_SR2_PERM64 \ + (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) +#else /* For OTHER OSs(Linux?) */ + #define ALTI_SL1 {SL1, SL1, SL1, SL1} + #define ALTI_SR1 {SR1, SR1, SR1, SR1} + #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} + #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} + #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8} + #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0} + #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} + #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} +#endif /* For OSX */ +#define IDSTR "SFMT-4253:17-20-1-7-1:9f7bffff-9fffff5f-3efffffb-fffff7bb" + +#endif /* SFMT_PARAMS4253_H */ diff --git a/test/include/test/SFMT-params44497.h b/test/include/test/SFMT-params44497.h new file mode 100644 index 00000000..85598fed --- /dev/null +++ b/test/include/test/SFMT-params44497.h @@ -0,0 +1,81 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef SFMT_PARAMS44497_H +#define SFMT_PARAMS44497_H + +#define POS1 330 +#define SL1 5 +#define SL2 3 +#define SR1 9 +#define SR2 3 +#define MSK1 0xeffffffbU +#define MSK2 0xdfbebfffU +#define MSK3 0xbfbf7befU +#define MSK4 0x9ffd7bffU +#define PARITY1 0x00000001U +#define PARITY2 0x00000000U +#define PARITY3 0xa3ac4000U +#define PARITY4 0xecc1327aU + + +/* PARAMETERS FOR ALTIVEC */ +#if defined(__APPLE__) /* For OSX */ + #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) + #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) + #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) + #define ALTI_MSK64 \ + (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) + #define ALTI_SL2_PERM \ + (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) + #define ALTI_SL2_PERM64 \ + (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) + #define ALTI_SR2_PERM \ + (vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12) + #define ALTI_SR2_PERM64 \ + (vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12) +#else /* For OTHER OSs(Linux?) */ + #define ALTI_SL1 {SL1, SL1, SL1, SL1} + #define ALTI_SR1 {SR1, SR1, SR1, SR1} + #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} + #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} + #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} + #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} + #define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12} + #define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12} +#endif /* For OSX */ +#define IDSTR "SFMT-44497:330-5-3-9-3:effffffb-dfbebfff-bfbf7bef-9ffd7bff" + +#endif /* SFMT_PARAMS44497_H */ diff --git a/test/include/test/SFMT-params607.h b/test/include/test/SFMT-params607.h new file mode 100644 index 00000000..bc76485f --- /dev/null +++ b/test/include/test/SFMT-params607.h @@ -0,0 +1,81 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef SFMT_PARAMS607_H +#define SFMT_PARAMS607_H + +#define POS1 2 +#define SL1 15 +#define SL2 3 +#define SR1 13 +#define SR2 3 +#define MSK1 0xfdff37ffU +#define MSK2 0xef7f3f7dU +#define MSK3 0xff777b7dU +#define MSK4 0x7ff7fb2fU +#define PARITY1 0x00000001U +#define PARITY2 0x00000000U +#define PARITY3 0x00000000U +#define PARITY4 0x5986f054U + + +/* PARAMETERS FOR ALTIVEC */ +#if defined(__APPLE__) /* For OSX */ + #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) + #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) + #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) + #define ALTI_MSK64 \ + (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) + #define ALTI_SL2_PERM \ + (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) + #define ALTI_SL2_PERM64 \ + (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) + #define ALTI_SR2_PERM \ + (vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12) + #define ALTI_SR2_PERM64 \ + (vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12) +#else /* For OTHER OSs(Linux?) 
*/ + #define ALTI_SL1 {SL1, SL1, SL1, SL1} + #define ALTI_SR1 {SR1, SR1, SR1, SR1} + #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} + #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} + #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} + #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} + #define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12} + #define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12} +#endif /* For OSX */ +#define IDSTR "SFMT-607:2-15-3-13-3:fdff37ff-ef7f3f7d-ff777b7d-7ff7fb2f" + +#endif /* SFMT_PARAMS607_H */ diff --git a/test/include/test/SFMT-params86243.h b/test/include/test/SFMT-params86243.h new file mode 100644 index 00000000..5e4d783c --- /dev/null +++ b/test/include/test/SFMT-params86243.h @@ -0,0 +1,81 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#ifndef SFMT_PARAMS86243_H +#define SFMT_PARAMS86243_H + +#define POS1 366 +#define SL1 6 +#define SL2 7 +#define SR1 19 +#define SR2 1 +#define MSK1 0xfdbffbffU +#define MSK2 0xbff7ff3fU +#define MSK3 0xfd77efffU +#define MSK4 0xbf9ff3ffU +#define PARITY1 0x00000001U +#define PARITY2 0x00000000U +#define PARITY3 0x00000000U +#define PARITY4 0xe9528d85U + + +/* PARAMETERS FOR ALTIVEC */ +#if defined(__APPLE__) /* For OSX */ + #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) + #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) + #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) + #define ALTI_MSK64 \ + (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) + #define ALTI_SL2_PERM \ + (vector unsigned char)(25,25,25,25,3,25,25,25,7,0,1,2,11,4,5,6) + #define ALTI_SL2_PERM64 \ + (vector unsigned char)(7,25,25,25,25,25,25,25,15,0,1,2,3,4,5,6) + #define ALTI_SR2_PERM \ + (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) + #define ALTI_SR2_PERM64 \ + (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) +#else /* For OTHER OSs(Linux?) */ + #define ALTI_SL1 {SL1, SL1, SL1, SL1} + #define ALTI_SR1 {SR1, SR1, SR1, SR1} + #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} + #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} + #define ALTI_SL2_PERM {25,25,25,25,3,25,25,25,7,0,1,2,11,4,5,6} + #define ALTI_SL2_PERM64 {7,25,25,25,25,25,25,25,15,0,1,2,3,4,5,6} + #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} + #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} +#endif /* For OSX */ +#define IDSTR "SFMT-86243:366-6-7-19-1:fdbffbff-bff7ff3f-fd77efff-bf9ff3ff" + +#endif /* SFMT_PARAMS86243_H */ diff --git a/test/include/test/SFMT-sse2.h b/test/include/test/SFMT-sse2.h new file mode 100644 index 00000000..0314a163 --- /dev/null +++ b/test/include/test/SFMT-sse2.h @@ -0,0 +1,157 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/**
+ * @file SFMT-sse2.h
+ * @brief SIMD oriented Fast Mersenne Twister(SFMT) for Intel SSE2
+ *
+ * @author Mutsuo Saito (Hiroshima University)
+ * @author Makoto Matsumoto (Hiroshima University)
+ *
+ * @note We assume LITTLE ENDIAN in this file
+ *
+ * Copyright (C) 2006, 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * The new BSD License is applied to this software, see LICENSE.txt
+ */
+
+#ifndef SFMT_SSE2_H
+#define SFMT_SSE2_H
+
+/**
+ * This function represents the recursion formula.
+ * @param a a 128-bit part of the internal state array
+ * @param b a 128-bit part of the internal state array
+ * @param c a 128-bit part of the internal state array
+ * @param d a 128-bit part of the internal state array
+ * @param mask 128-bit mask
+ * @return output
+ */
+JEMALLOC_ALWAYS_INLINE __m128i mm_recursion(__m128i *a, __m128i *b,
+    __m128i c, __m128i d, __m128i mask) {
+    __m128i v, x, y, z;
+
+    x = _mm_load_si128(a);
+    y = _mm_srli_epi32(*b, SR1);
+    z = _mm_srli_si128(c, SR2);
+    v = _mm_slli_epi32(d, SL1);
+    z = _mm_xor_si128(z, x);
+    z = _mm_xor_si128(z, v);
+    x = _mm_slli_si128(x, SL2);
+    y = _mm_and_si128(y, mask);
+    z = _mm_xor_si128(z, x);
+    z = _mm_xor_si128(z, y);
+    return z;
+}
+
+/**
+ * This function fills the internal state array with pseudorandom
+ * integers.
+ */
+JEMALLOC_INLINE void gen_rand_all(sfmt_t *ctx) {
+    int i;
+    __m128i r, r1, r2, mask;
+    mask = _mm_set_epi32(MSK4, MSK3, MSK2, MSK1);
+
+    r1 = _mm_load_si128(&ctx->sfmt[N - 2].si);
+    r2 = _mm_load_si128(&ctx->sfmt[N - 1].si);
+    for (i = 0; i < N - POS1; i++) {
+	r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1].si, r1, r2,
+	    mask);
+	_mm_store_si128(&ctx->sfmt[i].si, r);
+	r1 = r2;
+	r2 = r;
+    }
+    for (; i < N; i++) {
+	r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1 - N].si, r1, r2,
+	    mask);
+	_mm_store_si128(&ctx->sfmt[i].si, r);
+	r1 = r2;
+	r2 = r;
+    }
+}
+
+/**
+ * This function fills the user-specified array with pseudorandom
+ * integers.
+ *
+ * @param array a 128-bit array to be filled by pseudorandom numbers.
+ * @param size number of 128-bit pseudorandom numbers to be generated.
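+ *
+ * @note Editor's note, not part of the original SFMT docs: the loops below
+ * assume size >= N (the state size in 128-bit words); the public wrappers
+ * fill_array32()/fill_array64() declared in SFMT.h are presumably expected
+ * to enforce this, cf. get_min_array_size32()/get_min_array_size64().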
+ */ +JEMALLOC_INLINE void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { + int i, j; + __m128i r, r1, r2, mask; + mask = _mm_set_epi32(MSK4, MSK3, MSK2, MSK1); + + r1 = _mm_load_si128(&ctx->sfmt[N - 2].si); + r2 = _mm_load_si128(&ctx->sfmt[N - 1].si); + for (i = 0; i < N - POS1; i++) { + r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1].si, r1, r2, + mask); + _mm_store_si128(&array[i].si, r); + r1 = r2; + r2 = r; + } + for (; i < N; i++) { + r = mm_recursion(&ctx->sfmt[i].si, &array[i + POS1 - N].si, r1, r2, + mask); + _mm_store_si128(&array[i].si, r); + r1 = r2; + r2 = r; + } + /* main loop */ + for (; i < size - N; i++) { + r = mm_recursion(&array[i - N].si, &array[i + POS1 - N].si, r1, r2, + mask); + _mm_store_si128(&array[i].si, r); + r1 = r2; + r2 = r; + } + for (j = 0; j < 2 * N - size; j++) { + r = _mm_load_si128(&array[j + size - N].si); + _mm_store_si128(&ctx->sfmt[j].si, r); + } + for (; i < size; i++) { + r = mm_recursion(&array[i - N].si, &array[i + POS1 - N].si, r1, r2, + mask); + _mm_store_si128(&array[i].si, r); + _mm_store_si128(&ctx->sfmt[j++].si, r); + r1 = r2; + r2 = r; + } +} + +#endif diff --git a/test/include/test/SFMT.h b/test/include/test/SFMT.h new file mode 100644 index 00000000..09c1607d --- /dev/null +++ b/test/include/test/SFMT.h @@ -0,0 +1,171 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +/** + * @file SFMT.h + * + * @brief SIMD oriented Fast Mersenne Twister(SFMT) pseudorandom + * number generator + * + * @author Mutsuo Saito (Hiroshima University) + * @author Makoto Matsumoto (Hiroshima University) + * + * Copyright (C) 2006, 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * The new BSD License is applied to this software. 
+ * see LICENSE.txt
+ *
+ * @note We assume that your system has inttypes.h. If your system
+ * doesn't have inttypes.h, you have to typedef uint32_t and uint64_t,
+ * and you have to define PRIu64 and PRIx64 in this file as follows:
+ * @verbatim
+ typedef unsigned int uint32_t
+ typedef unsigned long long uint64_t
+ #define PRIu64 "llu"
+ #define PRIx64 "llx"
+@endverbatim
+ * uint32_t must be exactly a 32-bit unsigned integer type (no more, no
+ * less), and uint64_t must be exactly a 64-bit unsigned integer type.
+ * PRIu64 and PRIx64 are used by the printf family to print a 64-bit
+ * unsigned int in decimal and hexadecimal format, respectively.
+ */
+
+#ifndef SFMT_H
+#define SFMT_H
+
+typedef struct sfmt_s sfmt_t;
+
+uint32_t gen_rand32(sfmt_t *ctx);
+uint32_t gen_rand32_range(sfmt_t *ctx, uint32_t limit);
+uint64_t gen_rand64(sfmt_t *ctx);
+uint64_t gen_rand64_range(sfmt_t *ctx, uint64_t limit);
+void fill_array32(sfmt_t *ctx, uint32_t *array, int size);
+void fill_array64(sfmt_t *ctx, uint64_t *array, int size);
+sfmt_t *init_gen_rand(uint32_t seed);
+sfmt_t *init_by_array(uint32_t *init_key, int key_length);
+void fini_gen_rand(sfmt_t *ctx);
+const char *get_idstring(void);
+int get_min_array_size32(void);
+int get_min_array_size64(void);
+
+#ifndef JEMALLOC_ENABLE_INLINE
+double to_real1(uint32_t v);
+double genrand_real1(sfmt_t *ctx);
+double to_real2(uint32_t v);
+double genrand_real2(sfmt_t *ctx);
+double to_real3(uint32_t v);
+double genrand_real3(sfmt_t *ctx);
+double to_res53(uint64_t v);
+double to_res53_mix(uint32_t x, uint32_t y);
+double genrand_res53(sfmt_t *ctx);
+double genrand_res53_mix(sfmt_t *ctx);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(SFMT_C_))
+/* These real versions are due to Isaku Wada */
+/** generates a random number on [0,1]-real-interval */
+JEMALLOC_INLINE double to_real1(uint32_t v)
+{
+    return v * (1.0/4294967295.0);
+    /* divided by 2^32-1 */
+}
+
+/** generates a random number on [0,1]-real-interval */
+JEMALLOC_INLINE double genrand_real1(sfmt_t *ctx)
+{
+    return to_real1(gen_rand32(ctx));
+}
+
+/** generates a random number on [0,1)-real-interval */
+JEMALLOC_INLINE double to_real2(uint32_t v)
+{
+    return v * (1.0/4294967296.0);
+    /* divided by 2^32 */
+}
+
+/** generates a random number on [0,1)-real-interval */
+JEMALLOC_INLINE double genrand_real2(sfmt_t *ctx)
+{
+    return to_real2(gen_rand32(ctx));
+}
+
+/** generates a random number on (0,1)-real-interval */
+JEMALLOC_INLINE double to_real3(uint32_t v)
+{
+    return (((double)v) + 0.5)*(1.0/4294967296.0);
+    /* divided by 2^32 */
+}
+
+/** generates a random number on (0,1)-real-interval */
+JEMALLOC_INLINE double genrand_real3(sfmt_t *ctx)
+{
+    return to_real3(gen_rand32(ctx));
+}
+/** These real versions are due to Isaku Wada */
+
+/** generates a random number on [0,1) with 53-bit resolution*/
+JEMALLOC_INLINE double to_res53(uint64_t v)
+{
+    return v * (1.0/18446744073709551616.0L);
+}
+
+/** generates a random number on [0,1) with 53-bit resolution from two
+ * 32-bit integers */
+JEMALLOC_INLINE double to_res53_mix(uint32_t x, uint32_t y)
+{
+    return to_res53(x | ((uint64_t)y << 32));
+}
+
+/** generates a random number on [0,1) with 53-bit resolution
+ */
+JEMALLOC_INLINE double genrand_res53(sfmt_t *ctx)
+{
+    return to_res53(gen_rand64(ctx));
+}
+
+/** generates a random number on [0,1) with 53-bit resolution
+    using 32-bit integers.
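+    (Editor's note: as the body below shows, this variant draws two 32-bit
+    values from gen_rand32() and merges them via to_res53_mix(), for
+    platforms where 64-bit generation is unavailable or slower.)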
+ */
+JEMALLOC_INLINE double genrand_res53_mix(sfmt_t *ctx)
+{
+    uint32_t x, y;
+
+    x = gen_rand32(ctx);
+    y = gen_rand32(ctx);
+    return to_res53_mix(x, y);
+}
+#endif
+#endif
diff --git a/test/include/test/jemalloc_test.h.in b/test/include/test/jemalloc_test.h.in
new file mode 100644
index 00000000..730a55db
--- /dev/null
+++ b/test/include/test/jemalloc_test.h.in
@@ -0,0 +1,141 @@
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <stdbool.h>
+#include <string.h>
+#include <inttypes.h>
+
+#ifdef _WIN32
+# include <windows.h>
+#else
+# include <pthread.h>
+#endif
+
+/******************************************************************************/
+/*
+ * Define always-enabled assertion macros, so that test assertions execute even
+ * if assertions are disabled in the library code.  These definitions must
+ * exist prior to including "jemalloc/internal/util.h".
+ */
+#define assert(e) do {						\
+	if (!(e)) {						\
+		malloc_printf(					\
+		    "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \
+		    __FILE__, __LINE__, #e);			\
+		abort();					\
+	}							\
+} while (0)
+
+#define not_reached() do {					\
+	malloc_printf(						\
+	    "<jemalloc>: %s:%d: Unreachable code reached\n",	\
+	    __FILE__, __LINE__);				\
+	abort();						\
+} while (0)
+
+#define not_implemented() do {					\
+	malloc_printf("<jemalloc>: %s:%d: Not implemented\n",	\
+	    __FILE__, __LINE__);				\
+	abort();						\
+} while (0)
+
+#define assert_not_implemented(e) do {				\
+	if (!(e))						\
+		not_implemented();				\
+} while (0)
+
+#include "test/jemalloc_test_defs.h"
+
+#ifdef JEMALLOC_OSSPIN
+# include <libkern/OSAtomic.h>
+#endif
+
+#if defined(HAVE_ALTIVEC) && !defined(__APPLE__)
+# include <altivec.h>
+#endif
+#ifdef HAVE_SSE2
+# include <emmintrin.h>
+#endif
+
+/******************************************************************************/
+/*
+ * For unit tests, expose all public and private interfaces.
+ */
+#ifdef JEMALLOC_UNIT_TEST
+# define JEMALLOC_JET
+# define JEMALLOC_MANGLE
+# include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/*
+ * For integration tests, expose the public jemalloc interfaces, but only
+ * expose the minimum necessary internal utility code (to avoid re-implementing
+ * essentially identical code within the test infrastructure).
+ */
+#elif defined(JEMALLOC_INTEGRATION_TEST)
+# define JEMALLOC_MANGLE
+# include "jemalloc/jemalloc@install_suffix@.h"
+# include "jemalloc/internal/jemalloc_internal_defs.h"
+# include "jemalloc/internal/jemalloc_internal_macros.h"
+
+# define JEMALLOC_N(n) @private_namespace@##n
+# include "jemalloc/internal/private_namespace.h"
+
+# define JEMALLOC_H_TYPES
+# define JEMALLOC_H_STRUCTS
+# define JEMALLOC_H_EXTERNS
+# define JEMALLOC_H_INLINES
+# include "jemalloc/internal/util.h"
+# include "jemalloc/internal/qr.h"
+# include "jemalloc/internal/ql.h"
+# undef JEMALLOC_H_TYPES
+# undef JEMALLOC_H_STRUCTS
+# undef JEMALLOC_H_EXTERNS
+# undef JEMALLOC_H_INLINES
+
+/******************************************************************************/
+/*
+ * For stress tests, expose the public jemalloc interfaces with name mangling
+ * so that they can be tested as e.g. malloc() and free().  Also expose the
+ * public jemalloc interfaces with jet_ prefixes, so that stress tests can use
+ * a separate allocator for their internal data structures.
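+ *
+ * As a sketch of the intended effect (editor's note, assuming the default
+ * mangling): a stress test exercises malloc()/free() from the library under
+ * test, while calls written against the jet_-prefixed copies, e.g.
+ * jet_malloc()/jet_free(), are serviced by a separately built allocator, so
+ * test-internal bookkeeping does not perturb the allocator being measured.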
+ */ +#elif defined(JEMALLOC_STRESS_TEST) +# include "jemalloc/jemalloc@install_suffix@.h" + +# include "jemalloc/jemalloc_protos_jet.h" + +# define JEMALLOC_JET +# include "jemalloc/internal/jemalloc_internal.h" +# include "jemalloc/internal/public_unnamespace.h" +# undef JEMALLOC_JET + +# include "jemalloc/jemalloc_rename.h" +# define JEMALLOC_MANGLE +# ifdef JEMALLOC_STRESS_TESTLIB +# include "jemalloc/jemalloc_mangle_jet.h" +# else +# include "jemalloc/jemalloc_mangle.h" +# endif + +/******************************************************************************/ +/* + * This header does dangerous things, the effects of which only test code + * should be subject to. + */ +#else +# error "This header cannot be included outside a testing context" +#endif + +/******************************************************************************/ +/* + * Common test utilities. + */ +#include "test/math.h" +#include "test/mtx.h" +#include "test/mq.h" +#include "test/test.h" +#include "test/thd.h" +#define MEXP 19937 +#include "test/SFMT.h" diff --git a/test/include/test/jemalloc_test_defs.h.in b/test/include/test/jemalloc_test_defs.h.in new file mode 100644 index 00000000..18a9773d --- /dev/null +++ b/test/include/test/jemalloc_test_defs.h.in @@ -0,0 +1,5 @@ +#include "jemalloc/internal/jemalloc_internal_defs.h" + +/* For use by SFMT. */ +#undef HAVE_SSE2 +#undef HAVE_ALTIVEC diff --git a/test/include/test/math.h b/test/include/test/math.h new file mode 100644 index 00000000..a862ed7d --- /dev/null +++ b/test/include/test/math.h @@ -0,0 +1,311 @@ +#ifndef JEMALLOC_ENABLE_INLINE +double ln_gamma(double x); +double i_gamma(double x, double p, double ln_gamma_p); +double pt_norm(double p); +double pt_chi2(double p, double df, double ln_gamma_df_2); +double pt_gamma(double p, double shape, double scale, double ln_gamma_shape); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(MATH_C_)) +/* + * Compute the natural log of Gamma(x), accurate to 10 decimal places. + * + * This implementation is based on: + * + * Pike, M.C., I.D. Hill (1966) Algorithm 291: Logarithm of Gamma function + * [S14]. Communications of the ACM 9(9):684. + */ +JEMALLOC_INLINE double +ln_gamma(double x) +{ + double f, z; + + assert(x > 0.0); + + if (x < 7.0) { + f = 1.0; + z = x; + while (z < 7.0) { + f *= z; + z += 1.0; + } + x = z; + f = -log(f); + } else + f = 0.0; + + z = 1.0 / (x * x); + + return (f + (x-0.5) * log(x) - x + 0.918938533204673 + + (((-0.000595238095238 * z + 0.000793650793651) * z - + 0.002777777777778) * z + 0.083333333333333) / x); +} + +/* + * Compute the incomplete Gamma ratio for [0..x], where p is the shape + * parameter, and ln_gamma_p is ln_gamma(p). + * + * This implementation is based on: + * + * Bhattacharjee, G.P. (1970) Algorithm AS 32: The incomplete Gamma integral. + * Applied Statistics 19:285-287. + */ +JEMALLOC_INLINE double +i_gamma(double x, double p, double ln_gamma_p) +{ + double acu, factor, oflo, gin, term, rn, a, b, an, dif; + double pn[6]; + unsigned i; + + assert(p > 0.0); + assert(x >= 0.0); + + if (x == 0.0) + return (0.0); + + acu = 1.0e-10; + oflo = 1.0e30; + gin = 0.0; + factor = exp(p * log(x) - x - ln_gamma_p); + + if (x <= 1.0 || x < p) { + /* Calculation by series expansion. */ + gin = 1.0; + term = 1.0; + rn = p; + + while (true) { + rn += 1.0; + term *= x / rn; + gin += term; + if (term <= acu) { + gin *= factor / p; + return (gin); + } + } + } else { + /* Calculation by continued fraction. 
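+		 * (Editor's note: the loop below evaluates the standard
+		 * continued-fraction expansion of the incomplete gamma
+		 * function; pn[] holds successive partial numerators and
+		 * denominators, rescaled by oflo whenever they threaten to
+		 * overflow.)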
*/ + a = 1.0 - p; + b = a + x + 1.0; + term = 0.0; + pn[0] = 1.0; + pn[1] = x; + pn[2] = x + 1.0; + pn[3] = x * b; + gin = pn[2] / pn[3]; + + while (true) { + a += 1.0; + b += 2.0; + term += 1.0; + an = a * term; + for (i = 0; i < 2; i++) + pn[i+4] = b * pn[i+2] - an * pn[i]; + if (pn[5] != 0.0) { + rn = pn[4] / pn[5]; + dif = fabs(gin - rn); + if (dif <= acu && dif <= acu * rn) { + gin = 1.0 - factor * gin; + return (gin); + } + gin = rn; + } + for (i = 0; i < 4; i++) + pn[i] = pn[i+2]; + + if (fabs(pn[4]) >= oflo) { + for (i = 0; i < 4; i++) + pn[i] /= oflo; + } + } + } +} + +/* + * Given a value p in [0..1] of the lower tail area of the normal distribution, + * compute the limit on the definite integral from [-inf..z] that satisfies p, + * accurate to 16 decimal places. + * + * This implementation is based on: + * + * Wichura, M.J. (1988) Algorithm AS 241: The percentage points of the normal + * distribution. Applied Statistics 37(3):477-484. + */ +JEMALLOC_INLINE double +pt_norm(double p) +{ + double q, r, ret; + + assert(p > 0.0 && p < 1.0); + + q = p - 0.5; + if (fabs(q) <= 0.425) { + /* p close to 1/2. */ + r = 0.180625 - q * q; + return (q * (((((((2.5090809287301226727e3 * r + + 3.3430575583588128105e4) * r + 6.7265770927008700853e4) * r + + 4.5921953931549871457e4) * r + 1.3731693765509461125e4) * + r + 1.9715909503065514427e3) * r + 1.3314166789178437745e2) + * r + 3.3871328727963666080e0) / + (((((((5.2264952788528545610e3 * r + + 2.8729085735721942674e4) * r + 3.9307895800092710610e4) * r + + 2.1213794301586595867e4) * r + 5.3941960214247511077e3) * + r + 6.8718700749205790830e2) * r + 4.2313330701600911252e1) + * r + 1.0)); + } else { + if (q < 0.0) + r = p; + else + r = 1.0 - p; + assert(r > 0.0); + + r = sqrt(-log(r)); + if (r <= 5.0) { + /* p neither close to 1/2 nor 0 or 1. */ + r -= 1.6; + ret = ((((((((7.74545014278341407640e-4 * r + + 2.27238449892691845833e-2) * r + + 2.41780725177450611770e-1) * r + + 1.27045825245236838258e0) * r + + 3.64784832476320460504e0) * r + + 5.76949722146069140550e0) * r + + 4.63033784615654529590e0) * r + + 1.42343711074968357734e0) / + (((((((1.05075007164441684324e-9 * r + + 5.47593808499534494600e-4) * r + + 1.51986665636164571966e-2) + * r + 1.48103976427480074590e-1) * r + + 6.89767334985100004550e-1) * r + + 1.67638483018380384940e0) * r + + 2.05319162663775882187e0) * r + 1.0)); + } else { + /* p near 0 or 1. */ + r -= 5.0; + ret = ((((((((2.01033439929228813265e-7 * r + + 2.71155556874348757815e-5) * r + + 1.24266094738807843860e-3) * r + + 2.65321895265761230930e-2) * r + + 2.96560571828504891230e-1) * r + + 1.78482653991729133580e0) * r + + 5.46378491116411436990e0) * r + + 6.65790464350110377720e0) / + (((((((2.04426310338993978564e-15 * r + + 1.42151175831644588870e-7) * r + + 1.84631831751005468180e-5) * r + + 7.86869131145613259100e-4) * r + + 1.48753612908506148525e-2) * r + + 1.36929880922735805310e-1) * r + + 5.99832206555887937690e-1) + * r + 1.0)); + } + if (q < 0.0) + ret = -ret; + return (ret); + } +} + +/* + * Given a value p in [0..1] of the lower tail area of the Chi^2 distribution + * with df degrees of freedom, where ln_gamma_df_2 is ln_gamma(df/2.0), compute + * the upper limit on the definite integral from [0..z] that satisfies p, + * accurate to 12 decimal places. + * + * This implementation is based on: + * + * Best, D.J., D.E. Roberts (1975) Algorithm AS 91: The percentage points of + * the Chi^2 distribution. Applied Statistics 24(3):385-388. + * + * Shea, B.L. 
(1991) Algorithm AS R85: A remark on AS 91: The percentage
+ * points of the Chi^2 distribution.  Applied Statistics 40(1):233-235.
+ */
+JEMALLOC_INLINE double
+pt_chi2(double p, double df, double ln_gamma_df_2)
+{
+	double e, aa, xx, c, ch, a, q, p1, p2, t, x, b, s1, s2, s3, s4, s5, s6;
+	unsigned i;
+
+	assert(p >= 0.0 && p < 1.0);
+	assert(df > 0.0);
+
+	e = 5.0e-7;
+	aa = 0.6931471805;
+
+	xx = 0.5 * df;
+	c = xx - 1.0;
+
+	if (df < -1.24 * log(p)) {
+		/* Starting approximation for small Chi^2. */
+		ch = pow(p * xx * exp(ln_gamma_df_2 + xx * aa), 1.0 / xx);
+		if (ch - e < 0.0)
+			return (ch);
+	} else {
+		if (df > 0.32) {
+			x = pt_norm(p);
+			/*
+			 * Starting approximation using Wilson and Hilferty
+			 * estimate.
+			 */
+			p1 = 0.222222 / df;
+			ch = df * pow(x * sqrt(p1) + 1.0 - p1, 3.0);
+			/* Starting approximation for p tending to 1. */
+			if (ch > 2.2 * df + 6.0) {
+				ch = -2.0 * (log(1.0 - p) - c * log(0.5 * ch) +
+				    ln_gamma_df_2);
+			}
+		} else {
+			ch = 0.4;
+			a = log(1.0 - p);
+			while (true) {
+				q = ch;
+				p1 = 1.0 + ch * (4.67 + ch);
+				p2 = ch * (6.73 + ch * (6.66 + ch));
+				t = -0.5 + (4.67 + 2.0 * ch) / p1 - (6.73 + ch
+				    * (13.32 + 3.0 * ch)) / p2;
+				ch -= (1.0 - exp(a + ln_gamma_df_2 + 0.5 * ch +
+				    c * aa) * p2 / p1) / t;
+				if (fabs(q / ch - 1.0) - 0.01 <= 0.0)
+					break;
+			}
+		}
+	}
+
+	for (i = 0; i < 20; i++) {
+		/* Calculation of seven-term Taylor series. */
+		q = ch;
+		p1 = 0.5 * ch;
+		if (p1 < 0.0)
+			return (-1.0);
+		p2 = p - i_gamma(p1, xx, ln_gamma_df_2);
+		t = p2 * exp(xx * aa + ln_gamma_df_2 + p1 - c * log(ch));
+		b = t / ch;
+		a = 0.5 * t - b * c;
+		s1 = (210.0 + a * (140.0 + a * (105.0 + a * (84.0 + a * (70.0 +
+		    60.0 * a))))) / 420.0;
+		s2 = (420.0 + a * (735.0 + a * (966.0 + a * (1141.0 + 1278.0 *
+		    a)))) / 2520.0;
+		s3 = (210.0 + a * (462.0 + a * (707.0 + 932.0 * a))) / 2520.0;
+		s4 = (252.0 + a * (672.0 + 1182.0 * a) + c * (294.0 + a *
+		    (889.0 + 1740.0 * a))) / 5040.0;
+		s5 = (84.0 + 264.0 * a + c * (175.0 + 606.0 * a)) / 2520.0;
+		s6 = (120.0 + c * (346.0 + 127.0 * c)) / 5040.0;
+		ch += t * (1.0 + 0.5 * t * s1 - b * c * (s1 - b * (s2 - b * (s3
+		    - b * (s4 - b * (s5 - b * s6))))));
+		if (fabs(q / ch - 1.0) <= e)
+			break;
+	}
+
+	return (ch);
+}
+
+/*
+ * Given a value p in [0..1] and Gamma distribution shape and scale parameters,
+ * compute the upper limit on the definite integral from [0..z] that satisfies
+ * p.
+ */
+JEMALLOC_INLINE double
+pt_gamma(double p, double shape, double scale, double ln_gamma_shape)
+{
+
+	return (pt_chi2(p, shape * 2.0, ln_gamma_shape) * 0.5 * scale);
+}
+#endif
diff --git a/test/include/test/mq.h b/test/include/test/mq.h
new file mode 100644
index 00000000..11188653
--- /dev/null
+++ b/test/include/test/mq.h
@@ -0,0 +1,110 @@
+/*
+ * Simple templated message queue implementation that relies on only mutexes for
+ * synchronization (which reduces portability issues).  Given the following
+ * setup:
+ *
+ *   typedef struct mq_msg_s mq_msg_t;
+ *   struct mq_msg_s {
+ *           mq_msg(mq_msg_t) link;
+ *           [message data]
+ *   };
+ *   mq_gen(, mq_, mq_t, mq_msg_t, link)
+ *
+ * The API is as follows:
+ *
+ *   bool mq_init(mq_t *mq);
+ *   void mq_fini(mq_t *mq);
+ *   unsigned mq_count(mq_t *mq);
+ *   mq_msg_t *mq_tryget(mq_t *mq);
+ *   mq_msg_t *mq_get(mq_t *mq);
+ *   void mq_put(mq_t *mq, mq_msg_t *msg);
+ *
+ * The message queue linkage embedded in each message is to be treated as
+ * externally opaque (no need to initialize or clean up externally).  mq_fini()
+ * does not perform any cleanup of messages, since it knows nothing of their
+ * payloads.
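+ *
+ * A minimal usage sketch under the setup above (editor's addition; every name
+ * comes from that setup, none is additional API):
+ *
+ *   mq_t mq;
+ *   mq_msg_t *msg;
+ *
+ *   if (mq_init(&mq))
+ *           [handle error];
+ *   mq_put(&mq, [message to enqueue]);
+ *   msg = mq_get(&mq);  [blocks, with exponential backoff, until a
+ *                        message arrives]
+ *   mq_fini(&mq);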
+ */ +#define mq_msg(a_mq_msg_type) ql_elm(a_mq_msg_type) + +#define mq_gen(a_attr, a_prefix, a_mq_type, a_mq_msg_type, a_field) \ +typedef struct { \ + mtx_t lock; \ + ql_head(a_mq_msg_type) msgs; \ + unsigned count; \ +} a_mq_type; \ +a_attr bool \ +a_prefix##init(a_mq_type *mq) { \ + \ + if (mtx_init(&mq->lock)) \ + return (true); \ + ql_new(&mq->msgs); \ + mq->count = 0; \ + return (false); \ +} \ +a_attr void \ +a_prefix##fini(a_mq_type *mq) \ +{ \ + \ + mtx_fini(&mq->lock); \ +} \ +a_attr unsigned \ +a_prefix##count(a_mq_type *mq) \ +{ \ + unsigned count; \ + \ + mtx_lock(&mq->lock); \ + count = mq->count; \ + mtx_unlock(&mq->lock); \ + return (count); \ +} \ +a_attr a_mq_msg_type * \ +a_prefix##tryget(a_mq_type *mq) \ +{ \ + a_mq_msg_type *msg; \ + \ + mtx_lock(&mq->lock); \ + msg = ql_first(&mq->msgs); \ + if (msg != NULL) { \ + ql_head_remove(&mq->msgs, a_mq_msg_type, a_field); \ + mq->count--; \ + } \ + mtx_unlock(&mq->lock); \ + return (msg); \ +} \ +a_attr a_mq_msg_type * \ +a_prefix##get(a_mq_type *mq) \ +{ \ + a_mq_msg_type *msg; \ + struct timespec timeout; \ + \ + msg = a_prefix##tryget(mq); \ + if (msg != NULL) \ + return (msg); \ + \ + timeout.tv_sec = 0; \ + timeout.tv_nsec = 1; \ + while (true) { \ + nanosleep(&timeout, NULL); \ + msg = a_prefix##tryget(mq); \ + if (msg != NULL) \ + return (msg); \ + if (timeout.tv_sec == 0) { \ + /* Double sleep time, up to max 1 second. */ \ + timeout.tv_nsec <<= 1; \ + if (timeout.tv_nsec >= 1000*1000*1000) { \ + timeout.tv_sec = 1; \ + timeout.tv_nsec = 0; \ + } \ + } \ + } \ +} \ +a_attr void \ +a_prefix##put(a_mq_type *mq, a_mq_msg_type *msg) \ +{ \ + \ + mtx_lock(&mq->lock); \ + ql_elm_new(msg, a_field); \ + ql_tail_insert(&mq->msgs, msg, a_field); \ + mq->count++; \ + mtx_unlock(&mq->lock); \ +} diff --git a/test/include/test/mtx.h b/test/include/test/mtx.h new file mode 100644 index 00000000..bbe822f5 --- /dev/null +++ b/test/include/test/mtx.h @@ -0,0 +1,21 @@ +/* + * mtx is a slightly simplified version of malloc_mutex. This code duplication + * is unfortunate, but there are allocator bootstrapping considerations that + * would leak into the test infrastructure if malloc_mutex were used directly + * in tests. + */ + +typedef struct { +#ifdef _WIN32 + CRITICAL_SECTION lock; +#elif (defined(JEMALLOC_OSSPIN)) + OSSpinLock lock; +#else + pthread_mutex_t lock; +#endif +} mtx_t; + +bool mtx_init(mtx_t *mtx); +void mtx_fini(mtx_t *mtx); +void mtx_lock(mtx_t *mtx); +void mtx_unlock(mtx_t *mtx); diff --git a/test/include/test/test.h b/test/include/test/test.h new file mode 100644 index 00000000..8cc97af5 --- /dev/null +++ b/test/include/test/test.h @@ -0,0 +1,302 @@ +#define assert_cmp(t, a, b, cmp, neg_cmp, pri, fmt...) do { \ + t a_ = (a); \ + t b_ = (b); \ + if (!(a_ cmp b_)) { \ + p_test_fail( \ + "%s:%s:%d: Failed assertion: " \ + "(%s) "#cmp" (%s) --> " \ + "%"pri" "#neg_cmp" %"pri": ", \ + __func__, __FILE__, __LINE__, \ + #a, #b, a_, b_, fmt); \ + } \ +} while (0) + +#define assert_ptr_eq(a, b, fmt...) assert_cmp(void *, a, b, ==, \ + !=, "p", fmt) +#define assert_ptr_ne(a, b, fmt...) assert_cmp(void *, a, b, !=, \ + ==, "p", fmt) +#define assert_ptr_null(a, fmt...) assert_cmp(void *, a, NULL, ==, \ + !=, "p", fmt) +#define assert_ptr_not_null(a, fmt...) assert_cmp(void *, a, NULL, !=, \ + ==, "p", fmt) + +#define assert_c_eq(a, b, fmt...) assert_cmp(char, a, b, ==, !=, "c", fmt) +#define assert_c_ne(a, b, fmt...) assert_cmp(char, a, b, !=, ==, "c", fmt) +#define assert_c_lt(a, b, fmt...) 
assert_cmp(char, a, b, <, >=, "c", fmt) +#define assert_c_le(a, b, fmt...) assert_cmp(char, a, b, <=, >, "c", fmt) +#define assert_c_ge(a, b, fmt...) assert_cmp(char, a, b, >=, <, "c", fmt) +#define assert_c_gt(a, b, fmt...) assert_cmp(char, a, b, >, <=, "c", fmt) + +#define assert_x_eq(a, b, fmt...) assert_cmp(int, a, b, ==, !=, "#x", fmt) +#define assert_x_ne(a, b, fmt...) assert_cmp(int, a, b, !=, ==, "#x", fmt) +#define assert_x_lt(a, b, fmt...) assert_cmp(int, a, b, <, >=, "#x", fmt) +#define assert_x_le(a, b, fmt...) assert_cmp(int, a, b, <=, >, "#x", fmt) +#define assert_x_ge(a, b, fmt...) assert_cmp(int, a, b, >=, <, "#x", fmt) +#define assert_x_gt(a, b, fmt...) assert_cmp(int, a, b, >, <=, "#x", fmt) + +#define assert_d_eq(a, b, fmt...) assert_cmp(int, a, b, ==, !=, "d", fmt) +#define assert_d_ne(a, b, fmt...) assert_cmp(int, a, b, !=, ==, "d", fmt) +#define assert_d_lt(a, b, fmt...) assert_cmp(int, a, b, <, >=, "d", fmt) +#define assert_d_le(a, b, fmt...) assert_cmp(int, a, b, <=, >, "d", fmt) +#define assert_d_ge(a, b, fmt...) assert_cmp(int, a, b, >=, <, "d", fmt) +#define assert_d_gt(a, b, fmt...) assert_cmp(int, a, b, >, <=, "d", fmt) + +#define assert_u_eq(a, b, fmt...) assert_cmp(int, a, b, ==, !=, "u", fmt) +#define assert_u_ne(a, b, fmt...) assert_cmp(int, a, b, !=, ==, "u", fmt) +#define assert_u_lt(a, b, fmt...) assert_cmp(int, a, b, <, >=, "u", fmt) +#define assert_u_le(a, b, fmt...) assert_cmp(int, a, b, <=, >, "u", fmt) +#define assert_u_ge(a, b, fmt...) assert_cmp(int, a, b, >=, <, "u", fmt) +#define assert_u_gt(a, b, fmt...) assert_cmp(int, a, b, >, <=, "u", fmt) + +#define assert_ld_eq(a, b, fmt...) assert_cmp(long, a, b, ==, \ + !=, "ld", fmt) +#define assert_ld_ne(a, b, fmt...) assert_cmp(long, a, b, !=, \ + ==, "ld", fmt) +#define assert_ld_lt(a, b, fmt...) assert_cmp(long, a, b, <, \ + >=, "ld", fmt) +#define assert_ld_le(a, b, fmt...) assert_cmp(long, a, b, <=, \ + >, "ld", fmt) +#define assert_ld_ge(a, b, fmt...) assert_cmp(long, a, b, >=, \ + <, "ld", fmt) +#define assert_ld_gt(a, b, fmt...) assert_cmp(long, a, b, >, \ + <=, "ld", fmt) + +#define assert_lu_eq(a, b, fmt...) assert_cmp(unsigned long, \ + a, b, ==, !=, "lu", fmt) +#define assert_lu_ne(a, b, fmt...) assert_cmp(unsigned long, \ + a, b, !=, ==, "lu", fmt) +#define assert_lu_lt(a, b, fmt...) assert_cmp(unsigned long, \ + a, b, <, >=, "lu", fmt) +#define assert_lu_le(a, b, fmt...) assert_cmp(unsigned long, \ + a, b, <=, >, "lu", fmt) +#define assert_lu_ge(a, b, fmt...) assert_cmp(unsigned long, \ + a, b, >=, <, "lu", fmt) +#define assert_lu_gt(a, b, fmt...) assert_cmp(unsigned long, \ + a, b, >, <=, "lu", fmt) + +#define assert_qd_eq(a, b, fmt...) assert_cmp(long long, a, b, ==, \ + !=, "qd", fmt) +#define assert_qd_ne(a, b, fmt...) assert_cmp(long long, a, b, !=, \ + ==, "qd", fmt) +#define assert_qd_lt(a, b, fmt...) assert_cmp(long long, a, b, <, \ + >=, "qd", fmt) +#define assert_qd_le(a, b, fmt...) assert_cmp(long long, a, b, <=, \ + >, "qd", fmt) +#define assert_qd_ge(a, b, fmt...) assert_cmp(long long, a, b, >=, \ + <, "qd", fmt) +#define assert_qd_gt(a, b, fmt...) assert_cmp(long long, a, b, >, \ + <=, "qd", fmt) + +#define assert_qu_eq(a, b, fmt...) assert_cmp(unsigned long long, \ + a, b, ==, !=, "qu", fmt) +#define assert_qu_ne(a, b, fmt...) assert_cmp(unsigned long long, \ + a, b, !=, ==, "qu", fmt) +#define assert_qu_lt(a, b, fmt...) assert_cmp(unsigned long long, \ + a, b, <, >=, "qu", fmt) +#define assert_qu_le(a, b, fmt...) 
assert_cmp(unsigned long long, \ + a, b, <=, >, "qu", fmt) +#define assert_qu_ge(a, b, fmt...) assert_cmp(unsigned long long, \ + a, b, >=, <, "qu", fmt) +#define assert_qu_gt(a, b, fmt...) assert_cmp(unsigned long long, \ + a, b, >, <=, "qu", fmt) + +#define assert_jd_eq(a, b, fmt...) assert_cmp(intmax_t, a, b, ==, \ + !=, "jd", fmt) +#define assert_jd_ne(a, b, fmt...) assert_cmp(intmax_t, a, b, !=, \ + ==, "jd", fmt) +#define assert_jd_lt(a, b, fmt...) assert_cmp(intmax_t, a, b, <, \ + >=, "jd", fmt) +#define assert_jd_le(a, b, fmt...) assert_cmp(intmax_t, a, b, <=, \ + >, "jd", fmt) +#define assert_jd_ge(a, b, fmt...) assert_cmp(intmax_t, a, b, >=, \ + <, "jd", fmt) +#define assert_jd_gt(a, b, fmt...) assert_cmp(intmax_t, a, b, >, \ + <=, "jd", fmt) + +#define assert_ju_eq(a, b, fmt...) assert_cmp(uintmax_t, a, b, ==, \ + !=, "ju", fmt) +#define assert_ju_ne(a, b, fmt...) assert_cmp(uintmax_t, a, b, !=, \ + ==, "ju", fmt) +#define assert_ju_lt(a, b, fmt...) assert_cmp(uintmax_t, a, b, <, \ + >=, "ju", fmt) +#define assert_ju_le(a, b, fmt...) assert_cmp(uintmax_t, a, b, <=, \ + >, "ju", fmt) +#define assert_ju_ge(a, b, fmt...) assert_cmp(uintmax_t, a, b, >=, \ + <, "ju", fmt) +#define assert_ju_gt(a, b, fmt...) assert_cmp(uintmax_t, a, b, >, \ + <=, "ju", fmt) + +#define assert_zd_eq(a, b, fmt...) assert_cmp(ssize_t, a, b, ==, \ + !=, "zd", fmt) +#define assert_zd_ne(a, b, fmt...) assert_cmp(ssize_t, a, b, !=, \ + ==, "zd", fmt) +#define assert_zd_lt(a, b, fmt...) assert_cmp(ssize_t, a, b, <, \ + >=, "zd", fmt) +#define assert_zd_le(a, b, fmt...) assert_cmp(ssize_t, a, b, <=, \ + >, "zd", fmt) +#define assert_zd_ge(a, b, fmt...) assert_cmp(ssize_t, a, b, >=, \ + <, "zd", fmt) +#define assert_zd_gt(a, b, fmt...) assert_cmp(ssize_t, a, b, >, \ + <=, "zd", fmt) + +#define assert_zu_eq(a, b, fmt...) assert_cmp(size_t, a, b, ==, \ + !=, "zu", fmt) +#define assert_zu_ne(a, b, fmt...) assert_cmp(size_t, a, b, !=, \ + ==, "zu", fmt) +#define assert_zu_lt(a, b, fmt...) assert_cmp(size_t, a, b, <, \ + >=, "zu", fmt) +#define assert_zu_le(a, b, fmt...) assert_cmp(size_t, a, b, <=, \ + >, "zu", fmt) +#define assert_zu_ge(a, b, fmt...) assert_cmp(size_t, a, b, >=, \ + <, "zu", fmt) +#define assert_zu_gt(a, b, fmt...) assert_cmp(size_t, a, b, >, \ + <=, "zu", fmt) + +#define assert_d32_eq(a, b, fmt...) assert_cmp(int32_t, a, b, ==, \ + !=, PRId32, fmt) +#define assert_d32_ne(a, b, fmt...) assert_cmp(int32_t, a, b, !=, \ + ==, PRId32, fmt) +#define assert_d32_lt(a, b, fmt...) assert_cmp(int32_t, a, b, <, \ + >=, PRId32, fmt) +#define assert_d32_le(a, b, fmt...) assert_cmp(int32_t, a, b, <=, \ + >, PRId32, fmt) +#define assert_d32_ge(a, b, fmt...) assert_cmp(int32_t, a, b, >=, \ + <, PRId32, fmt) +#define assert_d32_gt(a, b, fmt...) assert_cmp(int32_t, a, b, >, \ + <=, PRId32, fmt) + +#define assert_u32_eq(a, b, fmt...) assert_cmp(uint32_t, a, b, ==, \ + !=, PRIu32, fmt) +#define assert_u32_ne(a, b, fmt...) assert_cmp(uint32_t, a, b, !=, \ + ==, PRIu32, fmt) +#define assert_u32_lt(a, b, fmt...) assert_cmp(uint32_t, a, b, <, \ + >=, PRIu32, fmt) +#define assert_u32_le(a, b, fmt...) assert_cmp(uint32_t, a, b, <=, \ + >, PRIu32, fmt) +#define assert_u32_ge(a, b, fmt...) assert_cmp(uint32_t, a, b, >=, \ + <, PRIu32, fmt) +#define assert_u32_gt(a, b, fmt...) assert_cmp(uint32_t, a, b, >, \ + <=, PRIu32, fmt) + +#define assert_d64_eq(a, b, fmt...) assert_cmp(int64_t, a, b, ==, \ + !=, PRId64, fmt) +#define assert_d64_ne(a, b, fmt...) 
assert_cmp(int64_t, a, b, !=, \ + ==, PRId64, fmt) +#define assert_d64_lt(a, b, fmt...) assert_cmp(int64_t, a, b, <, \ + >=, PRId64, fmt) +#define assert_d64_le(a, b, fmt...) assert_cmp(int64_t, a, b, <=, \ + >, PRId64, fmt) +#define assert_d64_ge(a, b, fmt...) assert_cmp(int64_t, a, b, >=, \ + <, PRId64, fmt) +#define assert_d64_gt(a, b, fmt...) assert_cmp(int64_t, a, b, >, \ + <=, PRId64, fmt) + +#define assert_u64_eq(a, b, fmt...) assert_cmp(uint64_t, a, b, ==, \ + !=, PRIu64, fmt) +#define assert_u64_ne(a, b, fmt...) assert_cmp(uint64_t, a, b, !=, \ + ==, PRIu64, fmt) +#define assert_u64_lt(a, b, fmt...) assert_cmp(uint64_t, a, b, <, \ + >=, PRIu64, fmt) +#define assert_u64_le(a, b, fmt...) assert_cmp(uint64_t, a, b, <=, \ + >, PRIu64, fmt) +#define assert_u64_ge(a, b, fmt...) assert_cmp(uint64_t, a, b, >=, \ + <, PRIu64, fmt) +#define assert_u64_gt(a, b, fmt...) assert_cmp(uint64_t, a, b, >, \ + <=, PRIu64, fmt) + +#define assert_b_eq(a, b, fmt...) do { \ + bool a_ = (a); \ + bool b_ = (b); \ + if (!(a_ == b_)) { \ + p_test_fail( \ + "%s:%s:%d: Failed assertion: " \ + "(%s) == (%s) --> %s != %s: ", \ + __func__, __FILE__, __LINE__, \ + #a, #b, a_ ? "true" : "false", \ + b_ ? "true" : "false", fmt); \ + } \ +} while (0) +#define assert_b_ne(a, b, fmt...) do { \ + bool a_ = (a); \ + bool b_ = (b); \ + if (!(a_ != b_)) { \ + p_test_fail( \ + "%s:%s:%d: Failed assertion: " \ + "(%s) != (%s) --> %s == %s: ", \ + __func__, __FILE__, __LINE__, \ + #a, #b, a_ ? "true" : "false", \ + b_ ? "true" : "false", fmt); \ + } \ +} while (0) +#define assert_true(a, fmt...) assert_b_eq(a, true, fmt) +#define assert_false(a, fmt...) assert_b_eq(a, false, fmt) + +#define assert_str_eq(a, b, fmt...) do { \ + if (strcmp((a), (b))) { \ + p_test_fail( \ + "%s:%s:%d: Failed assertion: " \ + "(%s) same as (%s) --> " \ + "\"%s\" differs from \"%s\": ", \ + __func__, __FILE__, __LINE__, #a, #b, a, b, fmt); \ + } \ +} while (0) +#define assert_str_ne(a, b, fmt...) do { \ + if (!strcmp((a), (b))) { \ + p_test_fail( \ + "%s:%s:%d: Failed assertion: " \ + "(%s) differs from (%s) --> " \ + "\"%s\" same as \"%s\": ", \ + __func__, __FILE__, __LINE__, #a, #b, a, b, fmt); \ + } \ +} while (0) + +#define assert_not_reached(fmt...) do { \ + p_test_fail("%s:%s:%d: Unreachable code reached: ", \ + __func__, __FILE__, __LINE__, fmt); \ +} while (0) + +/* + * If this enum changes, corresponding changes in test/test.sh.in are also + * necessary. + */ +typedef enum { + test_status_pass = 0, + test_status_skip = 1, + test_status_fail = 2, + + test_status_count = 3 +} test_status_t; + +typedef void (test_t)(void); + +#define TEST_BEGIN(f) \ +static void \ +f(void) \ +{ \ + p_test_init(#f); + +#define TEST_END \ + goto label_test_end; \ +label_test_end: \ + p_test_fini(); \ +} + +#define test(tests...) \ + p_test(tests, NULL) + +#define test_skip_if(e) do { \ + if (e) { \ + test_skip("%s:%s:%d: Test skipped: (%s)", \ + __func__, __FILE__, __LINE__, #e); \ + goto label_test_end; \ + } \ +} while (0) + +void test_skip(const char *format, ...) JEMALLOC_ATTR(format(printf, 1, 2)); +void test_fail(const char *format, ...) JEMALLOC_ATTR(format(printf, 1, 2)); + +/* For private use by macros. 
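+ *
+ * A test file normally touches only the public macros above; as a sketch
+ * (editor's addition, mirroring the converted integration tests later in
+ * this change):
+ *
+ *   TEST_BEGIN(test_something)
+ *   {
+ *
+ *           assert_d_eq(0x2a, 42, "Unexpected value");
+ *   }
+ *   TEST_END
+ *
+ *   int
+ *   main(void)
+ *   {
+ *
+ *           return (test(
+ *               test_something));
+ *   }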
*/ +test_status_t p_test(test_t* t, ...); +void p_test_init(const char *name); +void p_test_fini(void); +void p_test_fail(const char *format, ...); diff --git a/test/include/test/thd.h b/test/include/test/thd.h new file mode 100644 index 00000000..f941d7a7 --- /dev/null +++ b/test/include/test/thd.h @@ -0,0 +1,9 @@ +/* Abstraction layer for threading in tests */ +#ifdef _WIN32 +typedef HANDLE thd_t; +#else +typedef pthread_t thd_t; +#endif + +void thd_create(thd_t *thd, void *(*proc)(void *), void *arg); +void thd_join(thd_t thd, void **ret); diff --git a/test/integration/ALLOCM_ARENA.c b/test/integration/ALLOCM_ARENA.c new file mode 100644 index 00000000..5bf3c4ab --- /dev/null +++ b/test/integration/ALLOCM_ARENA.c @@ -0,0 +1,58 @@ +#include "test/jemalloc_test.h" + +#define NTHREADS 10 + +void * +thd_start(void *arg) +{ + unsigned thread_ind = (unsigned)(uintptr_t)arg; + unsigned arena_ind; + void *p; + size_t rsz, sz; + + sz = sizeof(arena_ind); + assert_d_eq(mallctl("arenas.extend", &arena_ind, &sz, NULL, 0), 0, + "Error in arenas.extend"); + + if (thread_ind % 4 != 3) { + size_t mib[3]; + size_t miblen = sizeof(mib) / sizeof(size_t); + const char *dss_precs[] = {"disabled", "primary", "secondary"}; + const char *dss = dss_precs[thread_ind % + (sizeof(dss_precs)/sizeof(char*))]; + assert_d_eq(mallctlnametomib("arena.0.dss", mib, &miblen), 0, + "Error in mallctlnametomib()"); + mib[1] = arena_ind; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&dss, + sizeof(const char *)), 0, "Error in mallctlbymib()"); + } + + assert_d_eq(allocm(&p, &rsz, 1, ALLOCM_ARENA(arena_ind)), + ALLOCM_SUCCESS, "Unexpected allocm() error"); + dallocm(p, 0); + + return (NULL); +} + +TEST_BEGIN(test_ALLOCM_ARENA) +{ + thd_t thds[NTHREADS]; + unsigned i; + + for (i = 0; i < NTHREADS; i++) { + thd_create(&thds[i], thd_start, + (void *)(uintptr_t)i); + } + + for (i = 0; i < NTHREADS; i++) + thd_join(thds[i], NULL); +} +TEST_END + +int +main(void) +{ + + return (test( + test_ALLOCM_ARENA)); +} diff --git a/test/aligned_alloc.c b/test/integration/aligned_alloc.c similarity index 56% rename from test/aligned_alloc.c rename to test/integration/aligned_alloc.c index 5a9b0cae..17c2151c 100644 --- a/test/aligned_alloc.c +++ b/test/integration/aligned_alloc.c @@ -1,39 +1,36 @@ -#define JEMALLOC_MANGLE -#include "jemalloc_test.h" +#include "test/jemalloc_test.h" -#define CHUNK 0x400000 +#define CHUNK 0x400000 /* #define MAXALIGN ((size_t)UINT64_C(0x80000000000)) */ -#define MAXALIGN ((size_t)0x2000000LU) -#define NITER 4 +#define MAXALIGN ((size_t)0x2000000LU) +#define NITER 4 -int -main(void) +TEST_BEGIN(test_alignment_errors) { - size_t alignment, size, total; - unsigned i; - void *p, *ps[NITER]; + size_t alignment; + void *p; - malloc_printf("Test begin\n"); - - /* Test error conditions. 
*/ alignment = 0; set_errno(0); p = aligned_alloc(alignment, 1); - if (p != NULL || get_errno() != EINVAL) { - malloc_printf( - "Expected error for invalid alignment %zu\n", alignment); - } + assert_false(p != NULL || get_errno() != EINVAL, + "Expected error for invalid alignment %zu", alignment); for (alignment = sizeof(size_t); alignment < MAXALIGN; alignment <<= 1) { set_errno(0); p = aligned_alloc(alignment + 1, 1); - if (p != NULL || get_errno() != EINVAL) { - malloc_printf( - "Expected error for invalid alignment %zu\n", - alignment + 1); - } + assert_false(p != NULL || get_errno() != EINVAL, + "Expected error for invalid alignment %zu", + alignment + 1); } +} +TEST_END + +TEST_BEGIN(test_oom_errors) +{ + size_t alignment, size; + void *p; #if LG_SIZEOF_PTR == 3 alignment = UINT64_C(0x8000000000000000); @@ -44,11 +41,9 @@ main(void) #endif set_errno(0); p = aligned_alloc(alignment, size); - if (p != NULL || get_errno() != ENOMEM) { - malloc_printf( - "Expected error for aligned_alloc(%zu, %zu)\n", - alignment, size); - } + assert_false(p != NULL || get_errno() != ENOMEM, + "Expected error for aligned_alloc(%zu, %zu)", + alignment, size); #if LG_SIZEOF_PTR == 3 alignment = UINT64_C(0x4000000000000000); @@ -59,11 +54,9 @@ main(void) #endif set_errno(0); p = aligned_alloc(alignment, size); - if (p != NULL || get_errno() != ENOMEM) { - malloc_printf( - "Expected error for aligned_alloc(%zu, %zu)\n", - alignment, size); - } + assert_false(p != NULL || get_errno() != ENOMEM, + "Expected error for aligned_alloc(%zu, %zu)", + alignment, size); alignment = 0x10LU; #if LG_SIZEOF_PTR == 3 @@ -73,11 +66,17 @@ main(void) #endif set_errno(0); p = aligned_alloc(alignment, size); - if (p != NULL || get_errno() != ENOMEM) { - malloc_printf( - "Expected error for aligned_alloc(&p, %zu, %zu)\n", - alignment, size); - } + assert_false(p != NULL || get_errno() != ENOMEM, + "Expected error for aligned_alloc(&p, %zu, %zu)", + alignment, size); +} +TEST_END + +TEST_BEGIN(test_alignment_and_size) +{ + size_t alignment, size, total; + unsigned i; + void *ps[NITER]; for (i = 0; i < NITER; i++) ps[i] = NULL; @@ -86,7 +85,6 @@ main(void) alignment <= MAXALIGN; alignment <<= 1) { total = 0; - malloc_printf("Alignment: %zu\n", alignment); for (size = 1; size < 3 * alignment && size < (1U << 31); size += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { @@ -95,11 +93,11 @@ main(void) if (ps[i] == NULL) { char buf[BUFERROR_BUF]; - buferror(buf, sizeof(buf)); - malloc_printf( - "Error for size %zu (%#zx): %s\n", - size, size, buf); - exit(1); + buferror(get_errno(), buf, sizeof(buf)); + test_fail( + "Error for alignment=%zu, " + "size=%zu (%#zx): %s", + alignment, size, size, buf); } total += malloc_usable_size(ps[i]); if (total >= (MAXALIGN << 1)) @@ -113,7 +111,15 @@ main(void) } } } - - malloc_printf("Test end\n"); - return (0); +} +TEST_END + +int +main(void) +{ + + return (test( + test_alignment_errors, + test_oom_errors, + test_alignment_and_size)); } diff --git a/test/integration/allocated.c b/test/integration/allocated.c new file mode 100644 index 00000000..3630e80c --- /dev/null +++ b/test/integration/allocated.c @@ -0,0 +1,125 @@ +#include "test/jemalloc_test.h" + +static const bool config_stats = +#ifdef JEMALLOC_STATS + true +#else + false +#endif + ; + +void * +thd_start(void *arg) +{ + int err; + void *p; + uint64_t a0, a1, d0, d1; + uint64_t *ap0, *ap1, *dp0, *dp1; + size_t sz, usize; + + sz = sizeof(a0); + if ((err = mallctl("thread.allocated", &a0, &sz, NULL, 0))) { + if (err == ENOENT) + goto label_ENOENT; + 
test_fail("%s(): Error in mallctl(): %s", __func__, + strerror(err)); + } + sz = sizeof(ap0); + if ((err = mallctl("thread.allocatedp", &ap0, &sz, NULL, 0))) { + if (err == ENOENT) + goto label_ENOENT; + test_fail("%s(): Error in mallctl(): %s", __func__, + strerror(err)); + } + assert_u64_eq(*ap0, a0, + "\"thread.allocatedp\" should provide a pointer to internal " + "storage"); + + sz = sizeof(d0); + if ((err = mallctl("thread.deallocated", &d0, &sz, NULL, 0))) { + if (err == ENOENT) + goto label_ENOENT; + test_fail("%s(): Error in mallctl(): %s", __func__, + strerror(err)); + } + sz = sizeof(dp0); + if ((err = mallctl("thread.deallocatedp", &dp0, &sz, NULL, 0))) { + if (err == ENOENT) + goto label_ENOENT; + test_fail("%s(): Error in mallctl(): %s", __func__, + strerror(err)); + } + assert_u64_eq(*dp0, d0, + "\"thread.deallocatedp\" should provide a pointer to internal " + "storage"); + + p = malloc(1); + assert_ptr_not_null(p, "Unexpected malloc() error"); + + sz = sizeof(a1); + mallctl("thread.allocated", &a1, &sz, NULL, 0); + sz = sizeof(ap1); + mallctl("thread.allocatedp", &ap1, &sz, NULL, 0); + assert_u64_eq(*ap1, a1, + "Dereferenced \"thread.allocatedp\" value should equal " + "\"thread.allocated\" value"); + assert_ptr_eq(ap0, ap1, + "Pointer returned by \"thread.allocatedp\" should not change"); + + usize = malloc_usable_size(p); + assert_u64_le(a0 + usize, a1, + "Allocated memory counter should increase by at least the amount " + "explicitly allocated"); + + free(p); + + sz = sizeof(d1); + mallctl("thread.deallocated", &d1, &sz, NULL, 0); + sz = sizeof(dp1); + mallctl("thread.deallocatedp", &dp1, &sz, NULL, 0); + assert_u64_eq(*dp1, d1, + "Dereferenced \"thread.deallocatedp\" value should equal " + "\"thread.deallocated\" value"); + assert_ptr_eq(dp0, dp1, + "Pointer returned by \"thread.deallocatedp\" should not change"); + + assert_u64_le(d0 + usize, d1, + "Deallocated memory counter should increase by at least the amount " + "explicitly deallocated"); + + return (NULL); +label_ENOENT: + assert_false(config_stats, + "ENOENT should only be returned if stats are disabled"); + test_skip("\"thread.allocated\" mallctl not available"); + return (NULL); +} + +TEST_BEGIN(test_main_thread) +{ + + thd_start(NULL); +} +TEST_END + +TEST_BEGIN(test_subthread) +{ + thd_t thd; + + thd_create(&thd, thd_start, NULL); + thd_join(thd, NULL); +} +TEST_END + +int +main(void) +{ + + /* Run tests multiple times to check for bad interactions. 
*/ + return (test( + test_main_thread, + test_subthread, + test_main_thread, + test_subthread, + test_main_thread)); +} diff --git a/test/integration/allocm.c b/test/integration/allocm.c new file mode 100644 index 00000000..bd7a3ca5 --- /dev/null +++ b/test/integration/allocm.c @@ -0,0 +1,131 @@ +#include "test/jemalloc_test.h" + +#define CHUNK 0x400000 +/* #define MAXALIGN ((size_t)UINT64_C(0x80000000000)) */ +#define MAXALIGN ((size_t)0x2000000LU) +#define NITER 4 + +TEST_BEGIN(test_basic) +{ + size_t nsz, rsz, sz; + void *p; + + sz = 42; + nsz = 0; + assert_d_eq(nallocm(&nsz, sz, 0), ALLOCM_SUCCESS, + "Unexpected nallocm() error"); + rsz = 0; + assert_d_eq(allocm(&p, &rsz, sz, 0), ALLOCM_SUCCESS, + "Unexpected allocm() error"); + assert_zu_ge(rsz, sz, "Real size smaller than expected"); + assert_zu_eq(nsz, rsz, "nallocm()/allocm() rsize mismatch"); + assert_d_eq(dallocm(p, 0), ALLOCM_SUCCESS, + "Unexpected dallocm() error"); + + assert_d_eq(allocm(&p, NULL, sz, 0), ALLOCM_SUCCESS, + "Unexpected allocm() error"); + assert_d_eq(dallocm(p, 0), ALLOCM_SUCCESS, + "Unexpected dallocm() error"); + + nsz = 0; + assert_d_eq(nallocm(&nsz, sz, ALLOCM_ZERO), ALLOCM_SUCCESS, + "Unexpected nallocm() error"); + rsz = 0; + assert_d_eq(allocm(&p, &rsz, sz, ALLOCM_ZERO), ALLOCM_SUCCESS, + "Unexpected allocm() error"); + assert_zu_eq(nsz, rsz, "nallocm()/allocm() rsize mismatch"); + assert_d_eq(dallocm(p, 0), ALLOCM_SUCCESS, + "Unexpected dallocm() error"); +} +TEST_END + +TEST_BEGIN(test_alignment_errors) +{ + void *p; + size_t nsz, rsz, sz, alignment; + +#if LG_SIZEOF_PTR == 3 + alignment = UINT64_C(0x4000000000000000); + sz = UINT64_C(0x8400000000000001); +#else + alignment = 0x40000000LU; + sz = 0x84000001LU; +#endif + nsz = 0; + assert_d_eq(nallocm(&nsz, sz, ALLOCM_ALIGN(alignment)), ALLOCM_SUCCESS, + "Unexpected nallocm() error"); + rsz = 0; + assert_d_ne(allocm(&p, &rsz, sz, ALLOCM_ALIGN(alignment)), + ALLOCM_SUCCESS, "Expected error for allocm(&p, %zu, %#x)", + sz, ALLOCM_ALIGN(alignment)); +} +TEST_END + +TEST_BEGIN(test_alignment_and_size) +{ + int r; + size_t nsz, rsz, sz, alignment, total; + unsigned i; + void *ps[NITER]; + + for (i = 0; i < NITER; i++) + ps[i] = NULL; + + for (alignment = 8; + alignment <= MAXALIGN; + alignment <<= 1) { + total = 0; + for (sz = 1; + sz < 3 * alignment && sz < (1U << 31); + sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { + for (i = 0; i < NITER; i++) { + nsz = 0; + r = nallocm(&nsz, sz, ALLOCM_ALIGN(alignment) | + ALLOCM_ZERO); + assert_d_eq(r, ALLOCM_SUCCESS, + "nallocm() error for alignment=%zu, " + "size=%zu (%#zx): %d", + alignment, sz, sz, r); + rsz = 0; + r = allocm(&ps[i], &rsz, sz, + ALLOCM_ALIGN(alignment) | ALLOCM_ZERO); + assert_d_eq(r, ALLOCM_SUCCESS, + "allocm() error for alignment=%zu, " + "size=%zu (%#zx): %d", + alignment, sz, sz, r); + assert_zu_ge(rsz, sz, + "Real size smaller than expected for " + "alignment=%zu, size=%zu", alignment, sz); + assert_zu_eq(nsz, rsz, + "nallocm()/allocm() rsize mismatch for " + "alignment=%zu, size=%zu", alignment, sz); + assert_ptr_null( + (void *)((uintptr_t)ps[i] & (alignment-1)), + "%p inadequately aligned for" + " alignment=%zu, size=%zu", ps[i], + alignment, sz); + sallocm(ps[i], &rsz, 0); + total += rsz; + if (total >= (MAXALIGN << 1)) + break; + } + for (i = 0; i < NITER; i++) { + if (ps[i] != NULL) { + dallocm(ps[i], 0); + ps[i] = NULL; + } + } + } + } +} +TEST_END + +int +main(void) +{ + + return (test( + test_basic, + test_alignment_errors, + test_alignment_and_size)); +} diff --git 
a/test/integration/mallocx.c b/test/integration/mallocx.c new file mode 100644 index 00000000..c26f6c56 --- /dev/null +++ b/test/integration/mallocx.c @@ -0,0 +1,119 @@ +#include "test/jemalloc_test.h" + +#define CHUNK 0x400000 +/* #define MAXALIGN ((size_t)UINT64_C(0x80000000000)) */ +#define MAXALIGN ((size_t)0x2000000LU) +#define NITER 4 + +TEST_BEGIN(test_basic) +{ + size_t nsz, rsz, sz; + void *p; + + sz = 42; + nsz = nallocx(sz, 0); + assert_zu_ne(nsz, 0, "Unexpected nallocx() error"); + p = mallocx(sz, 0); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + rsz = sallocx(p, 0); + assert_zu_ge(rsz, sz, "Real size smaller than expected"); + assert_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch"); + dallocx(p, 0); + + p = mallocx(sz, 0); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + dallocx(p, 0); + + nsz = nallocx(sz, MALLOCX_ZERO); + assert_zu_ne(nsz, 0, "Unexpected nallocx() error"); + p = mallocx(sz, MALLOCX_ZERO); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + rsz = sallocx(p, 0); + assert_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch"); + dallocx(p, 0); +} +TEST_END + +TEST_BEGIN(test_alignment_errors) +{ + void *p; + size_t nsz, sz, alignment; + +#if LG_SIZEOF_PTR == 3 + alignment = UINT64_C(0x4000000000000000); + sz = UINT64_C(0x8400000000000001); +#else + alignment = 0x40000000LU; + sz = 0x84000001LU; +#endif + nsz = nallocx(sz, MALLOCX_ALIGN(alignment)); + assert_zu_ne(nsz, 0, "Unexpected nallocx() error"); + p = mallocx(sz, MALLOCX_ALIGN(alignment)); + assert_ptr_null(p, "Expected error for mallocx(%zu, %#x)", sz, + MALLOCX_ALIGN(alignment)); +} +TEST_END + +TEST_BEGIN(test_alignment_and_size) +{ + size_t nsz, rsz, sz, alignment, total; + unsigned i; + void *ps[NITER]; + + for (i = 0; i < NITER; i++) + ps[i] = NULL; + + for (alignment = 8; + alignment <= MAXALIGN; + alignment <<= 1) { + total = 0; + for (sz = 1; + sz < 3 * alignment && sz < (1U << 31); + sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { + for (i = 0; i < NITER; i++) { + nsz = nallocx(sz, MALLOCX_ALIGN(alignment) | + MALLOCX_ZERO); + assert_zu_ne(nsz, 0, + "nallocx() error for alignment=%zu, " + "size=%zu (%#zx)", alignment, sz, sz); + ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) | + MALLOCX_ZERO); + assert_ptr_not_null(ps[i], + "mallocx() error for alignment=%zu, " + "size=%zu (%#zx)", alignment, sz, sz); + rsz = sallocx(ps[i], 0); + assert_zu_ge(rsz, sz, + "Real size smaller than expected for " + "alignment=%zu, size=%zu", alignment, sz); + assert_zu_eq(nsz, rsz, + "nallocx()/sallocx() size mismatch for " + "alignment=%zu, size=%zu", alignment, sz); + assert_ptr_null( + (void *)((uintptr_t)ps[i] & (alignment-1)), + "%p inadequately aligned for" + " alignment=%zu, size=%zu", ps[i], + alignment, sz); + total += rsz; + if (total >= (MAXALIGN << 1)) + break; + } + for (i = 0; i < NITER; i++) { + if (ps[i] != NULL) { + dallocx(ps[i], 0); + ps[i] = NULL; + } + } + } + } +} +TEST_END + +int +main(void) +{ + + return (test( + test_basic, + test_alignment_errors, + test_alignment_and_size)); +} diff --git a/test/integration/mremap.c b/test/integration/mremap.c new file mode 100644 index 00000000..a7fb7ef0 --- /dev/null +++ b/test/integration/mremap.c @@ -0,0 +1,45 @@ +#include "test/jemalloc_test.h" + +TEST_BEGIN(test_mremap) +{ + int err; + size_t sz, lg_chunk, chunksize, i; + char *p, *q; + + sz = sizeof(lg_chunk); + err = mallctl("opt.lg_chunk", &lg_chunk, &sz, NULL, 0); + assert_d_eq(err, 0, "Error in mallctl(): %s", strerror(err)); + chunksize = ((size_t)1U) << lg_chunk; + + p 
= (char *)malloc(chunksize); + assert_ptr_not_null(p, "malloc(%zu) --> %p", chunksize, p); + memset(p, 'a', chunksize); + + q = (char *)realloc(p, chunksize * 2); + assert_ptr_not_null(q, "realloc(%p, %zu) --> %p", p, chunksize * 2, + q); + for (i = 0; i < chunksize; i++) { + assert_c_eq(q[i], 'a', + "realloc() should preserve existing bytes across copies"); + } + + p = q; + + q = (char *)realloc(p, chunksize); + assert_ptr_not_null(q, "realloc(%p, %zu) --> %p", p, chunksize, q); + for (i = 0; i < chunksize; i++) { + assert_c_eq(q[i], 'a', + "realloc() should preserve existing bytes across copies"); + } + + free(q); +} +TEST_END + +int +main(void) +{ + + return (test( + test_mremap)); +} diff --git a/test/posix_memalign.c b/test/integration/posix_memalign.c similarity index 51% rename from test/posix_memalign.c rename to test/integration/posix_memalign.c index 2185bcf7..c88a4dcb 100644 --- a/test/posix_memalign.c +++ b/test/integration/posix_memalign.c @@ -1,40 +1,34 @@ -#define JEMALLOC_MANGLE -#include "jemalloc_test.h" +#include "test/jemalloc_test.h" -#define CHUNK 0x400000 +#define CHUNK 0x400000 /* #define MAXALIGN ((size_t)UINT64_C(0x80000000000)) */ -#define MAXALIGN ((size_t)0x2000000LU) -#define NITER 4 +#define MAXALIGN ((size_t)0x2000000LU) +#define NITER 4 -int -main(void) +TEST_BEGIN(test_alignment_errors) { - size_t alignment, size, total; - unsigned i; - int err; - void *p, *ps[NITER]; + size_t alignment; + void *p; - malloc_printf("Test begin\n"); - - /* Test error conditions. */ for (alignment = 0; alignment < sizeof(void *); alignment++) { - err = posix_memalign(&p, alignment, 1); - if (err != EINVAL) { - malloc_printf( - "Expected error for invalid alignment %zu\n", - alignment); - } + assert_d_eq(posix_memalign(&p, alignment, 1), EINVAL, + "Expected error for invalid alignment %zu", + alignment); } for (alignment = sizeof(size_t); alignment < MAXALIGN; alignment <<= 1) { - err = posix_memalign(&p, alignment + 1, 1); - if (err == 0) { - malloc_printf( - "Expected error for invalid alignment %zu\n", - alignment + 1); - } + assert_d_ne(posix_memalign(&p, alignment + 1, 1), 0, + "Expected error for invalid alignment %zu", + alignment + 1); } +} +TEST_END + +TEST_BEGIN(test_oom_errors) +{ + size_t alignment, size; + void *p; #if LG_SIZEOF_PTR == 3 alignment = UINT64_C(0x8000000000000000); @@ -43,12 +37,9 @@ main(void) alignment = 0x80000000LU; size = 0x80000000LU; #endif - err = posix_memalign(&p, alignment, size); - if (err == 0) { - malloc_printf( - "Expected error for posix_memalign(&p, %zu, %zu)\n", - alignment, size); - } + assert_d_ne(posix_memalign(&p, alignment, size), 0, + "Expected error for posix_memalign(&p, %zu, %zu)", + alignment, size); #if LG_SIZEOF_PTR == 3 alignment = UINT64_C(0x4000000000000000); @@ -57,12 +48,9 @@ main(void) alignment = 0x40000000LU; size = 0x84000001LU; #endif - err = posix_memalign(&p, alignment, size); - if (err == 0) { - malloc_printf( - "Expected error for posix_memalign(&p, %zu, %zu)\n", - alignment, size); - } + assert_d_ne(posix_memalign(&p, alignment, size), 0, + "Expected error for posix_memalign(&p, %zu, %zu)", + alignment, size); alignment = 0x10LU; #if LG_SIZEOF_PTR == 3 @@ -70,12 +58,18 @@ main(void) #else size = 0xfffffff0LU; #endif - err = posix_memalign(&p, alignment, size); - if (err == 0) { - malloc_printf( - "Expected error for posix_memalign(&p, %zu, %zu)\n", - alignment, size); - } + assert_d_ne(posix_memalign(&p, alignment, size), 0, + "Expected error for posix_memalign(&p, %zu, %zu)", + alignment, size); +} 
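+/*
+ * The alignment/size pairs above are presumably chosen so that no
+ * conforming implementation can satisfy them: each request is so large
+ * that it cannot be backed by real memory, and in some cases
+ * size + alignment cannot even be represented in a size_t, so
+ * posix_memalign() must fail rather than return an undersized or
+ * misaligned region.
+ */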
+TEST_END + +TEST_BEGIN(test_alignment_and_size) +{ + size_t alignment, size, total; + unsigned i; + int err; + void *ps[NITER]; for (i = 0; i < NITER; i++) ps[i] = NULL; @@ -84,7 +78,6 @@ main(void) alignment <= MAXALIGN; alignment <<= 1) { total = 0; - malloc_printf("Alignment: %zu\n", alignment); for (size = 1; size < 3 * alignment && size < (1U << 31); size += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { @@ -92,10 +85,13 @@ main(void) err = posix_memalign(&ps[i], alignment, size); if (err) { - malloc_printf( - "Error for size %zu (%#zx): %s\n", - size, size, strerror(err)); - exit(1); + char buf[BUFERROR_BUF]; + + buferror(get_errno(), buf, sizeof(buf)); + test_fail( + "Error for alignment=%zu, " + "size=%zu (%#zx): %s", + alignment, size, size, buf); } total += malloc_usable_size(ps[i]); if (total >= (MAXALIGN << 1)) @@ -109,7 +105,15 @@ main(void) } } } - - malloc_printf("Test end\n"); - return (0); +} +TEST_END + +int +main(void) +{ + + return (test( + test_alignment_errors, + test_oom_errors, + test_alignment_and_size)); } diff --git a/test/integration/rallocm.c b/test/integration/rallocm.c new file mode 100644 index 00000000..33c11bb7 --- /dev/null +++ b/test/integration/rallocm.c @@ -0,0 +1,111 @@ +#include "test/jemalloc_test.h" + +TEST_BEGIN(test_same_size) +{ + void *p, *q; + size_t sz, tsz; + + assert_d_eq(allocm(&p, &sz, 42, 0), ALLOCM_SUCCESS, + "Unexpected allocm() error"); + + q = p; + assert_d_eq(rallocm(&q, &tsz, sz, 0, ALLOCM_NO_MOVE), ALLOCM_SUCCESS, + "Unexpected rallocm() error"); + assert_ptr_eq(q, p, "Unexpected object move"); + assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz); + + assert_d_eq(dallocm(p, 0), ALLOCM_SUCCESS, + "Unexpected dallocm() error"); +} +TEST_END + +TEST_BEGIN(test_extra_no_move) +{ + void *p, *q; + size_t sz, tsz; + + assert_d_eq(allocm(&p, &sz, 42, 0), ALLOCM_SUCCESS, + "Unexpected allocm() error"); + + q = p; + assert_d_eq(rallocm(&q, &tsz, sz, sz-42, ALLOCM_NO_MOVE), + ALLOCM_SUCCESS, "Unexpected rallocm() error"); + assert_ptr_eq(q, p, "Unexpected object move"); + assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz); + + assert_d_eq(dallocm(p, 0), ALLOCM_SUCCESS, + "Unexpected dallocm() error"); +} +TEST_END + +TEST_BEGIN(test_no_move_fail) +{ + void *p, *q; + size_t sz, tsz; + + assert_d_eq(allocm(&p, &sz, 42, 0), ALLOCM_SUCCESS, + "Unexpected allocm() error"); + + q = p; + assert_d_eq(rallocm(&q, &tsz, sz + 5, 0, ALLOCM_NO_MOVE), + ALLOCM_ERR_NOT_MOVED, "Unexpected rallocm() result"); + assert_ptr_eq(q, p, "Unexpected object move"); + assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz); + + assert_d_eq(dallocm(p, 0), ALLOCM_SUCCESS, + "Unexpected dallocm() error"); +} +TEST_END + +TEST_BEGIN(test_grow_and_shrink) +{ + void *p, *q; + size_t tsz; +#define NCYCLES 3 + unsigned i, j; +#define NSZS 2500 + size_t szs[NSZS]; +#define MAXSZ ZU(12 * 1024 * 1024) + + assert_d_eq(allocm(&p, &szs[0], 1, 0), ALLOCM_SUCCESS, + "Unexpected allocm() error"); + + for (i = 0; i < NCYCLES; i++) { + for (j = 1; j < NSZS && szs[j-1] < MAXSZ; j++) { + q = p; + assert_d_eq(rallocm(&q, &szs[j], szs[j-1]+1, 0, 0), + ALLOCM_SUCCESS, + "Unexpected rallocm() error for size=%zu-->%zu", + szs[j-1], szs[j-1]+1); + assert_zu_ne(szs[j], szs[j-1]+1, + "Expected size to at least: %zu", szs[j-1]+1); + p = q; + } + + for (j--; j > 0; j--) { + q = p; + assert_d_eq(rallocm(&q, &tsz, szs[j-1], 0, 0), + ALLOCM_SUCCESS, + "Unexpected rallocm() error for size=%zu-->%zu", + szs[j], szs[j-1]); + assert_zu_eq(tsz, szs[j-1], + 
"Expected size=%zu, got size=%zu", szs[j-1], tsz); + p = q; + } + } + + assert_d_eq(dallocm(p, 0), ALLOCM_SUCCESS, + "Unexpected dallocm() error"); +} +TEST_END + +int +main(void) +{ + + return (test( + test_same_size, + test_extra_no_move, + test_no_move_fail, + test_grow_and_shrink)); +} diff --git a/test/integration/rallocx.c b/test/integration/rallocx.c new file mode 100644 index 00000000..b4b67802 --- /dev/null +++ b/test/integration/rallocx.c @@ -0,0 +1,182 @@ +#include "test/jemalloc_test.h" + +TEST_BEGIN(test_grow_and_shrink) +{ + void *p, *q; + size_t tsz; +#define NCYCLES 3 + unsigned i, j; +#define NSZS 2500 + size_t szs[NSZS]; +#define MAXSZ ZU(12 * 1024 * 1024) + + p = mallocx(1, 0); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + szs[0] = sallocx(p, 0); + + for (i = 0; i < NCYCLES; i++) { + for (j = 1; j < NSZS && szs[j-1] < MAXSZ; j++) { + q = rallocx(p, szs[j-1]+1, 0); + assert_ptr_not_null(q, + "Unexpected rallocx() error for size=%zu-->%zu", + szs[j-1], szs[j-1]+1); + szs[j] = sallocx(q, 0); + assert_zu_ne(szs[j], szs[j-1]+1, + "Expected size to at least: %zu", szs[j-1]+1); + p = q; + } + + for (j--; j > 0; j--) { + q = rallocx(p, szs[j-1], 0); + assert_ptr_not_null(q, + "Unexpected rallocx() error for size=%zu-->%zu", + szs[j], szs[j-1]); + tsz = sallocx(q, 0); + assert_zu_eq(tsz, szs[j-1], + "Expected size=%zu, got size=%zu", szs[j-1], tsz); + p = q; + } + } + + dallocx(p, 0); +#undef MAXSZ +#undef NSZS +#undef NCYCLES +} +TEST_END + +static bool +validate_fill(const void *p, uint8_t c, size_t offset, size_t len) +{ + bool ret = false; + const uint8_t *buf = (const uint8_t *)p; + size_t i; + + for (i = 0; i < len; i++) { + uint8_t b = buf[offset+i]; + if (b != c) { + test_fail("Allocation at %p contains %#x rather than " + "%#x at offset %zu", p, b, c, offset+i); + ret = true; + } + } + + return (ret); +} + +TEST_BEGIN(test_zero) +{ + void *p, *q; + size_t psz, qsz, i, j; + size_t start_sizes[] = {1, 3*1024, 63*1024, 4095*1024}; +#define FILL_BYTE 0xaaU +#define RANGE 2048 + + for (i = 0; i < sizeof(start_sizes)/sizeof(size_t); i++) { + size_t start_size = start_sizes[i]; + p = mallocx(start_size, MALLOCX_ZERO); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + psz = sallocx(p, 0); + + assert_false(validate_fill(p, 0, 0, psz), + "Expected zeroed memory"); + memset(p, FILL_BYTE, psz); + assert_false(validate_fill(p, FILL_BYTE, 0, psz), + "Expected filled memory"); + + for (j = 1; j < RANGE; j++) { + q = rallocx(p, start_size+j, MALLOCX_ZERO); + assert_ptr_not_null(q, "Unexpected rallocx() error"); + qsz = sallocx(q, 0); + if (q != p || qsz != psz) { + assert_false(validate_fill(q, FILL_BYTE, 0, + psz), "Expected filled memory"); + assert_false(validate_fill(q, 0, psz, qsz-psz), + "Expected zeroed memory"); + } + if (psz != qsz) { + memset(q+psz, FILL_BYTE, qsz-psz); + psz = qsz; + } + p = q; + } + assert_false(validate_fill(p, FILL_BYTE, 0, psz), + "Expected filled memory"); + dallocx(p, 0); + } +#undef FILL_BYTE +} +TEST_END + +TEST_BEGIN(test_align) +{ + void *p, *q; + size_t align; +#define MAX_ALIGN (ZU(1) << 29) + + align = ZU(1); + p = mallocx(1, MALLOCX_ALIGN(align)); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + + for (align <<= 1; align <= MAX_ALIGN; align <<= 1) { + q = rallocx(p, 1, MALLOCX_ALIGN(align)); + assert_ptr_not_null(q, + "Unexpected rallocx() error for align=%zu", align); + assert_ptr_null( + (void *)((uintptr_t)q & (align-1)), + "%p inadequately aligned for align=%zu", + q, align); + p = q; + } + dallocx(p, 0); +#undef 
MAX_ALIGN +} +TEST_END + +TEST_BEGIN(test_lg_align_and_zero) +{ + void *p, *q; + size_t lg_align, sz; +#define MAX_LG_ALIGN 29 +#define MAX_VALIDATE (ZU(1) << 22) + + lg_align = ZU(0); + p = mallocx(1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + + for (lg_align++; lg_align <= MAX_LG_ALIGN; lg_align++) { + q = rallocx(p, 1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO); + assert_ptr_not_null(q, + "Unexpected rallocx() error for lg_align=%zu", lg_align); + assert_ptr_null( + (void *)((uintptr_t)q & ((ZU(1) << lg_align)-1)), + "%p inadequately aligned for lg_align=%zu", + q, lg_align); + sz = sallocx(q, 0); + if ((sz << 1) <= MAX_VALIDATE) { + assert_false(validate_fill(q, 0, 0, sz), + "Expected zeroed memory"); + } else { + assert_false(validate_fill(q, 0, 0, MAX_VALIDATE), + "Expected zeroed memory"); + assert_false(validate_fill(q+sz-MAX_VALIDATE, 0, 0, + MAX_VALIDATE), "Expected zeroed memory"); + } + p = q; + } + dallocx(p, 0); +#undef MAX_VALIDATE +#undef MAX_LG_ALIGN +} +TEST_END + +int +main(void) +{ + + return (test( + test_grow_and_shrink, + test_zero, + test_align, + test_lg_align_and_zero)); +} diff --git a/test/integration/thread_arena.c b/test/integration/thread_arena.c new file mode 100644 index 00000000..67be5351 --- /dev/null +++ b/test/integration/thread_arena.c @@ -0,0 +1,79 @@ +#include "test/jemalloc_test.h" + +#define NTHREADS 10 + +void * +thd_start(void *arg) +{ + unsigned main_arena_ind = *(unsigned *)arg; + void *p; + unsigned arena_ind; + size_t size; + int err; + + p = malloc(1); + assert_ptr_not_null(p, "Error in malloc()"); + free(p); + + size = sizeof(arena_ind); + if ((err = mallctl("thread.arena", &arena_ind, &size, &main_arena_ind, + sizeof(main_arena_ind)))) { + char buf[BUFERROR_BUF]; + + buferror(err, buf, sizeof(buf)); + test_fail("Error in mallctl(): %s", buf); + } + + size = sizeof(arena_ind); + if ((err = mallctl("thread.arena", &arena_ind, &size, NULL, 0))) { + char buf[BUFERROR_BUF]; + + buferror(err, buf, sizeof(buf)); + test_fail("Error in mallctl(): %s", buf); + } + assert_u_eq(arena_ind, main_arena_ind, + "Arena index should be same as for main thread"); + + return (NULL); +} + +TEST_BEGIN(test_thread_arena) +{ + void *p; + unsigned arena_ind; + size_t size; + int err; + thd_t thds[NTHREADS]; + unsigned i; + + p = malloc(1); + assert_ptr_not_null(p, "Error in malloc()"); + + size = sizeof(arena_ind); + if ((err = mallctl("thread.arena", &arena_ind, &size, NULL, 0))) { + char buf[BUFERROR_BUF]; + + buferror(err, buf, sizeof(buf)); + test_fail("Error in mallctl(): %s", buf); + } + + for (i = 0; i < NTHREADS; i++) { + thd_create(&thds[i], thd_start, + (void *)&arena_ind); + } + + for (i = 0; i < NTHREADS; i++) { + intptr_t join_ret; + thd_join(thds[i], (void *)&join_ret); + assert_zd_eq(join_ret, 0, "Unexpected thread join error"); + } +} +TEST_END + +int +main(void) +{ + + return (test( + test_thread_arena)); +} diff --git a/test/integration/thread_tcache_enabled.c b/test/integration/thread_tcache_enabled.c new file mode 100644 index 00000000..f4e89c68 --- /dev/null +++ b/test/integration/thread_tcache_enabled.c @@ -0,0 +1,113 @@ +#include "test/jemalloc_test.h" + +static const bool config_tcache = +#ifdef JEMALLOC_TCACHE + true +#else + false +#endif + ; + +void * +thd_start(void *arg) +{ + int err; + size_t sz; + bool e0, e1; + + sz = sizeof(bool); + if ((err = mallctl("thread.tcache.enabled", &e0, &sz, NULL, 0))) { + if (err == ENOENT) { + assert_false(config_tcache, + "ENOENT should only be 
returned if tcache is " + "disabled"); + } + goto label_ENOENT; + } + + if (e0) { + e1 = false; + assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), + 0, "Unexpected mallctl() error"); + assert_true(e0, "tcache should be enabled"); + } + + e1 = true; + assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, + "Unexpected mallctl() error"); + assert_false(e0, "tcache should be disabled"); + + e1 = true; + assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, + "Unexpected mallctl() error"); + assert_true(e0, "tcache should be enabled"); + + e1 = false; + assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, + "Unexpected mallctl() error"); + assert_true(e0, "tcache should be enabled"); + + e1 = false; + assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, + "Unexpected mallctl() error"); + assert_false(e0, "tcache should be disabled"); + + free(malloc(1)); + e1 = true; + assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, + "Unexpected mallctl() error"); + assert_false(e0, "tcache should be disabled"); + + free(malloc(1)); + e1 = true; + assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, + "Unexpected mallctl() error"); + assert_true(e0, "tcache should be enabled"); + + free(malloc(1)); + e1 = false; + assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, + "Unexpected mallctl() error"); + assert_true(e0, "tcache should be enabled"); + + free(malloc(1)); + e1 = false; + assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, + "Unexpected mallctl() error"); + assert_false(e0, "tcache should be disabled"); + + free(malloc(1)); + return (NULL); +label_ENOENT: + test_skip("\"thread.tcache.enabled\" mallctl not available"); + return (NULL); +} + +TEST_BEGIN(test_main_thread) +{ + + thd_start(NULL); +} +TEST_END + +TEST_BEGIN(test_subthread) +{ + thd_t thd; + + thd_create(&thd, thd_start, NULL); + thd_join(thd, NULL); +} +TEST_END + +int +main(void) +{ + + /* Run tests multiple times to check for bad interactions. 
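+	 * (E.g., the tcache enabled/disabled state a previous run leaves
+	 * behind.)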
*/ + return (test( + test_main_thread, + test_subthread, + test_main_thread, + test_subthread, + test_main_thread)); +} diff --git a/test/integration/xallocx.c b/test/integration/xallocx.c new file mode 100644 index 00000000..ab4cf945 --- /dev/null +++ b/test/integration/xallocx.c @@ -0,0 +1,59 @@ +#include "test/jemalloc_test.h" + +TEST_BEGIN(test_same_size) +{ + void *p; + size_t sz, tsz; + + p = mallocx(42, 0); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + sz = sallocx(p, 0); + + tsz = xallocx(p, sz, 0, 0); + assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz); + + dallocx(p, 0); +} +TEST_END + +TEST_BEGIN(test_extra_no_move) +{ + void *p; + size_t sz, tsz; + + p = mallocx(42, 0); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + sz = sallocx(p, 0); + + tsz = xallocx(p, sz, sz-42, 0); + assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz); + + dallocx(p, 0); +} +TEST_END + +TEST_BEGIN(test_no_move_fail) +{ + void *p; + size_t sz, tsz; + + p = mallocx(42, 0); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + sz = sallocx(p, 0); + + tsz = xallocx(p, sz + 5, 0, 0); + assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz); + + dallocx(p, 0); +} +TEST_END + +int +main(void) +{ + + return (test( + test_same_size, + test_extra_no_move, + test_no_move_fail)); +} diff --git a/test/jemalloc_test.h.in b/test/jemalloc_test.h.in deleted file mode 100644 index e38b48ef..00000000 --- a/test/jemalloc_test.h.in +++ /dev/null @@ -1,53 +0,0 @@ -/* - * This header should be included by tests, rather than directly including - * jemalloc/jemalloc.h, because --with-install-suffix may cause the header to - * have a different name. - */ -#include "jemalloc/jemalloc@install_suffix@.h" -#include "jemalloc/internal/jemalloc_internal.h" - -/* Abstraction layer for threading in tests */ -#ifdef _WIN32 -#include - -typedef HANDLE je_thread_t; - -void -je_thread_create(je_thread_t *thread, void *(*proc)(void *), void *arg) -{ - LPTHREAD_START_ROUTINE routine = (LPTHREAD_START_ROUTINE)proc; - *thread = CreateThread(NULL, 0, routine, arg, 0, NULL); - if (*thread == NULL) { - malloc_printf("Error in CreateThread()\n"); - exit(1); - } -} - -void -je_thread_join(je_thread_t thread, void **ret) -{ - WaitForSingleObject(thread, INFINITE); -} - -#else -#include - -typedef pthread_t je_thread_t; - -void -je_thread_create(je_thread_t *thread, void *(*proc)(void *), void *arg) -{ - - if (pthread_create(thread, NULL, proc, arg) != 0) { - malloc_printf("Error in pthread_create()\n"); - exit(1); - } -} - -void -je_thread_join(je_thread_t thread, void **ret) -{ - - pthread_join(thread, ret); -} -#endif diff --git a/test/mremap.c b/test/mremap.c deleted file mode 100644 index 47efa7c4..00000000 --- a/test/mremap.c +++ /dev/null @@ -1,60 +0,0 @@ -#define JEMALLOC_MANGLE -#include "jemalloc_test.h" - -int -main(void) -{ - int ret, err; - size_t sz, lg_chunk, chunksize, i; - char *p, *q; - - malloc_printf("Test begin\n"); - - sz = sizeof(lg_chunk); - if ((err = mallctl("opt.lg_chunk", &lg_chunk, &sz, NULL, 0))) { - assert(err != ENOENT); - malloc_printf("%s(): Error in mallctl(): %s\n", __func__, - strerror(err)); - ret = 1; - goto label_return; - } - chunksize = ((size_t)1U) << lg_chunk; - - p = (char *)malloc(chunksize); - if (p == NULL) { - malloc_printf("malloc(%zu) --> %p\n", chunksize, p); - ret = 1; - goto label_return; - } - memset(p, 'a', chunksize); - - q = (char *)realloc(p, chunksize * 2); - if (q == NULL) { - malloc_printf("realloc(%p, %zu) --> 
%p\n", p, chunksize * 2, - q); - ret = 1; - goto label_return; - } - for (i = 0; i < chunksize; i++) { - assert(q[i] == 'a'); - } - - p = q; - - q = (char *)realloc(p, chunksize); - if (q == NULL) { - malloc_printf("realloc(%p, %zu) --> %p\n", p, chunksize, q); - ret = 1; - goto label_return; - } - for (i = 0; i < chunksize; i++) { - assert(q[i] == 'a'); - } - - free(q); - - ret = 0; -label_return: - malloc_printf("Test end\n"); - return (ret); -} diff --git a/test/mremap.exp b/test/mremap.exp deleted file mode 100644 index 369a88dd..00000000 --- a/test/mremap.exp +++ /dev/null @@ -1,2 +0,0 @@ -Test begin -Test end diff --git a/test/posix_memalign.exp b/test/posix_memalign.exp deleted file mode 100644 index b5061c72..00000000 --- a/test/posix_memalign.exp +++ /dev/null @@ -1,25 +0,0 @@ -Test begin -Alignment: 8 -Alignment: 16 -Alignment: 32 -Alignment: 64 -Alignment: 128 -Alignment: 256 -Alignment: 512 -Alignment: 1024 -Alignment: 2048 -Alignment: 4096 -Alignment: 8192 -Alignment: 16384 -Alignment: 32768 -Alignment: 65536 -Alignment: 131072 -Alignment: 262144 -Alignment: 524288 -Alignment: 1048576 -Alignment: 2097152 -Alignment: 4194304 -Alignment: 8388608 -Alignment: 16777216 -Alignment: 33554432 -Test end diff --git a/test/rallocm.c b/test/rallocm.c deleted file mode 100644 index c5dedf48..00000000 --- a/test/rallocm.c +++ /dev/null @@ -1,127 +0,0 @@ -#define JEMALLOC_MANGLE -#include "jemalloc_test.h" - -int -main(void) -{ - size_t pagesize; - void *p, *q; - size_t sz, tsz; - int r; - - malloc_printf("Test begin\n"); - - /* Get page size. */ - { -#ifdef _WIN32 - SYSTEM_INFO si; - GetSystemInfo(&si); - pagesize = (size_t)si.dwPageSize; -#else - long result = sysconf(_SC_PAGESIZE); - assert(result != -1); - pagesize = (size_t)result; -#endif - } - - r = allocm(&p, &sz, 42, 0); - if (r != ALLOCM_SUCCESS) { - malloc_printf("Unexpected allocm() error\n"); - abort(); - } - - q = p; - r = rallocm(&q, &tsz, sz, 0, ALLOCM_NO_MOVE); - if (r != ALLOCM_SUCCESS) - malloc_printf("Unexpected rallocm() error\n"); - if (q != p) - malloc_printf("Unexpected object move\n"); - if (tsz != sz) { - malloc_printf("Unexpected size change: %zu --> %zu\n", - sz, tsz); - } - - q = p; - r = rallocm(&q, &tsz, sz, 5, ALLOCM_NO_MOVE); - if (r != ALLOCM_SUCCESS) - malloc_printf("Unexpected rallocm() error\n"); - if (q != p) - malloc_printf("Unexpected object move\n"); - if (tsz != sz) { - malloc_printf("Unexpected size change: %zu --> %zu\n", - sz, tsz); - } - - q = p; - r = rallocm(&q, &tsz, sz + 5, 0, ALLOCM_NO_MOVE); - if (r != ALLOCM_ERR_NOT_MOVED) - malloc_printf("Unexpected rallocm() result\n"); - if (q != p) - malloc_printf("Unexpected object move\n"); - if (tsz != sz) { - malloc_printf("Unexpected size change: %zu --> %zu\n", - sz, tsz); - } - - q = p; - r = rallocm(&q, &tsz, sz + 5, 0, 0); - if (r != ALLOCM_SUCCESS) - malloc_printf("Unexpected rallocm() error\n"); - if (q == p) - malloc_printf("Expected object move\n"); - if (tsz == sz) { - malloc_printf("Expected size change: %zu --> %zu\n", - sz, tsz); - } - p = q; - sz = tsz; - - r = rallocm(&q, &tsz, pagesize*2, 0, 0); - if (r != ALLOCM_SUCCESS) - malloc_printf("Unexpected rallocm() error\n"); - if (q == p) - malloc_printf("Expected object move\n"); - if (tsz == sz) { - malloc_printf("Expected size change: %zu --> %zu\n", - sz, tsz); - } - p = q; - sz = tsz; - - r = rallocm(&q, &tsz, pagesize*4, 0, 0); - if (r != ALLOCM_SUCCESS) - malloc_printf("Unexpected rallocm() error\n"); - if (tsz == sz) { - malloc_printf("Expected size change: %zu --> %zu\n", - sz, 
tsz); - } - p = q; - sz = tsz; - - r = rallocm(&q, &tsz, pagesize*2, 0, ALLOCM_NO_MOVE); - if (r != ALLOCM_SUCCESS) - malloc_printf("Unexpected rallocm() error\n"); - if (q != p) - malloc_printf("Unexpected object move\n"); - if (tsz == sz) { - malloc_printf("Expected size change: %zu --> %zu\n", - sz, tsz); - } - sz = tsz; - - r = rallocm(&q, &tsz, pagesize*4, 0, ALLOCM_NO_MOVE); - if (r != ALLOCM_SUCCESS) - malloc_printf("Unexpected rallocm() error\n"); - if (q != p) - malloc_printf("Unexpected object move\n"); - if (tsz == sz) { - malloc_printf("Expected size change: %zu --> %zu\n", - sz, tsz); - } - sz = tsz; - - dallocm(p, 0); - - malloc_printf("Test end\n"); - return (0); -} diff --git a/test/rallocm.exp b/test/rallocm.exp deleted file mode 100644 index 369a88dd..00000000 --- a/test/rallocm.exp +++ /dev/null @@ -1,2 +0,0 @@ -Test begin -Test end diff --git a/test/src/SFMT.c b/test/src/SFMT.c new file mode 100644 index 00000000..433d7f6e --- /dev/null +++ b/test/src/SFMT.c @@ -0,0 +1,716 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +/** + * @file SFMT.c + * @brief SIMD oriented Fast Mersenne Twister(SFMT) + * + * @author Mutsuo Saito (Hiroshima University) + * @author Makoto Matsumoto (Hiroshima University) + * + * Copyright (C) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. 
+ * + * The new BSD License is applied to this software, see LICENSE.txt + */ +#define SFMT_C_ +#include "test/jemalloc_test.h" +#include "test/SFMT-params.h" + +#if defined(__BIG_ENDIAN__) && !defined(__amd64) && !defined(BIG_ENDIAN64) +#define BIG_ENDIAN64 1 +#endif +#if defined(HAVE_ALTIVEC) && !defined(BIG_ENDIAN64) +#define BIG_ENDIAN64 1 +#endif +#if defined(ONLY64) && !defined(BIG_ENDIAN64) + #if defined(__GNUC__) + #error "-DONLY64 must be specified with -DBIG_ENDIAN64" + #endif +#undef ONLY64 +#endif +/*------------------------------------------------------ + 128-bit SIMD data type for Altivec, SSE2 or standard C + ------------------------------------------------------*/ +#if defined(HAVE_ALTIVEC) +/** 128-bit data structure */ +union W128_T { + vector unsigned int s; + uint32_t u[4]; +}; +/** 128-bit data type */ +typedef union W128_T w128_t; + +#elif defined(HAVE_SSE2) +/** 128-bit data structure */ +union W128_T { + __m128i si; + uint32_t u[4]; +}; +/** 128-bit data type */ +typedef union W128_T w128_t; + +#else + +/** 128-bit data structure */ +struct W128_T { + uint32_t u[4]; +}; +/** 128-bit data type */ +typedef struct W128_T w128_t; + +#endif + +struct sfmt_s { + /** the 128-bit internal state array */ + w128_t sfmt[N]; + /** index counter to the 32-bit internal state array */ + int idx; + /** a flag: it is 0 if and only if the internal state is not yet + * initialized. */ + int initialized; +}; + +/*-------------------------------------- + FILE GLOBAL VARIABLES + internal state, index counter and flag + --------------------------------------*/ + +/** a parity check vector which certificate the period of 2^{MEXP} */ +static uint32_t parity[4] = {PARITY1, PARITY2, PARITY3, PARITY4}; + +/*---------------- + STATIC FUNCTIONS + ----------------*/ +JEMALLOC_INLINE_C int idxof(int i); +#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2)) +JEMALLOC_INLINE_C void rshift128(w128_t *out, w128_t const *in, int shift); +JEMALLOC_INLINE_C void lshift128(w128_t *out, w128_t const *in, int shift); +#endif +JEMALLOC_INLINE_C void gen_rand_all(sfmt_t *ctx); +JEMALLOC_INLINE_C void gen_rand_array(sfmt_t *ctx, w128_t *array, int size); +JEMALLOC_INLINE_C uint32_t func1(uint32_t x); +JEMALLOC_INLINE_C uint32_t func2(uint32_t x); +static void period_certification(sfmt_t *ctx); +#if defined(BIG_ENDIAN64) && !defined(ONLY64) +JEMALLOC_INLINE_C void swap(w128_t *array, int size); +#endif + +#if defined(HAVE_ALTIVEC) + #include "test/SFMT-alti.h" +#elif defined(HAVE_SSE2) + #include "test/SFMT-sse2.h" +#endif + +/** + * This function simulate a 64-bit index of LITTLE ENDIAN + * in BIG ENDIAN machine. + */ +#ifdef ONLY64 +JEMALLOC_INLINE_C int idxof(int i) { + return i ^ 1; +} +#else +JEMALLOC_INLINE_C int idxof(int i) { + return i; +} +#endif +/** + * This function simulates SIMD 128-bit right shift by the standard C. + * The 128-bit integer given in in is shifted by (shift * 8) bits. + * This function simulates the LITTLE ENDIAN SIMD. 
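+ * (The value is handled as two 64-bit lanes; bits shifted out of the high
+ * lane are ORed into the low lane.)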
+ * @param out the output of this function + * @param in the 128-bit data to be shifted + * @param shift the shift value + */ +#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2)) +#ifdef ONLY64 +JEMALLOC_INLINE_C void rshift128(w128_t *out, w128_t const *in, int shift) { + uint64_t th, tl, oh, ol; + + th = ((uint64_t)in->u[2] << 32) | ((uint64_t)in->u[3]); + tl = ((uint64_t)in->u[0] << 32) | ((uint64_t)in->u[1]); + + oh = th >> (shift * 8); + ol = tl >> (shift * 8); + ol |= th << (64 - shift * 8); + out->u[0] = (uint32_t)(ol >> 32); + out->u[1] = (uint32_t)ol; + out->u[2] = (uint32_t)(oh >> 32); + out->u[3] = (uint32_t)oh; +} +#else +JEMALLOC_INLINE_C void rshift128(w128_t *out, w128_t const *in, int shift) { + uint64_t th, tl, oh, ol; + + th = ((uint64_t)in->u[3] << 32) | ((uint64_t)in->u[2]); + tl = ((uint64_t)in->u[1] << 32) | ((uint64_t)in->u[0]); + + oh = th >> (shift * 8); + ol = tl >> (shift * 8); + ol |= th << (64 - shift * 8); + out->u[1] = (uint32_t)(ol >> 32); + out->u[0] = (uint32_t)ol; + out->u[3] = (uint32_t)(oh >> 32); + out->u[2] = (uint32_t)oh; +} +#endif +/** + * This function simulates SIMD 128-bit left shift by the standard C. + * The 128-bit integer given in in is shifted by (shift * 8) bits. + * This function simulates the LITTLE ENDIAN SIMD. + * @param out the output of this function + * @param in the 128-bit data to be shifted + * @param shift the shift value + */ +#ifdef ONLY64 +JEMALLOC_INLINE_C void lshift128(w128_t *out, w128_t const *in, int shift) { + uint64_t th, tl, oh, ol; + + th = ((uint64_t)in->u[2] << 32) | ((uint64_t)in->u[3]); + tl = ((uint64_t)in->u[0] << 32) | ((uint64_t)in->u[1]); + + oh = th << (shift * 8); + ol = tl << (shift * 8); + oh |= tl >> (64 - shift * 8); + out->u[0] = (uint32_t)(ol >> 32); + out->u[1] = (uint32_t)ol; + out->u[2] = (uint32_t)(oh >> 32); + out->u[3] = (uint32_t)oh; +} +#else +JEMALLOC_INLINE_C void lshift128(w128_t *out, w128_t const *in, int shift) { + uint64_t th, tl, oh, ol; + + th = ((uint64_t)in->u[3] << 32) | ((uint64_t)in->u[2]); + tl = ((uint64_t)in->u[1] << 32) | ((uint64_t)in->u[0]); + + oh = th << (shift * 8); + ol = tl << (shift * 8); + oh |= tl >> (64 - shift * 8); + out->u[1] = (uint32_t)(ol >> 32); + out->u[0] = (uint32_t)ol; + out->u[3] = (uint32_t)(oh >> 32); + out->u[2] = (uint32_t)oh; +} +#endif +#endif + +/** + * This function represents the recursion formula. 
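+ * Roughly, per 32-bit lane:
+ *   r = a ^ (a <<128 (SL2*8)) ^ ((b >> SR1) & MSK) ^ (c >>128 (SR2*8))
+ *       ^ (d << SL1)
+ * where <<128/>>128 denote the byte-wise 128-bit shifts above and MSK is
+ * the lane's mask (MSK1..MSK4).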
+ * @param r output + * @param a a 128-bit part of the internal state array + * @param b a 128-bit part of the internal state array + * @param c a 128-bit part of the internal state array + * @param d a 128-bit part of the internal state array + */ +#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2)) +#ifdef ONLY64 +JEMALLOC_INLINE_C void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c, + w128_t *d) { + w128_t x; + w128_t y; + + lshift128(&x, a, SL2); + rshift128(&y, c, SR2); + r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK2) ^ y.u[0] + ^ (d->u[0] << SL1); + r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK1) ^ y.u[1] + ^ (d->u[1] << SL1); + r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK4) ^ y.u[2] + ^ (d->u[2] << SL1); + r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK3) ^ y.u[3] + ^ (d->u[3] << SL1); +} +#else +JEMALLOC_INLINE_C void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c, + w128_t *d) { + w128_t x; + w128_t y; + + lshift128(&x, a, SL2); + rshift128(&y, c, SR2); + r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK1) ^ y.u[0] + ^ (d->u[0] << SL1); + r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK2) ^ y.u[1] + ^ (d->u[1] << SL1); + r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK3) ^ y.u[2] + ^ (d->u[2] << SL1); + r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK4) ^ y.u[3] + ^ (d->u[3] << SL1); +} +#endif +#endif + +#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2)) +/** + * This function fills the internal state array with pseudorandom + * integers. + */ +JEMALLOC_INLINE_C void gen_rand_all(sfmt_t *ctx) { + int i; + w128_t *r1, *r2; + + r1 = &ctx->sfmt[N - 2]; + r2 = &ctx->sfmt[N - 1]; + for (i = 0; i < N - POS1; i++) { + do_recursion(&ctx->sfmt[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1], r1, + r2); + r1 = r2; + r2 = &ctx->sfmt[i]; + } + for (; i < N; i++) { + do_recursion(&ctx->sfmt[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1 - N], r1, + r2); + r1 = r2; + r2 = &ctx->sfmt[i]; + } +} + +/** + * This function fills the user-specified array with pseudorandom + * integers. + * + * @param array an 128-bit array to be filled by pseudorandom numbers. + * @param size number of 128-bit pseudorandom numbers to be generated. 
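+ * size is assumed to be at least N; the tail of the generated array is
+ * copied back into the internal state so that generation can resume from
+ * it.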
+ */ +JEMALLOC_INLINE_C void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { + int i, j; + w128_t *r1, *r2; + + r1 = &ctx->sfmt[N - 2]; + r2 = &ctx->sfmt[N - 1]; + for (i = 0; i < N - POS1; i++) { + do_recursion(&array[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1], r1, r2); + r1 = r2; + r2 = &array[i]; + } + for (; i < N; i++) { + do_recursion(&array[i], &ctx->sfmt[i], &array[i + POS1 - N], r1, r2); + r1 = r2; + r2 = &array[i]; + } + for (; i < size - N; i++) { + do_recursion(&array[i], &array[i - N], &array[i + POS1 - N], r1, r2); + r1 = r2; + r2 = &array[i]; + } + for (j = 0; j < 2 * N - size; j++) { + ctx->sfmt[j] = array[j + size - N]; + } + for (; i < size; i++, j++) { + do_recursion(&array[i], &array[i - N], &array[i + POS1 - N], r1, r2); + r1 = r2; + r2 = &array[i]; + ctx->sfmt[j] = array[i]; + } +} +#endif + +#if defined(BIG_ENDIAN64) && !defined(ONLY64) && !defined(HAVE_ALTIVEC) +JEMALLOC_INLINE_C void swap(w128_t *array, int size) { + int i; + uint32_t x, y; + + for (i = 0; i < size; i++) { + x = array[i].u[0]; + y = array[i].u[2]; + array[i].u[0] = array[i].u[1]; + array[i].u[2] = array[i].u[3]; + array[i].u[1] = x; + array[i].u[3] = y; + } +} +#endif +/** + * This function represents a function used in the initialization + * by init_by_array + * @param x 32-bit integer + * @return 32-bit integer + */ +static uint32_t func1(uint32_t x) { + return (x ^ (x >> 27)) * (uint32_t)1664525UL; +} + +/** + * This function represents a function used in the initialization + * by init_by_array + * @param x 32-bit integer + * @return 32-bit integer + */ +static uint32_t func2(uint32_t x) { + return (x ^ (x >> 27)) * (uint32_t)1566083941UL; +} + +/** + * This function certificate the period of 2^{MEXP} + */ +static void period_certification(sfmt_t *ctx) { + int inner = 0; + int i, j; + uint32_t work; + uint32_t *psfmt32 = &ctx->sfmt[0].u[0]; + + for (i = 0; i < 4; i++) + inner ^= psfmt32[idxof(i)] & parity[i]; + for (i = 16; i > 0; i >>= 1) + inner ^= inner >> i; + inner &= 1; + /* check OK */ + if (inner == 1) { + return; + } + /* check NG, and modification */ + for (i = 0; i < 4; i++) { + work = 1; + for (j = 0; j < 32; j++) { + if ((work & parity[i]) != 0) { + psfmt32[idxof(i)] ^= work; + return; + } + work = work << 1; + } + } +} + +/*---------------- + PUBLIC FUNCTIONS + ----------------*/ +/** + * This function returns the identification string. + * The string shows the word size, the Mersenne exponent, + * and all parameters of this generator. + */ +const char *get_idstring(void) { + return IDSTR; +} + +/** + * This function returns the minimum size of array used for \b + * fill_array32() function. + * @return minimum size of array used for fill_array32() function. + */ +int get_min_array_size32(void) { + return N32; +} + +/** + * This function returns the minimum size of array used for \b + * fill_array64() function. + * @return minimum size of array used for fill_array64() function. + */ +int get_min_array_size64(void) { + return N64; +} + +#ifndef ONLY64 +/** + * This function generates and returns 32-bit pseudorandom number. + * init_gen_rand or init_by_array must be called before this function. + * @return 32-bit pseudorandom number + */ +uint32_t gen_rand32(sfmt_t *ctx) { + uint32_t r; + uint32_t *psfmt32 = &ctx->sfmt[0].u[0]; + + assert(ctx->initialized); + if (ctx->idx >= N32) { + gen_rand_all(ctx); + ctx->idx = 0; + } + r = psfmt32[ctx->idx++]; + return r; +} + +/* Generate a random integer in [0..limit). 
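+   Rejection sampling: draws at or above the largest multiple of limit
+   that fits in 32 bits are retried, which avoids modulo bias.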
*/ +uint32_t gen_rand32_range(sfmt_t *ctx, uint32_t limit) { + uint32_t ret, above; + + above = 0xffffffffU - (0xffffffffU % limit); + while (1) { + ret = gen_rand32(ctx); + if (ret < above) { + ret %= limit; + break; + } + } + return ret; +} +#endif +/** + * This function generates and returns 64-bit pseudorandom number. + * init_gen_rand or init_by_array must be called before this function. + * The function gen_rand64 should not be called after gen_rand32, + * unless an initialization is again executed. + * @return 64-bit pseudorandom number + */ +uint64_t gen_rand64(sfmt_t *ctx) { +#if defined(BIG_ENDIAN64) && !defined(ONLY64) + uint32_t r1, r2; + uint32_t *psfmt32 = &ctx->sfmt[0].u[0]; +#else + uint64_t r; + uint64_t *psfmt64 = (uint64_t *)&ctx->sfmt[0].u[0]; +#endif + + assert(ctx->initialized); + assert(ctx->idx % 2 == 0); + + if (ctx->idx >= N32) { + gen_rand_all(ctx); + ctx->idx = 0; + } +#if defined(BIG_ENDIAN64) && !defined(ONLY64) + r1 = psfmt32[ctx->idx]; + r2 = psfmt32[ctx->idx + 1]; + ctx->idx += 2; + return ((uint64_t)r2 << 32) | r1; +#else + r = psfmt64[ctx->idx / 2]; + ctx->idx += 2; + return r; +#endif +} + +/* Generate a random integer in [0..limit). */ +uint64_t gen_rand64_range(sfmt_t *ctx, uint64_t limit) { + uint64_t ret, above; + + above = 0xffffffffffffffffLLU - (0xffffffffffffffffLLU % limit); + while (1) { + ret = gen_rand64(ctx); + if (ret < above) { + ret %= limit; + break; + } + } + return ret; +} + +#ifndef ONLY64 +/** + * This function generates pseudorandom 32-bit integers in the + * specified array[] by one call. The number of pseudorandom integers + * is specified by the argument size, which must be at least 624 and a + * multiple of four. The generation by this function is much faster + * than the following gen_rand function. + * + * For initialization, init_gen_rand or init_by_array must be called + * before the first call of this function. This function can not be + * used after calling gen_rand function, without initialization. + * + * @param array an array where pseudorandom 32-bit integers are filled + * by this function. The pointer to the array must be \b "aligned" + * (namely, must be a multiple of 16) in the SIMD version, since it + * refers to the address of a 128-bit integer. In the standard C + * version, the pointer is arbitrary. + * + * @param size the number of 32-bit pseudorandom integers to be + * generated. size must be a multiple of 4, and greater than or equal + * to (MEXP / 128 + 1) * 4. + * + * @note \b memalign or \b posix_memalign is available to get aligned + * memory. Mac OSX doesn't have these functions, but \b malloc of OSX + * returns the pointer to the aligned memory block. + */ +void fill_array32(sfmt_t *ctx, uint32_t *array, int size) { + assert(ctx->initialized); + assert(ctx->idx == N32); + assert(size % 4 == 0); + assert(size >= N32); + + gen_rand_array(ctx, (w128_t *)array, size / 4); + ctx->idx = N32; +} +#endif + +/** + * This function generates pseudorandom 64-bit integers in the + * specified array[] by one call. The number of pseudorandom integers + * is specified by the argument size, which must be at least 312 and a + * multiple of two. The generation by this function is much faster + * than the following gen_rand function. + * + * For initialization, init_gen_rand or init_by_array must be called + * before the first call of this function. This function can not be + * used after calling gen_rand function, without initialization. 
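+ * (It uses the same gen_rand_array() core as fill_array32(); on
+ * big-endian targets the two 32-bit halves of each 64-bit result are
+ * then swapped.)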
+ * + * @param array an array where pseudorandom 64-bit integers are filled + * by this function. The pointer to the array must be "aligned" + * (namely, must be a multiple of 16) in the SIMD version, since it + * refers to the address of a 128-bit integer. In the standard C + * version, the pointer is arbitrary. + * + * @param size the number of 64-bit pseudorandom integers to be + * generated. size must be a multiple of 2, and greater than or equal + * to (MEXP / 128 + 1) * 2 + * + * @note \b memalign or \b posix_memalign is available to get aligned + * memory. Mac OSX doesn't have these functions, but \b malloc of OSX + * returns the pointer to the aligned memory block. + */ +void fill_array64(sfmt_t *ctx, uint64_t *array, int size) { + assert(ctx->initialized); + assert(ctx->idx == N32); + assert(size % 2 == 0); + assert(size >= N64); + + gen_rand_array(ctx, (w128_t *)array, size / 2); + ctx->idx = N32; + +#if defined(BIG_ENDIAN64) && !defined(ONLY64) + swap((w128_t *)array, size /2); +#endif +} + +/** + * This function initializes the internal state array with a 32-bit + * integer seed. + * + * @param seed a 32-bit integer used as the seed. + */ +sfmt_t *init_gen_rand(uint32_t seed) { + void *p; + sfmt_t *ctx; + int i; + uint32_t *psfmt32; + + if (posix_memalign(&p, sizeof(w128_t), sizeof(sfmt_t)) != 0) { + return NULL; + } + ctx = (sfmt_t *)p; + psfmt32 = &ctx->sfmt[0].u[0]; + + psfmt32[idxof(0)] = seed; + for (i = 1; i < N32; i++) { + psfmt32[idxof(i)] = 1812433253UL * (psfmt32[idxof(i - 1)] + ^ (psfmt32[idxof(i - 1)] >> 30)) + + i; + } + ctx->idx = N32; + period_certification(ctx); + ctx->initialized = 1; + + return ctx; +} + +/** + * This function initializes the internal state array, + * with an array of 32-bit integers used as the seeds + * @param init_key the array of 32-bit integers, used as a seed. + * @param key_length the length of init_key. 
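+ * The state is seeded with 0x8b filler bytes, the key is mixed in over
+ * max(key_length + 1, N32) rounds, and N32 finishing rounds then diffuse
+ * the result.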
+ */ +sfmt_t *init_by_array(uint32_t *init_key, int key_length) { + void *p; + sfmt_t *ctx; + int i, j, count; + uint32_t r; + int lag; + int mid; + int size = N * 4; + uint32_t *psfmt32; + + if (posix_memalign(&p, sizeof(w128_t), sizeof(sfmt_t)) != 0) { + return NULL; + } + ctx = (sfmt_t *)p; + psfmt32 = &ctx->sfmt[0].u[0]; + + if (size >= 623) { + lag = 11; + } else if (size >= 68) { + lag = 7; + } else if (size >= 39) { + lag = 5; + } else { + lag = 3; + } + mid = (size - lag) / 2; + + memset(ctx->sfmt, 0x8b, sizeof(ctx->sfmt)); + if (key_length + 1 > N32) { + count = key_length + 1; + } else { + count = N32; + } + r = func1(psfmt32[idxof(0)] ^ psfmt32[idxof(mid)] + ^ psfmt32[idxof(N32 - 1)]); + psfmt32[idxof(mid)] += r; + r += key_length; + psfmt32[idxof(mid + lag)] += r; + psfmt32[idxof(0)] = r; + + count--; + for (i = 1, j = 0; (j < count) && (j < key_length); j++) { + r = func1(psfmt32[idxof(i)] ^ psfmt32[idxof((i + mid) % N32)] + ^ psfmt32[idxof((i + N32 - 1) % N32)]); + psfmt32[idxof((i + mid) % N32)] += r; + r += init_key[j] + i; + psfmt32[idxof((i + mid + lag) % N32)] += r; + psfmt32[idxof(i)] = r; + i = (i + 1) % N32; + } + for (; j < count; j++) { + r = func1(psfmt32[idxof(i)] ^ psfmt32[idxof((i + mid) % N32)] + ^ psfmt32[idxof((i + N32 - 1) % N32)]); + psfmt32[idxof((i + mid) % N32)] += r; + r += i; + psfmt32[idxof((i + mid + lag) % N32)] += r; + psfmt32[idxof(i)] = r; + i = (i + 1) % N32; + } + for (j = 0; j < N32; j++) { + r = func2(psfmt32[idxof(i)] + psfmt32[idxof((i + mid) % N32)] + + psfmt32[idxof((i + N32 - 1) % N32)]); + psfmt32[idxof((i + mid) % N32)] ^= r; + r -= i; + psfmt32[idxof((i + mid + lag) % N32)] ^= r; + psfmt32[idxof(i)] = r; + i = (i + 1) % N32; + } + + ctx->idx = N32; + period_certification(ctx); + ctx->initialized = 1; + + return ctx; +} + +void fini_gen_rand(sfmt_t *ctx) { + assert(ctx != NULL); + + ctx->initialized = 0; + free(ctx); +} diff --git a/test/src/math.c b/test/src/math.c new file mode 100644 index 00000000..887a3639 --- /dev/null +++ b/test/src/math.c @@ -0,0 +1,2 @@ +#define MATH_C_ +#include "test/jemalloc_test.h" diff --git a/test/src/mtx.c b/test/src/mtx.c new file mode 100644 index 00000000..41b95d59 --- /dev/null +++ b/test/src/mtx.c @@ -0,0 +1,62 @@ +#include "test/jemalloc_test.h" + +bool +mtx_init(mtx_t *mtx) +{ + +#ifdef _WIN32 + if (!InitializeCriticalSectionAndSpinCount(&mtx->lock, _CRT_SPINCOUNT)) + return (true); +#elif (defined(JEMALLOC_OSSPIN)) + mtx->lock = 0; +#else + pthread_mutexattr_t attr; + + if (pthread_mutexattr_init(&attr) != 0) + return (true); + pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_DEFAULT); + if (pthread_mutex_init(&mtx->lock, &attr) != 0) { + pthread_mutexattr_destroy(&attr); + return (true); + } + pthread_mutexattr_destroy(&attr); +#endif + return (false); +} + +void +mtx_fini(mtx_t *mtx) +{ + +#ifdef _WIN32 +#elif (defined(JEMALLOC_OSSPIN)) +#else + pthread_mutex_destroy(&mtx->lock); +#endif +} + +void +mtx_lock(mtx_t *mtx) +{ + +#ifdef _WIN32 + EnterCriticalSection(&mtx->lock); +#elif (defined(JEMALLOC_OSSPIN)) + OSSpinLockLock(&mtx->lock); +#else + pthread_mutex_lock(&mtx->lock); +#endif +} + +void +mtx_unlock(mtx_t *mtx) +{ + +#ifdef _WIN32 + LeaveCriticalSection(&mtx->lock); +#elif (defined(JEMALLOC_OSSPIN)) + OSSpinLockUnlock(&mtx->lock); +#else + pthread_mutex_unlock(&mtx->lock); +#endif +} diff --git a/test/src/test.c b/test/src/test.c new file mode 100644 index 00000000..6552e377 --- /dev/null +++ b/test/src/test.c @@ -0,0 +1,100 @@ +#include "test/jemalloc_test.h" + +static unsigned 
test_count = 0; +static test_status_t test_counts[test_status_count] = {0, 0, 0}; +static test_status_t test_status = test_status_pass; +static const char * test_name = ""; + +JEMALLOC_ATTR(format(printf, 1, 2)) +void +test_skip(const char *format, ...) +{ + va_list ap; + + va_start(ap, format); + malloc_vcprintf(NULL, NULL, format, ap); + va_end(ap); + malloc_printf("\n"); + test_status = test_status_skip; +} + +JEMALLOC_ATTR(format(printf, 1, 2)) +void +test_fail(const char *format, ...) +{ + va_list ap; + + va_start(ap, format); + malloc_vcprintf(NULL, NULL, format, ap); + va_end(ap); + malloc_printf("\n"); + test_status = test_status_fail; +} + +static const char * +test_status_string(test_status_t test_status) +{ + + switch (test_status) { + case test_status_pass: return "pass"; + case test_status_skip: return "skip"; + case test_status_fail: return "fail"; + default: not_reached(); + } +} + +void +p_test_init(const char *name) +{ + + test_count++; + test_status = test_status_pass; + test_name = name; +} + +void +p_test_fini(void) +{ + + test_counts[test_status]++; + malloc_printf("%s: %s\n", test_name, test_status_string(test_status)); +} + +test_status_t +p_test(test_t* t, ...) +{ + test_status_t ret = test_status_pass; + va_list ap; + + va_start(ap, t); + for (; t != NULL; t = va_arg(ap, test_t*)) { + t(); + if (test_status > ret) + ret = test_status; + } + va_end(ap); + + malloc_printf("--- %s: %u/%u, %s: %u/%u, %s: %u/%u ---\n", + test_status_string(test_status_pass), + test_counts[test_status_pass], test_count, + test_status_string(test_status_skip), + test_counts[test_status_skip], test_count, + test_status_string(test_status_fail), + test_counts[test_status_fail], test_count); + + return (ret); +} + +void +p_test_fail(const char *format, ...) +{ + va_list ap; + + va_start(ap, format); + malloc_vcprintf(NULL, NULL, format, ap); + format = va_arg(ap, const char *); + malloc_vcprintf(NULL, NULL, format, ap); + va_end(ap); + malloc_printf("\n"); + test_status = test_status_fail; +} diff --git a/test/src/thd.c b/test/src/thd.c new file mode 100644 index 00000000..233242a1 --- /dev/null +++ b/test/src/thd.c @@ -0,0 +1,35 @@ +#include "test/jemalloc_test.h" + +#ifdef _WIN32 +void +thd_create(thd_t *thd, void *(*proc)(void *), void *arg) +{ + LPTHREAD_START_ROUTINE routine = (LPTHREAD_START_ROUTINE)proc; + *thd = CreateThread(NULL, 0, routine, arg, 0, NULL); + if (*thd == NULL) + test_fail("Error in CreateThread()\n"); +} + +void +thd_join(thd_t thd, void **ret) +{ + + WaitForSingleObject(thd, INFINITE); +} + +#else +void +thd_create(thd_t *thd, void *(*proc)(void *), void *arg) +{ + + if (pthread_create(thd, NULL, proc, arg) != 0) + test_fail("Error in pthread_create()\n"); +} + +void +thd_join(thd_t thd, void **ret) +{ + + pthread_join(thd, ret); +} +#endif diff --git a/test/test.sh.in b/test/test.sh.in new file mode 100644 index 00000000..a39f99f6 --- /dev/null +++ b/test/test.sh.in @@ -0,0 +1,53 @@ +#!/bin/sh + +case @abi@ in + macho) + export DYLD_FALLBACK_LIBRARY_PATH="@objroot@lib" + ;; + pecoff) + export PATH="${PATH}:@objroot@lib" + ;; + *) + ;; +esac + +# Corresponds to test_status_t. +pass_code=0 +skip_code=1 +fail_code=2 + +pass_count=0 +skip_count=0 +fail_count=0 +for t in $@; do + if [ $pass_count -ne 0 -o $skip_count -ne 0 -o $fail_count != 0 ] ; then + echo + fi + echo "=== ${t} ===" + ${t}@exe@ @abs_srcroot@ @abs_objroot@ + result_code=$? 
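+  # Any status outside the three test_status_t codes above means the
+  # test binary did not run to completion (e.g., it crashed), so the
+  # harness bails out immediately rather than counting it.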
+  case ${result_code} in
+    ${pass_code})
+      pass_count=$((pass_count+1))
+      ;;
+    ${skip_code})
+      skip_count=$((skip_count+1))
+      ;;
+    ${fail_code})
+      fail_count=$((fail_count+1))
+      ;;
+    *)
+      echo "Test harness error" 1>&2
+      exit 1
+  esac
+done
+
+total_count=`expr ${pass_count} + ${skip_count} + ${fail_count}`
+echo
+echo "Test suite summary: pass: ${pass_count}/${total_count}, skip: ${skip_count}/${total_count}, fail: ${fail_count}/${total_count}"
+
+if [ ${fail_count} -eq 0 ] ; then
+  exit 0
+else
+  exit 1
+fi
diff --git a/test/thread_arena.c b/test/thread_arena.c
deleted file mode 100644
index 6b9bc9cf..00000000
--- a/test/thread_arena.c
+++ /dev/null
@@ -1,85 +0,0 @@
-#define JEMALLOC_MANGLE
-#include "jemalloc_test.h"
-
-#define NTHREADS 10
-
-void *
-je_thread_start(void *arg)
-{
-    unsigned main_arena_ind = *(unsigned *)arg;
-    void *p;
-    unsigned arena_ind;
-    size_t size;
-    int err;
-
-    p = malloc(1);
-    if (p == NULL) {
-        malloc_printf("%s(): Error in malloc()\n", __func__);
-        return (void *)1;
-    }
-    free(p);
-
-    size = sizeof(arena_ind);
-    if ((err = mallctl("thread.arena", &arena_ind, &size, &main_arena_ind,
-        sizeof(main_arena_ind)))) {
-        malloc_printf("%s(): Error in mallctl(): %s\n", __func__,
-            strerror(err));
-        return (void *)1;
-    }
-
-    size = sizeof(arena_ind);
-    if ((err = mallctl("thread.arena", &arena_ind, &size, NULL,
-        0))) {
-        malloc_printf("%s(): Error in mallctl(): %s\n", __func__,
-            strerror(err));
-        return (void *)1;
-    }
-    assert(arena_ind == main_arena_ind);
-
-    return (NULL);
-}
-
-int
-main(void)
-{
-    int ret = 0;
-    void *p;
-    unsigned arena_ind;
-    size_t size;
-    int err;
-    je_thread_t threads[NTHREADS];
-    unsigned i;
-
-    malloc_printf("Test begin\n");
-
-    p = malloc(1);
-    if (p == NULL) {
-        malloc_printf("%s(): Error in malloc()\n", __func__);
-        ret = 1;
-        goto label_return;
-    }
-
-    size = sizeof(arena_ind);
-    if ((err = mallctl("thread.arena", &arena_ind, &size, NULL, 0))) {
-        malloc_printf("%s(): Error in mallctl(): %s\n", __func__,
-            strerror(err));
-        ret = 1;
-        goto label_return;
-    }
-
-    for (i = 0; i < NTHREADS; i++) {
-        je_thread_create(&threads[i], je_thread_start,
-            (void *)&arena_ind);
-    }
-
-    for (i = 0; i < NTHREADS; i++) {
-        intptr_t join_ret;
-        je_thread_join(threads[i], (void *)&join_ret);
-        if (join_ret != 0)
-            ret = 1;
-    }
-
-label_return:
-    malloc_printf("Test end\n");
-    return (ret);
-}
diff --git a/test/thread_arena.exp b/test/thread_arena.exp
deleted file mode 100644
index 369a88dd..00000000
--- a/test/thread_arena.exp
+++ /dev/null
@@ -1,2 +0,0 @@
-Test begin
-Test end
diff --git a/test/thread_tcache_enabled.c b/test/thread_tcache_enabled.c
deleted file mode 100644
index 586b5330..00000000
--- a/test/thread_tcache_enabled.c
+++ /dev/null
@@ -1,91 +0,0 @@
-#define JEMALLOC_MANGLE
-#include "jemalloc_test.h"
-
-void *
-je_thread_start(void *arg)
-{
-    int err;
-    size_t sz;
-    bool e0, e1;
-
-    sz = sizeof(bool);
-    if ((err = mallctl("thread.tcache.enabled", &e0, &sz, NULL, 0))) {
-        if (err == ENOENT) {
-#ifdef JEMALLOC_TCACHE
-            assert(false);
-#endif
-        }
-        goto label_return;
-    }
-
-    if (e0) {
-        e1 = false;
-        assert(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz)
-            == 0);
-        assert(e0);
-    }
-
-    e1 = true;
-    assert(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz) == 0);
-    assert(e0 == false);
-
-    e1 = true;
-    assert(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz) == 0);
-    assert(e0);
-
-    e1 = false;
-    assert(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz) == 0);
-    assert(e0);
-
-    e1 = false;
-    assert(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz) == 0);
-    assert(e0 == false);
-
-    free(malloc(1));
-    e1 = true;
-    assert(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz) == 0);
-    assert(e0 == false);
-
-    free(malloc(1));
-    e1 = true;
-    assert(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz) == 0);
-    assert(e0);
-
-    free(malloc(1));
-    e1 = false;
-    assert(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz) == 0);
-    assert(e0);
-
-    free(malloc(1));
-    e1 = false;
-    assert(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz) == 0);
-    assert(e0 == false);
-
-    free(malloc(1));
-label_return:
-    return (NULL);
-}
-
-int
-main(void)
-{
-    int ret = 0;
-    je_thread_t thread;
-
-    malloc_printf("Test begin\n");
-
-    je_thread_start(NULL);
-
-    je_thread_create(&thread, je_thread_start, NULL);
-    je_thread_join(thread, NULL);
-
-    je_thread_start(NULL);
-
-    je_thread_create(&thread, je_thread_start, NULL);
-    je_thread_join(thread, NULL);
-
-    je_thread_start(NULL);
-
-    malloc_printf("Test end\n");
-    return (ret);
-}
diff --git a/test/thread_tcache_enabled.exp b/test/thread_tcache_enabled.exp
deleted file mode 100644
index 369a88dd..00000000
--- a/test/thread_tcache_enabled.exp
+++ /dev/null
@@ -1,2 +0,0 @@
-Test begin
-Test end
diff --git a/test/unit/SFMT.c b/test/unit/SFMT.c
new file mode 100644
index 00000000..4805f8e4
--- /dev/null
+++ b/test/unit/SFMT.c
@@ -0,0 +1,1605 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of the Hiroshima University nor the names of
+ *       its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written
+ *       permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */ +#include "test/jemalloc_test.h" + +#define BLOCK_SIZE 10000 +#define BLOCK_SIZE64 (BLOCK_SIZE / 2) +#define COUNT_1 1000 +#define COUNT_2 700 + +static const uint32_t init_gen_rand_32_expected[] = { + 3440181298U, 1564997079U, 1510669302U, 2930277156U, 1452439940U, + 3796268453U, 423124208U, 2143818589U, 3827219408U, 2987036003U, + 2674978610U, 1536842514U, 2027035537U, 2534897563U, 1686527725U, + 545368292U, 1489013321U, 1370534252U, 4231012796U, 3994803019U, + 1764869045U, 824597505U, 862581900U, 2469764249U, 812862514U, + 359318673U, 116957936U, 3367389672U, 2327178354U, 1898245200U, + 3206507879U, 2378925033U, 1040214787U, 2524778605U, 3088428700U, + 1417665896U, 964324147U, 2282797708U, 2456269299U, 313400376U, + 2245093271U, 1015729427U, 2694465011U, 3246975184U, 1992793635U, + 463679346U, 3721104591U, 3475064196U, 856141236U, 1499559719U, + 3522818941U, 3721533109U, 1954826617U, 1282044024U, 1543279136U, + 1301863085U, 2669145051U, 4221477354U, 3896016841U, 3392740262U, + 462466863U, 1037679449U, 1228140306U, 922298197U, 1205109853U, + 1872938061U, 3102547608U, 2742766808U, 1888626088U, 4028039414U, + 157593879U, 1136901695U, 4038377686U, 3572517236U, 4231706728U, + 2997311961U, 1189931652U, 3981543765U, 2826166703U, 87159245U, + 1721379072U, 3897926942U, 1790395498U, 2569178939U, 1047368729U, + 2340259131U, 3144212906U, 2301169789U, 2442885464U, 3034046771U, + 3667880593U, 3935928400U, 2372805237U, 1666397115U, 2460584504U, + 513866770U, 3810869743U, 2147400037U, 2792078025U, 2941761810U, + 3212265810U, 984692259U, 346590253U, 1804179199U, 3298543443U, + 750108141U, 2880257022U, 243310542U, 1869036465U, 1588062513U, + 2983949551U, 1931450364U, 4034505847U, 2735030199U, 1628461061U, + 2539522841U, 127965585U, 3992448871U, 913388237U, 559130076U, + 1202933193U, 4087643167U, 2590021067U, 2256240196U, 1746697293U, + 1013913783U, 1155864921U, 2715773730U, 915061862U, 1948766573U, + 2322882854U, 3761119102U, 1343405684U, 3078711943U, 3067431651U, + 3245156316U, 3588354584U, 3484623306U, 3899621563U, 4156689741U, + 3237090058U, 3880063844U, 862416318U, 4039923869U, 2303788317U, + 3073590536U, 701653667U, 2131530884U, 3169309950U, 2028486980U, + 747196777U, 3620218225U, 432016035U, 1449580595U, 2772266392U, + 444224948U, 1662832057U, 3184055582U, 3028331792U, 1861686254U, + 1104864179U, 342430307U, 1350510923U, 3024656237U, 1028417492U, + 2870772950U, 290847558U, 3675663500U, 508431529U, 4264340390U, + 2263569913U, 1669302976U, 519511383U, 2706411211U, 3764615828U, + 3883162495U, 4051445305U, 2412729798U, 3299405164U, 3991911166U, + 2348767304U, 2664054906U, 3763609282U, 593943581U, 3757090046U, + 2075338894U, 2020550814U, 4287452920U, 4290140003U, 1422957317U, + 2512716667U, 2003485045U, 2307520103U, 2288472169U, 3940751663U, + 4204638664U, 2892583423U, 1710068300U, 3904755993U, 2363243951U, + 3038334120U, 547099465U, 771105860U, 3199983734U, 4282046461U, + 2298388363U, 934810218U, 2837827901U, 3952500708U, 2095130248U, + 3083335297U, 26885281U, 3932155283U, 1531751116U, 1425227133U, + 495654159U, 3279634176U, 3855562207U, 3957195338U, 4159985527U, + 893375062U, 1875515536U, 1327247422U, 3754140693U, 1028923197U, + 1729880440U, 805571298U, 448971099U, 2726757106U, 2749436461U, + 2485987104U, 175337042U, 3235477922U, 3882114302U, 2020970972U, + 943926109U, 2762587195U, 1904195558U, 3452650564U, 108432281U, + 3893463573U, 3977583081U, 2636504348U, 1110673525U, 3548479841U, + 4258854744U, 980047703U, 4057175418U, 3890008292U, 145653646U, + 3141868989U, 3293216228U, 1194331837U, 
1254570642U, 3049934521U, + 2868313360U, 2886032750U, 1110873820U, 279553524U, 3007258565U, + 1104807822U, 3186961098U, 315764646U, 2163680838U, 3574508994U, + 3099755655U, 191957684U, 3642656737U, 3317946149U, 3522087636U, + 444526410U, 779157624U, 1088229627U, 1092460223U, 1856013765U, + 3659877367U, 368270451U, 503570716U, 3000984671U, 2742789647U, + 928097709U, 2914109539U, 308843566U, 2816161253U, 3667192079U, + 2762679057U, 3395240989U, 2928925038U, 1491465914U, 3458702834U, + 3787782576U, 2894104823U, 1296880455U, 1253636503U, 989959407U, + 2291560361U, 2776790436U, 1913178042U, 1584677829U, 689637520U, + 1898406878U, 688391508U, 3385234998U, 845493284U, 1943591856U, + 2720472050U, 222695101U, 1653320868U, 2904632120U, 4084936008U, + 1080720688U, 3938032556U, 387896427U, 2650839632U, 99042991U, + 1720913794U, 1047186003U, 1877048040U, 2090457659U, 517087501U, + 4172014665U, 2129713163U, 2413533132U, 2760285054U, 4129272496U, + 1317737175U, 2309566414U, 2228873332U, 3889671280U, 1110864630U, + 3576797776U, 2074552772U, 832002644U, 3097122623U, 2464859298U, + 2679603822U, 1667489885U, 3237652716U, 1478413938U, 1719340335U, + 2306631119U, 639727358U, 3369698270U, 226902796U, 2099920751U, + 1892289957U, 2201594097U, 3508197013U, 3495811856U, 3900381493U, + 841660320U, 3974501451U, 3360949056U, 1676829340U, 728899254U, + 2047809627U, 2390948962U, 670165943U, 3412951831U, 4189320049U, + 1911595255U, 2055363086U, 507170575U, 418219594U, 4141495280U, + 2692088692U, 4203630654U, 3540093932U, 791986533U, 2237921051U, + 2526864324U, 2956616642U, 1394958700U, 1983768223U, 1893373266U, + 591653646U, 228432437U, 1611046598U, 3007736357U, 1040040725U, + 2726180733U, 2789804360U, 4263568405U, 829098158U, 3847722805U, + 1123578029U, 1804276347U, 997971319U, 4203797076U, 4185199713U, + 2811733626U, 2343642194U, 2985262313U, 1417930827U, 3759587724U, + 1967077982U, 1585223204U, 1097475516U, 1903944948U, 740382444U, + 1114142065U, 1541796065U, 1718384172U, 1544076191U, 1134682254U, + 3519754455U, 2866243923U, 341865437U, 645498576U, 2690735853U, + 1046963033U, 2493178460U, 1187604696U, 1619577821U, 488503634U, + 3255768161U, 2306666149U, 1630514044U, 2377698367U, 2751503746U, + 3794467088U, 1796415981U, 3657173746U, 409136296U, 1387122342U, + 1297726519U, 219544855U, 4270285558U, 437578827U, 1444698679U, + 2258519491U, 963109892U, 3982244073U, 3351535275U, 385328496U, + 1804784013U, 698059346U, 3920535147U, 708331212U, 784338163U, + 785678147U, 1238376158U, 1557298846U, 2037809321U, 271576218U, + 4145155269U, 1913481602U, 2763691931U, 588981080U, 1201098051U, + 3717640232U, 1509206239U, 662536967U, 3180523616U, 1133105435U, + 2963500837U, 2253971215U, 3153642623U, 1066925709U, 2582781958U, + 3034720222U, 1090798544U, 2942170004U, 4036187520U, 686972531U, + 2610990302U, 2641437026U, 1837562420U, 722096247U, 1315333033U, + 2102231203U, 3402389208U, 3403698140U, 1312402831U, 2898426558U, + 814384596U, 385649582U, 1916643285U, 1924625106U, 2512905582U, + 2501170304U, 4275223366U, 2841225246U, 1467663688U, 3563567847U, + 2969208552U, 884750901U, 102992576U, 227844301U, 3681442994U, + 3502881894U, 4034693299U, 1166727018U, 1697460687U, 1737778332U, + 1787161139U, 1053003655U, 1215024478U, 2791616766U, 2525841204U, + 1629323443U, 3233815U, 2003823032U, 3083834263U, 2379264872U, + 3752392312U, 1287475550U, 3770904171U, 3004244617U, 1502117784U, + 918698423U, 2419857538U, 3864502062U, 1751322107U, 2188775056U, + 4018728324U, 983712955U, 440071928U, 3710838677U, 2001027698U, + 3994702151U, 22493119U, 
3584400918U, 3446253670U, 4254789085U, + 1405447860U, 1240245579U, 1800644159U, 1661363424U, 3278326132U, + 3403623451U, 67092802U, 2609352193U, 3914150340U, 1814842761U, + 3610830847U, 591531412U, 3880232807U, 1673505890U, 2585326991U, + 1678544474U, 3148435887U, 3457217359U, 1193226330U, 2816576908U, + 154025329U, 121678860U, 1164915738U, 973873761U, 269116100U, + 52087970U, 744015362U, 498556057U, 94298882U, 1563271621U, + 2383059628U, 4197367290U, 3958472990U, 2592083636U, 2906408439U, + 1097742433U, 3924840517U, 264557272U, 2292287003U, 3203307984U, + 4047038857U, 3820609705U, 2333416067U, 1839206046U, 3600944252U, + 3412254904U, 583538222U, 2390557166U, 4140459427U, 2810357445U, + 226777499U, 2496151295U, 2207301712U, 3283683112U, 611630281U, + 1933218215U, 3315610954U, 3889441987U, 3719454256U, 3957190521U, + 1313998161U, 2365383016U, 3146941060U, 1801206260U, 796124080U, + 2076248581U, 1747472464U, 3254365145U, 595543130U, 3573909503U, + 3758250204U, 2020768540U, 2439254210U, 93368951U, 3155792250U, + 2600232980U, 3709198295U, 3894900440U, 2971850836U, 1578909644U, + 1443493395U, 2581621665U, 3086506297U, 2443465861U, 558107211U, + 1519367835U, 249149686U, 908102264U, 2588765675U, 1232743965U, + 1001330373U, 3561331654U, 2259301289U, 1564977624U, 3835077093U, + 727244906U, 4255738067U, 1214133513U, 2570786021U, 3899704621U, + 1633861986U, 1636979509U, 1438500431U, 58463278U, 2823485629U, + 2297430187U, 2926781924U, 3371352948U, 1864009023U, 2722267973U, + 1444292075U, 437703973U, 1060414512U, 189705863U, 910018135U, + 4077357964U, 884213423U, 2644986052U, 3973488374U, 1187906116U, + 2331207875U, 780463700U, 3713351662U, 3854611290U, 412805574U, + 2978462572U, 2176222820U, 829424696U, 2790788332U, 2750819108U, + 1594611657U, 3899878394U, 3032870364U, 1702887682U, 1948167778U, + 14130042U, 192292500U, 947227076U, 90719497U, 3854230320U, + 784028434U, 2142399787U, 1563449646U, 2844400217U, 819143172U, + 2883302356U, 2328055304U, 1328532246U, 2603885363U, 3375188924U, + 933941291U, 3627039714U, 2129697284U, 2167253953U, 2506905438U, + 1412424497U, 2981395985U, 1418359660U, 2925902456U, 52752784U, + 3713667988U, 3924669405U, 648975707U, 1145520213U, 4018650664U, + 3805915440U, 2380542088U, 2013260958U, 3262572197U, 2465078101U, + 1114540067U, 3728768081U, 2396958768U, 590672271U, 904818725U, + 4263660715U, 700754408U, 1042601829U, 4094111823U, 4274838909U, + 2512692617U, 2774300207U, 2057306915U, 3470942453U, 99333088U, + 1142661026U, 2889931380U, 14316674U, 2201179167U, 415289459U, + 448265759U, 3515142743U, 3254903683U, 246633281U, 1184307224U, + 2418347830U, 2092967314U, 2682072314U, 2558750234U, 2000352263U, + 1544150531U, 399010405U, 1513946097U, 499682937U, 461167460U, + 3045570638U, 1633669705U, 851492362U, 4052801922U, 2055266765U, + 635556996U, 368266356U, 2385737383U, 3218202352U, 2603772408U, + 349178792U, 226482567U, 3102426060U, 3575998268U, 2103001871U, + 3243137071U, 225500688U, 1634718593U, 4283311431U, 4292122923U, + 3842802787U, 811735523U, 105712518U, 663434053U, 1855889273U, + 2847972595U, 1196355421U, 2552150115U, 4254510614U, 3752181265U, + 3430721819U, 3828705396U, 3436287905U, 3441964937U, 4123670631U, + 353001539U, 459496439U, 3799690868U, 1293777660U, 2761079737U, + 498096339U, 3398433374U, 4080378380U, 2304691596U, 2995729055U, + 4134660419U, 3903444024U, 3576494993U, 203682175U, 3321164857U, + 2747963611U, 79749085U, 2992890370U, 1240278549U, 1772175713U, + 2111331972U, 2655023449U, 1683896345U, 2836027212U, 3482868021U, + 2489884874U, 756853961U, 
2298874501U, 4013448667U, 4143996022U, + 2948306858U, 4132920035U, 1283299272U, 995592228U, 3450508595U, + 1027845759U, 1766942720U, 3861411826U, 1446861231U, 95974993U, + 3502263554U, 1487532194U, 601502472U, 4129619129U, 250131773U, + 2050079547U, 3198903947U, 3105589778U, 4066481316U, 3026383978U, + 2276901713U, 365637751U, 2260718426U, 1394775634U, 1791172338U, + 2690503163U, 2952737846U, 1568710462U, 732623190U, 2980358000U, + 1053631832U, 1432426951U, 3229149635U, 1854113985U, 3719733532U, + 3204031934U, 735775531U, 107468620U, 3734611984U, 631009402U, + 3083622457U, 4109580626U, 159373458U, 1301970201U, 4132389302U, + 1293255004U, 847182752U, 4170022737U, 96712900U, 2641406755U, + 1381727755U, 405608287U, 4287919625U, 1703554290U, 3589580244U, + 2911403488U, 2166565U, 2647306451U, 2330535117U, 1200815358U, + 1165916754U, 245060911U, 4040679071U, 3684908771U, 2452834126U, + 2486872773U, 2318678365U, 2940627908U, 1837837240U, 3447897409U, + 4270484676U, 1495388728U, 3754288477U, 4204167884U, 1386977705U, + 2692224733U, 3076249689U, 4109568048U, 4170955115U, 4167531356U, + 4020189950U, 4261855038U, 3036907575U, 3410399885U, 3076395737U, + 1046178638U, 144496770U, 230725846U, 3349637149U, 17065717U, + 2809932048U, 2054581785U, 3608424964U, 3259628808U, 134897388U, + 3743067463U, 257685904U, 3795656590U, 1562468719U, 3589103904U, + 3120404710U, 254684547U, 2653661580U, 3663904795U, 2631942758U, + 1063234347U, 2609732900U, 2332080715U, 3521125233U, 1180599599U, + 1935868586U, 4110970440U, 296706371U, 2128666368U, 1319875791U, + 1570900197U, 3096025483U, 1799882517U, 1928302007U, 1163707758U, + 1244491489U, 3533770203U, 567496053U, 2757924305U, 2781639343U, + 2818420107U, 560404889U, 2619609724U, 4176035430U, 2511289753U, + 2521842019U, 3910553502U, 2926149387U, 3302078172U, 4237118867U, + 330725126U, 367400677U, 888239854U, 545570454U, 4259590525U, + 134343617U, 1102169784U, 1647463719U, 3260979784U, 1518840883U, + 3631537963U, 3342671457U, 1301549147U, 2083739356U, 146593792U, + 3217959080U, 652755743U, 2032187193U, 3898758414U, 1021358093U, + 4037409230U, 2176407931U, 3427391950U, 2883553603U, 985613827U, + 3105265092U, 3423168427U, 3387507672U, 467170288U, 2141266163U, + 3723870208U, 916410914U, 1293987799U, 2652584950U, 769160137U, + 3205292896U, 1561287359U, 1684510084U, 3136055621U, 3765171391U, + 639683232U, 2639569327U, 1218546948U, 4263586685U, 3058215773U, + 2352279820U, 401870217U, 2625822463U, 1529125296U, 2981801895U, + 1191285226U, 4027725437U, 3432700217U, 4098835661U, 971182783U, + 2443861173U, 3881457123U, 3874386651U, 457276199U, 2638294160U, + 4002809368U, 421169044U, 1112642589U, 3076213779U, 3387033971U, + 2499610950U, 3057240914U, 1662679783U, 461224431U, 1168395933U +}; +static const uint32_t init_by_array_32_expected[] = { + 2920711183U, 3885745737U, 3501893680U, 856470934U, 1421864068U, + 277361036U, 1518638004U, 2328404353U, 3355513634U, 64329189U, + 1624587673U, 3508467182U, 2481792141U, 3706480799U, 1925859037U, + 2913275699U, 882658412U, 384641219U, 422202002U, 1873384891U, + 2006084383U, 3924929912U, 1636718106U, 3108838742U, 1245465724U, + 4195470535U, 779207191U, 1577721373U, 1390469554U, 2928648150U, + 121399709U, 3170839019U, 4044347501U, 953953814U, 3821710850U, + 3085591323U, 3666535579U, 3577837737U, 2012008410U, 3565417471U, + 4044408017U, 433600965U, 1637785608U, 1798509764U, 860770589U, + 3081466273U, 3982393409U, 2451928325U, 3437124742U, 4093828739U, + 3357389386U, 2154596123U, 496568176U, 2650035164U, 2472361850U, + 3438299U, 2150366101U, 
1577256676U, 3802546413U, 1787774626U, + 4078331588U, 3706103141U, 170391138U, 3806085154U, 1680970100U, + 1961637521U, 3316029766U, 890610272U, 1453751581U, 1430283664U, + 3051057411U, 3597003186U, 542563954U, 3796490244U, 1690016688U, + 3448752238U, 440702173U, 347290497U, 1121336647U, 2540588620U, + 280881896U, 2495136428U, 213707396U, 15104824U, 2946180358U, + 659000016U, 566379385U, 2614030979U, 2855760170U, 334526548U, + 2315569495U, 2729518615U, 564745877U, 1263517638U, 3157185798U, + 1604852056U, 1011639885U, 2950579535U, 2524219188U, 312951012U, + 1528896652U, 1327861054U, 2846910138U, 3966855905U, 2536721582U, + 855353911U, 1685434729U, 3303978929U, 1624872055U, 4020329649U, + 3164802143U, 1642802700U, 1957727869U, 1792352426U, 3334618929U, + 2631577923U, 3027156164U, 842334259U, 3353446843U, 1226432104U, + 1742801369U, 3552852535U, 3471698828U, 1653910186U, 3380330939U, + 2313782701U, 3351007196U, 2129839995U, 1800682418U, 4085884420U, + 1625156629U, 3669701987U, 615211810U, 3294791649U, 4131143784U, + 2590843588U, 3207422808U, 3275066464U, 561592872U, 3957205738U, + 3396578098U, 48410678U, 3505556445U, 1005764855U, 3920606528U, + 2936980473U, 2378918600U, 2404449845U, 1649515163U, 701203563U, + 3705256349U, 83714199U, 3586854132U, 922978446U, 2863406304U, + 3523398907U, 2606864832U, 2385399361U, 3171757816U, 4262841009U, + 3645837721U, 1169579486U, 3666433897U, 3174689479U, 1457866976U, + 3803895110U, 3346639145U, 1907224409U, 1978473712U, 1036712794U, + 980754888U, 1302782359U, 1765252468U, 459245755U, 3728923860U, + 1512894209U, 2046491914U, 207860527U, 514188684U, 2288713615U, + 1597354672U, 3349636117U, 2357291114U, 3995796221U, 945364213U, + 1893326518U, 3770814016U, 1691552714U, 2397527410U, 967486361U, + 776416472U, 4197661421U, 951150819U, 1852770983U, 4044624181U, + 1399439738U, 4194455275U, 2284037669U, 1550734958U, 3321078108U, + 1865235926U, 2912129961U, 2664980877U, 1357572033U, 2600196436U, + 2486728200U, 2372668724U, 1567316966U, 2374111491U, 1839843570U, + 20815612U, 3727008608U, 3871996229U, 824061249U, 1932503978U, + 3404541726U, 758428924U, 2609331364U, 1223966026U, 1299179808U, + 648499352U, 2180134401U, 880821170U, 3781130950U, 113491270U, + 1032413764U, 4185884695U, 2490396037U, 1201932817U, 4060951446U, + 4165586898U, 1629813212U, 2887821158U, 415045333U, 628926856U, + 2193466079U, 3391843445U, 2227540681U, 1907099846U, 2848448395U, + 1717828221U, 1372704537U, 1707549841U, 2294058813U, 2101214437U, + 2052479531U, 1695809164U, 3176587306U, 2632770465U, 81634404U, + 1603220563U, 644238487U, 302857763U, 897352968U, 2613146653U, + 1391730149U, 4245717312U, 4191828749U, 1948492526U, 2618174230U, + 3992984522U, 2178852787U, 3596044509U, 3445573503U, 2026614616U, + 915763564U, 3415689334U, 2532153403U, 3879661562U, 2215027417U, + 3111154986U, 2929478371U, 668346391U, 1152241381U, 2632029711U, + 3004150659U, 2135025926U, 948690501U, 2799119116U, 4228829406U, + 1981197489U, 4209064138U, 684318751U, 3459397845U, 201790843U, + 4022541136U, 3043635877U, 492509624U, 3263466772U, 1509148086U, + 921459029U, 3198857146U, 705479721U, 3835966910U, 3603356465U, + 576159741U, 1742849431U, 594214882U, 2055294343U, 3634861861U, + 449571793U, 3246390646U, 3868232151U, 1479156585U, 2900125656U, + 2464815318U, 3960178104U, 1784261920U, 18311476U, 3627135050U, + 644609697U, 424968996U, 919890700U, 2986824110U, 816423214U, + 4003562844U, 1392714305U, 1757384428U, 2569030598U, 995949559U, + 3875659880U, 2933807823U, 2752536860U, 2993858466U, 4030558899U, + 2770783427U, 2775406005U, 
2777781742U, 1931292655U, 472147933U, + 3865853827U, 2726470545U, 2668412860U, 2887008249U, 408979190U, + 3578063323U, 3242082049U, 1778193530U, 27981909U, 2362826515U, + 389875677U, 1043878156U, 581653903U, 3830568952U, 389535942U, + 3713523185U, 2768373359U, 2526101582U, 1998618197U, 1160859704U, + 3951172488U, 1098005003U, 906275699U, 3446228002U, 2220677963U, + 2059306445U, 132199571U, 476838790U, 1868039399U, 3097344807U, + 857300945U, 396345050U, 2835919916U, 1782168828U, 1419519470U, + 4288137521U, 819087232U, 596301494U, 872823172U, 1526888217U, + 805161465U, 1116186205U, 2829002754U, 2352620120U, 620121516U, + 354159268U, 3601949785U, 209568138U, 1352371732U, 2145977349U, + 4236871834U, 1539414078U, 3558126206U, 3224857093U, 4164166682U, + 3817553440U, 3301780278U, 2682696837U, 3734994768U, 1370950260U, + 1477421202U, 2521315749U, 1330148125U, 1261554731U, 2769143688U, + 3554756293U, 4235882678U, 3254686059U, 3530579953U, 1215452615U, + 3574970923U, 4057131421U, 589224178U, 1000098193U, 171190718U, + 2521852045U, 2351447494U, 2284441580U, 2646685513U, 3486933563U, + 3789864960U, 1190528160U, 1702536782U, 1534105589U, 4262946827U, + 2726686826U, 3584544841U, 2348270128U, 2145092281U, 2502718509U, + 1027832411U, 3571171153U, 1287361161U, 4011474411U, 3241215351U, + 2419700818U, 971242709U, 1361975763U, 1096842482U, 3271045537U, + 81165449U, 612438025U, 3912966678U, 1356929810U, 733545735U, + 537003843U, 1282953084U, 884458241U, 588930090U, 3930269801U, + 2961472450U, 1219535534U, 3632251943U, 268183903U, 1441240533U, + 3653903360U, 3854473319U, 2259087390U, 2548293048U, 2022641195U, + 2105543911U, 1764085217U, 3246183186U, 482438805U, 888317895U, + 2628314765U, 2466219854U, 717546004U, 2322237039U, 416725234U, + 1544049923U, 1797944973U, 3398652364U, 3111909456U, 485742908U, + 2277491072U, 1056355088U, 3181001278U, 129695079U, 2693624550U, + 1764438564U, 3797785470U, 195503713U, 3266519725U, 2053389444U, + 1961527818U, 3400226523U, 3777903038U, 2597274307U, 4235851091U, + 4094406648U, 2171410785U, 1781151386U, 1378577117U, 654643266U, + 3424024173U, 3385813322U, 679385799U, 479380913U, 681715441U, + 3096225905U, 276813409U, 3854398070U, 2721105350U, 831263315U, + 3276280337U, 2628301522U, 3984868494U, 1466099834U, 2104922114U, + 1412672743U, 820330404U, 3491501010U, 942735832U, 710652807U, + 3972652090U, 679881088U, 40577009U, 3705286397U, 2815423480U, + 3566262429U, 663396513U, 3777887429U, 4016670678U, 404539370U, + 1142712925U, 1140173408U, 2913248352U, 2872321286U, 263751841U, + 3175196073U, 3162557581U, 2878996619U, 75498548U, 3836833140U, + 3284664959U, 1157523805U, 112847376U, 207855609U, 1337979698U, + 1222578451U, 157107174U, 901174378U, 3883717063U, 1618632639U, + 1767889440U, 4264698824U, 1582999313U, 884471997U, 2508825098U, + 3756370771U, 2457213553U, 3565776881U, 3709583214U, 915609601U, + 460833524U, 1091049576U, 85522880U, 2553251U, 132102809U, + 2429882442U, 2562084610U, 1386507633U, 4112471229U, 21965213U, + 1981516006U, 2418435617U, 3054872091U, 4251511224U, 2025783543U, + 1916911512U, 2454491136U, 3938440891U, 3825869115U, 1121698605U, + 3463052265U, 802340101U, 1912886800U, 4031997367U, 3550640406U, + 1596096923U, 610150600U, 431464457U, 2541325046U, 486478003U, + 739704936U, 2862696430U, 3037903166U, 1129749694U, 2611481261U, + 1228993498U, 510075548U, 3424962587U, 2458689681U, 818934833U, + 4233309125U, 1608196251U, 3419476016U, 1858543939U, 2682166524U, + 3317854285U, 631986188U, 3008214764U, 613826412U, 3567358221U, + 3512343882U, 1552467474U, 3316162670U, 
1275841024U, 4142173454U, + 565267881U, 768644821U, 198310105U, 2396688616U, 1837659011U, + 203429334U, 854539004U, 4235811518U, 3338304926U, 3730418692U, + 3852254981U, 3032046452U, 2329811860U, 2303590566U, 2696092212U, + 3894665932U, 145835667U, 249563655U, 1932210840U, 2431696407U, + 3312636759U, 214962629U, 2092026914U, 3020145527U, 4073039873U, + 2739105705U, 1308336752U, 855104522U, 2391715321U, 67448785U, + 547989482U, 854411802U, 3608633740U, 431731530U, 537375589U, + 3888005760U, 696099141U, 397343236U, 1864511780U, 44029739U, + 1729526891U, 1993398655U, 2010173426U, 2591546756U, 275223291U, + 1503900299U, 4217765081U, 2185635252U, 1122436015U, 3550155364U, + 681707194U, 3260479338U, 933579397U, 2983029282U, 2505504587U, + 2667410393U, 2962684490U, 4139721708U, 2658172284U, 2452602383U, + 2607631612U, 1344296217U, 3075398709U, 2949785295U, 1049956168U, + 3917185129U, 2155660174U, 3280524475U, 1503827867U, 674380765U, + 1918468193U, 3843983676U, 634358221U, 2538335643U, 1873351298U, + 3368723763U, 2129144130U, 3203528633U, 3087174986U, 2691698871U, + 2516284287U, 24437745U, 1118381474U, 2816314867U, 2448576035U, + 4281989654U, 217287825U, 165872888U, 2628995722U, 3533525116U, + 2721669106U, 872340568U, 3429930655U, 3309047304U, 3916704967U, + 3270160355U, 1348884255U, 1634797670U, 881214967U, 4259633554U, + 174613027U, 1103974314U, 1625224232U, 2678368291U, 1133866707U, + 3853082619U, 4073196549U, 1189620777U, 637238656U, 930241537U, + 4042750792U, 3842136042U, 2417007212U, 2524907510U, 1243036827U, + 1282059441U, 3764588774U, 1394459615U, 2323620015U, 1166152231U, + 3307479609U, 3849322257U, 3507445699U, 4247696636U, 758393720U, + 967665141U, 1095244571U, 1319812152U, 407678762U, 2640605208U, + 2170766134U, 3663594275U, 4039329364U, 2512175520U, 725523154U, + 2249807004U, 3312617979U, 2414634172U, 1278482215U, 349206484U, + 1573063308U, 1196429124U, 3873264116U, 2400067801U, 268795167U, + 226175489U, 2961367263U, 1968719665U, 42656370U, 1010790699U, + 561600615U, 2422453992U, 3082197735U, 1636700484U, 3977715296U, + 3125350482U, 3478021514U, 2227819446U, 1540868045U, 3061908980U, + 1087362407U, 3625200291U, 361937537U, 580441897U, 1520043666U, + 2270875402U, 1009161260U, 2502355842U, 4278769785U, 473902412U, + 1057239083U, 1905829039U, 1483781177U, 2080011417U, 1207494246U, + 1806991954U, 2194674403U, 3455972205U, 807207678U, 3655655687U, + 674112918U, 195425752U, 3917890095U, 1874364234U, 1837892715U, + 3663478166U, 1548892014U, 2570748714U, 2049929836U, 2167029704U, + 697543767U, 3499545023U, 3342496315U, 1725251190U, 3561387469U, + 2905606616U, 1580182447U, 3934525927U, 4103172792U, 1365672522U, + 1534795737U, 3308667416U, 2841911405U, 3943182730U, 4072020313U, + 3494770452U, 3332626671U, 55327267U, 478030603U, 411080625U, + 3419529010U, 1604767823U, 3513468014U, 570668510U, 913790824U, + 2283967995U, 695159462U, 3825542932U, 4150698144U, 1829758699U, + 202895590U, 1609122645U, 1267651008U, 2910315509U, 2511475445U, + 2477423819U, 3932081579U, 900879979U, 2145588390U, 2670007504U, + 580819444U, 1864996828U, 2526325979U, 1019124258U, 815508628U, + 2765933989U, 1277301341U, 3006021786U, 855540956U, 288025710U, + 1919594237U, 2331223864U, 177452412U, 2475870369U, 2689291749U, + 865194284U, 253432152U, 2628531804U, 2861208555U, 2361597573U, + 1653952120U, 1039661024U, 2159959078U, 3709040440U, 3564718533U, + 2596878672U, 2041442161U, 31164696U, 2662962485U, 3665637339U, + 1678115244U, 2699839832U, 3651968520U, 3521595541U, 458433303U, + 2423096824U, 21831741U, 380011703U, 
2498168716U, 861806087U, + 1673574843U, 4188794405U, 2520563651U, 2632279153U, 2170465525U, + 4171949898U, 3886039621U, 1661344005U, 3424285243U, 992588372U, + 2500984144U, 2993248497U, 3590193895U, 1535327365U, 515645636U, + 131633450U, 3729760261U, 1613045101U, 3254194278U, 15889678U, + 1493590689U, 244148718U, 2991472662U, 1401629333U, 777349878U, + 2501401703U, 4285518317U, 3794656178U, 955526526U, 3442142820U, + 3970298374U, 736025417U, 2737370764U, 1271509744U, 440570731U, + 136141826U, 1596189518U, 923399175U, 257541519U, 3505774281U, + 2194358432U, 2518162991U, 1379893637U, 2667767062U, 3748146247U, + 1821712620U, 3923161384U, 1947811444U, 2392527197U, 4127419685U, + 1423694998U, 4156576871U, 1382885582U, 3420127279U, 3617499534U, + 2994377493U, 4038063986U, 1918458672U, 2983166794U, 4200449033U, + 353294540U, 1609232588U, 243926648U, 2332803291U, 507996832U, + 2392838793U, 4075145196U, 2060984340U, 4287475136U, 88232602U, + 2491531140U, 4159725633U, 2272075455U, 759298618U, 201384554U, + 838356250U, 1416268324U, 674476934U, 90795364U, 141672229U, + 3660399588U, 4196417251U, 3249270244U, 3774530247U, 59587265U, + 3683164208U, 19392575U, 1463123697U, 1882205379U, 293780489U, + 2553160622U, 2933904694U, 675638239U, 2851336944U, 1435238743U, + 2448730183U, 804436302U, 2119845972U, 322560608U, 4097732704U, + 2987802540U, 641492617U, 2575442710U, 4217822703U, 3271835300U, + 2836418300U, 3739921620U, 2138378768U, 2879771855U, 4294903423U, + 3121097946U, 2603440486U, 2560820391U, 1012930944U, 2313499967U, + 584489368U, 3431165766U, 897384869U, 2062537737U, 2847889234U, + 3742362450U, 2951174585U, 4204621084U, 1109373893U, 3668075775U, + 2750138839U, 3518055702U, 733072558U, 4169325400U, 788493625U +}; +static const uint64_t init_gen_rand_64_expected[] = { + QU(16924766246869039260LLU), QU( 8201438687333352714LLU), + QU( 2265290287015001750LLU), QU(18397264611805473832LLU), + QU( 3375255223302384358LLU), QU( 6345559975416828796LLU), + QU(18229739242790328073LLU), QU( 7596792742098800905LLU), + QU( 255338647169685981LLU), QU( 2052747240048610300LLU), + QU(18328151576097299343LLU), QU(12472905421133796567LLU), + QU(11315245349717600863LLU), QU(16594110197775871209LLU), + QU(15708751964632456450LLU), QU(10452031272054632535LLU), + QU(11097646720811454386LLU), QU( 4556090668445745441LLU), + QU(17116187693090663106LLU), QU(14931526836144510645LLU), + QU( 9190752218020552591LLU), QU( 9625800285771901401LLU), + QU(13995141077659972832LLU), QU( 5194209094927829625LLU), + QU( 4156788379151063303LLU), QU( 8523452593770139494LLU), + QU(14082382103049296727LLU), QU( 2462601863986088483LLU), + QU( 3030583461592840678LLU), QU( 5221622077872827681LLU), + QU( 3084210671228981236LLU), QU(13956758381389953823LLU), + QU(13503889856213423831LLU), QU(15696904024189836170LLU), + QU( 4612584152877036206LLU), QU( 6231135538447867881LLU), + QU(10172457294158869468LLU), QU( 6452258628466708150LLU), + QU(14044432824917330221LLU), QU( 370168364480044279LLU), + QU(10102144686427193359LLU), QU( 667870489994776076LLU), + QU( 2732271956925885858LLU), QU(18027788905977284151LLU), + QU(15009842788582923859LLU), QU( 7136357960180199542LLU), + QU(15901736243475578127LLU), QU(16951293785352615701LLU), + QU(10551492125243691632LLU), QU(17668869969146434804LLU), + QU(13646002971174390445LLU), QU( 9804471050759613248LLU), + QU( 5511670439655935493LLU), QU(18103342091070400926LLU), + QU(17224512747665137533LLU), QU(15534627482992618168LLU), + QU( 1423813266186582647LLU), QU(15821176807932930024LLU), + QU( 30323369733607156LLU), 
QU(11599382494723479403LLU), + QU( 653856076586810062LLU), QU( 3176437395144899659LLU), + QU(14028076268147963917LLU), QU(16156398271809666195LLU), + QU( 3166955484848201676LLU), QU( 5746805620136919390LLU), + QU(17297845208891256593LLU), QU(11691653183226428483LLU), + QU(17900026146506981577LLU), QU(15387382115755971042LLU), + QU(16923567681040845943LLU), QU( 8039057517199388606LLU), + QU(11748409241468629263LLU), QU( 794358245539076095LLU), + QU(13438501964693401242LLU), QU(14036803236515618962LLU), + QU( 5252311215205424721LLU), QU(17806589612915509081LLU), + QU( 6802767092397596006LLU), QU(14212120431184557140LLU), + QU( 1072951366761385712LLU), QU(13098491780722836296LLU), + QU( 9466676828710797353LLU), QU(12673056849042830081LLU), + QU(12763726623645357580LLU), QU(16468961652999309493LLU), + QU(15305979875636438926LLU), QU(17444713151223449734LLU), + QU( 5692214267627883674LLU), QU(13049589139196151505LLU), + QU( 880115207831670745LLU), QU( 1776529075789695498LLU), + QU(16695225897801466485LLU), QU(10666901778795346845LLU), + QU( 6164389346722833869LLU), QU( 2863817793264300475LLU), + QU( 9464049921886304754LLU), QU( 3993566636740015468LLU), + QU( 9983749692528514136LLU), QU(16375286075057755211LLU), + QU(16042643417005440820LLU), QU(11445419662923489877LLU), + QU( 7999038846885158836LLU), QU( 6721913661721511535LLU), + QU( 5363052654139357320LLU), QU( 1817788761173584205LLU), + QU(13290974386445856444LLU), QU( 4650350818937984680LLU), + QU( 8219183528102484836LLU), QU( 1569862923500819899LLU), + QU( 4189359732136641860LLU), QU(14202822961683148583LLU), + QU( 4457498315309429058LLU), QU(13089067387019074834LLU), + QU(11075517153328927293LLU), QU(10277016248336668389LLU), + QU( 7070509725324401122LLU), QU(17808892017780289380LLU), + QU(13143367339909287349LLU), QU( 1377743745360085151LLU), + QU( 5749341807421286485LLU), QU(14832814616770931325LLU), + QU( 7688820635324359492LLU), QU(10960474011539770045LLU), + QU( 81970066653179790LLU), QU(12619476072607878022LLU), + QU( 4419566616271201744LLU), QU(15147917311750568503LLU), + QU( 5549739182852706345LLU), QU( 7308198397975204770LLU), + QU(13580425496671289278LLU), QU(17070764785210130301LLU), + QU( 8202832846285604405LLU), QU( 6873046287640887249LLU), + QU( 6927424434308206114LLU), QU( 6139014645937224874LLU), + QU(10290373645978487639LLU), QU(15904261291701523804LLU), + QU( 9628743442057826883LLU), QU(18383429096255546714LLU), + QU( 4977413265753686967LLU), QU( 7714317492425012869LLU), + QU( 9025232586309926193LLU), QU(14627338359776709107LLU), + QU(14759849896467790763LLU), QU(10931129435864423252LLU), + QU( 4588456988775014359LLU), QU(10699388531797056724LLU), + QU( 468652268869238792LLU), QU( 5755943035328078086LLU), + QU( 2102437379988580216LLU), QU( 9986312786506674028LLU), + QU( 2654207180040945604LLU), QU( 8726634790559960062LLU), + QU( 100497234871808137LLU), QU( 2800137176951425819LLU), + QU( 6076627612918553487LLU), QU( 5780186919186152796LLU), + QU( 8179183595769929098LLU), QU( 6009426283716221169LLU), + QU( 2796662551397449358LLU), QU( 1756961367041986764LLU), + QU( 6972897917355606205LLU), QU(14524774345368968243LLU), + QU( 2773529684745706940LLU), QU( 4853632376213075959LLU), + QU( 4198177923731358102LLU), QU( 8271224913084139776LLU), + QU( 2741753121611092226LLU), QU(16782366145996731181LLU), + QU(15426125238972640790LLU), QU(13595497100671260342LLU), + QU( 3173531022836259898LLU), QU( 6573264560319511662LLU), + QU(18041111951511157441LLU), QU( 2351433581833135952LLU), + QU( 3113255578908173487LLU), QU( 
1739371330877858784LLU), + QU(16046126562789165480LLU), QU( 8072101652214192925LLU), + QU(15267091584090664910LLU), QU( 9309579200403648940LLU), + QU( 5218892439752408722LLU), QU(14492477246004337115LLU), + QU(17431037586679770619LLU), QU( 7385248135963250480LLU), + QU( 9580144956565560660LLU), QU( 4919546228040008720LLU), + QU(15261542469145035584LLU), QU(18233297270822253102LLU), + QU( 5453248417992302857LLU), QU( 9309519155931460285LLU), + QU(10342813012345291756LLU), QU(15676085186784762381LLU), + QU(15912092950691300645LLU), QU( 9371053121499003195LLU), + QU( 9897186478226866746LLU), QU(14061858287188196327LLU), + QU( 122575971620788119LLU), QU(12146750969116317754LLU), + QU( 4438317272813245201LLU), QU( 8332576791009527119LLU), + QU(13907785691786542057LLU), QU(10374194887283287467LLU), + QU( 2098798755649059566LLU), QU( 3416235197748288894LLU), + QU( 8688269957320773484LLU), QU( 7503964602397371571LLU), + QU(16724977015147478236LLU), QU( 9461512855439858184LLU), + QU(13259049744534534727LLU), QU( 3583094952542899294LLU), + QU( 8764245731305528292LLU), QU(13240823595462088985LLU), + QU(13716141617617910448LLU), QU(18114969519935960955LLU), + QU( 2297553615798302206LLU), QU( 4585521442944663362LLU), + QU(17776858680630198686LLU), QU( 4685873229192163363LLU), + QU( 152558080671135627LLU), QU(15424900540842670088LLU), + QU(13229630297130024108LLU), QU(17530268788245718717LLU), + QU(16675633913065714144LLU), QU( 3158912717897568068LLU), + QU(15399132185380087288LLU), QU( 7401418744515677872LLU), + QU(13135412922344398535LLU), QU( 6385314346100509511LLU), + QU(13962867001134161139LLU), QU(10272780155442671999LLU), + QU(12894856086597769142LLU), QU(13340877795287554994LLU), + QU(12913630602094607396LLU), QU(12543167911119793857LLU), + QU(17343570372251873096LLU), QU(10959487764494150545LLU), + QU( 6966737953093821128LLU), QU(13780699135496988601LLU), + QU( 4405070719380142046LLU), QU(14923788365607284982LLU), + QU( 2869487678905148380LLU), QU( 6416272754197188403LLU), + QU(15017380475943612591LLU), QU( 1995636220918429487LLU), + QU( 3402016804620122716LLU), QU(15800188663407057080LLU), + QU(11362369990390932882LLU), QU(15262183501637986147LLU), + QU(10239175385387371494LLU), QU( 9352042420365748334LLU), + QU( 1682457034285119875LLU), QU( 1724710651376289644LLU), + QU( 2038157098893817966LLU), QU( 9897825558324608773LLU), + QU( 1477666236519164736LLU), QU(16835397314511233640LLU), + QU(10370866327005346508LLU), QU(10157504370660621982LLU), + QU(12113904045335882069LLU), QU(13326444439742783008LLU), + QU(11302769043000765804LLU), QU(13594979923955228484LLU), + QU(11779351762613475968LLU), QU( 3786101619539298383LLU), + QU( 8021122969180846063LLU), QU(15745904401162500495LLU), + QU(10762168465993897267LLU), QU(13552058957896319026LLU), + QU(11200228655252462013LLU), QU( 5035370357337441226LLU), + QU( 7593918984545500013LLU), QU( 5418554918361528700LLU), + QU( 4858270799405446371LLU), QU( 9974659566876282544LLU), + QU(18227595922273957859LLU), QU( 2772778443635656220LLU), + QU(14285143053182085385LLU), QU( 9939700992429600469LLU), + QU(12756185904545598068LLU), QU( 2020783375367345262LLU), + QU( 57026775058331227LLU), QU( 950827867930065454LLU), + QU( 6602279670145371217LLU), QU( 2291171535443566929LLU), + QU( 5832380724425010313LLU), QU( 1220343904715982285LLU), + QU(17045542598598037633LLU), QU(15460481779702820971LLU), + QU(13948388779949365130LLU), QU(13975040175430829518LLU), + QU(17477538238425541763LLU), QU(11104663041851745725LLU), + QU(15860992957141157587LLU), 
QU(14529434633012950138LLU), + QU( 2504838019075394203LLU), QU( 7512113882611121886LLU), + QU( 4859973559980886617LLU), QU( 1258601555703250219LLU), + QU(15594548157514316394LLU), QU( 4516730171963773048LLU), + QU(11380103193905031983LLU), QU( 6809282239982353344LLU), + QU(18045256930420065002LLU), QU( 2453702683108791859LLU), + QU( 977214582986981460LLU), QU( 2006410402232713466LLU), + QU( 6192236267216378358LLU), QU( 3429468402195675253LLU), + QU(18146933153017348921LLU), QU(17369978576367231139LLU), + QU( 1246940717230386603LLU), QU(11335758870083327110LLU), + QU(14166488801730353682LLU), QU( 9008573127269635732LLU), + QU(10776025389820643815LLU), QU(15087605441903942962LLU), + QU( 1359542462712147922LLU), QU(13898874411226454206LLU), + QU(17911176066536804411LLU), QU( 9435590428600085274LLU), + QU( 294488509967864007LLU), QU( 8890111397567922046LLU), + QU( 7987823476034328778LLU), QU(13263827582440967651LLU), + QU( 7503774813106751573LLU), QU(14974747296185646837LLU), + QU( 8504765037032103375LLU), QU(17340303357444536213LLU), + QU( 7704610912964485743LLU), QU( 8107533670327205061LLU), + QU( 9062969835083315985LLU), QU(16968963142126734184LLU), + QU(12958041214190810180LLU), QU( 2720170147759570200LLU), + QU( 2986358963942189566LLU), QU(14884226322219356580LLU), + QU( 286224325144368520LLU), QU(11313800433154279797LLU), + QU(18366849528439673248LLU), QU(17899725929482368789LLU), + QU( 3730004284609106799LLU), QU( 1654474302052767205LLU), + QU( 5006698007047077032LLU), QU( 8196893913601182838LLU), + QU(15214541774425211640LLU), QU(17391346045606626073LLU), + QU( 8369003584076969089LLU), QU( 3939046733368550293LLU), + QU(10178639720308707785LLU), QU( 2180248669304388697LLU), + QU( 62894391300126322LLU), QU( 9205708961736223191LLU), + QU( 6837431058165360438LLU), QU( 3150743890848308214LLU), + QU(17849330658111464583LLU), QU(12214815643135450865LLU), + QU(13410713840519603402LLU), QU( 3200778126692046802LLU), + QU(13354780043041779313LLU), QU( 800850022756886036LLU), + QU(15660052933953067433LLU), QU( 6572823544154375676LLU), + QU(11030281857015819266LLU), QU(12682241941471433835LLU), + QU(11654136407300274693LLU), QU( 4517795492388641109LLU), + QU( 9757017371504524244LLU), QU(17833043400781889277LLU), + QU(12685085201747792227LLU), QU(10408057728835019573LLU), + QU( 98370418513455221LLU), QU( 6732663555696848598LLU), + QU(13248530959948529780LLU), QU( 3530441401230622826LLU), + QU(18188251992895660615LLU), QU( 1847918354186383756LLU), + QU( 1127392190402660921LLU), QU(11293734643143819463LLU), + QU( 3015506344578682982LLU), QU(13852645444071153329LLU), + QU( 2121359659091349142LLU), QU( 1294604376116677694LLU), + QU( 5616576231286352318LLU), QU( 7112502442954235625LLU), + QU(11676228199551561689LLU), QU(12925182803007305359LLU), + QU( 7852375518160493082LLU), QU( 1136513130539296154LLU), + QU( 5636923900916593195LLU), QU( 3221077517612607747LLU), + QU(17784790465798152513LLU), QU( 3554210049056995938LLU), + QU(17476839685878225874LLU), QU( 3206836372585575732LLU), + QU( 2765333945644823430LLU), QU(10080070903718799528LLU), + QU( 5412370818878286353LLU), QU( 9689685887726257728LLU), + QU( 8236117509123533998LLU), QU( 1951139137165040214LLU), + QU( 4492205209227980349LLU), QU(16541291230861602967LLU), + QU( 1424371548301437940LLU), QU( 9117562079669206794LLU), + QU(14374681563251691625LLU), QU(13873164030199921303LLU), + QU( 6680317946770936731LLU), QU(15586334026918276214LLU), + QU(10896213950976109802LLU), QU( 9506261949596413689LLU), + QU( 9903949574308040616LLU), QU( 
6038397344557204470LLU), + QU( 174601465422373648LLU), QU(15946141191338238030LLU), + QU(17142225620992044937LLU), QU( 7552030283784477064LLU), + QU( 2947372384532947997LLU), QU( 510797021688197711LLU), + QU( 4962499439249363461LLU), QU( 23770320158385357LLU), + QU( 959774499105138124LLU), QU( 1468396011518788276LLU), + QU( 2015698006852312308LLU), QU( 4149400718489980136LLU), + QU( 5992916099522371188LLU), QU(10819182935265531076LLU), + QU(16189787999192351131LLU), QU( 342833961790261950LLU), + QU(12470830319550495336LLU), QU(18128495041912812501LLU), + QU( 1193600899723524337LLU), QU( 9056793666590079770LLU), + QU( 2154021227041669041LLU), QU( 4963570213951235735LLU), + QU( 4865075960209211409LLU), QU( 2097724599039942963LLU), + QU( 2024080278583179845LLU), QU(11527054549196576736LLU), + QU(10650256084182390252LLU), QU( 4808408648695766755LLU), + QU( 1642839215013788844LLU), QU(10607187948250398390LLU), + QU( 7076868166085913508LLU), QU( 730522571106887032LLU), + QU(12500579240208524895LLU), QU( 4484390097311355324LLU), + QU(15145801330700623870LLU), QU( 8055827661392944028LLU), + QU( 5865092976832712268LLU), QU(15159212508053625143LLU), + QU( 3560964582876483341LLU), QU( 4070052741344438280LLU), + QU( 6032585709886855634LLU), QU(15643262320904604873LLU), + QU( 2565119772293371111LLU), QU( 318314293065348260LLU), + QU(15047458749141511872LLU), QU( 7772788389811528730LLU), + QU( 7081187494343801976LLU), QU( 6465136009467253947LLU), + QU(10425940692543362069LLU), QU( 554608190318339115LLU), + QU(14796699860302125214LLU), QU( 1638153134431111443LLU), + QU(10336967447052276248LLU), QU( 8412308070396592958LLU), + QU( 4004557277152051226LLU), QU( 8143598997278774834LLU), + QU(16413323996508783221LLU), QU(13139418758033994949LLU), + QU( 9772709138335006667LLU), QU( 2818167159287157659LLU), + QU(17091740573832523669LLU), QU(14629199013130751608LLU), + QU(18268322711500338185LLU), QU( 8290963415675493063LLU), + QU( 8830864907452542588LLU), QU( 1614839084637494849LLU), + QU(14855358500870422231LLU), QU( 3472996748392519937LLU), + QU(15317151166268877716LLU), QU( 5825895018698400362LLU), + QU(16730208429367544129LLU), QU(10481156578141202800LLU), + QU( 4746166512382823750LLU), QU(12720876014472464998LLU), + QU( 8825177124486735972LLU), QU(13733447296837467838LLU), + QU( 6412293741681359625LLU), QU( 8313213138756135033LLU), + QU(11421481194803712517LLU), QU( 7997007691544174032LLU), + QU( 6812963847917605930LLU), QU( 9683091901227558641LLU), + QU(14703594165860324713LLU), QU( 1775476144519618309LLU), + QU( 2724283288516469519LLU), QU( 717642555185856868LLU), + QU( 8736402192215092346LLU), QU(11878800336431381021LLU), + QU( 4348816066017061293LLU), QU( 6115112756583631307LLU), + QU( 9176597239667142976LLU), QU(12615622714894259204LLU), + QU(10283406711301385987LLU), QU( 5111762509485379420LLU), + QU( 3118290051198688449LLU), QU( 7345123071632232145LLU), + QU( 9176423451688682359LLU), QU( 4843865456157868971LLU), + QU(12008036363752566088LLU), QU(12058837181919397720LLU), + QU( 2145073958457347366LLU), QU( 1526504881672818067LLU), + QU( 3488830105567134848LLU), QU(13208362960674805143LLU), + QU( 4077549672899572192LLU), QU( 7770995684693818365LLU), + QU( 1398532341546313593LLU), QU(12711859908703927840LLU), + QU( 1417561172594446813LLU), QU(17045191024194170604LLU), + QU( 4101933177604931713LLU), QU(14708428834203480320LLU), + QU(17447509264469407724LLU), QU(14314821973983434255LLU), + QU(17990472271061617265LLU), QU( 5087756685841673942LLU), + QU(12797820586893859939LLU), QU( 1778128952671092879LLU), 
+ QU( 3535918530508665898LLU), QU( 9035729701042481301LLU), + QU(14808661568277079962LLU), QU(14587345077537747914LLU), + QU(11920080002323122708LLU), QU( 6426515805197278753LLU), + QU( 3295612216725984831LLU), QU(11040722532100876120LLU), + QU(12305952936387598754LLU), QU(16097391899742004253LLU), + QU( 4908537335606182208LLU), QU(12446674552196795504LLU), + QU(16010497855816895177LLU), QU( 9194378874788615551LLU), + QU( 3382957529567613384LLU), QU( 5154647600754974077LLU), + QU( 9801822865328396141LLU), QU( 9023662173919288143LLU), + QU(17623115353825147868LLU), QU( 8238115767443015816LLU), + QU(15811444159859002560LLU), QU( 9085612528904059661LLU), + QU( 6888601089398614254LLU), QU( 258252992894160189LLU), + QU( 6704363880792428622LLU), QU( 6114966032147235763LLU), + QU(11075393882690261875LLU), QU( 8797664238933620407LLU), + QU( 5901892006476726920LLU), QU( 5309780159285518958LLU), + QU(14940808387240817367LLU), QU(14642032021449656698LLU), + QU( 9808256672068504139LLU), QU( 3670135111380607658LLU), + QU(11211211097845960152LLU), QU( 1474304506716695808LLU), + QU(15843166204506876239LLU), QU( 7661051252471780561LLU), + QU(10170905502249418476LLU), QU( 7801416045582028589LLU), + QU( 2763981484737053050LLU), QU( 9491377905499253054LLU), + QU(16201395896336915095LLU), QU( 9256513756442782198LLU), + QU( 5411283157972456034LLU), QU( 5059433122288321676LLU), + QU( 4327408006721123357LLU), QU( 9278544078834433377LLU), + QU( 7601527110882281612LLU), QU(11848295896975505251LLU), + QU(12096998801094735560LLU), QU(14773480339823506413LLU), + QU(15586227433895802149LLU), QU(12786541257830242872LLU), + QU( 6904692985140503067LLU), QU( 5309011515263103959LLU), + QU(12105257191179371066LLU), QU(14654380212442225037LLU), + QU( 2556774974190695009LLU), QU( 4461297399927600261LLU), + QU(14888225660915118646LLU), QU(14915459341148291824LLU), + QU( 2738802166252327631LLU), QU( 6047155789239131512LLU), + QU(12920545353217010338LLU), QU(10697617257007840205LLU), + QU( 2751585253158203504LLU), QU(13252729159780047496LLU), + QU(14700326134672815469LLU), QU(14082527904374600529LLU), + QU(16852962273496542070LLU), QU(17446675504235853907LLU), + QU(15019600398527572311LLU), QU(12312781346344081551LLU), + QU(14524667935039810450LLU), QU( 5634005663377195738LLU), + QU(11375574739525000569LLU), QU( 2423665396433260040LLU), + QU( 5222836914796015410LLU), QU( 4397666386492647387LLU), + QU( 4619294441691707638LLU), QU( 665088602354770716LLU), + QU(13246495665281593610LLU), QU( 6564144270549729409LLU), + QU(10223216188145661688LLU), QU( 3961556907299230585LLU), + QU(11543262515492439914LLU), QU(16118031437285993790LLU), + QU( 7143417964520166465LLU), QU(13295053515909486772LLU), + QU( 40434666004899675LLU), QU(17127804194038347164LLU), + QU( 8599165966560586269LLU), QU( 8214016749011284903LLU), + QU(13725130352140465239LLU), QU( 5467254474431726291LLU), + QU( 7748584297438219877LLU), QU(16933551114829772472LLU), + QU( 2169618439506799400LLU), QU( 2169787627665113463LLU), + QU(17314493571267943764LLU), QU(18053575102911354912LLU), + QU(11928303275378476973LLU), QU(11593850925061715550LLU), + QU(17782269923473589362LLU), QU( 3280235307704747039LLU), + QU( 6145343578598685149LLU), QU(17080117031114086090LLU), + QU(18066839902983594755LLU), QU( 6517508430331020706LLU), + QU( 8092908893950411541LLU), QU(12558378233386153732LLU), + QU( 4476532167973132976LLU), QU(16081642430367025016LLU), + QU( 4233154094369139361LLU), QU( 8693630486693161027LLU), + QU(11244959343027742285LLU), QU(12273503967768513508LLU), + 
QU(14108978636385284876LLU), QU( 7242414665378826984LLU), + QU( 6561316938846562432LLU), QU( 8601038474994665795LLU), + QU(17532942353612365904LLU), QU(17940076637020912186LLU), + QU( 7340260368823171304LLU), QU( 7061807613916067905LLU), + QU(10561734935039519326LLU), QU(17990796503724650862LLU), + QU( 6208732943911827159LLU), QU( 359077562804090617LLU), + QU(14177751537784403113LLU), QU(10659599444915362902LLU), + QU(15081727220615085833LLU), QU(13417573895659757486LLU), + QU(15513842342017811524LLU), QU(11814141516204288231LLU), + QU( 1827312513875101814LLU), QU( 2804611699894603103LLU), + QU(17116500469975602763LLU), QU(12270191815211952087LLU), + QU(12256358467786024988LLU), QU(18435021722453971267LLU), + QU( 671330264390865618LLU), QU( 476504300460286050LLU), + QU(16465470901027093441LLU), QU( 4047724406247136402LLU), + QU( 1322305451411883346LLU), QU( 1388308688834322280LLU), + QU( 7303989085269758176LLU), QU( 9323792664765233642LLU), + QU( 4542762575316368936LLU), QU(17342696132794337618LLU), + QU( 4588025054768498379LLU), QU(13415475057390330804LLU), + QU(17880279491733405570LLU), QU(10610553400618620353LLU), + QU( 3180842072658960139LLU), QU(13002966655454270120LLU), + QU( 1665301181064982826LLU), QU( 7083673946791258979LLU), + QU( 190522247122496820LLU), QU(17388280237250677740LLU), + QU( 8430770379923642945LLU), QU(12987180971921668584LLU), + QU( 2311086108365390642LLU), QU( 2870984383579822345LLU), + QU(14014682609164653318LLU), QU(14467187293062251484LLU), + QU( 192186361147413298LLU), QU(15171951713531796524LLU), + QU( 9900305495015948728LLU), QU(17958004775615466344LLU), + QU(14346380954498606514LLU), QU(18040047357617407096LLU), + QU( 5035237584833424532LLU), QU(15089555460613972287LLU), + QU( 4131411873749729831LLU), QU( 1329013581168250330LLU), + QU(10095353333051193949LLU), QU(10749518561022462716LLU), + QU( 9050611429810755847LLU), QU(15022028840236655649LLU), + QU( 8775554279239748298LLU), QU(13105754025489230502LLU), + QU(15471300118574167585LLU), QU( 89864764002355628LLU), + QU( 8776416323420466637LLU), QU( 5280258630612040891LLU), + QU( 2719174488591862912LLU), QU( 7599309137399661994LLU), + QU(15012887256778039979LLU), QU(14062981725630928925LLU), + QU(12038536286991689603LLU), QU( 7089756544681775245LLU), + QU(10376661532744718039LLU), QU( 1265198725901533130LLU), + QU(13807996727081142408LLU), QU( 2935019626765036403LLU), + QU( 7651672460680700141LLU), QU( 3644093016200370795LLU), + QU( 2840982578090080674LLU), QU(17956262740157449201LLU), + QU(18267979450492880548LLU), QU(11799503659796848070LLU), + QU( 9942537025669672388LLU), QU(11886606816406990297LLU), + QU( 5488594946437447576LLU), QU( 7226714353282744302LLU), + QU( 3784851653123877043LLU), QU( 878018453244803041LLU), + QU(12110022586268616085LLU), QU( 734072179404675123LLU), + QU(11869573627998248542LLU), QU( 469150421297783998LLU), + QU( 260151124912803804LLU), QU(11639179410120968649LLU), + QU( 9318165193840846253LLU), QU(12795671722734758075LLU), + QU(15318410297267253933LLU), QU( 691524703570062620LLU), + QU( 5837129010576994601LLU), QU(15045963859726941052LLU), + QU( 5850056944932238169LLU), QU(12017434144750943807LLU), + QU( 7447139064928956574LLU), QU( 3101711812658245019LLU), + QU(16052940704474982954LLU), QU(18195745945986994042LLU), + QU( 8932252132785575659LLU), QU(13390817488106794834LLU), + QU(11582771836502517453LLU), QU( 4964411326683611686LLU), + QU( 2195093981702694011LLU), QU(14145229538389675669LLU), + QU(16459605532062271798LLU), QU( 866316924816482864LLU), + QU( 
4593041209937286377LLU), QU( 8415491391910972138LLU), + QU( 4171236715600528969LLU), QU(16637569303336782889LLU), + QU( 2002011073439212680LLU), QU(17695124661097601411LLU), + QU( 4627687053598611702LLU), QU( 7895831936020190403LLU), + QU( 8455951300917267802LLU), QU( 2923861649108534854LLU), + QU( 8344557563927786255LLU), QU( 6408671940373352556LLU), + QU(12210227354536675772LLU), QU(14294804157294222295LLU), + QU(10103022425071085127LLU), QU(10092959489504123771LLU), + QU( 6554774405376736268LLU), QU(12629917718410641774LLU), + QU( 6260933257596067126LLU), QU( 2460827021439369673LLU), + QU( 2541962996717103668LLU), QU( 597377203127351475LLU), + QU( 5316984203117315309LLU), QU( 4811211393563241961LLU), + QU(13119698597255811641LLU), QU( 8048691512862388981LLU), + QU(10216818971194073842LLU), QU( 4612229970165291764LLU), + QU(10000980798419974770LLU), QU( 6877640812402540687LLU), + QU( 1488727563290436992LLU), QU( 2227774069895697318LLU), + QU(11237754507523316593LLU), QU(13478948605382290972LLU), + QU( 1963583846976858124LLU), QU( 5512309205269276457LLU), + QU( 3972770164717652347LLU), QU( 3841751276198975037LLU), + QU(10283343042181903117LLU), QU( 8564001259792872199LLU), + QU(16472187244722489221LLU), QU( 8953493499268945921LLU), + QU( 3518747340357279580LLU), QU( 4003157546223963073LLU), + QU( 3270305958289814590LLU), QU( 3966704458129482496LLU), + QU( 8122141865926661939LLU), QU(14627734748099506653LLU), + QU(13064426990862560568LLU), QU( 2414079187889870829LLU), + QU( 5378461209354225306LLU), QU(10841985740128255566LLU), + QU( 538582442885401738LLU), QU( 7535089183482905946LLU), + QU(16117559957598879095LLU), QU( 8477890721414539741LLU), + QU( 1459127491209533386LLU), QU(17035126360733620462LLU), + QU( 8517668552872379126LLU), QU(10292151468337355014LLU), + QU(17081267732745344157LLU), QU(13751455337946087178LLU), + QU(14026945459523832966LLU), QU( 6653278775061723516LLU), + QU(10619085543856390441LLU), QU( 2196343631481122885LLU), + QU(10045966074702826136LLU), QU(10082317330452718282LLU), + QU( 5920859259504831242LLU), QU( 9951879073426540617LLU), + QU( 7074696649151414158LLU), QU(15808193543879464318LLU), + QU( 7385247772746953374LLU), QU( 3192003544283864292LLU), + QU(18153684490917593847LLU), QU(12423498260668568905LLU), + QU(10957758099756378169LLU), QU(11488762179911016040LLU), + QU( 2099931186465333782LLU), QU(11180979581250294432LLU), + QU( 8098916250668367933LLU), QU( 3529200436790763465LLU), + QU(12988418908674681745LLU), QU( 6147567275954808580LLU), + QU( 3207503344604030989LLU), QU(10761592604898615360LLU), + QU( 229854861031893504LLU), QU( 8809853962667144291LLU), + QU(13957364469005693860LLU), QU( 7634287665224495886LLU), + QU(12353487366976556874LLU), QU( 1134423796317152034LLU), + QU( 2088992471334107068LLU), QU( 7393372127190799698LLU), + QU( 1845367839871058391LLU), QU( 207922563987322884LLU), + QU(11960870813159944976LLU), QU(12182120053317317363LLU), + QU(17307358132571709283LLU), QU(13871081155552824936LLU), + QU(18304446751741566262LLU), QU( 7178705220184302849LLU), + QU(10929605677758824425LLU), QU(16446976977835806844LLU), + QU(13723874412159769044LLU), QU( 6942854352100915216LLU), + QU( 1726308474365729390LLU), QU( 2150078766445323155LLU), + QU(15345558947919656626LLU), QU(12145453828874527201LLU), + QU( 2054448620739726849LLU), QU( 2740102003352628137LLU), + QU(11294462163577610655LLU), QU( 756164283387413743LLU), + QU(17841144758438810880LLU), QU(10802406021185415861LLU), + QU( 8716455530476737846LLU), QU( 6321788834517649606LLU), + 
QU(14681322910577468426LLU), QU(17330043563884336387LLU), + QU(12701802180050071614LLU), QU(14695105111079727151LLU), + QU( 5112098511654172830LLU), QU( 4957505496794139973LLU), + QU( 8270979451952045982LLU), QU(12307685939199120969LLU), + QU(12425799408953443032LLU), QU( 8376410143634796588LLU), + QU(16621778679680060464LLU), QU( 3580497854566660073LLU), + QU( 1122515747803382416LLU), QU( 857664980960597599LLU), + QU( 6343640119895925918LLU), QU(12878473260854462891LLU), + QU(10036813920765722626LLU), QU(14451335468363173812LLU), + QU( 5476809692401102807LLU), QU(16442255173514366342LLU), + QU(13060203194757167104LLU), QU(14354124071243177715LLU), + QU(15961249405696125227LLU), QU(13703893649690872584LLU), + QU( 363907326340340064LLU), QU( 6247455540491754842LLU), + QU(12242249332757832361LLU), QU( 156065475679796717LLU), + QU( 9351116235749732355LLU), QU( 4590350628677701405LLU), + QU( 1671195940982350389LLU), QU(13501398458898451905LLU), + QU( 6526341991225002255LLU), QU( 1689782913778157592LLU), + QU( 7439222350869010334LLU), QU(13975150263226478308LLU), + QU(11411961169932682710LLU), QU(17204271834833847277LLU), + QU( 541534742544435367LLU), QU( 6591191931218949684LLU), + QU( 2645454775478232486LLU), QU( 4322857481256485321LLU), + QU( 8477416487553065110LLU), QU(12902505428548435048LLU), + QU( 971445777981341415LLU), QU(14995104682744976712LLU), + QU( 4243341648807158063LLU), QU( 8695061252721927661LLU), + QU( 5028202003270177222LLU), QU( 2289257340915567840LLU), + QU(13870416345121866007LLU), QU(13994481698072092233LLU), + QU( 6912785400753196481LLU), QU( 2278309315841980139LLU), + QU( 4329765449648304839LLU), QU( 5963108095785485298LLU), + QU( 4880024847478722478LLU), QU(16015608779890240947LLU), + QU( 1866679034261393544LLU), QU( 914821179919731519LLU), + QU( 9643404035648760131LLU), QU( 2418114953615593915LLU), + QU( 944756836073702374LLU), QU(15186388048737296834LLU), + QU( 7723355336128442206LLU), QU( 7500747479679599691LLU), + QU(18013961306453293634LLU), QU( 2315274808095756456LLU), + QU(13655308255424029566LLU), QU(17203800273561677098LLU), + QU( 1382158694422087756LLU), QU( 5090390250309588976LLU), + QU( 517170818384213989LLU), QU( 1612709252627729621LLU), + QU( 1330118955572449606LLU), QU( 300922478056709885LLU), + QU(18115693291289091987LLU), QU(13491407109725238321LLU), + QU(15293714633593827320LLU), QU( 5151539373053314504LLU), + QU( 5951523243743139207LLU), QU(14459112015249527975LLU), + QU( 5456113959000700739LLU), QU( 3877918438464873016LLU), + QU(12534071654260163555LLU), QU(15871678376893555041LLU), + QU(11005484805712025549LLU), QU(16353066973143374252LLU), + QU( 4358331472063256685LLU), QU( 8268349332210859288LLU), + QU(12485161590939658075LLU), QU(13955993592854471343LLU), + QU( 5911446886848367039LLU), QU(14925834086813706974LLU), + QU( 6590362597857994805LLU), QU( 1280544923533661875LLU), + QU( 1637756018947988164LLU), QU( 4734090064512686329LLU), + QU(16693705263131485912LLU), QU( 6834882340494360958LLU), + QU( 8120732176159658505LLU), QU( 2244371958905329346LLU), + QU(10447499707729734021LLU), QU( 7318742361446942194LLU), + QU( 8032857516355555296LLU), QU(14023605983059313116LLU), + QU( 1032336061815461376LLU), QU( 9840995337876562612LLU), + QU( 9869256223029203587LLU), QU(12227975697177267636LLU), + QU(12728115115844186033LLU), QU( 7752058479783205470LLU), + QU( 729733219713393087LLU), QU(12954017801239007622LLU) +}; +static const uint64_t init_by_array_64_expected[] = { + QU( 2100341266307895239LLU), QU( 8344256300489757943LLU), + 
QU(15687933285484243894LLU), QU( 8268620370277076319LLU), + QU(12371852309826545459LLU), QU( 8800491541730110238LLU), + QU(18113268950100835773LLU), QU( 2886823658884438119LLU), + QU( 3293667307248180724LLU), QU( 9307928143300172731LLU), + QU( 7688082017574293629LLU), QU( 900986224735166665LLU), + QU( 9977972710722265039LLU), QU( 6008205004994830552LLU), + QU( 546909104521689292LLU), QU( 7428471521869107594LLU), + QU(14777563419314721179LLU), QU(16116143076567350053LLU), + QU( 5322685342003142329LLU), QU( 4200427048445863473LLU), + QU( 4693092150132559146LLU), QU(13671425863759338582LLU), + QU( 6747117460737639916LLU), QU( 4732666080236551150LLU), + QU( 5912839950611941263LLU), QU( 3903717554504704909LLU), + QU( 2615667650256786818LLU), QU(10844129913887006352LLU), + QU(13786467861810997820LLU), QU(14267853002994021570LLU), + QU(13767807302847237439LLU), QU(16407963253707224617LLU), + QU( 4802498363698583497LLU), QU( 2523802839317209764LLU), + QU( 3822579397797475589LLU), QU( 8950320572212130610LLU), + QU( 3745623504978342534LLU), QU(16092609066068482806LLU), + QU( 9817016950274642398LLU), QU(10591660660323829098LLU), + QU(11751606650792815920LLU), QU( 5122873818577122211LLU), + QU(17209553764913936624LLU), QU( 6249057709284380343LLU), + QU(15088791264695071830LLU), QU(15344673071709851930LLU), + QU( 4345751415293646084LLU), QU( 2542865750703067928LLU), + QU(13520525127852368784LLU), QU(18294188662880997241LLU), + QU( 3871781938044881523LLU), QU( 2873487268122812184LLU), + QU(15099676759482679005LLU), QU(15442599127239350490LLU), + QU( 6311893274367710888LLU), QU( 3286118760484672933LLU), + QU( 4146067961333542189LLU), QU(13303942567897208770LLU), + QU( 8196013722255630418LLU), QU( 4437815439340979989LLU), + QU(15433791533450605135LLU), QU( 4254828956815687049LLU), + QU( 1310903207708286015LLU), QU(10529182764462398549LLU), + QU(14900231311660638810LLU), QU( 9727017277104609793LLU), + QU( 1821308310948199033LLU), QU(11628861435066772084LLU), + QU( 9469019138491546924LLU), QU( 3145812670532604988LLU), + QU( 9938468915045491919LLU), QU( 1562447430672662142LLU), + QU(13963995266697989134LLU), QU( 3356884357625028695LLU), + QU( 4499850304584309747LLU), QU( 8456825817023658122LLU), + QU(10859039922814285279LLU), QU( 8099512337972526555LLU), + QU( 348006375109672149LLU), QU(11919893998241688603LLU), + QU( 1104199577402948826LLU), QU(16689191854356060289LLU), + QU(10992552041730168078LLU), QU( 7243733172705465836LLU), + QU( 5668075606180319560LLU), QU(18182847037333286970LLU), + QU( 4290215357664631322LLU), QU( 4061414220791828613LLU), + QU(13006291061652989604LLU), QU( 7140491178917128798LLU), + QU(12703446217663283481LLU), QU( 5500220597564558267LLU), + QU(10330551509971296358LLU), QU(15958554768648714492LLU), + QU( 5174555954515360045LLU), QU( 1731318837687577735LLU), + QU( 3557700801048354857LLU), QU(13764012341928616198LLU), + QU(13115166194379119043LLU), QU( 7989321021560255519LLU), + QU( 2103584280905877040LLU), QU( 9230788662155228488LLU), + QU(16396629323325547654LLU), QU( 657926409811318051LLU), + QU(15046700264391400727LLU), QU( 5120132858771880830LLU), + QU( 7934160097989028561LLU), QU( 6963121488531976245LLU), + QU(17412329602621742089LLU), QU(15144843053931774092LLU), + QU(17204176651763054532LLU), QU(13166595387554065870LLU), + QU( 8590377810513960213LLU), QU( 5834365135373991938LLU), + QU( 7640913007182226243LLU), QU( 3479394703859418425LLU), + QU(16402784452644521040LLU), QU( 4993979809687083980LLU), + QU(13254522168097688865LLU), QU(15643659095244365219LLU), + QU( 
5881437660538424982LLU), QU(11174892200618987379LLU), + QU( 254409966159711077LLU), QU(17158413043140549909LLU), + QU( 3638048789290376272LLU), QU( 1376816930299489190LLU), + QU( 4622462095217761923LLU), QU(15086407973010263515LLU), + QU(13253971772784692238LLU), QU( 5270549043541649236LLU), + QU(11182714186805411604LLU), QU(12283846437495577140LLU), + QU( 5297647149908953219LLU), QU(10047451738316836654LLU), + QU( 4938228100367874746LLU), QU(12328523025304077923LLU), + QU( 3601049438595312361LLU), QU( 9313624118352733770LLU), + QU(13322966086117661798LLU), QU(16660005705644029394LLU), + QU(11337677526988872373LLU), QU(13869299102574417795LLU), + QU(15642043183045645437LLU), QU( 3021755569085880019LLU), + QU( 4979741767761188161LLU), QU(13679979092079279587LLU), + QU( 3344685842861071743LLU), QU(13947960059899588104LLU), + QU( 305806934293368007LLU), QU( 5749173929201650029LLU), + QU(11123724852118844098LLU), QU(15128987688788879802LLU), + QU(15251651211024665009LLU), QU( 7689925933816577776LLU), + QU(16732804392695859449LLU), QU(17087345401014078468LLU), + QU(14315108589159048871LLU), QU( 4820700266619778917LLU), + QU(16709637539357958441LLU), QU( 4936227875177351374LLU), + QU( 2137907697912987247LLU), QU(11628565601408395420LLU), + QU( 2333250549241556786LLU), QU( 5711200379577778637LLU), + QU( 5170680131529031729LLU), QU(12620392043061335164LLU), + QU( 95363390101096078LLU), QU( 5487981914081709462LLU), + QU( 1763109823981838620LLU), QU( 3395861271473224396LLU), + QU( 1300496844282213595LLU), QU( 6894316212820232902LLU), + QU(10673859651135576674LLU), QU( 5911839658857903252LLU), + QU(17407110743387299102LLU), QU( 8257427154623140385LLU), + QU(11389003026741800267LLU), QU( 4070043211095013717LLU), + QU(11663806997145259025LLU), QU(15265598950648798210LLU), + QU( 630585789434030934LLU), QU( 3524446529213587334LLU), + QU( 7186424168495184211LLU), QU(10806585451386379021LLU), + QU(11120017753500499273LLU), QU( 1586837651387701301LLU), + QU(17530454400954415544LLU), QU( 9991670045077880430LLU), + QU( 7550997268990730180LLU), QU( 8640249196597379304LLU), + QU( 3522203892786893823LLU), QU(10401116549878854788LLU), + QU(13690285544733124852LLU), QU( 8295785675455774586LLU), + QU(15535716172155117603LLU), QU( 3112108583723722511LLU), + QU(17633179955339271113LLU), QU(18154208056063759375LLU), + QU( 1866409236285815666LLU), QU(13326075895396412882LLU), + QU( 8756261842948020025LLU), QU( 6281852999868439131LLU), + QU(15087653361275292858LLU), QU(10333923911152949397LLU), + QU( 5265567645757408500LLU), QU(12728041843210352184LLU), + QU( 6347959327507828759LLU), QU( 154112802625564758LLU), + QU(18235228308679780218LLU), QU( 3253805274673352418LLU), + QU( 4849171610689031197LLU), QU(17948529398340432518LLU), + QU(13803510475637409167LLU), QU(13506570190409883095LLU), + QU(15870801273282960805LLU), QU( 8451286481299170773LLU), + QU( 9562190620034457541LLU), QU( 8518905387449138364LLU), + QU(12681306401363385655LLU), QU( 3788073690559762558LLU), + QU( 5256820289573487769LLU), QU( 2752021372314875467LLU), + QU( 6354035166862520716LLU), QU( 4328956378309739069LLU), + QU( 449087441228269600LLU), QU( 5533508742653090868LLU), + QU( 1260389420404746988LLU), QU(18175394473289055097LLU), + QU( 1535467109660399420LLU), QU( 8818894282874061442LLU), + QU(12140873243824811213LLU), QU(15031386653823014946LLU), + QU( 1286028221456149232LLU), QU( 6329608889367858784LLU), + QU( 9419654354945132725LLU), QU( 6094576547061672379LLU), + QU(17706217251847450255LLU), QU( 1733495073065878126LLU), + 
QU(16918923754607552663LLU), QU( 8881949849954945044LLU), + QU(12938977706896313891LLU), QU(14043628638299793407LLU), + QU(18393874581723718233LLU), QU( 6886318534846892044LLU), + QU(14577870878038334081LLU), QU(13541558383439414119LLU), + QU(13570472158807588273LLU), QU(18300760537910283361LLU), + QU( 818368572800609205LLU), QU( 1417000585112573219LLU), + QU(12337533143867683655LLU), QU(12433180994702314480LLU), + QU( 778190005829189083LLU), QU(13667356216206524711LLU), + QU( 9866149895295225230LLU), QU(11043240490417111999LLU), + QU( 1123933826541378598LLU), QU( 6469631933605123610LLU), + QU(14508554074431980040LLU), QU(13918931242962026714LLU), + QU( 2870785929342348285LLU), QU(14786362626740736974LLU), + QU(13176680060902695786LLU), QU( 9591778613541679456LLU), + QU( 9097662885117436706LLU), QU( 749262234240924947LLU), + QU( 1944844067793307093LLU), QU( 4339214904577487742LLU), + QU( 8009584152961946551LLU), QU(16073159501225501777LLU), + QU( 3335870590499306217LLU), QU(17088312653151202847LLU), + QU( 3108893142681931848LLU), QU(16636841767202792021LLU), + QU(10423316431118400637LLU), QU( 8008357368674443506LLU), + QU(11340015231914677875LLU), QU(17687896501594936090LLU), + QU(15173627921763199958LLU), QU( 542569482243721959LLU), + QU(15071714982769812975LLU), QU( 4466624872151386956LLU), + QU( 1901780715602332461LLU), QU( 9822227742154351098LLU), + QU( 1479332892928648780LLU), QU( 6981611948382474400LLU), + QU( 7620824924456077376LLU), QU(14095973329429406782LLU), + QU( 7902744005696185404LLU), QU(15830577219375036920LLU), + QU(10287076667317764416LLU), QU(12334872764071724025LLU), + QU( 4419302088133544331LLU), QU(14455842851266090520LLU), + QU(12488077416504654222LLU), QU( 7953892017701886766LLU), + QU( 6331484925529519007LLU), QU( 4902145853785030022LLU), + QU(17010159216096443073LLU), QU(11945354668653886087LLU), + QU(15112022728645230829LLU), QU(17363484484522986742LLU), + QU( 4423497825896692887LLU), QU( 8155489510809067471LLU), + QU( 258966605622576285LLU), QU( 5462958075742020534LLU), + QU( 6763710214913276228LLU), QU( 2368935183451109054LLU), + QU(14209506165246453811LLU), QU( 2646257040978514881LLU), + QU( 3776001911922207672LLU), QU( 1419304601390147631LLU), + QU(14987366598022458284LLU), QU( 3977770701065815721LLU), + QU( 730820417451838898LLU), QU( 3982991703612885327LLU), + QU( 2803544519671388477LLU), QU(17067667221114424649LLU), + QU( 2922555119737867166LLU), QU( 1989477584121460932LLU), + QU(15020387605892337354LLU), QU( 9293277796427533547LLU), + QU(10722181424063557247LLU), QU(16704542332047511651LLU), + QU( 5008286236142089514LLU), QU(16174732308747382540LLU), + QU(17597019485798338402LLU), QU(13081745199110622093LLU), + QU( 8850305883842258115LLU), QU(12723629125624589005LLU), + QU( 8140566453402805978LLU), QU(15356684607680935061LLU), + QU(14222190387342648650LLU), QU(11134610460665975178LLU), + QU( 1259799058620984266LLU), QU(13281656268025610041LLU), + QU( 298262561068153992LLU), QU(12277871700239212922LLU), + QU(13911297774719779438LLU), QU(16556727962761474934LLU), + QU(17903010316654728010LLU), QU( 9682617699648434744LLU), + QU(14757681836838592850LLU), QU( 1327242446558524473LLU), + QU(11126645098780572792LLU), QU( 1883602329313221774LLU), + QU( 2543897783922776873LLU), QU(15029168513767772842LLU), + QU(12710270651039129878LLU), QU(16118202956069604504LLU), + QU(15010759372168680524LLU), QU( 2296827082251923948LLU), + QU(10793729742623518101LLU), QU(13829764151845413046LLU), + QU(17769301223184451213LLU), QU( 3118268169210783372LLU), + 
QU(17626204544105123127LLU), QU( 7416718488974352644LLU), + QU(10450751996212925994LLU), QU( 9352529519128770586LLU), + QU( 259347569641110140LLU), QU( 8048588892269692697LLU), + QU( 1774414152306494058LLU), QU(10669548347214355622LLU), + QU(13061992253816795081LLU), QU(18432677803063861659LLU), + QU( 8879191055593984333LLU), QU(12433753195199268041LLU), + QU(14919392415439730602LLU), QU( 6612848378595332963LLU), + QU( 6320986812036143628LLU), QU(10465592420226092859LLU), + QU( 4196009278962570808LLU), QU( 3747816564473572224LLU), + QU(17941203486133732898LLU), QU( 2350310037040505198LLU), + QU( 5811779859134370113LLU), QU(10492109599506195126LLU), + QU( 7699650690179541274LLU), QU( 1954338494306022961LLU), + QU(14095816969027231152LLU), QU( 5841346919964852061LLU), + QU(14945969510148214735LLU), QU( 3680200305887550992LLU), + QU( 6218047466131695792LLU), QU( 8242165745175775096LLU), + QU(11021371934053307357LLU), QU( 1265099502753169797LLU), + QU( 4644347436111321718LLU), QU( 3609296916782832859LLU), + QU( 8109807992218521571LLU), QU(18387884215648662020LLU), + QU(14656324896296392902LLU), QU(17386819091238216751LLU), + QU(17788300878582317152LLU), QU( 7919446259742399591LLU), + QU( 4466613134576358004LLU), QU(12928181023667938509LLU), + QU(13147446154454932030LLU), QU(16552129038252734620LLU), + QU( 8395299403738822450LLU), QU(11313817655275361164LLU), + QU( 434258809499511718LLU), QU( 2074882104954788676LLU), + QU( 7929892178759395518LLU), QU( 9006461629105745388LLU), + QU( 5176475650000323086LLU), QU(11128357033468341069LLU), + QU(12026158851559118955LLU), QU(14699716249471156500LLU), + QU( 448982497120206757LLU), QU( 4156475356685519900LLU), + QU( 6063816103417215727LLU), QU(10073289387954971479LLU), + QU( 8174466846138590962LLU), QU( 2675777452363449006LLU), + QU( 9090685420572474281LLU), QU( 6659652652765562060LLU), + QU(12923120304018106621LLU), QU(11117480560334526775LLU), + QU( 937910473424587511LLU), QU( 1838692113502346645LLU), + QU(11133914074648726180LLU), QU( 7922600945143884053LLU), + QU(13435287702700959550LLU), QU( 5287964921251123332LLU), + QU(11354875374575318947LLU), QU(17955724760748238133LLU), + QU(13728617396297106512LLU), QU( 4107449660118101255LLU), + QU( 1210269794886589623LLU), QU(11408687205733456282LLU), + QU( 4538354710392677887LLU), QU(13566803319341319267LLU), + QU(17870798107734050771LLU), QU( 3354318982568089135LLU), + QU( 9034450839405133651LLU), QU(13087431795753424314LLU), + QU( 950333102820688239LLU), QU( 1968360654535604116LLU), + QU(16840551645563314995LLU), QU( 8867501803892924995LLU), + QU(11395388644490626845LLU), QU( 1529815836300732204LLU), + QU(13330848522996608842LLU), QU( 1813432878817504265LLU), + QU( 2336867432693429560LLU), QU(15192805445973385902LLU), + QU( 2528593071076407877LLU), QU( 128459777936689248LLU), + QU( 9976345382867214866LLU), QU( 6208885766767996043LLU), + QU(14982349522273141706LLU), QU( 3099654362410737822LLU), + QU(13776700761947297661LLU), QU( 8806185470684925550LLU), + QU( 8151717890410585321LLU), QU( 640860591588072925LLU), + QU(14592096303937307465LLU), QU( 9056472419613564846LLU), + QU(14861544647742266352LLU), QU(12703771500398470216LLU), + QU( 3142372800384138465LLU), QU( 6201105606917248196LLU), + QU(18337516409359270184LLU), QU(15042268695665115339LLU), + QU(15188246541383283846LLU), QU(12800028693090114519LLU), + QU( 5992859621101493472LLU), QU(18278043971816803521LLU), + QU( 9002773075219424560LLU), QU( 7325707116943598353LLU), + QU( 7930571931248040822LLU), QU( 5645275869617023448LLU), + QU( 
7266107455295958487LLU), QU( 4363664528273524411LLU), + QU(14313875763787479809LLU), QU(17059695613553486802LLU), + QU( 9247761425889940932LLU), QU(13704726459237593128LLU), + QU( 2701312427328909832LLU), QU(17235532008287243115LLU), + QU(14093147761491729538LLU), QU( 6247352273768386516LLU), + QU( 8268710048153268415LLU), QU( 7985295214477182083LLU), + QU(15624495190888896807LLU), QU( 3772753430045262788LLU), + QU( 9133991620474991698LLU), QU( 5665791943316256028LLU), + QU( 7551996832462193473LLU), QU(13163729206798953877LLU), + QU( 9263532074153846374LLU), QU( 1015460703698618353LLU), + QU(17929874696989519390LLU), QU(18257884721466153847LLU), + QU(16271867543011222991LLU), QU( 3905971519021791941LLU), + QU(16814488397137052085LLU), QU( 1321197685504621613LLU), + QU( 2870359191894002181LLU), QU(14317282970323395450LLU), + QU(13663920845511074366LLU), QU( 2052463995796539594LLU), + QU(14126345686431444337LLU), QU( 1727572121947022534LLU), + QU(17793552254485594241LLU), QU( 6738857418849205750LLU), + QU( 1282987123157442952LLU), QU(16655480021581159251LLU), + QU( 6784587032080183866LLU), QU(14726758805359965162LLU), + QU( 7577995933961987349LLU), QU(12539609320311114036LLU), + QU(10789773033385439494LLU), QU( 8517001497411158227LLU), + QU(10075543932136339710LLU), QU(14838152340938811081LLU), + QU( 9560840631794044194LLU), QU(17445736541454117475LLU), + QU(10633026464336393186LLU), QU(15705729708242246293LLU), + QU( 1117517596891411098LLU), QU( 4305657943415886942LLU), + QU( 4948856840533979263LLU), QU(16071681989041789593LLU), + QU(13723031429272486527LLU), QU( 7639567622306509462LLU), + QU(12670424537483090390LLU), QU( 9715223453097197134LLU), + QU( 5457173389992686394LLU), QU( 289857129276135145LLU), + QU(17048610270521972512LLU), QU( 692768013309835485LLU), + QU(14823232360546632057LLU), QU(18218002361317895936LLU), + QU( 3281724260212650204LLU), QU(16453957266549513795LLU), + QU( 8592711109774511881LLU), QU( 929825123473369579LLU), + QU(15966784769764367791LLU), QU( 9627344291450607588LLU), + QU(10849555504977813287LLU), QU( 9234566913936339275LLU), + QU( 6413807690366911210LLU), QU(10862389016184219267LLU), + QU(13842504799335374048LLU), QU( 1531994113376881174LLU), + QU( 2081314867544364459LLU), QU(16430628791616959932LLU), + QU( 8314714038654394368LLU), QU( 9155473892098431813LLU), + QU(12577843786670475704LLU), QU( 4399161106452401017LLU), + QU( 1668083091682623186LLU), QU( 1741383777203714216LLU), + QU( 2162597285417794374LLU), QU(15841980159165218736LLU), + QU( 1971354603551467079LLU), QU( 1206714764913205968LLU), + QU( 4790860439591272330LLU), QU(14699375615594055799LLU), + QU( 8374423871657449988LLU), QU(10950685736472937738LLU), + QU( 697344331343267176LLU), QU(10084998763118059810LLU), + QU(12897369539795983124LLU), QU(12351260292144383605LLU), + QU( 1268810970176811234LLU), QU( 7406287800414582768LLU), + QU( 516169557043807831LLU), QU( 5077568278710520380LLU), + QU( 3828791738309039304LLU), QU( 7721974069946943610LLU), + QU( 3534670260981096460LLU), QU( 4865792189600584891LLU), + QU(16892578493734337298LLU), QU( 9161499464278042590LLU), + QU(11976149624067055931LLU), QU(13219479887277343990LLU), + QU(14161556738111500680LLU), QU(14670715255011223056LLU), + QU( 4671205678403576558LLU), QU(12633022931454259781LLU), + QU(14821376219869187646LLU), QU( 751181776484317028LLU), + QU( 2192211308839047070LLU), QU(11787306362361245189LLU), + QU(10672375120744095707LLU), QU( 4601972328345244467LLU), + QU(15457217788831125879LLU), QU( 8464345256775460809LLU), + 
QU(10191938789487159478LLU), QU( 6184348739615197613LLU), + QU(11425436778806882100LLU), QU( 2739227089124319793LLU), + QU( 461464518456000551LLU), QU( 4689850170029177442LLU), + QU( 6120307814374078625LLU), QU(11153579230681708671LLU), + QU( 7891721473905347926LLU), QU(10281646937824872400LLU), + QU( 3026099648191332248LLU), QU( 8666750296953273818LLU), + QU(14978499698844363232LLU), QU(13303395102890132065LLU), + QU( 8182358205292864080LLU), QU(10560547713972971291LLU), + QU(11981635489418959093LLU), QU( 3134621354935288409LLU), + QU(11580681977404383968LLU), QU(14205530317404088650LLU), + QU( 5997789011854923157LLU), QU(13659151593432238041LLU), + QU(11664332114338865086LLU), QU( 7490351383220929386LLU), + QU( 7189290499881530378LLU), QU(15039262734271020220LLU), + QU( 2057217285976980055LLU), QU( 555570804905355739LLU), + QU(11235311968348555110LLU), QU(13824557146269603217LLU), + QU(16906788840653099693LLU), QU( 7222878245455661677LLU), + QU( 5245139444332423756LLU), QU( 4723748462805674292LLU), + QU(12216509815698568612LLU), QU(17402362976648951187LLU), + QU(17389614836810366768LLU), QU( 4880936484146667711LLU), + QU( 9085007839292639880LLU), QU(13837353458498535449LLU), + QU(11914419854360366677LLU), QU(16595890135313864103LLU), + QU( 6313969847197627222LLU), QU(18296909792163910431LLU), + QU(10041780113382084042LLU), QU( 2499478551172884794LLU), + QU(11057894246241189489LLU), QU( 9742243032389068555LLU), + QU(12838934582673196228LLU), QU(13437023235248490367LLU), + QU(13372420669446163240LLU), QU( 6752564244716909224LLU), + QU( 7157333073400313737LLU), QU(12230281516370654308LLU), + QU( 1182884552219419117LLU), QU( 2955125381312499218LLU), + QU(10308827097079443249LLU), QU( 1337648572986534958LLU), + QU(16378788590020343939LLU), QU( 108619126514420935LLU), + QU( 3990981009621629188LLU), QU( 5460953070230946410LLU), + QU( 9703328329366531883LLU), QU(13166631489188077236LLU), + QU( 1104768831213675170LLU), QU( 3447930458553877908LLU), + QU( 8067172487769945676LLU), QU( 5445802098190775347LLU), + QU( 3244840981648973873LLU), QU(17314668322981950060LLU), + QU( 5006812527827763807LLU), QU(18158695070225526260LLU), + QU( 2824536478852417853LLU), QU(13974775809127519886LLU), + QU( 9814362769074067392LLU), QU(17276205156374862128LLU), + QU(11361680725379306967LLU), QU( 3422581970382012542LLU), + QU(11003189603753241266LLU), QU(11194292945277862261LLU), + QU( 6839623313908521348LLU), QU(11935326462707324634LLU), + QU( 1611456788685878444LLU), QU(13112620989475558907LLU), + QU( 517659108904450427LLU), QU(13558114318574407624LLU), + QU(15699089742731633077LLU), QU( 4988979278862685458LLU), + QU( 8111373583056521297LLU), QU( 3891258746615399627LLU), + QU( 8137298251469718086LLU), QU(12748663295624701649LLU), + QU( 4389835683495292062LLU), QU( 5775217872128831729LLU), + QU( 9462091896405534927LLU), QU( 8498124108820263989LLU), + QU( 8059131278842839525LLU), QU(10503167994254090892LLU), + QU(11613153541070396656LLU), QU(18069248738504647790LLU), + QU( 570657419109768508LLU), QU( 3950574167771159665LLU), + QU( 5514655599604313077LLU), QU( 2908460854428484165LLU), + QU(10777722615935663114LLU), QU(12007363304839279486LLU), + QU( 9800646187569484767LLU), QU( 8795423564889864287LLU), + QU(14257396680131028419LLU), QU( 6405465117315096498LLU), + QU( 7939411072208774878LLU), QU(17577572378528990006LLU), + QU(14785873806715994850LLU), QU(16770572680854747390LLU), + QU(18127549474419396481LLU), QU(11637013449455757750LLU), + QU(14371851933996761086LLU), QU( 3601181063650110280LLU), + QU( 
4126442845019316144LLU), QU(10198287239244320669LLU), + QU(18000169628555379659LLU), QU(18392482400739978269LLU), + QU( 6219919037686919957LLU), QU( 3610085377719446052LLU), + QU( 2513925039981776336LLU), QU(16679413537926716955LLU), + QU(12903302131714909434LLU), QU( 5581145789762985009LLU), + QU(12325955044293303233LLU), QU(17216111180742141204LLU), + QU( 6321919595276545740LLU), QU( 3507521147216174501LLU), + QU( 9659194593319481840LLU), QU(11473976005975358326LLU), + QU(14742730101435987026LLU), QU( 492845897709954780LLU), + QU(16976371186162599676LLU), QU(17712703422837648655LLU), + QU( 9881254778587061697LLU), QU( 8413223156302299551LLU), + QU( 1563841828254089168LLU), QU( 9996032758786671975LLU), + QU( 138877700583772667LLU), QU(13003043368574995989LLU), + QU( 4390573668650456587LLU), QU( 8610287390568126755LLU), + QU(15126904974266642199LLU), QU( 6703637238986057662LLU), + QU( 2873075592956810157LLU), QU( 6035080933946049418LLU), + QU(13382846581202353014LLU), QU( 7303971031814642463LLU), + QU(18418024405307444267LLU), QU( 5847096731675404647LLU), + QU( 4035880699639842500LLU), QU(11525348625112218478LLU), + QU( 3041162365459574102LLU), QU( 2604734487727986558LLU), + QU(15526341771636983145LLU), QU(14556052310697370254LLU), + QU(12997787077930808155LLU), QU( 9601806501755554499LLU), + QU(11349677952521423389LLU), QU(14956777807644899350LLU), + QU(16559736957742852721LLU), QU(12360828274778140726LLU), + QU( 6685373272009662513LLU), QU(16932258748055324130LLU), + QU(15918051131954158508LLU), QU( 1692312913140790144LLU), + QU( 546653826801637367LLU), QU( 5341587076045986652LLU), + QU(14975057236342585662LLU), QU(12374976357340622412LLU), + QU(10328833995181940552LLU), QU(12831807101710443149LLU), + QU(10548514914382545716LLU), QU( 2217806727199715993LLU), + QU(12627067369242845138LLU), QU( 4598965364035438158LLU), + QU( 150923352751318171LLU), QU(14274109544442257283LLU), + QU( 4696661475093863031LLU), QU( 1505764114384654516LLU), + QU(10699185831891495147LLU), QU( 2392353847713620519LLU), + QU( 3652870166711788383LLU), QU( 8640653276221911108LLU), + QU( 3894077592275889704LLU), QU( 4918592872135964845LLU), + QU(16379121273281400789LLU), QU(12058465483591683656LLU), + QU(11250106829302924945LLU), QU( 1147537556296983005LLU), + QU( 6376342756004613268LLU), QU(14967128191709280506LLU), + QU(18007449949790627628LLU), QU( 9497178279316537841LLU), + QU( 7920174844809394893LLU), QU(10037752595255719907LLU), + QU(15875342784985217697LLU), QU(15311615921712850696LLU), + QU( 9552902652110992950LLU), QU(14054979450099721140LLU), + QU( 5998709773566417349LLU), QU(18027910339276320187LLU), + QU( 8223099053868585554LLU), QU( 7842270354824999767LLU), + QU( 4896315688770080292LLU), QU(12969320296569787895LLU), + QU( 2674321489185759961LLU), QU( 4053615936864718439LLU), + QU(11349775270588617578LLU), QU( 4743019256284553975LLU), + QU( 5602100217469723769LLU), QU(14398995691411527813LLU), + QU( 7412170493796825470LLU), QU( 836262406131744846LLU), + QU( 8231086633845153022LLU), QU( 5161377920438552287LLU), + QU( 8828731196169924949LLU), QU(16211142246465502680LLU), + QU( 3307990879253687818LLU), QU( 5193405406899782022LLU), + QU( 8510842117467566693LLU), QU( 6070955181022405365LLU), + QU(14482950231361409799LLU), QU(12585159371331138077LLU), + QU( 3511537678933588148LLU), QU( 2041849474531116417LLU), + QU(10944936685095345792LLU), QU(18303116923079107729LLU), + QU( 2720566371239725320LLU), QU( 4958672473562397622LLU), + QU( 3032326668253243412LLU), QU(13689418691726908338LLU), + QU( 
1895205511728843996LLU), QU( 8146303515271990527LLU), + QU(16507343500056113480LLU), QU( 473996939105902919LLU), + QU( 9897686885246881481LLU), QU(14606433762712790575LLU), + QU( 6732796251605566368LLU), QU( 1399778120855368916LLU), + QU( 935023885182833777LLU), QU(16066282816186753477LLU), + QU( 7291270991820612055LLU), QU(17530230393129853844LLU), + QU(10223493623477451366LLU), QU(15841725630495676683LLU), + QU(17379567246435515824LLU), QU( 8588251429375561971LLU), + QU(18339511210887206423LLU), QU(17349587430725976100LLU), + QU(12244876521394838088LLU), QU( 6382187714147161259LLU), + QU(12335807181848950831LLU), QU(16948885622305460665LLU), + QU(13755097796371520506LLU), QU(14806740373324947801LLU), + QU( 4828699633859287703LLU), QU( 8209879281452301604LLU), + QU(12435716669553736437LLU), QU(13970976859588452131LLU), + QU( 6233960842566773148LLU), QU(12507096267900505759LLU), + QU( 1198713114381279421LLU), QU(14989862731124149015LLU), + QU(15932189508707978949LLU), QU( 2526406641432708722LLU), + QU( 29187427817271982LLU), QU( 1499802773054556353LLU), + QU(10816638187021897173LLU), QU( 5436139270839738132LLU), + QU( 6659882287036010082LLU), QU( 2154048955317173697LLU), + QU(10887317019333757642LLU), QU(16281091802634424955LLU), + QU(10754549879915384901LLU), QU(10760611745769249815LLU), + QU( 2161505946972504002LLU), QU( 5243132808986265107LLU), + QU(10129852179873415416LLU), QU( 710339480008649081LLU), + QU( 7802129453068808528LLU), QU(17967213567178907213LLU), + QU(15730859124668605599LLU), QU(13058356168962376502LLU), + QU( 3701224985413645909LLU), QU(14464065869149109264LLU), + QU( 9959272418844311646LLU), QU(10157426099515958752LLU), + QU(14013736814538268528LLU), QU(17797456992065653951LLU), + QU(17418878140257344806LLU), QU(15457429073540561521LLU), + QU( 2184426881360949378LLU), QU( 2062193041154712416LLU), + QU( 8553463347406931661LLU), QU( 4913057625202871854LLU), + QU( 2668943682126618425LLU), QU(17064444737891172288LLU), + QU( 4997115903913298637LLU), QU(12019402608892327416LLU), + QU(17603584559765897352LLU), QU(11367529582073647975LLU), + QU( 8211476043518436050LLU), QU( 8676849804070323674LLU), + QU(18431829230394475730LLU), QU(10490177861361247904LLU), + QU( 9508720602025651349LLU), QU( 7409627448555722700LLU), + QU( 5804047018862729008LLU), QU(11943858176893142594LLU), + QU(11908095418933847092LLU), QU( 5415449345715887652LLU), + QU( 1554022699166156407LLU), QU( 9073322106406017161LLU), + QU( 7080630967969047082LLU), QU(18049736940860732943LLU), + QU(12748714242594196794LLU), QU( 1226992415735156741LLU), + QU(17900981019609531193LLU), QU(11720739744008710999LLU), + QU( 3006400683394775434LLU), QU(11347974011751996028LLU), + QU( 3316999628257954608LLU), QU( 8384484563557639101LLU), + QU(18117794685961729767LLU), QU( 1900145025596618194LLU), + QU(17459527840632892676LLU), QU( 5634784101865710994LLU), + QU( 7918619300292897158LLU), QU( 3146577625026301350LLU), + QU( 9955212856499068767LLU), QU( 1873995843681746975LLU), + QU( 1561487759967972194LLU), QU( 8322718804375878474LLU), + QU(11300284215327028366LLU), QU( 4667391032508998982LLU), + QU( 9820104494306625580LLU), QU(17922397968599970610LLU), + QU( 1784690461886786712LLU), QU(14940365084341346821LLU), + QU( 5348719575594186181LLU), QU(10720419084507855261LLU), + QU(14210394354145143274LLU), QU( 2426468692164000131LLU), + QU(16271062114607059202LLU), QU(14851904092357070247LLU), + QU( 6524493015693121897LLU), QU( 9825473835127138531LLU), + QU(14222500616268569578LLU), QU(15521484052007487468LLU), + 
QU(14462579404124614699LLU), QU(11012375590820665520LLU), + QU(11625327350536084927LLU), QU(14452017765243785417LLU), + QU( 9989342263518766305LLU), QU( 3640105471101803790LLU), + QU( 4749866455897513242LLU), QU(13963064946736312044LLU), + QU(10007416591973223791LLU), QU(18314132234717431115LLU), + QU( 3286596588617483450LLU), QU( 7726163455370818765LLU), + QU( 7575454721115379328LLU), QU( 5308331576437663422LLU), + QU(18288821894903530934LLU), QU( 8028405805410554106LLU), + QU(15744019832103296628LLU), QU( 149765559630932100LLU), + QU( 6137705557200071977LLU), QU(14513416315434803615LLU), + QU(11665702820128984473LLU), QU( 218926670505601386LLU), + QU( 6868675028717769519LLU), QU(15282016569441512302LLU), + QU( 5707000497782960236LLU), QU( 6671120586555079567LLU), + QU( 2194098052618985448LLU), QU(16849577895477330978LLU), + QU(12957148471017466283LLU), QU( 1997805535404859393LLU), + QU( 1180721060263860490LLU), QU(13206391310193756958LLU), + QU(12980208674461861797LLU), QU( 3825967775058875366LLU), + QU(17543433670782042631LLU), QU( 1518339070120322730LLU), + QU(16344584340890991669LLU), QU( 2611327165318529819LLU), + QU(11265022723283422529LLU), QU( 4001552800373196817LLU), + QU(14509595890079346161LLU), QU( 3528717165416234562LLU), + QU(18153222571501914072LLU), QU( 9387182977209744425LLU), + QU(10064342315985580021LLU), QU(11373678413215253977LLU), + QU( 2308457853228798099LLU), QU( 9729042942839545302LLU), + QU( 7833785471140127746LLU), QU( 6351049900319844436LLU), + QU(14454610627133496067LLU), QU(12533175683634819111LLU), + QU(15570163926716513029LLU), QU(13356980519185762498LLU) +}; + +TEST_BEGIN(test_gen_rand_32) +{ + uint32_t array32[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16)); + uint32_t array32_2[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16)); + int i; + uint32_t r32; + sfmt_t *ctx; + + assert_d_le(get_min_array_size32(), BLOCK_SIZE, + "Array size too small"); + ctx = init_gen_rand(1234); + fill_array32(ctx, array32, BLOCK_SIZE); + fill_array32(ctx, array32_2, BLOCK_SIZE); + fini_gen_rand(ctx); + + ctx = init_gen_rand(1234); + for (i = 0; i < BLOCK_SIZE; i++) { + if (i < COUNT_1) { + assert_u32_eq(array32[i], init_gen_rand_32_expected[i], + "Output mismatch for i=%d", i); + } + r32 = gen_rand32(ctx); + assert_u32_eq(r32, array32[i], + "Mismatch at array32[%d]=%x, gen=%x", i, array32[i], r32); + } + for (i = 0; i < COUNT_2; i++) { + r32 = gen_rand32(ctx); + assert_u32_eq(r32, array32_2[i], + "Mismatch at array32_2[%d]=%x, gen=%x", i, array32_2[i], + r32); + } + fini_gen_rand(ctx); +} +TEST_END + +TEST_BEGIN(test_by_array_32) +{ + uint32_t array32[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16)); + uint32_t array32_2[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16)); + int i; + uint32_t ini[4] = {0x1234, 0x5678, 0x9abc, 0xdef0}; + uint32_t r32; + sfmt_t *ctx; + + assert_d_le(get_min_array_size32(), BLOCK_SIZE, + "Array size too small"); + ctx = init_by_array(ini, 4); + fill_array32(ctx, array32, BLOCK_SIZE); + fill_array32(ctx, array32_2, BLOCK_SIZE); + fini_gen_rand(ctx); + + ctx = init_by_array(ini, 4); + for (i = 0; i < BLOCK_SIZE; i++) { + if (i < COUNT_1) { + assert_u32_eq(array32[i], init_by_array_32_expected[i], + "Output mismatch for i=%d", i); + } + r32 = gen_rand32(ctx); + assert_u32_eq(r32, array32[i], + "Mismatch at array32[%d]=%x, gen=%x", i, array32[i], r32); + } + for (i = 0; i < COUNT_2; i++) { + r32 = gen_rand32(ctx); + assert_u32_eq(r32, array32_2[i], + "Mismatch at array32_2[%d]=%x, gen=%x", i, array32_2[i], + r32); + } + fini_gen_rand(ctx); +} +TEST_END + +TEST_BEGIN(test_gen_rand_64) +{ + 
uint64_t array64[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16)); + uint64_t array64_2[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16)); + int i; + uint64_t r; + sfmt_t *ctx; + + assert_d_le(get_min_array_size64(), BLOCK_SIZE64, + "Array size too small"); + ctx = init_gen_rand(4321); + fill_array64(ctx, array64, BLOCK_SIZE64); + fill_array64(ctx, array64_2, BLOCK_SIZE64); + fini_gen_rand(ctx); + + ctx = init_gen_rand(4321); + for (i = 0; i < BLOCK_SIZE64; i++) { + if (i < COUNT_1) { + assert_u64_eq(array64[i], init_gen_rand_64_expected[i], + "Output mismatch for i=%d", i); + } + r = gen_rand64(ctx); + assert_u64_eq(r, array64[i], + "Mismatch at array64[%d]=%"PRIx64", gen=%"PRIx64, i, + array64[i], r); + } + for (i = 0; i < COUNT_2; i++) { + r = gen_rand64(ctx); + assert_u64_eq(r, array64_2[i], + "Mismatch at array64_2[%d]=%"PRIx64", gen=%"PRIx64, i, + array64_2[i], r); + } + fini_gen_rand(ctx); +} +TEST_END + +TEST_BEGIN(test_by_array_64) +{ + uint64_t array64[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16)); + uint64_t array64_2[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16)); + int i; + uint64_t r; + uint32_t ini[] = {5, 4, 3, 2, 1}; + sfmt_t *ctx; + + assert_d_le(get_min_array_size64(), BLOCK_SIZE64, + "Array size too small"); + ctx = init_by_array(ini, 5); + fill_array64(ctx, array64, BLOCK_SIZE64); + fill_array64(ctx, array64_2, BLOCK_SIZE64); + fini_gen_rand(ctx); + + ctx = init_by_array(ini, 5); + for (i = 0; i < BLOCK_SIZE64; i++) { + if (i < COUNT_1) { + assert_u64_eq(array64[i], init_by_array_64_expected[i], + "Output mismatch for i=%d", i); + } + r = gen_rand64(ctx); + assert_u64_eq(r, array64[i], + "Mismatch at array64[%d]=%"PRIx64", gen=%"PRIx64, i, + array64[i], r); + } + for (i = 0; i < COUNT_2; i++) { + r = gen_rand64(ctx); + assert_u64_eq(r, array64_2[i], + "Mismatch at array64_2[%d]=%"PRIx64", gen=%"PRIx64, i, + array64_2[i], r); + } + fini_gen_rand(ctx); +} +TEST_END + +int +main(void) +{ + + return (test( + test_gen_rand_32, + test_by_array_32, + test_gen_rand_64, + test_by_array_64)); +} diff --git a/test/bitmap.c b/test/unit/bitmap.c similarity index 57% rename from test/bitmap.c rename to test/unit/bitmap.c index b2cb6300..8086b888 100644 --- a/test/bitmap.c +++ b/test/unit/bitmap.c @@ -1,5 +1,4 @@ -#define JEMALLOC_MANGLE -#include "jemalloc_test.h" +#include "test/jemalloc_test.h" #if (LG_BITMAP_MAXBITS > 12) # define MAXBITS 4500 @@ -7,21 +6,21 @@ # define MAXBITS (1U << LG_BITMAP_MAXBITS) #endif -static void -test_bitmap_size(void) +TEST_BEGIN(test_bitmap_size) { size_t i, prev_size; prev_size = 0; for (i = 1; i <= MAXBITS; i++) { size_t size = bitmap_size(i); - assert(size >= prev_size); + assert_true(size >= prev_size, + "Bitmap size is smaller than expected"); prev_size = size; } } +TEST_END -static void -test_bitmap_init(void) +TEST_BEGIN(test_bitmap_init) { size_t i; @@ -34,16 +33,17 @@ test_bitmap_init(void) bitmap_info_ngroups(&binfo)); bitmap_init(bitmap, &binfo); - for (j = 0; j < i; j++) - assert(bitmap_get(bitmap, &binfo, j) == false); + for (j = 0; j < i; j++) { + assert_false(bitmap_get(bitmap, &binfo, j), + "Bit should be unset"); + } free(bitmap); - } } } +TEST_END -static void -test_bitmap_set(void) +TEST_BEGIN(test_bitmap_set) { size_t i; @@ -58,14 +58,15 @@ test_bitmap_set(void) for (j = 0; j < i; j++) bitmap_set(bitmap, &binfo, j); - assert(bitmap_full(bitmap, &binfo)); + assert_true(bitmap_full(bitmap, &binfo), + "All bits should be set"); free(bitmap); } } } +TEST_END -static void -test_bitmap_unset(void) +TEST_BEGIN(test_bitmap_unset) { size_t i; @@ -80,19 +81,21 @@
test_bitmap_unset(void) for (j = 0; j < i; j++) bitmap_set(bitmap, &binfo, j); - assert(bitmap_full(bitmap, &binfo)); + assert_true(bitmap_full(bitmap, &binfo), + "All bits should be set"); for (j = 0; j < i; j++) bitmap_unset(bitmap, &binfo, j); for (j = 0; j < i; j++) bitmap_set(bitmap, &binfo, j); - assert(bitmap_full(bitmap, &binfo)); + assert_true(bitmap_full(bitmap, &binfo), + "All bits should be set"); free(bitmap); } } } +TEST_END -static void -test_bitmap_sfu(void) +TEST_BEGIN(test_bitmap_sfu) { size_t i; @@ -106,9 +109,13 @@ test_bitmap_sfu(void) bitmap_init(bitmap, &binfo); /* Iteratively set bits starting at the beginning. */ - for (j = 0; j < i; j++) - assert(bitmap_sfu(bitmap, &binfo) == j); - assert(bitmap_full(bitmap, &binfo)); + for (j = 0; j < i; j++) { + assert_zd_eq(bitmap_sfu(bitmap, &binfo), j, + "First unset bit should be just after " + "previous first unset bit"); + } + assert_true(bitmap_full(bitmap, &binfo), + "All bits should be set"); /* * Iteratively unset bits starting at the end, and @@ -116,10 +123,13 @@ test_bitmap_sfu(void) */ for (j = i - 1; j >= 0; j--) { bitmap_unset(bitmap, &binfo, j); - assert(bitmap_sfu(bitmap, &binfo) == j); + assert_zd_eq(bitmap_sfu(bitmap, &binfo), j, + "First unset bit should be the bit previously " + "unset"); bitmap_unset(bitmap, &binfo, j); } - assert(bitmap_get(bitmap, &binfo, 0) == false); + assert_false(bitmap_get(bitmap, &binfo, 0), + "Bit should be unset"); /* * Iteratively set bits starting at the beginning, and @@ -127,27 +137,29 @@ test_bitmap_sfu(void) */ for (j = 1; j < i; j++) { bitmap_set(bitmap, &binfo, j - 1); - assert(bitmap_sfu(bitmap, &binfo) == j); + assert_zd_eq(bitmap_sfu(bitmap, &binfo), j, + "First unset bit should be just after the " + "bit previously set"); bitmap_unset(bitmap, &binfo, j); } - assert(bitmap_sfu(bitmap, &binfo) == i - 1); - assert(bitmap_full(bitmap, &binfo)); + assert_zd_eq(bitmap_sfu(bitmap, &binfo), i - 1, + "First unset bit should be the last bit"); + assert_true(bitmap_full(bitmap, &binfo), + "All bits should be set"); free(bitmap); } } } +TEST_END int main(void) { - malloc_printf("Test begin\n"); - test_bitmap_size(); - test_bitmap_init(); - test_bitmap_set(); - test_bitmap_unset(); - test_bitmap_sfu(); - - malloc_printf("Test end\n"); - return (0); + return (test( + test_bitmap_size, + test_bitmap_init, + test_bitmap_set, + test_bitmap_unset, + test_bitmap_sfu)); } diff --git a/test/unit/ckh.c b/test/unit/ckh.c new file mode 100644 index 00000000..69fd7f52 --- /dev/null +++ b/test/unit/ckh.c @@ -0,0 +1,206 @@ +#include "test/jemalloc_test.h" + +TEST_BEGIN(test_new_delete) +{ + ckh_t ckh; + + assert_false(ckh_new(&ckh, 2, ckh_string_hash, ckh_string_keycomp), + "Unexpected ckh_new() error"); + ckh_delete(&ckh); + + assert_false(ckh_new(&ckh, 3, ckh_pointer_hash, ckh_pointer_keycomp), + "Unexpected ckh_new() error"); + ckh_delete(&ckh); +} +TEST_END + +TEST_BEGIN(test_count_insert_search_remove) +{ + ckh_t ckh; + const char *strs[] = { + "a string", + "A string", + "a string.", + "A string." + }; + const char *missing = "A string not in the hash table."; + size_t i; + + assert_false(ckh_new(&ckh, 2, ckh_string_hash, ckh_string_keycomp), + "Unexpected ckh_new() error"); + assert_zu_eq(ckh_count(&ckh), 0, + "ckh_count() should return %zu, but it returned %zu", ZU(0), + ckh_count(&ckh)); + + /* Insert.
*/ + for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) { + ckh_insert(&ckh, strs[i], strs[i]); + assert_zu_eq(ckh_count(&ckh), i+1, + "ckh_count() should return %zu, but it returned %zu", i+1, + ckh_count(&ckh)); + } + + /* Search. */ + for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) { + union { + void *p; + const char *s; + } k, v; + void **kp, **vp; + const char *ks, *vs; + + kp = (i & 1) ? &k.p : NULL; + vp = (i & 2) ? &v.p : NULL; + k.p = NULL; + v.p = NULL; + assert_false(ckh_search(&ckh, strs[i], kp, vp), + "Unexpected ckh_search() error"); + + ks = (i & 1) ? strs[i] : (const char *)NULL; + vs = (i & 2) ? strs[i] : (const char *)NULL; + assert_ptr_eq((void *)ks, (void *)k.s, + "Key mismatch, i=%zu", i); + assert_ptr_eq((void *)vs, (void *)v.s, + "Value mismatch, i=%zu", i); + } + assert_true(ckh_search(&ckh, missing, NULL, NULL), + "Unexpected ckh_search() success"); + + /* Remove. */ + for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) { + union { + void *p; + const char *s; + } k, v; + void **kp, **vp; + const char *ks, *vs; + + kp = (i & 1) ? &k.p : NULL; + vp = (i & 2) ? &v.p : NULL; + k.p = NULL; + v.p = NULL; + assert_false(ckh_remove(&ckh, strs[i], kp, vp), + "Unexpected ckh_remove() error"); + + ks = (i & 1) ? strs[i] : (const char *)NULL; + vs = (i & 2) ? strs[i] : (const char *)NULL; + assert_ptr_eq((void *)ks, (void *)k.s, + "Key mismatch, i=%zu", i); + assert_ptr_eq((void *)vs, (void *)v.s, + "Value mismatch, i=%zu", i); + assert_zu_eq(ckh_count(&ckh), + sizeof(strs)/sizeof(const char *) - i - 1, + "ckh_count() should return %zu, but it returned %zu", + sizeof(strs)/sizeof(const char *) - i - 1, + ckh_count(&ckh)); + } + + ckh_delete(&ckh); +} +TEST_END + +TEST_BEGIN(test_insert_iter_remove) +{ +#define NITEMS ZU(1000) + ckh_t ckh; + void **p[NITEMS]; + void *q, *r; + unsigned i; + + assert_false(ckh_new(&ckh, 2, ckh_pointer_hash, ckh_pointer_keycomp), + "Unexpected ckh_new() error"); + + for (i = 0; i < NITEMS; i++) { + p[i] = mallocx(i+1, 0); + assert_ptr_not_null(p[i], "Unexpected mallocx() failure"); + } + + for (i = 0; i < NITEMS; i++) { + unsigned j; + + for (j = i; j < NITEMS; j++) { + assert_false(ckh_insert(&ckh, p[j], p[j]), + "Unexpected ckh_insert() failure"); + assert_false(ckh_search(&ckh, p[j], &q, &r), + "Unexpected ckh_search() failure"); + assert_ptr_eq(p[j], q, "Key pointer mismatch"); + assert_ptr_eq(p[j], r, "Value pointer mismatch"); + } + + assert_zu_eq(ckh_count(&ckh), NITEMS, + "ckh_count() should return %zu, but it returned %zu", + NITEMS, ckh_count(&ckh)); + + for (j = i + 1; j < NITEMS; j++) { + assert_false(ckh_search(&ckh, p[j], NULL, NULL), + "Unexpected ckh_search() failure"); + assert_false(ckh_remove(&ckh, p[j], &q, &r), + "Unexpected ckh_remove() failure"); + assert_ptr_eq(p[j], q, "Key pointer mismatch"); + assert_ptr_eq(p[j], r, "Value pointer mismatch"); + assert_true(ckh_search(&ckh, p[j], NULL, NULL), + "Unexpected ckh_search() success"); + assert_true(ckh_remove(&ckh, p[j], &q, &r), + "Unexpected ckh_remove() success"); + } + + { + bool seen[NITEMS]; + size_t tabind; + + memset(seen, 0, sizeof(seen)); + + for (tabind = 0; ckh_iter(&ckh, &tabind, &q, &r) == + false;) { + unsigned k; + + assert_ptr_eq(q, r, "Key and val not equal"); + + for (k = 0; k < NITEMS; k++) { + if (p[k] == q) { + assert_false(seen[k], + "Item %u already seen", k); + seen[k] = true; + break; + } + } + } + + for (j = 0; j < i + 1; j++) + assert_true(seen[j], "Item %u not seen", j); + for (; j < NITEMS; j++) + assert_false(seen[j], "Item %u
seen", j); + } + } + + for (i = 0; i < NITEMS; i++) { + assert_false(ckh_search(&ckh, p[i], NULL, NULL), + "Unexpected ckh_search() failure"); + assert_false(ckh_remove(&ckh, p[i], &q, &r), + "Unexpected ckh_remove() failure"); + assert_ptr_eq(p[i], q, "Key pointer mismatch"); + assert_ptr_eq(p[i], r, "Value pointer mismatch"); + assert_true(ckh_search(&ckh, p[i], NULL, NULL), + "Unexpected ckh_search() success"); + assert_true(ckh_remove(&ckh, p[i], &q, &r), + "Unexpected ckh_remove() success"); + dallocx(p[i], 0); + } + + assert_zu_eq(ckh_count(&ckh), 0, + "ckh_count() should return %zu, but it returned %zu", ZU(0), + ckh_count(&ckh)); + ckh_delete(&ckh); +#undef NITEMS +} +TEST_END + +int +main(void) +{ + + return (test( + test_new_delete, + test_count_insert_search_remove, + test_insert_iter_remove)); +} diff --git a/test/unit/hash.c b/test/unit/hash.c new file mode 100644 index 00000000..0446e524 --- /dev/null +++ b/test/unit/hash.c @@ -0,0 +1,165 @@ +/* + * This file is based on code that is part of SMHasher + * (https://code.google.com/p/smhasher/), and is subject to the MIT license + * (http://www.opensource.org/licenses/mit-license.php). Both email addresses + * associated with the source code's revision history belong to Austin Appleby, + * and the revision history ranges from 2010 to 2012. Therefore the copyright + * and license are here taken to be: + * + * Copyright (c) 2010-2012 Austin Appleby + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE.
+ */ + +#include "test/jemalloc_test.h" + +typedef enum { + hash_variant_x86_32, + hash_variant_x86_128, + hash_variant_x64_128 +} hash_variant_t; + +static size_t +hash_variant_bits(hash_variant_t variant) +{ + + switch (variant) { + case hash_variant_x86_32: return (32); + case hash_variant_x86_128: return (128); + case hash_variant_x64_128: return (128); + default: not_reached(); + } +} + +static const char * +hash_variant_string(hash_variant_t variant) +{ + + switch (variant) { + case hash_variant_x86_32: return ("hash_x86_32"); + case hash_variant_x86_128: return ("hash_x86_128"); + case hash_variant_x64_128: return ("hash_x64_128"); + default: not_reached(); + } +} + +static void +hash_variant_verify(hash_variant_t variant) +{ + const size_t hashbytes = hash_variant_bits(variant) / 8; + uint8_t key[256]; + uint8_t hashes[hashbytes * 256]; + uint8_t final[hashbytes]; + unsigned i; + uint32_t computed, expected; + + memset(key, 0, sizeof(key)); + memset(hashes, 0, sizeof(hashes)); + memset(final, 0, sizeof(final)); + + /* + * Hash keys of the form {0}, {0,1}, {0,1,2}, ..., {0,1,...,255}, using + * 256-i as the seed. + */ + for (i = 0; i < 256; i++) { + key[i] = (uint8_t)i; + switch (variant) { + case hash_variant_x86_32: { + uint32_t out; + out = hash_x86_32(key, i, 256-i); + memcpy(&hashes[i*hashbytes], &out, hashbytes); + break; + } case hash_variant_x86_128: { + uint64_t out[2]; + hash_x86_128(key, i, 256-i, out); + memcpy(&hashes[i*hashbytes], out, hashbytes); + break; + } case hash_variant_x64_128: { + uint64_t out[2]; + hash_x64_128(key, i, 256-i, out); + memcpy(&hashes[i*hashbytes], out, hashbytes); + break; + } default: not_reached(); + } + } + + /* Hash the result array. */ + switch (variant) { + case hash_variant_x86_32: { + uint32_t out = hash_x86_32(hashes, hashbytes*256, 0); + memcpy(final, &out, sizeof(out)); + break; + } case hash_variant_x86_128: { + uint64_t out[2]; + hash_x86_128(hashes, hashbytes*256, 0, out); + memcpy(final, out, sizeof(out)); + break; + } case hash_variant_x64_128: { + uint64_t out[2]; + hash_x64_128(hashes, hashbytes*256, 0, out); + memcpy(final, out, sizeof(out)); + break; + } default: not_reached(); + } + + computed = (final[0] << 0) | (final[1] << 8) | (final[2] << 16) | + (final[3] << 24); + + switch (variant) { + case hash_variant_x86_32: expected = 0xb0f57ee3U; break; + case hash_variant_x86_128: expected = 0xb3ece62aU; break; + case hash_variant_x64_128: expected = 0x6384ba69U; break; + default: not_reached(); + } + + assert_u32_eq(computed, expected, + "Hash mismatch for %s(): expected %#x but got %#x", + hash_variant_string(variant), expected, computed); +} + +TEST_BEGIN(test_hash_x86_32) +{ + + hash_variant_verify(hash_variant_x86_32); +} +TEST_END + +TEST_BEGIN(test_hash_x86_128) +{ + + hash_variant_verify(hash_variant_x86_128); +} +TEST_END + +TEST_BEGIN(test_hash_x64_128) +{ + + hash_variant_verify(hash_variant_x64_128); +} +TEST_END + +int +main(void) +{ + + return (test( + test_hash_x86_32, + test_hash_x86_128, + test_hash_x64_128)); +} diff --git a/test/unit/junk.c b/test/unit/junk.c new file mode 100644 index 00000000..e27db2fe --- /dev/null +++ b/test/unit/junk.c @@ -0,0 +1,219 @@ +#include "test/jemalloc_test.h" + +#ifdef JEMALLOC_FILL +const char *malloc_conf = + "abort:false,junk:true,zero:false,redzone:true,quarantine:0"; +#endif + +static arena_dalloc_junk_small_t *arena_dalloc_junk_small_orig; +static arena_dalloc_junk_large_t *arena_dalloc_junk_large_orig; +static huge_dalloc_junk_t *huge_dalloc_junk_orig; +static void
*most_recently_junked; + +static void +arena_dalloc_junk_small_intercept(void *ptr, arena_bin_info_t *bin_info) +{ + size_t i; + + arena_dalloc_junk_small_orig(ptr, bin_info); + for (i = 0; i < bin_info->reg_size; i++) { + assert_c_eq(((char *)ptr)[i], 0x5a, + "Missing junk fill for byte %zu/%zu of deallocated region", + i, bin_info->reg_size); + } + most_recently_junked = ptr; +} + +static void +arena_dalloc_junk_large_intercept(void *ptr, size_t usize) +{ + size_t i; + + arena_dalloc_junk_large_orig(ptr, usize); + for (i = 0; i < usize; i++) { + assert_c_eq(((char *)ptr)[i], 0x5a, + "Missing junk fill for byte %zu/%zu of deallocated region", + i, usize); + } + most_recently_junked = ptr; +} + +static void +huge_dalloc_junk_intercept(void *ptr, size_t usize) +{ + + huge_dalloc_junk_orig(ptr, usize); + /* + * The conditions under which junk filling actually occurs are nuanced + * enough that it doesn't make sense to duplicate the decision logic in + * test code, so don't actually check that the region is junk-filled. + */ + most_recently_junked = ptr; +} + +static void +test_junk(size_t sz_min, size_t sz_max) +{ + char *s; + size_t sz_prev, sz, i; + + arena_dalloc_junk_small_orig = arena_dalloc_junk_small; + arena_dalloc_junk_small = arena_dalloc_junk_small_intercept; + arena_dalloc_junk_large_orig = arena_dalloc_junk_large; + arena_dalloc_junk_large = arena_dalloc_junk_large_intercept; + huge_dalloc_junk_orig = huge_dalloc_junk; + huge_dalloc_junk = huge_dalloc_junk_intercept; + + sz_prev = 0; + s = (char *)mallocx(sz_min, 0); + assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); + + for (sz = sallocx(s, 0); sz <= sz_max; + sz_prev = sz, sz = sallocx(s, 0)) { + if (sz_prev > 0) { + assert_c_eq(s[0], 'a', + "Previously allocated byte %zu/%zu is corrupted", + 0, sz_prev); + assert_c_eq(s[sz_prev-1], 'a', + "Previously allocated byte %zu/%zu is corrupted", + sz_prev-1, sz_prev); + } + + for (i = sz_prev; i < sz; i++) { + assert_c_eq(s[i], 0xa5, + "Newly allocated byte %zu/%zu isn't junk-filled", + i, sz); + s[i] = 'a'; + } + + if (xallocx(s, sz+1, 0, 0) == sz) { + void *junked = (void *)s; + + s = (char *)rallocx(s, sz+1, 0); + assert_ptr_not_null((void *)s, + "Unexpected rallocx() failure"); + assert_ptr_eq(most_recently_junked, junked, + "Expected region of size %zu to be junk-filled", + sz); + } + } + + dallocx(s, 0); + assert_ptr_eq(most_recently_junked, (void *)s, + "Expected region of size %zu to be junk-filled", sz); + + arena_dalloc_junk_small = arena_dalloc_junk_small_orig; + arena_dalloc_junk_large = arena_dalloc_junk_large_orig; + huge_dalloc_junk = huge_dalloc_junk_orig; +} + +TEST_BEGIN(test_junk_small) +{ + + test_skip_if(!config_fill); + test_junk(1, SMALL_MAXCLASS-1); +} +TEST_END + +TEST_BEGIN(test_junk_large) +{ + + test_skip_if(!config_fill); + test_junk(SMALL_MAXCLASS+1, arena_maxclass); +} +TEST_END + +TEST_BEGIN(test_junk_huge) +{ + + test_skip_if(!config_fill); + test_junk(arena_maxclass+1, chunksize*2); +} +TEST_END + +arena_ralloc_junk_large_t *arena_ralloc_junk_large_orig; +static void *most_recently_trimmed; + +static void +arena_ralloc_junk_large_intercept(void *ptr, size_t old_usize, size_t usize) +{ + + arena_ralloc_junk_large_orig(ptr, old_usize, usize); + assert_zu_eq(old_usize, arena_maxclass, "Unexpected old_usize"); + assert_zu_eq(usize, arena_maxclass-PAGE, "Unexpected usize"); + most_recently_trimmed = ptr; +} + +TEST_BEGIN(test_junk_large_ralloc_shrink) +{ + void *p1, *p2; + + p1 = mallocx(arena_maxclass, 0); + assert_ptr_not_null(p1, 
"Unexpected mallocx() failure"); + + arena_ralloc_junk_large_orig = arena_ralloc_junk_large; + arena_ralloc_junk_large = arena_ralloc_junk_large_intercept; + + p2 = rallocx(p1, arena_maxclass-PAGE, 0); + assert_ptr_eq(p1, p2, "Unexpected move during shrink"); + + arena_ralloc_junk_large = arena_ralloc_junk_large_orig; + + assert_ptr_eq(most_recently_trimmed, p1, + "Expected trimmed portion of region to be junk-filled"); +} +TEST_END + +static bool detected_redzone_corruption; + +static void +arena_redzone_corruption_replacement(void *ptr, size_t usize, bool after, + size_t offset, uint8_t byte) +{ + + detected_redzone_corruption = true; +} + +TEST_BEGIN(test_junk_redzone) +{ + char *s; + arena_redzone_corruption_t *arena_redzone_corruption_orig; + + test_skip_if(!config_fill); + + arena_redzone_corruption_orig = arena_redzone_corruption; + arena_redzone_corruption = arena_redzone_corruption_replacement; + + /* Test underflow. */ + detected_redzone_corruption = false; + s = (char *)mallocx(1, 0); + assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); + s[-1] = 0xbb; + dallocx(s, 0); + assert_true(detected_redzone_corruption, + "Did not detect redzone corruption"); + + /* Test overflow. */ + detected_redzone_corruption = false; + s = (char *)mallocx(1, 0); + assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); + s[sallocx(s, 0)] = 0xbb; + dallocx(s, 0); + assert_true(detected_redzone_corruption, + "Did not detect redzone corruption"); + + arena_redzone_corruption = arena_redzone_corruption_orig; +} +TEST_END + +int +main(void) +{ + + return (test( + test_junk_small, + test_junk_large, + test_junk_huge, + test_junk_large_ralloc_shrink, + test_junk_redzone)); +} diff --git a/test/unit/mallctl.c b/test/unit/mallctl.c new file mode 100644 index 00000000..31fb8105 --- /dev/null +++ b/test/unit/mallctl.c @@ -0,0 +1,415 @@ +#include "test/jemalloc_test.h" + +TEST_BEGIN(test_mallctl_errors) +{ + uint64_t epoch; + size_t sz; + + assert_d_eq(mallctl("no_such_name", NULL, NULL, NULL, 0), ENOENT, + "mallctl() should return ENOENT for non-existent names"); + + assert_d_eq(mallctl("version", NULL, NULL, "0.0.0", strlen("0.0.0")), + EPERM, "mallctl() should return EPERM on attempt to write " + "read-only value"); + + assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)-1), + EINVAL, "mallctl() should return EINVAL for input size mismatch"); + assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)+1), + EINVAL, "mallctl() should return EINVAL for input size mismatch"); + + sz = sizeof(epoch)-1; + assert_d_eq(mallctl("epoch", &epoch, &sz, NULL, 0), EINVAL, + "mallctl() should return EINVAL for output size mismatch"); + sz = sizeof(epoch)+1; + assert_d_eq(mallctl("epoch", &epoch, &sz, NULL, 0), EINVAL, + "mallctl() should return EINVAL for output size mismatch"); +} +TEST_END + +TEST_BEGIN(test_mallctlnametomib_errors) +{ + size_t mib[1]; + size_t miblen; + + miblen = sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("no_such_name", mib, &miblen), ENOENT, + "mallctlnametomib() should return ENOENT for non-existent names"); +} +TEST_END + +TEST_BEGIN(test_mallctlbymib_errors) +{ + uint64_t epoch; + size_t sz; + size_t mib[1]; + size_t miblen; + + miblen = sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("version", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, "0.0.0", + strlen("0.0.0")), EPERM, "mallctl() should return EPERM on " + "attempt to write read-only value"); + + miblen = 
sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("epoch", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &epoch, + sizeof(epoch)-1), EINVAL, + "mallctlbymib() should return EINVAL for input size mismatch"); + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &epoch, + sizeof(epoch)+1), EINVAL, + "mallctlbymib() should return EINVAL for input size mismatch"); + + sz = sizeof(epoch)-1; + assert_d_eq(mallctlbymib(mib, miblen, &epoch, &sz, NULL, 0), EINVAL, + "mallctlbymib() should return EINVAL for output size mismatch"); + sz = sizeof(epoch)+1; + assert_d_eq(mallctlbymib(mib, miblen, &epoch, &sz, NULL, 0), EINVAL, + "mallctlbymib() should return EINVAL for output size mismatch"); +} +TEST_END + +TEST_BEGIN(test_mallctl_read_write) +{ + uint64_t old_epoch, new_epoch; + size_t sz = sizeof(old_epoch); + + /* Blind. */ + assert_d_eq(mallctl("epoch", NULL, NULL, NULL, 0), 0, + "Unexpected mallctl() failure"); + assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); + + /* Read. */ + assert_d_eq(mallctl("epoch", &old_epoch, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); + + /* Write. */ + assert_d_eq(mallctl("epoch", NULL, NULL, &new_epoch, sizeof(new_epoch)), + 0, "Unexpected mallctl() failure"); + assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); + + /* Read+write. */ + assert_d_eq(mallctl("epoch", &old_epoch, &sz, &new_epoch, + sizeof(new_epoch)), 0, "Unexpected mallctl() failure"); + assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); +} +TEST_END + +TEST_BEGIN(test_mallctlnametomib_short_mib) +{ + size_t mib[4]; + size_t miblen; + + miblen = 3; + mib[3] = 42; + assert_d_eq(mallctlnametomib("arenas.bin.0.nregs", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + assert_zu_eq(miblen, 3, "Unexpected mib output length"); + assert_zu_eq(mib[3], 42, + "mallctlnametomib() wrote past the end of the input mib"); +} +TEST_END + +TEST_BEGIN(test_mallctl_config) +{ + +#define TEST_MALLCTL_CONFIG(config) do { \ + bool oldval; \ + size_t sz = sizeof(oldval); \ + assert_d_eq(mallctl("config."#config, &oldval, &sz, NULL, 0), \ + 0, "Unexpected mallctl() failure"); \ + assert_b_eq(oldval, config_##config, "Incorrect config value"); \ + assert_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \ +} while (0) + + TEST_MALLCTL_CONFIG(debug); + TEST_MALLCTL_CONFIG(dss); + TEST_MALLCTL_CONFIG(fill); + TEST_MALLCTL_CONFIG(lazy_lock); + TEST_MALLCTL_CONFIG(mremap); + TEST_MALLCTL_CONFIG(munmap); + TEST_MALLCTL_CONFIG(prof); + TEST_MALLCTL_CONFIG(prof_libgcc); + TEST_MALLCTL_CONFIG(prof_libunwind); + TEST_MALLCTL_CONFIG(stats); + TEST_MALLCTL_CONFIG(tcache); + TEST_MALLCTL_CONFIG(tls); + TEST_MALLCTL_CONFIG(utrace); + TEST_MALLCTL_CONFIG(valgrind); + TEST_MALLCTL_CONFIG(xmalloc); + +#undef TEST_MALLCTL_CONFIG +} +TEST_END + +TEST_BEGIN(test_mallctl_opt) +{ + bool config_always = true; + +#define TEST_MALLCTL_OPT(t, opt, config) do { \ + t oldval; \ + size_t sz = sizeof(oldval); \ + int expected = config_##config ? 
0 : ENOENT; \ + int result = mallctl("opt."#opt, &oldval, &sz, NULL, 0); \ + assert_d_eq(result, expected, \ + "Unexpected mallctl() result for opt."#opt); \ + assert_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \ +} while (0) + + TEST_MALLCTL_OPT(bool, abort, always); + TEST_MALLCTL_OPT(size_t, lg_chunk, always); + TEST_MALLCTL_OPT(const char *, dss, always); + TEST_MALLCTL_OPT(size_t, narenas, always); + TEST_MALLCTL_OPT(ssize_t, lg_dirty_mult, always); + TEST_MALLCTL_OPT(bool, stats_print, always); + TEST_MALLCTL_OPT(bool, junk, fill); + TEST_MALLCTL_OPT(size_t, quarantine, fill); + TEST_MALLCTL_OPT(bool, redzone, fill); + TEST_MALLCTL_OPT(bool, zero, fill); + TEST_MALLCTL_OPT(bool, utrace, utrace); + TEST_MALLCTL_OPT(bool, valgrind, valgrind); + TEST_MALLCTL_OPT(bool, xmalloc, xmalloc); + TEST_MALLCTL_OPT(bool, tcache, tcache); + TEST_MALLCTL_OPT(size_t, lg_tcache_max, tcache); + TEST_MALLCTL_OPT(bool, prof, prof); + TEST_MALLCTL_OPT(const char *, prof_prefix, prof); + TEST_MALLCTL_OPT(bool, prof_active, prof); + TEST_MALLCTL_OPT(ssize_t, lg_prof_sample, prof); + TEST_MALLCTL_OPT(bool, prof_accum, prof); + TEST_MALLCTL_OPT(ssize_t, lg_prof_interval, prof); + TEST_MALLCTL_OPT(bool, prof_gdump, prof); + TEST_MALLCTL_OPT(bool, prof_final, prof); + TEST_MALLCTL_OPT(bool, prof_leak, prof); + +#undef TEST_MALLCTL_OPT +} +TEST_END + +TEST_BEGIN(test_manpage_example) +{ + unsigned nbins, i; + size_t mib[4]; + size_t len, miblen; + + len = sizeof(nbins); + assert_d_eq(mallctl("arenas.nbins", &nbins, &len, NULL, 0), 0, + "Unexpected mallctl() failure"); + + miblen = 4; + assert_d_eq(mallctlnametomib("arenas.bin.0.size", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + for (i = 0; i < nbins; i++) { + size_t bin_size; + + mib[2] = i; + len = sizeof(bin_size); + assert_d_eq(mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0), + 0, "Unexpected mallctlbymib() failure"); + /* Do something with bin_size... 
*/ + } +} +TEST_END + +TEST_BEGIN(test_thread_arena) +{ + unsigned arena_old, arena_new, narenas; + size_t sz = sizeof(unsigned); + + assert_d_eq(mallctl("arenas.narenas", &narenas, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + assert_u_eq(narenas, opt_narenas, "Number of arenas incorrect"); + arena_new = narenas - 1; + assert_d_eq(mallctl("thread.arena", &arena_old, &sz, &arena_new, + sizeof(unsigned)), 0, "Unexpected mallctl() failure"); + arena_new = 0; + assert_d_eq(mallctl("thread.arena", &arena_old, &sz, &arena_new, + sizeof(unsigned)), 0, "Unexpected mallctl() failure"); +} +TEST_END + +TEST_BEGIN(test_arena_i_purge) +{ + unsigned narenas; + size_t sz = sizeof(unsigned); + size_t mib[3]; + size_t miblen = 3; + + assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, + "Unexpected mallctl() failure"); + + assert_d_eq(mallctl("arenas.narenas", &narenas, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + assert_d_eq(mallctlnametomib("arena.0.purge", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + mib[1] = narenas; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, + "Unexpected mallctlbymib() failure"); +} +TEST_END + +TEST_BEGIN(test_arena_i_dss) +{ + const char *dss_prec_old, *dss_prec_new; + size_t sz = sizeof(dss_prec_old); + + dss_prec_new = "primary"; + assert_d_eq(mallctl("arena.0.dss", &dss_prec_old, &sz, &dss_prec_new, + sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure"); + assert_str_ne(dss_prec_old, "primary", + "Unexpected default for dss precedence"); + + assert_d_eq(mallctl("arena.0.dss", &dss_prec_new, &sz, &dss_prec_old, + sizeof(dss_prec_old)), 0, "Unexpected mallctl() failure"); +} +TEST_END + +TEST_BEGIN(test_arenas_purge) +{ + unsigned arena = 0; + + assert_d_eq(mallctl("arenas.purge", NULL, NULL, &arena, sizeof(arena)), + 0, "Unexpected mallctl() failure"); + + assert_d_eq(mallctl("arenas.purge", NULL, NULL, NULL, 0), 0, + "Unexpected mallctl() failure"); +} +TEST_END + +TEST_BEGIN(test_arenas_initialized) +{ + unsigned narenas; + size_t sz = sizeof(narenas); + + assert_d_eq(mallctl("arenas.narenas", &narenas, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + { + bool initialized[narenas]; + + sz = narenas * sizeof(bool); + assert_d_eq(mallctl("arenas.initialized", initialized, &sz, + NULL, 0), 0, "Unexpected mallctl() failure"); + } +} +TEST_END + +TEST_BEGIN(test_arenas_constants) +{ + +#define TEST_ARENAS_CONSTANT(t, name, expected) do { \ + t name; \ + size_t sz = sizeof(t); \ + assert_d_eq(mallctl("arenas."#name, &name, &sz, NULL, 0), 0, \ + "Unexpected mallctl() failure"); \ + assert_zu_eq(name, expected, "Incorrect "#name" size"); \ +} while (0) + + TEST_ARENAS_CONSTANT(size_t, quantum, QUANTUM); + TEST_ARENAS_CONSTANT(size_t, page, PAGE); + TEST_ARENAS_CONSTANT(unsigned, nbins, NBINS); + TEST_ARENAS_CONSTANT(size_t, nlruns, nlclasses); + +#undef TEST_ARENAS_CONSTANT +} +TEST_END + +TEST_BEGIN(test_arenas_bin_constants) +{ + +#define TEST_ARENAS_BIN_CONSTANT(t, name, expected) do { \ + t name; \ + size_t sz = sizeof(t); \ + assert_d_eq(mallctl("arenas.bin.0."#name, &name, &sz, NULL, 0), \ + 0, "Unexpected mallctl() failure"); \ + assert_zu_eq(name, expected, "Incorrect "#name" size"); \ +} while (0) + + TEST_ARENAS_BIN_CONSTANT(size_t, size, arena_bin_info[0].reg_size); + TEST_ARENAS_BIN_CONSTANT(uint32_t, nregs, arena_bin_info[0].nregs); + TEST_ARENAS_BIN_CONSTANT(size_t, run_size, arena_bin_info[0].run_size); + +#undef TEST_ARENAS_BIN_CONSTANT +} +TEST_END + 
+TEST_BEGIN(test_arenas_lrun_constants) +{ + +#define TEST_ARENAS_LRUN_CONSTANT(t, name, expected) do { \ + t name; \ + size_t sz = sizeof(t); \ + assert_d_eq(mallctl("arenas.lrun.0."#name, &name, &sz, NULL, \ + 0), 0, "Unexpected mallctl() failure"); \ + assert_zu_eq(name, expected, "Incorrect "#name" size"); \ +} while (0) + + TEST_ARENAS_LRUN_CONSTANT(size_t, size, (1 << LG_PAGE)); + +#undef TEST_ARENAS_LRUN_CONSTANT +} +TEST_END + +TEST_BEGIN(test_arenas_extend) +{ + unsigned narenas_before, arena, narenas_after; + size_t sz = sizeof(unsigned); + + assert_d_eq(mallctl("arenas.narenas", &narenas_before, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + assert_d_eq(mallctl("arenas.extend", &arena, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + assert_d_eq(mallctl("arenas.narenas", &narenas_after, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + + assert_u_eq(narenas_before+1, narenas_after, + "Unexpected number of arenas before versus after extension"); + assert_u_eq(arena, narenas_after-1, "Unexpected arena index"); +} +TEST_END + +TEST_BEGIN(test_stats_arenas) +{ + +#define TEST_STATS_ARENAS(t, name) do { \ + t name; \ + size_t sz = sizeof(t); \ + assert_d_eq(mallctl("stats.arenas.0."#name, &name, &sz, NULL, \ + 0), 0, "Unexpected mallctl() failure"); \ +} while (0) + + TEST_STATS_ARENAS(const char *, dss); + TEST_STATS_ARENAS(unsigned, nthreads); + TEST_STATS_ARENAS(size_t, pactive); + TEST_STATS_ARENAS(size_t, pdirty); + +#undef TEST_STATS_ARENAS +} +TEST_END + +int +main(void) +{ + + return (test( + test_mallctl_errors, + test_mallctlnametomib_errors, + test_mallctlbymib_errors, + test_mallctl_read_write, + test_mallctlnametomib_short_mib, + test_mallctl_config, + test_mallctl_opt, + test_manpage_example, + test_thread_arena, + test_arena_i_purge, + test_arena_i_dss, + test_arenas_purge, + test_arenas_initialized, + test_arenas_constants, + test_arenas_bin_constants, + test_arenas_lrun_constants, + test_arenas_extend, + test_stats_arenas)); +} diff --git a/test/unit/math.c b/test/unit/math.c new file mode 100644 index 00000000..a1b288ea --- /dev/null +++ b/test/unit/math.c @@ -0,0 +1,388 @@ +#include "test/jemalloc_test.h" + +#define MAX_REL_ERR 1.0e-9 +#define MAX_ABS_ERR 1.0e-9 + +static bool +double_eq_rel(double a, double b, double max_rel_err, double max_abs_err) +{ + double rel_err; + + if (fabs(a - b) < max_abs_err) + return (true); + rel_err = (fabs(b) > fabs(a)) ? fabs((a-b)/b) : fabs((a-b)/a); + return (rel_err < max_rel_err); +} + +static uint64_t +factorial(unsigned x) +{ + uint64_t ret = 1; + unsigned i; + + for (i = 2; i <= x; i++) + ret *= (uint64_t)i; + + return (ret); +} + +TEST_BEGIN(test_ln_gamma_factorial) +{ + unsigned x; + + /* exp(ln_gamma(x)) == (x-1)! for integer x. */ + for (x = 1; x <= 21; x++) { + assert_true(double_eq_rel(exp(ln_gamma(x)), + (double)factorial(x-1), MAX_REL_ERR, MAX_ABS_ERR), + "Incorrect factorial result for x=%u", x); + } +} +TEST_END + +/* Expected ln_gamma([0.0..100.0] increment=0.25). 
*/ +static const double ln_gamma_misc_expected[] = { + INFINITY, + 1.28802252469807743, 0.57236494292470008, 0.20328095143129538, + 0.00000000000000000, -0.09827183642181320, -0.12078223763524518, + -0.08440112102048555, 0.00000000000000000, 0.12487171489239651, + 0.28468287047291918, 0.47521466691493719, 0.69314718055994529, + 0.93580193110872523, 1.20097360234707429, 1.48681557859341718, + 1.79175946922805496, 2.11445692745037128, 2.45373657084244234, + 2.80857141857573644, 3.17805383034794575, 3.56137591038669710, + 3.95781396761871651, 4.36671603662228680, 4.78749174278204581, + 5.21960398699022932, 5.66256205985714178, 6.11591589143154568, + 6.57925121201010121, 7.05218545073853953, 7.53436423675873268, + 8.02545839631598312, 8.52516136106541467, 9.03318691960512332, + 9.54926725730099690, 10.07315123968123949, 10.60460290274525086, + 11.14340011995171231, 11.68933342079726856, 12.24220494005076176, + 12.80182748008146909, 13.36802367147604720, 13.94062521940376342, + 14.51947222506051816, 15.10441257307551943, 15.69530137706046524, + 16.29200047656724237, 16.89437797963419285, 17.50230784587389010, + 18.11566950571089407, 18.73434751193644843, 19.35823122022435427, + 19.98721449566188468, 20.62119544270163018, 21.26007615624470048, + 21.90376249182879320, 22.55216385312342098, 23.20519299513386002, + 23.86276584168908954, 24.52480131594137802, 25.19122118273868338, + 25.86194990184851861, 26.53691449111561340, 27.21604439872720604, + 27.89927138384089389, 28.58652940490193828, 29.27775451504081516, + 29.97288476399884871, 30.67186010608067548, 31.37462231367769050, + 32.08111489594735843, 32.79128302226991565, 33.50507345013689076, + 34.22243445715505317, 34.94331577687681545, 35.66766853819134298, + 36.39544520803305261, 37.12659953718355865, 37.86108650896109395, + 38.59886229060776230, 39.33988418719949465, 40.08411059791735198, + 40.83150097453079752, 41.58201578195490100, 42.33561646075348506, + 43.09226539146988699, 43.85192586067515208, 44.61456202863158893, + 45.38013889847690052, 46.14862228684032885, 46.91997879580877395, + 47.69417578616628361, 48.47118135183522014, 49.25096429545256882, + 50.03349410501914463, 50.81874093156324790, 51.60667556776436982, + 52.39726942748592364, 53.19049452616926743, 53.98632346204390586, + 54.78472939811231157, 55.58568604486942633, 56.38916764371992940, + 57.19514895105859864, 58.00360522298051080, 58.81451220059079787, + 59.62784609588432261, 60.44358357816834371, 61.26170176100199427, + 62.08217818962842927, 62.90499082887649962, 63.73011805151035958, + 64.55753862700632340, 65.38723171073768015, 66.21917683354901385, + 67.05335389170279825, 67.88974313718154008, 68.72832516833013017, + 69.56908092082363737, 70.41199165894616385, 71.25703896716800045, + 72.10420474200799390, 72.95347118416940191, 73.80482079093779646, + 74.65823634883015814, 75.51370092648485866, 76.37119786778275454, + 77.23071078519033961, 78.09222355331530707, 78.95572030266725960, + 79.82118541361435859, 80.68860351052903468, 81.55795945611502873, + 82.42923834590904164, 83.30242550295004378, 84.17750647261028973, + 85.05446701758152983, 85.93329311301090456, 86.81397094178107920, + 87.69648688992882057, 88.58082754219766741, 89.46697967771913795, + 90.35493026581838194, 91.24466646193963015, 92.13617560368709292, + 93.02944520697742803, 93.92446296229978486, 94.82121673107967297, + 95.71969454214321615, 96.61988458827809723, 97.52177522288820910, + 98.42535495673848800, 99.33061245478741341, 100.23753653310367895, + 101.14611615586458981, 102.05634043243354370, 
102.96819861451382394, + 103.88168009337621811, 104.79677439715833032, 105.71347118823287303, + 106.63176026064346047, 107.55163153760463501, 108.47307506906540198, + 109.39608102933323153, 110.32063971475740516, 111.24674154146920557, + 112.17437704317786995, 113.10353686902013237, 114.03421178146170689, + 114.96639265424990128, 115.90007047041454769, 116.83523632031698014, + 117.77188139974506953, 118.70999700805310795, 119.64957454634490830, + 120.59060551569974962, 121.53308151543865279, 122.47699424143097247, + 123.42233548443955726, 124.36909712850338394, 125.31727114935689826, + 126.26684961288492559, 127.21782467361175861, 128.17018857322420899, + 129.12393363912724453, 130.07905228303084755, 131.03553699956862033, + 131.99338036494577864, 132.95257503561629164, 133.91311374698926784, + 134.87498931216194364, 135.83819462068046846, 136.80272263732638294, + 137.76856640092901785, 138.73571902320256299, 139.70417368760718091, + 140.67392364823425055, 141.64496222871400732, 142.61728282114600574, + 143.59087888505104047, 144.56574394634486680, 145.54187159633210058, + 146.51925549072063859, 147.49788934865566148, 148.47776695177302031, + 149.45888214327129617, 150.44122882700193600, 151.42480096657754984, + 152.40959258449737490, 153.39559776128982094, 154.38281063467164245, + 155.37122539872302696, 156.36083630307879844, 157.35163765213474107, + 158.34362380426921391, 159.33678917107920370, 160.33112821663092973, + 161.32663545672428995, 162.32330545817117695, 163.32113283808695314, + 164.32011226319519892, 165.32023844914485267, 166.32150615984036790, + 167.32391020678358018, 168.32744544842768164, 169.33210678954270634, + 170.33788918059275375, 171.34478761712384198, 172.35279713916281707, + 173.36191283062726143, 174.37212981874515094, 175.38344327348534080, + 176.39584840699734514, 177.40934047306160437, 178.42391476654847793, + 179.43956662288721304, 180.45629141754378111, 181.47408456550741107, + 182.49294152078630304, 183.51285777591152737, 184.53382886144947861, + 185.55585034552262869, 186.57891783333786861, 187.60302696672312095, + 188.62817342367162610, 189.65435291789341932, 190.68156119837468054, + 191.70979404894376330, 192.73904728784492590, 193.76931676731820176, + 194.80059837318714244, 195.83288802445184729, 196.86618167288995096, + 197.90047530266301123, 198.93576492992946214, 199.97204660246373464, + 201.00931639928148797, 202.04757043027063901, 203.08680483582807597, + 204.12701578650228385, 205.16819948264117102, 206.21035215404597807, + 207.25347005962987623, 208.29754948708190909, 209.34258675253678916, + 210.38857820024875878, 211.43552020227099320, 212.48340915813977858, + 213.53224149456323744, 214.58201366511514152, 215.63272214993284592, + 216.68436345542014010, 217.73693411395422004, 218.79043068359703739, + 219.84484974781133815, 220.90018791517996988, 221.95644181913033322, + 223.01360811766215875, 224.07168349307951871, 225.13066465172661879, + 226.19054832372759734, 227.25133126272962159, 228.31301024565024704, + 229.37558207242807384, 230.43904356577689896, 231.50339157094342113, + 232.56862295546847008, 233.63473460895144740, 234.70172344281823484, + 235.76958639009222907, 236.83832040516844586, 237.90792246359117712, + 238.97838956183431947, 240.04971871708477238, 241.12190696702904802, + 242.19495136964280846, 243.26884900298270509, 244.34359696498191283, + 245.41919237324782443, 246.49563236486270057, 247.57291409618682110, + 248.65103474266476269, 249.72999149863338175, 250.80978157713354904, + 251.89040220972316320, 252.97185064629374551, 
254.05412415488834199, + 255.13722002152300661, 256.22113555000953511, 257.30586806178126835, + 258.39141489572085675, 259.47777340799029844, 260.56494097186322279, + 261.65291497755913497, 262.74169283208021852, 263.83127195904967266, + 264.92164979855277807, 266.01282380697938379, 267.10479145686849733, + 268.19755023675537586, 269.29109765101975427, 270.38543121973674488, + 271.48054847852881721, 272.57644697842033565, 273.67312428569374561, + 274.77057798174683967, 275.86880566295326389, 276.96780494052313770, + 278.06757344036617496, 279.16810880295668085, 280.26940868320008349, + 281.37147075030043197, 282.47429268763045229, 283.57787219260217171, + 284.68220697654078322, 285.78729476455760050, 286.89313329542699194, + 287.99972032146268930, 289.10705360839756395, 290.21513093526289140, + 291.32395009427028754, 292.43350889069523646, 293.54380514276073200, + 294.65483668152336350, 295.76660135076059532, 296.87909700685889902, + 297.99232151870342022, 299.10627276756946458, 300.22094864701409733, + 301.33634706277030091, 302.45246593264130297, 303.56930318639643929, + 304.68685676566872189, 305.80512462385280514, 306.92410472600477078, + 308.04379504874236773, 309.16419358014690033, 310.28529831966631036, + 311.40710727801865687, 312.52961847709792664, 313.65282994987899201, + 314.77673974032603610, 315.90134590329950015, 317.02664650446632777, + 318.15263962020929966, 319.27932333753892635, 320.40669575400545455, + 321.53475497761127144, 322.66349912672620803, 323.79292633000159185, + 324.92303472628691452, 326.05382246454587403, 327.18528770377525916, + 328.31742861292224234, 329.45024337080525356, 330.58373016603343331, + 331.71788719692847280, 332.85271267144611329, 333.98820480709991898, + 335.12436183088397001, 336.26118197919845443, 337.39866349777429377, + 338.53680464159958774, 339.67560367484657036, 340.81505887079896411, + 341.95516851178109619, 343.09593088908627578, 344.23734430290727460, + 345.37940706226686416, 346.52211748494903532, 347.66547389743118401, + 348.80947463481720661, 349.95411804077025408, 351.09940246744753267, + 352.24532627543504759, 353.39188783368263103, 354.53908551944078908, + 355.68691771819692349, 356.83538282361303118, 357.98447923746385868, + 359.13420536957539753 +}; + +TEST_BEGIN(test_ln_gamma_misc) +{ + unsigned i; + + for (i = 1; i < sizeof(ln_gamma_misc_expected)/sizeof(double); i++) { + double x = (double)i * 0.25; + assert_true(double_eq_rel(ln_gamma(x), + ln_gamma_misc_expected[i], MAX_REL_ERR, MAX_ABS_ERR), + "Incorrect ln_gamma result for i=%u", i); + } +} +TEST_END + +/* Expected pt_norm([0.01..0.99] increment=0.01). 
*/ +static const double pt_norm_expected[] = { + -INFINITY, + -2.32634787404084076, -2.05374891063182252, -1.88079360815125085, + -1.75068607125216946, -1.64485362695147264, -1.55477359459685305, + -1.47579102817917063, -1.40507156030963221, -1.34075503369021654, + -1.28155156554460081, -1.22652812003661049, -1.17498679206608991, + -1.12639112903880045, -1.08031934081495606, -1.03643338949378938, + -0.99445788320975281, -0.95416525314619416, -0.91536508784281390, + -0.87789629505122846, -0.84162123357291418, -0.80642124701824025, + -0.77219321418868492, -0.73884684918521371, -0.70630256284008752, + -0.67448975019608171, -0.64334540539291685, -0.61281299101662701, + -0.58284150727121620, -0.55338471955567281, -0.52440051270804067, + -0.49585034734745320, -0.46769879911450812, -0.43991316567323380, + -0.41246312944140462, -0.38532046640756751, -0.35845879325119373, + -0.33185334643681652, -0.30548078809939738, -0.27931903444745404, + -0.25334710313579978, -0.22754497664114931, -0.20189347914185077, + -0.17637416478086135, -0.15096921549677725, -0.12566134685507399, + -0.10043372051146975, -0.07526986209982976, -0.05015358346473352, + -0.02506890825871106, 0.00000000000000000, 0.02506890825871106, + 0.05015358346473366, 0.07526986209982990, 0.10043372051146990, + 0.12566134685507413, 0.15096921549677739, 0.17637416478086146, + 0.20189347914185105, 0.22754497664114931, 0.25334710313579978, + 0.27931903444745404, 0.30548078809939738, 0.33185334643681652, + 0.35845879325119373, 0.38532046640756762, 0.41246312944140484, + 0.43991316567323391, 0.46769879911450835, 0.49585034734745348, + 0.52440051270804111, 0.55338471955567303, 0.58284150727121620, + 0.61281299101662701, 0.64334540539291685, 0.67448975019608171, + 0.70630256284008752, 0.73884684918521371, 0.77219321418868492, + 0.80642124701824036, 0.84162123357291441, 0.87789629505122879, + 0.91536508784281423, 0.95416525314619460, 0.99445788320975348, + 1.03643338949378938, 1.08031934081495606, 1.12639112903880045, + 1.17498679206608991, 1.22652812003661049, 1.28155156554460081, + 1.34075503369021654, 1.40507156030963265, 1.47579102817917085, + 1.55477359459685394, 1.64485362695147308, 1.75068607125217102, + 1.88079360815125041, 2.05374891063182208, 2.32634787404084076 +}; + +TEST_BEGIN(test_pt_norm) +{ + unsigned i; + + for (i = 1; i < sizeof(pt_norm_expected)/sizeof(double); i++) { + double p = (double)i * 0.01; + assert_true(double_eq_rel(pt_norm(p), pt_norm_expected[i], + MAX_REL_ERR, MAX_ABS_ERR), + "Incorrect pt_norm result for i=%u", i); + } +} +TEST_END + +/* + * Expected pt_chi2(p=[0.01..0.99] increment=0.07, + * df={0.1, 1.1, 10.1, 100.1, 1000.1}). 
+ */ +static const double pt_chi2_df[] = {0.1, 1.1, 10.1, 100.1, 1000.1}; +static const double pt_chi2_expected[] = { + 1.168926411457320e-40, 1.347680397072034e-22, 3.886980416666260e-17, + 8.245951724356564e-14, 2.068936347497604e-11, 1.562561743309233e-09, + 5.459543043426564e-08, 1.114775688149252e-06, 1.532101202364371e-05, + 1.553884683726585e-04, 1.239396954915939e-03, 8.153872320255721e-03, + 4.631183739647523e-02, 2.473187311701327e-01, 2.175254800183617e+00, + + 0.0003729887888876379, 0.0164409238228929513, 0.0521523015190650113, + 0.1064701372271216612, 0.1800913735793082115, 0.2748704281195626931, + 0.3939246282787986497, 0.5420727552260817816, 0.7267265822221973259, + 0.9596554296000253670, 1.2607440376386165326, 1.6671185084541604304, + 2.2604828984738705167, 3.2868613342148607082, 6.9298574921692139839, + + 2.606673548632508, 4.602913725294877, 5.646152813924212, + 6.488971315540869, 7.249823275816285, 7.977314231410841, + 8.700354939944047, 9.441728024225892, 10.224338321374127, + 11.076435368801061, 12.039320937038386, 13.183878752697167, + 14.657791935084575, 16.885728216339373, 23.361991680031817, + + 70.14844087392152, 80.92379498849355, 85.53325420085891, + 88.94433120715347, 91.83732712857017, 94.46719943606301, + 96.96896479994635, 99.43412843510363, 101.94074719829733, + 104.57228644307247, 107.43900093448734, 110.71844673417287, + 114.76616819871325, 120.57422505959563, 135.92318818757556, + + 899.0072447849649, 937.9271278858220, 953.8117189560207, + 965.3079371501154, 974.8974061207954, 983.4936235182347, + 991.5691170518946, 999.4334123954690, 1007.3391826856553, + 1015.5445154999951, 1024.3777075619569, 1034.3538789836223, + 1046.4872561869577, 1063.5717461999654, 1107.0741966053859 +}; + +TEST_BEGIN(test_pt_chi2) +{ + unsigned i, j; + unsigned e = 0; + + for (i = 0; i < sizeof(pt_chi2_df)/sizeof(double); i++) { + double df = pt_chi2_df[i]; + double ln_gamma_df = ln_gamma(df * 0.5); + for (j = 1; j < 100; j += 7) { + double p = (double)j * 0.01; + assert_true(double_eq_rel(pt_chi2(p, df, ln_gamma_df), + pt_chi2_expected[e], MAX_REL_ERR, MAX_ABS_ERR), + "Incorrect pt_chi2 result for i=%u, j=%u", i, j); + e++; + } + } +} +TEST_END + +/* + * Expected pt_gamma(p=[0.1..0.99] increment=0.07, + * shape=[0.5..3.0] increment=0.5). 
+ */ +static const double pt_gamma_shape[] = {0.5, 1.0, 1.5, 2.0, 2.5, 3.0}; +static const double pt_gamma_expected[] = { + 7.854392895485103e-05, 5.043466107888016e-03, 1.788288957794883e-02, + 3.900956150232906e-02, 6.913847560638034e-02, 1.093710833465766e-01, + 1.613412523825817e-01, 2.274682115597864e-01, 3.114117323127083e-01, + 4.189466220207417e-01, 5.598106789059246e-01, 7.521856146202706e-01, + 1.036125427911119e+00, 1.532450860038180e+00, 3.317448300510606e+00, + + 0.01005033585350144, 0.08338160893905107, 0.16251892949777497, + 0.24846135929849966, 0.34249030894677596, 0.44628710262841947, + 0.56211891815354142, 0.69314718055994529, 0.84397007029452920, + 1.02165124753198167, 1.23787435600161766, 1.51412773262977574, + 1.89711998488588196, 2.52572864430825783, 4.60517018598809091, + + 0.05741590094955853, 0.24747378084860744, 0.39888572212236084, + 0.54394139997444901, 0.69048812513915159, 0.84311389861296104, + 1.00580622221479898, 1.18298694218766931, 1.38038096305861213, + 1.60627736383027453, 1.87396970522337947, 2.20749220408081070, + 2.65852391865854942, 3.37934630984842244, 5.67243336507218476, + + 0.1485547402532659, 0.4657458011640391, 0.6832386130709406, + 0.8794297834672100, 1.0700752852474524, 1.2629614217350744, + 1.4638400448580779, 1.6783469900166610, 1.9132338090606940, + 2.1778589228618777, 2.4868823970010991, 2.8664695666264195, + 3.3724415436062114, 4.1682658512758071, 6.6383520679938108, + + 0.2771490383641385, 0.7195001279643727, 0.9969081732265243, + 1.2383497880608061, 1.4675206597269927, 1.6953064251816552, + 1.9291243435606809, 2.1757300955477641, 2.4428032131216391, + 2.7406534569230616, 3.0851445039665513, 3.5043101122033367, + 4.0575997065264637, 4.9182956424675286, 7.5431362346944937, + + 0.4360451650782932, 0.9983600902486267, 1.3306365880734528, + 1.6129750834753802, 1.8767241606994294, 2.1357032436097660, + 2.3988853336865565, 2.6740603137235603, 2.9697561737517959, + 3.2971457713883265, 3.6731795898504660, 4.1275751617770631, + 4.7230515633946677, 5.6417477865306020, 8.4059469148854635 +}; + +TEST_BEGIN(test_pt_gamma_shape) +{ + unsigned i, j; + unsigned e = 0; + + for (i = 0; i < sizeof(pt_gamma_shape)/sizeof(double); i++) { + double shape = pt_gamma_shape[i]; + double ln_gamma_shape = ln_gamma(shape); + for (j = 1; j < 100; j += 7) { + double p = (double)j * 0.01; + assert_true(double_eq_rel(pt_gamma(p, shape, 1.0, + ln_gamma_shape), pt_gamma_expected[e], MAX_REL_ERR, + MAX_ABS_ERR), + "Incorrect pt_gamma result for i=%u, j=%u", i, j); + e++; + } + } +} +TEST_END + +TEST_BEGIN(test_pt_gamma_scale) +{ + double shape = 1.0; + double ln_gamma_shape = ln_gamma(shape); + + assert_true(double_eq_rel( + pt_gamma(0.5, shape, 1.0, ln_gamma_shape) * 10.0, + pt_gamma(0.5, shape, 10.0, ln_gamma_shape), MAX_REL_ERR, + MAX_ABS_ERR), + "Scale should be trivially equivalent to external multiplication"); +} +TEST_END + +int +main(void) +{ + + return (test( + test_ln_gamma_factorial, + test_ln_gamma_misc, + test_pt_norm, + test_pt_chi2, + test_pt_gamma_shape, + test_pt_gamma_scale)); +} diff --git a/test/unit/mq.c b/test/unit/mq.c new file mode 100644 index 00000000..f57e96af --- /dev/null +++ b/test/unit/mq.c @@ -0,0 +1,92 @@ +#include "test/jemalloc_test.h" + +#define NSENDERS 3 +#define NMSGS 100000 + +typedef struct mq_msg_s mq_msg_t; +struct mq_msg_s { + mq_msg(mq_msg_t) link; +}; +mq_gen(static, mq_, mq_t, mq_msg_t, link) + +TEST_BEGIN(test_mq_basic) +{ + mq_t mq; + mq_msg_t msg; + + assert_false(mq_init(&mq), "Unexpected mq_init() failure"); + 
assert_u_eq(mq_count(&mq), 0, "mq should be empty");
+ assert_ptr_null(mq_tryget(&mq),
+ "mq_tryget() should fail when the queue is empty");
+
+ mq_put(&mq, &msg);
+ assert_u_eq(mq_count(&mq), 1, "mq should contain one message");
+ assert_ptr_eq(mq_tryget(&mq), &msg, "mq_tryget() should return msg");
+
+ mq_put(&mq, &msg);
+ assert_ptr_eq(mq_get(&mq), &msg, "mq_get() should return msg");
+
+ mq_fini(&mq);
+}
+TEST_END
+
+static void *
+thd_receiver_start(void *arg)
+{
+ mq_t *mq = (mq_t *)arg;
+ unsigned i;
+
+ for (i = 0; i < (NSENDERS * NMSGS); i++) {
+ mq_msg_t *msg = mq_get(mq);
+ assert_ptr_not_null(msg, "mq_get() should never return NULL");
+ dallocx(msg, 0);
+ }
+ return (NULL);
+}
+
+static void *
+thd_sender_start(void *arg)
+{
+ mq_t *mq = (mq_t *)arg;
+ unsigned i;
+
+ for (i = 0; i < NMSGS; i++) {
+ mq_msg_t *msg;
+ void *p;
+ p = mallocx(sizeof(mq_msg_t), 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+ msg = (mq_msg_t *)p;
+ mq_put(mq, msg);
+ }
+ return (NULL);
+}
+
+TEST_BEGIN(test_mq_threaded)
+{
+ mq_t mq;
+ thd_t receiver;
+ thd_t senders[NSENDERS];
+ unsigned i;
+
+ assert_false(mq_init(&mq), "Unexpected mq_init() failure");
+
+ thd_create(&receiver, thd_receiver_start, (void *)&mq);
+ for (i = 0; i < NSENDERS; i++)
+ thd_create(&senders[i], thd_sender_start, (void *)&mq);
+
+ thd_join(receiver, NULL);
+ for (i = 0; i < NSENDERS; i++)
+ thd_join(senders[i], NULL);
+
+ mq_fini(&mq);
+}
+TEST_END
+
+int
+main(void)
+{
+ return (test(
+ test_mq_basic,
+ test_mq_threaded));
+}
+
diff --git a/test/unit/mtx.c b/test/unit/mtx.c new file mode 100644 index 00000000..96ff6948 --- /dev/null +++ b/test/unit/mtx.c @@ -0,0 +1,60 @@
+#include "test/jemalloc_test.h"
+
+#define NTHREADS 2
+#define NINCRS 2000000
+
+TEST_BEGIN(test_mtx_basic)
+{
+ mtx_t mtx;
+
+ assert_false(mtx_init(&mtx), "Unexpected mtx_init() failure");
+ mtx_lock(&mtx);
+ mtx_unlock(&mtx);
+ mtx_fini(&mtx);
+}
+TEST_END
+
+typedef struct {
+ mtx_t mtx;
+ unsigned x;
+} thd_start_arg_t;
+
+static void *
+thd_start(void *varg)
+{
+ thd_start_arg_t *arg = (thd_start_arg_t *)varg;
+ unsigned i;
+
+ for (i = 0; i < NINCRS; i++) {
+ mtx_lock(&arg->mtx);
+ arg->x++;
+ mtx_unlock(&arg->mtx);
+ }
+ return (NULL);
+}
+
+TEST_BEGIN(test_mtx_race)
+{
+ thd_start_arg_t arg;
+ thd_t thds[NTHREADS];
+ unsigned i;
+
+ assert_false(mtx_init(&arg.mtx), "Unexpected mtx_init() failure");
+ arg.x = 0;
+ for (i = 0; i < NTHREADS; i++)
+ thd_create(&thds[i], thd_start, (void *)&arg);
+ for (i = 0; i < NTHREADS; i++)
+ thd_join(thds[i], NULL);
+ assert_u_eq(arg.x, NTHREADS * NINCRS,
+ "Race-related counter corruption");
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_mtx_basic,
+ test_mtx_race));
+} diff --git a/test/unit/prof_accum.c b/test/unit/prof_accum.c new file mode 100644 index 00000000..cf3f287d --- /dev/null +++ b/test/unit/prof_accum.c @@ -0,0 +1,122 @@
+#include "test/jemalloc_test.h"
+
+#define NTHREADS 4
+#define NALLOCS_PER_THREAD 50
+#define DUMP_INTERVAL 1
+#define BT_COUNT_CHECK_INTERVAL 5
+
+#ifdef JEMALLOC_PROF
+const char *malloc_conf =
+ "prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0";
+#endif
+
+static int
+prof_dump_open_intercept(bool propagate_err, const char *filename)
+{
+ int fd;
+
+ fd = open("/dev/null", O_WRONLY);
+ assert_d_ne(fd, -1, "Unexpected open() failure");
+
+ return (fd);
+}
+
+#define alloc_n_proto(n) \
+static void *alloc_##n(unsigned bits);
+
+#define alloc_n_gen(n) \
+static void * \
+alloc_##n(unsigned bits) \
+{ \
+ void *p; \
+ \
+ if (bits == 0) \
+
p = mallocx(1, 0); \ + else { \ + switch (bits & 0x1U) { \ + case 0: \ + p = alloc_0(bits >> 1); \ + break; \ + case 1: \ + p = alloc_1(bits >> 1); \ + break; \ + default: not_reached(); \ + } \ + } \ + /* Intentionally sabotage tail call optimization. */ \ + assert_ptr_not_null(p, "Unexpected mallocx() failure"); \ + return (p); \ +} +alloc_n_proto(0) +alloc_n_proto(1) +alloc_n_gen(0) +alloc_n_gen(1) + +static void * +alloc_from_permuted_backtrace(unsigned thd_ind, unsigned iteration) +{ + + return (alloc_0(thd_ind*NALLOCS_PER_THREAD + iteration)); +} + +static void * +thd_start(void *varg) +{ + unsigned thd_ind = *(unsigned *)varg; + size_t bt_count_prev, bt_count; + unsigned i_prev, i; + + i_prev = 0; + bt_count_prev = 0; + for (i = 0; i < NALLOCS_PER_THREAD; i++) { + void *p = alloc_from_permuted_backtrace(thd_ind, i); + dallocx(p, 0); + if (i % DUMP_INTERVAL == 0) { + assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0), + 0, "Unexpected error while dumping heap profile"); + } + + if (i % BT_COUNT_CHECK_INTERVAL == 0 || + i+1 == NALLOCS_PER_THREAD) { + bt_count = prof_bt_count(); + assert_zu_le(bt_count_prev+(i-i_prev), bt_count, + "Expected larger backtrace count increase"); + i_prev = i; + bt_count_prev = bt_count; + } + } + + return (NULL); +} + +TEST_BEGIN(test_idump) +{ + bool active; + thd_t thds[NTHREADS]; + unsigned thd_args[NTHREADS]; + unsigned i; + + test_skip_if(!config_prof); + + active = true; + assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)), + 0, "Unexpected mallctl failure while activating profiling"); + + prof_dump_open = prof_dump_open_intercept; + + for (i = 0; i < NTHREADS; i++) { + thd_args[i] = i; + thd_create(&thds[i], thd_start, (void *)&thd_args[i]); + } + for (i = 0; i < NTHREADS; i++) + thd_join(thds[i], NULL); +} +TEST_END + +int +main(void) +{ + + return (test( + test_idump)); +} diff --git a/test/unit/prof_gdump.c b/test/unit/prof_gdump.c new file mode 100644 index 00000000..a00b1054 --- /dev/null +++ b/test/unit/prof_gdump.c @@ -0,0 +1,56 @@ +#include "test/jemalloc_test.h" + +#ifdef JEMALLOC_PROF +const char *malloc_conf = "prof:true,prof_active:false,prof_gdump:true"; +#endif + +static bool did_prof_dump_open; + +static int +prof_dump_open_intercept(bool propagate_err, const char *filename) +{ + int fd; + + did_prof_dump_open = true; + + fd = open("/dev/null", O_WRONLY); + assert_d_ne(fd, -1, "Unexpected open() failure"); + + return (fd); +} + +TEST_BEGIN(test_gdump) +{ + bool active; + void *p, *q; + + test_skip_if(!config_prof); + + active = true; + assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)), + 0, "Unexpected mallctl failure while activating profiling"); + + prof_dump_open = prof_dump_open_intercept; + + did_prof_dump_open = false; + p = mallocx(chunksize, 0); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + assert_true(did_prof_dump_open, "Expected a profile dump"); + + did_prof_dump_open = false; + q = mallocx(chunksize, 0); + assert_ptr_not_null(q, "Unexpected mallocx() failure"); + assert_true(did_prof_dump_open, "Expected a profile dump"); + + dallocx(p, 0); + dallocx(q, 0); +} +TEST_END + +int +main(void) +{ + + return (test( + test_gdump)); +} diff --git a/test/unit/prof_idump.c b/test/unit/prof_idump.c new file mode 100644 index 00000000..bdea53ec --- /dev/null +++ b/test/unit/prof_idump.c @@ -0,0 +1,51 @@ +#include "test/jemalloc_test.h" + +#ifdef JEMALLOC_PROF +const char *malloc_conf = + "prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0," + "lg_prof_interval:0"; 
+#endif + +static bool did_prof_dump_open; + +static int +prof_dump_open_intercept(bool propagate_err, const char *filename) +{ + int fd; + + did_prof_dump_open = true; + + fd = open("/dev/null", O_WRONLY); + assert_d_ne(fd, -1, "Unexpected open() failure"); + + return (fd); +} + +TEST_BEGIN(test_idump) +{ + bool active; + void *p; + + test_skip_if(!config_prof); + + active = true; + assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)), + 0, "Unexpected mallctl failure while activating profiling"); + + prof_dump_open = prof_dump_open_intercept; + + did_prof_dump_open = false; + p = mallocx(1, 0); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + dallocx(p, 0); + assert_true(did_prof_dump_open, "Expected a profile dump"); +} +TEST_END + +int +main(void) +{ + + return (test( + test_idump)); +} diff --git a/test/unit/ql.c b/test/unit/ql.c new file mode 100644 index 00000000..05fad450 --- /dev/null +++ b/test/unit/ql.c @@ -0,0 +1,209 @@ +#include "test/jemalloc_test.h" + +/* Number of ring entries, in [2..26]. */ +#define NENTRIES 9 + +typedef struct list_s list_t; +typedef ql_head(list_t) list_head_t; + +struct list_s { + ql_elm(list_t) link; + char id; +}; + +static void +test_empty_list(list_head_t *head) +{ + list_t *t; + unsigned i; + + assert_ptr_null(ql_first(head), "Unexpected element for empty list"); + assert_ptr_null(ql_last(head, link), + "Unexpected element for empty list"); + + i = 0; + ql_foreach(t, head, link) { + i++; + } + assert_u_eq(i, 0, "Unexpected element for empty list"); + + i = 0; + ql_reverse_foreach(t, head, link) { + i++; + } + assert_u_eq(i, 0, "Unexpected element for empty list"); +} + +TEST_BEGIN(test_ql_empty) +{ + list_head_t head; + + ql_new(&head); + test_empty_list(&head); +} +TEST_END + +static void +init_entries(list_t *entries, unsigned nentries) +{ + unsigned i; + + for (i = 0; i < nentries; i++) { + entries[i].id = 'a' + i; + ql_elm_new(&entries[i], link); + } +} + +static void +test_entries_list(list_head_t *head, list_t *entries, unsigned nentries) +{ + list_t *t; + unsigned i; + + assert_c_eq(ql_first(head)->id, entries[0].id, "Element id mismatch"); + assert_c_eq(ql_last(head, link)->id, entries[nentries-1].id, + "Element id mismatch"); + + i = 0; + ql_foreach(t, head, link) { + assert_c_eq(t->id, entries[i].id, "Element id mismatch"); + i++; + } + + i = 0; + ql_reverse_foreach(t, head, link) { + assert_c_eq(t->id, entries[nentries-i-1].id, + "Element id mismatch"); + i++; + } + + for (i = 0; i < nentries-1; i++) { + t = ql_next(head, &entries[i], link); + assert_c_eq(t->id, entries[i+1].id, "Element id mismatch"); + } + assert_ptr_null(ql_next(head, &entries[nentries-1], link), + "Unexpected element"); + + assert_ptr_null(ql_prev(head, &entries[0], link), "Unexpected element"); + for (i = 1; i < nentries; i++) { + t = ql_prev(head, &entries[i], link); + assert_c_eq(t->id, entries[i-1].id, "Element id mismatch"); + } +} + +TEST_BEGIN(test_ql_tail_insert) +{ + list_head_t head; + list_t entries[NENTRIES]; + unsigned i; + + ql_new(&head); + init_entries(entries, sizeof(entries)/sizeof(list_t)); + for (i = 0; i < NENTRIES; i++) + ql_tail_insert(&head, &entries[i], link); + + test_entries_list(&head, entries, NENTRIES); +} +TEST_END + +TEST_BEGIN(test_ql_tail_remove) +{ + list_head_t head; + list_t entries[NENTRIES]; + unsigned i; + + ql_new(&head); + init_entries(entries, sizeof(entries)/sizeof(list_t)); + for (i = 0; i < NENTRIES; i++) + ql_tail_insert(&head, &entries[i], link); + + for (i = 0; i < NENTRIES; i++) { + 
test_entries_list(&head, entries, NENTRIES-i); + ql_tail_remove(&head, list_t, link); + } + test_empty_list(&head); +} +TEST_END + +TEST_BEGIN(test_ql_head_insert) +{ + list_head_t head; + list_t entries[NENTRIES]; + unsigned i; + + ql_new(&head); + init_entries(entries, sizeof(entries)/sizeof(list_t)); + for (i = 0; i < NENTRIES; i++) + ql_head_insert(&head, &entries[NENTRIES-i-1], link); + + test_entries_list(&head, entries, NENTRIES); +} +TEST_END + +TEST_BEGIN(test_ql_head_remove) +{ + list_head_t head; + list_t entries[NENTRIES]; + unsigned i; + + ql_new(&head); + init_entries(entries, sizeof(entries)/sizeof(list_t)); + for (i = 0; i < NENTRIES; i++) + ql_head_insert(&head, &entries[NENTRIES-i-1], link); + + for (i = 0; i < NENTRIES; i++) { + test_entries_list(&head, &entries[i], NENTRIES-i); + ql_head_remove(&head, list_t, link); + } + test_empty_list(&head); +} +TEST_END + +TEST_BEGIN(test_ql_insert) +{ + list_head_t head; + list_t entries[8]; + list_t *a, *b, *c, *d, *e, *f, *g, *h; + + ql_new(&head); + init_entries(entries, sizeof(entries)/sizeof(list_t)); + a = &entries[0]; + b = &entries[1]; + c = &entries[2]; + d = &entries[3]; + e = &entries[4]; + f = &entries[5]; + g = &entries[6]; + h = &entries[7]; + + /* + * ql_remove(), ql_before_insert(), and ql_after_insert() are used + * internally by other macros that are already tested, so there's no + * need to test them completely. However, insertion/deletion from the + * middle of lists is not otherwise tested; do so here. + */ + ql_tail_insert(&head, f, link); + ql_before_insert(&head, f, b, link); + ql_before_insert(&head, f, c, link); + ql_after_insert(f, h, link); + ql_after_insert(f, g, link); + ql_before_insert(&head, b, a, link); + ql_after_insert(c, d, link); + ql_before_insert(&head, f, e, link); + + test_entries_list(&head, entries, sizeof(entries)/sizeof(list_t)); +} +TEST_END + +int +main(void) +{ + + return (test( + test_ql_empty, + test_ql_tail_insert, + test_ql_tail_remove, + test_ql_head_insert, + test_ql_head_remove, + test_ql_insert)); +} diff --git a/test/unit/qr.c b/test/unit/qr.c new file mode 100644 index 00000000..a2a2d902 --- /dev/null +++ b/test/unit/qr.c @@ -0,0 +1,248 @@ +#include "test/jemalloc_test.h" + +/* Number of ring entries, in [2..26]. */ +#define NENTRIES 9 +/* Split index, in [1..NENTRIES). 
*/ +#define SPLIT_INDEX 5 + +typedef struct ring_s ring_t; + +struct ring_s { + qr(ring_t) link; + char id; +}; + +static void +init_entries(ring_t *entries) +{ + unsigned i; + + for (i = 0; i < NENTRIES; i++) { + qr_new(&entries[i], link); + entries[i].id = 'a' + i; + } +} + +static void +test_independent_entries(ring_t *entries) +{ + ring_t *t; + unsigned i, j; + + for (i = 0; i < NENTRIES; i++) { + j = 0; + qr_foreach(t, &entries[i], link) { + j++; + } + assert_u_eq(j, 1, + "Iteration over single-element ring should visit precisely " + "one element"); + } + for (i = 0; i < NENTRIES; i++) { + j = 0; + qr_reverse_foreach(t, &entries[i], link) { + j++; + } + assert_u_eq(j, 1, + "Iteration over single-element ring should visit precisely " + "one element"); + } + for (i = 0; i < NENTRIES; i++) { + t = qr_next(&entries[i], link); + assert_ptr_eq(t, &entries[i], + "Next element in single-element ring should be same as " + "current element"); + } + for (i = 0; i < NENTRIES; i++) { + t = qr_prev(&entries[i], link); + assert_ptr_eq(t, &entries[i], + "Previous element in single-element ring should be same as " + "current element"); + } +} + +TEST_BEGIN(test_qr_one) +{ + ring_t entries[NENTRIES]; + + init_entries(entries); + test_independent_entries(entries); +} +TEST_END + +static void +test_entries_ring(ring_t *entries) +{ + ring_t *t; + unsigned i, j; + + for (i = 0; i < NENTRIES; i++) { + j = 0; + qr_foreach(t, &entries[i], link) { + assert_c_eq(t->id, entries[(i+j) % NENTRIES].id, + "Element id mismatch"); + j++; + } + } + for (i = 0; i < NENTRIES; i++) { + j = 0; + qr_reverse_foreach(t, &entries[i], link) { + assert_c_eq(t->id, entries[(NENTRIES+i-j-1) % + NENTRIES].id, "Element id mismatch"); + j++; + } + } + for (i = 0; i < NENTRIES; i++) { + t = qr_next(&entries[i], link); + assert_c_eq(t->id, entries[(i+1) % NENTRIES].id, + "Element id mismatch"); + } + for (i = 0; i < NENTRIES; i++) { + t = qr_prev(&entries[i], link); + assert_c_eq(t->id, entries[(NENTRIES+i-1) % NENTRIES].id, + "Element id mismatch"); + } +} + +TEST_BEGIN(test_qr_after_insert) +{ + ring_t entries[NENTRIES]; + unsigned i; + + init_entries(entries); + for (i = 1; i < NENTRIES; i++) + qr_after_insert(&entries[i - 1], &entries[i], link); + test_entries_ring(entries); +} +TEST_END + +TEST_BEGIN(test_qr_remove) +{ + ring_t entries[NENTRIES]; + ring_t *t; + unsigned i, j; + + init_entries(entries); + for (i = 1; i < NENTRIES; i++) + qr_after_insert(&entries[i - 1], &entries[i], link); + + for (i = 0; i < NENTRIES; i++) { + j = 0; + qr_foreach(t, &entries[i], link) { + assert_c_eq(t->id, entries[i+j].id, + "Element id mismatch"); + j++; + } + j = 0; + qr_reverse_foreach(t, &entries[i], link) { + assert_c_eq(t->id, entries[NENTRIES - 1 - j].id, + "Element id mismatch"); + j++; + } + qr_remove(&entries[i], link); + } + test_independent_entries(entries); +} +TEST_END + +TEST_BEGIN(test_qr_before_insert) +{ + ring_t entries[NENTRIES]; + ring_t *t; + unsigned i, j; + + init_entries(entries); + for (i = 1; i < NENTRIES; i++) + qr_before_insert(&entries[i - 1], &entries[i], link); + for (i = 0; i < NENTRIES; i++) { + j = 0; + qr_foreach(t, &entries[i], link) { + assert_c_eq(t->id, entries[(NENTRIES+i-j) % + NENTRIES].id, "Element id mismatch"); + j++; + } + } + for (i = 0; i < NENTRIES; i++) { + j = 0; + qr_reverse_foreach(t, &entries[i], link) { + assert_c_eq(t->id, entries[(i+j+1) % NENTRIES].id, + "Element id mismatch"); + j++; + } + } + for (i = 0; i < NENTRIES; i++) { + t = qr_next(&entries[i], link); + assert_c_eq(t->id, 
entries[(NENTRIES+i-1) % NENTRIES].id, + "Element id mismatch"); + } + for (i = 0; i < NENTRIES; i++) { + t = qr_prev(&entries[i], link); + assert_c_eq(t->id, entries[(i+1) % NENTRIES].id, + "Element id mismatch"); + } +} +TEST_END + +static void +test_split_entries(ring_t *entries) +{ + ring_t *t; + unsigned i, j; + + for (i = 0; i < NENTRIES; i++) { + j = 0; + qr_foreach(t, &entries[i], link) { + if (i < SPLIT_INDEX) { + assert_c_eq(t->id, + entries[(i+j) % SPLIT_INDEX].id, + "Element id mismatch"); + } else { + assert_c_eq(t->id, entries[(i+j-SPLIT_INDEX) % + (NENTRIES-SPLIT_INDEX) + SPLIT_INDEX].id, + "Element id mismatch"); + } + j++; + } + } +} + +TEST_BEGIN(test_qr_meld_split) +{ + ring_t entries[NENTRIES]; + unsigned i; + + init_entries(entries); + for (i = 1; i < NENTRIES; i++) + qr_after_insert(&entries[i - 1], &entries[i], link); + + qr_split(&entries[0], &entries[SPLIT_INDEX], link); + test_split_entries(entries); + + qr_meld(&entries[0], &entries[SPLIT_INDEX], link); + test_entries_ring(entries); + + qr_meld(&entries[0], &entries[SPLIT_INDEX], link); + test_split_entries(entries); + + qr_split(&entries[0], &entries[SPLIT_INDEX], link); + test_entries_ring(entries); + + qr_split(&entries[0], &entries[0], link); + test_entries_ring(entries); + + qr_meld(&entries[0], &entries[0], link); + test_entries_ring(entries); +} +TEST_END + +int +main(void) +{ + + return (test( + test_qr_one, + test_qr_after_insert, + test_qr_remove, + test_qr_before_insert, + test_qr_meld_split)); +} diff --git a/test/unit/quarantine.c b/test/unit/quarantine.c new file mode 100644 index 00000000..45349237 --- /dev/null +++ b/test/unit/quarantine.c @@ -0,0 +1,108 @@ +#include "test/jemalloc_test.h" + +#define QUARANTINE_SIZE 8192 +#define STRINGIFY_HELPER(x) #x +#define STRINGIFY(x) STRINGIFY_HELPER(x) + +#ifdef JEMALLOC_FILL +const char *malloc_conf = "abort:false,junk:true,redzone:true,quarantine:" + STRINGIFY(QUARANTINE_SIZE); +#endif + +void +quarantine_clear(void) +{ + void *p; + + p = mallocx(QUARANTINE_SIZE*2, 0); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + dallocx(p, 0); +} + +TEST_BEGIN(test_quarantine) +{ +#define SZ 256 +#define NQUARANTINED (QUARANTINE_SIZE/SZ) + void *quarantined[NQUARANTINED+1]; + size_t i, j; + + test_skip_if(!config_fill); + + assert_zu_eq(nallocx(SZ, 0), SZ, + "SZ=%zu does not precisely equal a size class", SZ); + + quarantine_clear(); + + /* + * Allocate enough regions to completely fill the quarantine, plus one + * more. The last iteration occurs with a completely full quarantine, + * but no regions should be drained from the quarantine until the last + * deallocation occurs. Therefore no region recycling should occur + * until after this loop completes. 
+ */ + for (i = 0; i < NQUARANTINED+1; i++) { + void *p = mallocx(SZ, 0); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + quarantined[i] = p; + dallocx(p, 0); + for (j = 0; j < i; j++) { + assert_ptr_ne(p, quarantined[j], + "Quarantined region recycled too early; " + "i=%zu, j=%zu", i, j); + } + } +#undef NQUARANTINED +#undef SZ +} +TEST_END + +static bool detected_redzone_corruption; + +static void +arena_redzone_corruption_replacement(void *ptr, size_t usize, bool after, + size_t offset, uint8_t byte) +{ + + detected_redzone_corruption = true; +} + +TEST_BEGIN(test_quarantine_redzone) +{ + char *s; + arena_redzone_corruption_t *arena_redzone_corruption_orig; + + test_skip_if(!config_fill); + + arena_redzone_corruption_orig = arena_redzone_corruption; + arena_redzone_corruption = arena_redzone_corruption_replacement; + + /* Test underflow. */ + detected_redzone_corruption = false; + s = (char *)mallocx(1, 0); + assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); + s[-1] = 0xbb; + dallocx(s, 0); + assert_true(detected_redzone_corruption, + "Did not detect redzone corruption"); + + /* Test overflow. */ + detected_redzone_corruption = false; + s = (char *)mallocx(1, 0); + assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); + s[sallocx(s, 0)] = 0xbb; + dallocx(s, 0); + assert_true(detected_redzone_corruption, + "Did not detect redzone corruption"); + + arena_redzone_corruption = arena_redzone_corruption_orig; +} +TEST_END + +int +main(void) +{ + + return (test( + test_quarantine, + test_quarantine_redzone)); +} diff --git a/test/unit/rb.c b/test/unit/rb.c new file mode 100644 index 00000000..b737485a --- /dev/null +++ b/test/unit/rb.c @@ -0,0 +1,333 @@ +#include "test/jemalloc_test.h" + +#define rbtn_black_height(a_type, a_field, a_rbt, r_height) do { \ + a_type *rbp_bh_t; \ + for (rbp_bh_t = (a_rbt)->rbt_root, (r_height) = 0; \ + rbp_bh_t != &(a_rbt)->rbt_nil; \ + rbp_bh_t = rbtn_left_get(a_type, a_field, rbp_bh_t)) { \ + if (rbtn_red_get(a_type, a_field, rbp_bh_t) == false) { \ + (r_height)++; \ + } \ + } \ +} while (0) + +typedef struct node_s node_t; + +struct node_s { +#define NODE_MAGIC 0x9823af7e + uint32_t magic; + rb_node(node_t) link; + uint64_t key; +}; + +static int +node_cmp(node_t *a, node_t *b) { + int ret; + + assert_u32_eq(a->magic, NODE_MAGIC, "Bad magic"); + assert_u32_eq(b->magic, NODE_MAGIC, "Bad magic"); + + ret = (a->key > b->key) - (a->key < b->key); + if (ret == 0) { + /* + * Duplicates are not allowed in the tree, so force an + * arbitrary ordering for non-identical items with equal keys. 
+ */ + ret = (((uintptr_t)a) > ((uintptr_t)b)) + - (((uintptr_t)a) < ((uintptr_t)b)); + } + return (ret); +} + +typedef rb_tree(node_t) tree_t; +rb_gen(static, tree_, tree_t, node_t, link, node_cmp); + +TEST_BEGIN(test_rb_empty) +{ + tree_t tree; + node_t key; + + tree_new(&tree); + + assert_ptr_null(tree_first(&tree), "Unexpected node"); + assert_ptr_null(tree_last(&tree), "Unexpected node"); + + key.key = 0; + key.magic = NODE_MAGIC; + assert_ptr_null(tree_search(&tree, &key), "Unexpected node"); + + key.key = 0; + key.magic = NODE_MAGIC; + assert_ptr_null(tree_nsearch(&tree, &key), "Unexpected node"); + + key.key = 0; + key.magic = NODE_MAGIC; + assert_ptr_null(tree_psearch(&tree, &key), "Unexpected node"); +} +TEST_END + +static unsigned +tree_recurse(node_t *node, unsigned black_height, unsigned black_depth, + node_t *nil) +{ + unsigned ret = 0; + node_t *left_node = rbtn_left_get(node_t, link, node); + node_t *right_node = rbtn_right_get(node_t, link, node); + + if (rbtn_red_get(node_t, link, node) == false) + black_depth++; + + /* Red nodes must be interleaved with black nodes. */ + if (rbtn_red_get(node_t, link, node)) { + assert_false(rbtn_red_get(node_t, link, left_node), + "Node should be black"); + assert_false(rbtn_red_get(node_t, link, right_node), + "Node should be black"); + } + + if (node == nil) + return (ret); + /* Self. */ + assert_u32_eq(node->magic, NODE_MAGIC, "Bad magic"); + + /* Left subtree. */ + if (left_node != nil) + ret += tree_recurse(left_node, black_height, black_depth, nil); + else + ret += (black_depth != black_height); + + /* Right subtree. */ + if (right_node != nil) + ret += tree_recurse(right_node, black_height, black_depth, nil); + else + ret += (black_depth != black_height); + + return (ret); +} + +static node_t * +tree_iterate_cb(tree_t *tree, node_t *node, void *data) +{ + unsigned *i = (unsigned *)data; + node_t *search_node; + + assert_u32_eq(node->magic, NODE_MAGIC, "Bad magic"); + + /* Test rb_search(). */ + search_node = tree_search(tree, node); + assert_ptr_eq(search_node, node, + "tree_search() returned unexpected node"); + + /* Test rb_nsearch(). */ + search_node = tree_nsearch(tree, node); + assert_ptr_eq(search_node, node, + "tree_nsearch() returned unexpected node"); + + /* Test rb_psearch(). */ + search_node = tree_psearch(tree, node); + assert_ptr_eq(search_node, node, + "tree_psearch() returned unexpected node"); + + (*i)++; + + return (NULL); +} + +static unsigned +tree_iterate(tree_t *tree) +{ + unsigned i; + + i = 0; + tree_iter(tree, NULL, tree_iterate_cb, (void *)&i); + + return (i); +} + +static unsigned +tree_iterate_reverse(tree_t *tree) +{ + unsigned i; + + i = 0; + tree_reverse_iter(tree, NULL, tree_iterate_cb, (void *)&i); + + return (i); +} + +static void +node_remove(tree_t *tree, node_t *node, unsigned nnodes) +{ + node_t *search_node; + unsigned black_height, imbalances; + + tree_remove(tree, node); + + /* Test rb_nsearch(). */ + search_node = tree_nsearch(tree, node); + if (search_node != NULL) { + assert_u64_ge(search_node->key, node->key, + "Key ordering error"); + } + + /* Test rb_psearch(). 
*/ + search_node = tree_psearch(tree, node); + if (search_node != NULL) { + assert_u64_le(search_node->key, node->key, + "Key ordering error"); + } + + node->magic = 0; + + rbtn_black_height(node_t, link, tree, black_height); + imbalances = tree_recurse(tree->rbt_root, black_height, 0, + &(tree->rbt_nil)); + assert_u_eq(imbalances, 0, "Tree is unbalanced"); + assert_u_eq(tree_iterate(tree), nnodes-1, + "Unexpected node iteration count"); + assert_u_eq(tree_iterate_reverse(tree), nnodes-1, + "Unexpected node iteration count"); +} + +static node_t * +remove_iterate_cb(tree_t *tree, node_t *node, void *data) +{ + unsigned *nnodes = (unsigned *)data; + node_t *ret = tree_next(tree, node); + + node_remove(tree, node, *nnodes); + + return (ret); +} + +static node_t * +remove_reverse_iterate_cb(tree_t *tree, node_t *node, void *data) +{ + unsigned *nnodes = (unsigned *)data; + node_t *ret = tree_prev(tree, node); + + node_remove(tree, node, *nnodes); + + return (ret); +} + +TEST_BEGIN(test_rb_random) +{ +#define NNODES 25 +#define NBAGS 250 +#define SEED 42 + sfmt_t *sfmt; + uint64_t bag[NNODES]; + tree_t tree; + node_t nodes[NNODES]; + unsigned i, j, k, black_height, imbalances; + + sfmt = init_gen_rand(SEED); + for (i = 0; i < NBAGS; i++) { + switch (i) { + case 0: + /* Insert in order. */ + for (j = 0; j < NNODES; j++) + bag[j] = j; + break; + case 1: + /* Insert in reverse order. */ + for (j = 0; j < NNODES; j++) + bag[j] = NNODES - j - 1; + break; + default: + for (j = 0; j < NNODES; j++) + bag[j] = gen_rand64_range(sfmt, NNODES); + } + + for (j = 1; j <= NNODES; j++) { + /* Initialize tree and nodes. */ + tree_new(&tree); + tree.rbt_nil.magic = 0; + for (k = 0; k < j; k++) { + nodes[k].magic = NODE_MAGIC; + nodes[k].key = bag[k]; + } + + /* Insert nodes. */ + for (k = 0; k < j; k++) { + tree_insert(&tree, &nodes[k]); + + rbtn_black_height(node_t, link, &tree, + black_height); + imbalances = tree_recurse(tree.rbt_root, + black_height, 0, &(tree.rbt_nil)); + assert_u_eq(imbalances, 0, + "Tree is unbalanced"); + + assert_u_eq(tree_iterate(&tree), k+1, + "Unexpected node iteration count"); + assert_u_eq(tree_iterate_reverse(&tree), k+1, + "Unexpected node iteration count"); + + assert_ptr_not_null(tree_first(&tree), + "Tree should not be empty"); + assert_ptr_not_null(tree_last(&tree), + "Tree should not be empty"); + + tree_next(&tree, &nodes[k]); + tree_prev(&tree, &nodes[k]); + } + + /* Remove nodes. 
*/
+			switch (i % 4) {
+			case 0:
+				for (k = 0; k < j; k++)
+					node_remove(&tree, &nodes[k], j - k);
+				break;
+			case 1:
+				for (k = j; k > 0; k--)
+					node_remove(&tree, &nodes[k-1], k);
+				break;
+			case 2: {
+				node_t *start;
+				unsigned nnodes = j;
+
+				start = NULL;
+				do {
+					start = tree_iter(&tree, start,
+					    remove_iterate_cb, (void *)&nnodes);
+					nnodes--;
+				} while (start != NULL);
+				assert_u_eq(nnodes, 0,
+				    "Removal terminated early");
+				break;
+			} case 3: {
+				node_t *start;
+				unsigned nnodes = j;
+
+				start = NULL;
+				do {
+					start = tree_reverse_iter(&tree, start,
+					    remove_reverse_iterate_cb,
+					    (void *)&nnodes);
+					nnodes--;
+				} while (start != NULL);
+				assert_u_eq(nnodes, 0,
+				    "Removal terminated early");
+				break;
+			} default:
+				not_reached();
+			}
+		}
+	}
+	fini_gen_rand(sfmt);
+#undef NNODES
+#undef NBAGS
+#undef SEED
+}
+TEST_END
+
+int
+main(void)
+{
+
+	return (test(
+	    test_rb_empty,
+	    test_rb_random));
+}
diff --git a/test/unit/rtree.c b/test/unit/rtree.c
new file mode 100644
index 00000000..5e7a4113
--- /dev/null
+++ b/test/unit/rtree.c
@@ -0,0 +1,118 @@
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_rtree_get_empty)
+{
+	unsigned i;
+
+	for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) {
+		rtree_t *rtree = rtree_new(i, imalloc, idalloc);
+		assert_u_eq(rtree_get(rtree, 0), 0,
+		    "rtree_get() should return 0 for an empty tree");
+		rtree_delete(rtree);
+	}
+}
+TEST_END
+
+TEST_BEGIN(test_rtree_extrema)
+{
+	unsigned i;
+
+	for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) {
+		rtree_t *rtree = rtree_new(i, imalloc, idalloc);
+
+		rtree_set(rtree, 0, 1);
+		assert_u_eq(rtree_get(rtree, 0), 1,
+		    "rtree_get() should return previously set value");
+
+		rtree_set(rtree, ~((uintptr_t)0), 1);
+		assert_u_eq(rtree_get(rtree, ~((uintptr_t)0)), 1,
+		    "rtree_get() should return previously set value");
+
+		rtree_delete(rtree);
+	}
+}
+TEST_END
+
+TEST_BEGIN(test_rtree_bits)
+{
+	unsigned i, j, k;
+
+	for (i = 1; i < (sizeof(uintptr_t) << 3); i++) {
+		uintptr_t keys[] = {0, 1,
+		    (((uintptr_t)1) << (sizeof(uintptr_t)*8-i)) - 1};
+		rtree_t *rtree = rtree_new(i, imalloc, idalloc);
+
+		for (j = 0; j < sizeof(keys)/sizeof(uintptr_t); j++) {
+			rtree_set(rtree, keys[j], 1);
+			for (k = 0; k < sizeof(keys)/sizeof(uintptr_t); k++) {
+				assert_u_eq(rtree_get(rtree, keys[k]), 1,
+				    "rtree_get() should return previously set "
+				    "value and ignore insignificant key bits; "
+				    "i=%u, j=%u, k=%u, set key=%#jx, get "
+				    "key=%#jx", i, j, k, (uintmax_t)keys[j],
+				    (uintmax_t)keys[k]);
+			}
+			assert_u_eq(rtree_get(rtree,
+			    (((uintptr_t)1) << (sizeof(uintptr_t)*8-i))), 0,
+			    "Only leftmost rtree leaf should be set; "
+			    "i=%u, j=%u", i, j);
+			rtree_set(rtree, keys[j], 0);
+		}
+
+		rtree_delete(rtree);
+	}
+}
+TEST_END
+
+TEST_BEGIN(test_rtree_random)
+{
+	unsigned i;
+	sfmt_t *sfmt;
+#define NSET 100
+#define SEED 42
+
+	sfmt = init_gen_rand(SEED);
+	for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) {
+		rtree_t *rtree = rtree_new(i, imalloc, idalloc);
+		uintptr_t keys[NSET];
+		unsigned j;
+
+		for (j = 0; j < NSET; j++) {
+			keys[j] = (uintptr_t)gen_rand64(sfmt);
+			rtree_set(rtree, keys[j], 1);
+			assert_u_eq(rtree_get(rtree, keys[j]), 1,
+			    "rtree_get() should return previously set value");
+		}
+		for (j = 0; j < NSET; j++) {
+			assert_u_eq(rtree_get(rtree, keys[j]), 1,
+			    "rtree_get() should return previously set value");
+		}
+
+		for (j = 0; j < NSET; j++) {
+			rtree_set(rtree, keys[j], 0);
+			assert_u_eq(rtree_get(rtree, keys[j]), 0,
+			    "rtree_get() should return previously set value");
+		}
+		for (j = 0; j < NSET; j++) {
+			assert_u_eq(rtree_get(rtree, keys[j]), 0,
+			    
"rtree_get() should return previously set value"); + } + + rtree_delete(rtree); + } + fini_gen_rand(sfmt); +#undef NSET +#undef SEED +} +TEST_END + +int +main(void) +{ + + return (test( + test_rtree_get_empty, + test_rtree_extrema, + test_rtree_bits, + test_rtree_random)); +} diff --git a/test/unit/stats.c b/test/unit/stats.c new file mode 100644 index 00000000..6cd97730 --- /dev/null +++ b/test/unit/stats.c @@ -0,0 +1,373 @@ +#include "test/jemalloc_test.h" + +TEST_BEGIN(test_stats_summary) +{ + size_t *cactive; + size_t sz, allocated, active, mapped; + int expected = config_stats ? 0 : ENOENT; + + sz = sizeof(cactive); + assert_d_eq(mallctl("stats.cactive", &cactive, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + + sz = sizeof(size_t); + assert_d_eq(mallctl("stats.allocated", &allocated, &sz, NULL, 0), + expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.active", &active, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.mapped", &mapped, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + + if (config_stats) { + assert_zu_le(active, *cactive, + "active should be no larger than cactive"); + assert_zu_le(allocated, active, + "allocated should be no larger than active"); + assert_zu_le(active, mapped, + "active should be no larger than mapped"); + } +} +TEST_END + +TEST_BEGIN(test_stats_chunks) +{ + size_t current, total, high; + size_t sz = sizeof(size_t); + int expected = config_stats ? 0 : ENOENT; + + assert_d_eq(mallctl("stats.chunks.current", ¤t, &sz, NULL, 0), + expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.chunks.total", &total, &sz, NULL, 0), + expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.chunks.high", &high, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + + if (config_stats) { + assert_zu_le(current, high, + "current should be no larger than high"); + assert_zu_le(high, total, + "high should be no larger than total"); + } +} +TEST_END + +TEST_BEGIN(test_stats_huge) +{ + void *p; + uint64_t epoch; + size_t allocated; + uint64_t nmalloc, ndalloc; + size_t sz; + int expected = config_stats ? 0 : ENOENT; + + p = mallocx(arena_maxclass+1, 0); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + + assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0, + "Unexpected mallctl() failure"); + + sz = sizeof(size_t); + assert_d_eq(mallctl("stats.huge.allocated", &allocated, &sz, NULL, 0), + expected, "Unexpected mallctl() result"); + sz = sizeof(uint64_t); + assert_d_eq(mallctl("stats.huge.nmalloc", &nmalloc, &sz, NULL, 0), + expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.huge.ndalloc", &ndalloc, &sz, NULL, 0), + expected, "Unexpected mallctl() result"); + + if (config_stats) { + assert_zu_gt(allocated, 0, + "allocated should be greater than zero"); + assert_u64_ge(nmalloc, ndalloc, + "nmalloc should be at least as large as ndalloc"); + } + + dallocx(p, 0); +} +TEST_END + +TEST_BEGIN(test_stats_arenas_summary) +{ + unsigned arena; + void *small, *large; + uint64_t epoch; + size_t sz; + int expected = config_stats ? 
0 : ENOENT;
+	size_t mapped;
+	uint64_t npurge, nmadvise, purged;
+
+	arena = 0;
+	assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
+	    0, "Unexpected mallctl() failure");
+
+	small = mallocx(SMALL_MAXCLASS, 0);
+	assert_ptr_not_null(small, "Unexpected mallocx() failure");
+	large = mallocx(arena_maxclass, 0);
+	assert_ptr_not_null(large, "Unexpected mallocx() failure");
+
+	assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+	    "Unexpected mallctl() failure");
+
+	assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+	    "Unexpected mallctl() failure");
+
+	sz = sizeof(size_t);
+	assert_d_eq(mallctl("stats.arenas.0.mapped", &mapped, &sz, NULL, 0),
+	    expected, "Unexpected mallctl() result");
+	sz = sizeof(uint64_t);
+	assert_d_eq(mallctl("stats.arenas.0.npurge", &npurge, &sz, NULL, 0),
+	    expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.nmadvise", &nmadvise, &sz, NULL, 0),
+	    expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.purged", &purged, &sz, NULL, 0),
+	    expected, "Unexpected mallctl() result");
+
+	if (config_stats) {
+		assert_u64_gt(npurge, 0,
+		    "At least one purge should have occurred");
+		assert_u64_le(nmadvise, purged,
+		    "nmadvise should be no greater than purged");
+	}
+
+	dallocx(small, 0);
+	dallocx(large, 0);
+}
+TEST_END
+
+void *
+thd_start(void *arg)
+{
+
+	return (NULL);
+}
+
+static void
+no_lazy_lock(void)
+{
+	thd_t thd;
+
+	thd_create(&thd, thd_start, NULL);
+	thd_join(thd, NULL);
+}
+
+TEST_BEGIN(test_stats_arenas_small)
+{
+	unsigned arena;
+	void *p;
+	size_t sz, allocated;
+	uint64_t epoch, nmalloc, ndalloc, nrequests;
+	int expected = config_stats ? 0 : ENOENT;
+
+	no_lazy_lock();	/* Lazy locking would dodge tcache testing. */
+
+	arena = 0;
+	assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
+	    0, "Unexpected mallctl() failure");
+
+	p = mallocx(SMALL_MAXCLASS, 0);
+	assert_ptr_not_null(p, "Unexpected mallocx() failure");
+
+	assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
+	    config_tcache ? 0 : ENOENT, "Unexpected mallctl() result");
+
+	assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+	    "Unexpected mallctl() failure");
+
+	sz = sizeof(size_t);
+	assert_d_eq(mallctl("stats.arenas.0.small.allocated", &allocated, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");
+	sz = sizeof(uint64_t);
+	assert_d_eq(mallctl("stats.arenas.0.small.nmalloc", &nmalloc, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.small.ndalloc", &ndalloc, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.small.nrequests", &nrequests, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");
+
+	if (config_stats) {
+		assert_zu_gt(allocated, 0,
+		    "allocated should be greater than zero");
+		assert_u64_gt(nmalloc, 0,
+		    "nmalloc should be greater than zero");
+		assert_u64_ge(nmalloc, ndalloc,
+		    "nmalloc should be at least as large as ndalloc");
+		assert_u64_gt(nrequests, 0,
+		    "nrequests should be greater than zero");
+	}
+
+	dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_stats_arenas_large)
+{
+	unsigned arena;
+	void *p;
+	size_t sz, allocated;
+	uint64_t epoch, nmalloc, ndalloc, nrequests;
+	int expected = config_stats ? 
0 : ENOENT;
+
+	arena = 0;
+	assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
+	    0, "Unexpected mallctl() failure");
+
+	p = mallocx(arena_maxclass, 0);
+	assert_ptr_not_null(p, "Unexpected mallocx() failure");
+
+	assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+	    "Unexpected mallctl() failure");
+
+	sz = sizeof(size_t);
+	assert_d_eq(mallctl("stats.arenas.0.large.allocated", &allocated, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");
+	sz = sizeof(uint64_t);
+	assert_d_eq(mallctl("stats.arenas.0.large.nmalloc", &nmalloc, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.large.ndalloc", &ndalloc, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.large.nrequests", &nrequests, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");
+
+	if (config_stats) {
+		assert_zu_gt(allocated, 0,
+		    "allocated should be greater than zero");
+		assert_u64_gt(nmalloc, 0,
+		    "nmalloc should be greater than zero");
+		assert_u64_ge(nmalloc, ndalloc,
+		    "nmalloc should be at least as large as ndalloc");
+		assert_u64_gt(nrequests, 0,
+		    "nrequests should be greater than zero");
+	}
+
+	dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_stats_arenas_bins)
+{
+	unsigned arena;
+	void *p;
+	size_t sz, allocated, curruns;
+	uint64_t epoch, nmalloc, ndalloc, nrequests, nfills, nflushes;
+	uint64_t nruns, nreruns;
+	int expected = config_stats ? 0 : ENOENT;
+
+	arena = 0;
+	assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
+	    0, "Unexpected mallctl() failure");
+
+	p = mallocx(arena_bin_info[0].reg_size, 0);
+	assert_ptr_not_null(p, "Unexpected mallocx() failure");
+
+	assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
+	    config_tcache ? 0 : ENOENT, "Unexpected mallctl() result");
+
+	assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+	    "Unexpected mallctl() failure");
+
+	sz = sizeof(size_t);
+	assert_d_eq(mallctl("stats.arenas.0.bins.0.allocated", &allocated, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");
+	sz = sizeof(uint64_t);
+	assert_d_eq(mallctl("stats.arenas.0.bins.0.nmalloc", &nmalloc, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.bins.0.ndalloc", &ndalloc, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.bins.0.nrequests", &nrequests, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");
+
+	assert_d_eq(mallctl("stats.arenas.0.bins.0.nfills", &nfills, &sz,
+	    NULL, 0), config_tcache ? expected : ENOENT,
+	    "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.bins.0.nflushes", &nflushes, &sz,
+	    NULL, 0), config_tcache ? 
expected : ENOENT,
+	    "Unexpected mallctl() result");
+
+	assert_d_eq(mallctl("stats.arenas.0.bins.0.nruns", &nruns, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.bins.0.nreruns", &nreruns, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");
+	sz = sizeof(size_t);
+	assert_d_eq(mallctl("stats.arenas.0.bins.0.curruns", &curruns, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");
+
+	if (config_stats) {
+		assert_zu_gt(allocated, 0,
+		    "allocated should be greater than zero");
+		assert_u64_gt(nmalloc, 0,
+		    "nmalloc should be greater than zero");
+		assert_u64_ge(nmalloc, ndalloc,
+		    "nmalloc should be at least as large as ndalloc");
+		assert_u64_gt(nrequests, 0,
+		    "nrequests should be greater than zero");
+		if (config_tcache) {
+			assert_u64_gt(nfills, 0,
+			    "At least one fill should have occurred");
+			assert_u64_gt(nflushes, 0,
+			    "At least one flush should have occurred");
+		}
+		assert_u64_gt(nruns, 0,
+		    "At least one run should have been allocated");
+		assert_zu_gt(curruns, 0,
+		    "At least one run should be currently allocated");
+	}
+
+	dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_stats_arenas_lruns)
+{
+	unsigned arena;
+	void *p;
+	uint64_t epoch, nmalloc, ndalloc, nrequests;
+	size_t curruns, sz = sizeof(uint64_t);
+	int expected = config_stats ? 0 : ENOENT;
+
+	arena = 0;
+	assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
+	    0, "Unexpected mallctl() failure");
+
+	p = mallocx(SMALL_MAXCLASS+1, 0);
+	assert_ptr_not_null(p, "Unexpected mallocx() failure");
+
+	assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+	    "Unexpected mallctl() failure");
+
+	assert_d_eq(mallctl("stats.arenas.0.lruns.0.nmalloc", &nmalloc, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.lruns.0.ndalloc", &ndalloc, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.lruns.0.nrequests", &nrequests, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");
+	sz = sizeof(size_t);
+	assert_d_eq(mallctl("stats.arenas.0.lruns.0.curruns", &curruns, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");
+
+	if (config_stats) {
+		assert_u64_gt(nmalloc, 0,
+		    "nmalloc should be greater than zero");
+		assert_u64_ge(nmalloc, ndalloc,
+		    "nmalloc should be at least as large as ndalloc");
+		assert_u64_gt(nrequests, 0,
+		    "nrequests should be greater than zero");
+		assert_zu_gt(curruns, 0,
+		    "At least one run should be currently allocated");
+	}
+
+	dallocx(p, 0);
+}
+TEST_END
+
+int
+main(void)
+{
+
+	return (test(
+	    test_stats_summary,
+	    test_stats_chunks,
+	    test_stats_huge,
+	    test_stats_arenas_summary,
+	    test_stats_arenas_small,
+	    test_stats_arenas_large,
+	    test_stats_arenas_bins,
+	    test_stats_arenas_lruns));
+}
diff --git a/test/unit/tsd.c b/test/unit/tsd.c
new file mode 100644
index 00000000..f421c1a3
--- /dev/null
+++ b/test/unit/tsd.c
@@ -0,0 +1,71 @@
+#include "test/jemalloc_test.h"
+
+#define THREAD_DATA 0x72b65c10
+
+typedef unsigned int data_t;
+
+static bool data_cleanup_executed;
+
+void
+data_cleanup(void *arg)
+{
+	data_t *data = (data_t *)arg;
+
+	assert_x_eq(*data, THREAD_DATA,
+	    "Argument passed into cleanup function should match tsd value");
+	data_cleanup_executed = true;
+}
+
+malloc_tsd_protos(, data, data_t)
+malloc_tsd_externs(data, data_t)
+#define DATA_INIT 0x12345678
+malloc_tsd_data(, data, data_t, DATA_INIT)
+malloc_tsd_funcs(, data, data_t, DATA_INIT, data_cleanup)
+
+static void *
+thd_start(void *arg)
+{
+	data_t d = (data_t)(uintptr_t)arg;
+	
assert_x_eq(*data_tsd_get(), DATA_INIT, + "Initial tsd get should return initialization value"); + + data_tsd_set(&d); + assert_x_eq(*data_tsd_get(), d, + "After tsd set, tsd get should return value that was set"); + + d = 0; + assert_x_eq(*data_tsd_get(), (data_t)(uintptr_t)arg, + "Resetting local data should have no effect on tsd"); + + return (NULL); +} + +TEST_BEGIN(test_tsd_main_thread) +{ + + thd_start((void *) 0xa5f3e329); +} +TEST_END + +TEST_BEGIN(test_tsd_sub_thread) +{ + thd_t thd; + + data_cleanup_executed = false; + thd_create(&thd, thd_start, (void *)THREAD_DATA); + thd_join(thd, NULL); + assert_true(data_cleanup_executed, + "Cleanup function should have executed"); +} +TEST_END + +int +main(void) +{ + + data_tsd_boot(); + + return (test( + test_tsd_main_thread, + test_tsd_sub_thread)); +} diff --git a/test/unit/util.c b/test/unit/util.c new file mode 100644 index 00000000..dc3cfe8a --- /dev/null +++ b/test/unit/util.c @@ -0,0 +1,294 @@ +#include "test/jemalloc_test.h" + +TEST_BEGIN(test_pow2_ceil) +{ + unsigned i, pow2; + size_t x; + + assert_zu_eq(pow2_ceil(0), 0, "Unexpected result"); + + for (i = 0; i < sizeof(size_t) * 8; i++) { + assert_zu_eq(pow2_ceil(ZU(1) << i), ZU(1) << i, + "Unexpected result"); + } + + for (i = 2; i < sizeof(size_t) * 8; i++) { + assert_zu_eq(pow2_ceil((ZU(1) << i) - 1), ZU(1) << i, + "Unexpected result"); + } + + for (i = 0; i < sizeof(size_t) * 8 - 1; i++) { + assert_zu_eq(pow2_ceil((ZU(1) << i) + 1), ZU(1) << (i+1), + "Unexpected result"); + } + + for (pow2 = 1; pow2 < 25; pow2++) { + for (x = (ZU(1) << (pow2-1)) + 1; x <= ZU(1) << pow2; x++) { + assert_zu_eq(pow2_ceil(x), ZU(1) << pow2, + "Unexpected result, x=%zu", x); + } + } +} +TEST_END + +TEST_BEGIN(test_malloc_strtoumax_no_endptr) +{ + int err; + + set_errno(0); + assert_ju_eq(malloc_strtoumax("0", NULL, 0), 0, "Unexpected result"); + err = get_errno(); + assert_d_eq(err, 0, "Unexpected failure"); +} +TEST_END + +TEST_BEGIN(test_malloc_strtoumax) +{ + struct test_s { + const char *input; + const char *expected_remainder; + int base; + int expected_errno; + const char *expected_errno_name; + uintmax_t expected_x; + }; +#define ERR(e) e, #e +#define UMAX(x) ((uintmax_t)x##ULL) + struct test_s tests[] = { + {"0", "0", -1, ERR(EINVAL), UINTMAX_MAX}, + {"0", "0", 1, ERR(EINVAL), UINTMAX_MAX}, + {"0", "0", 37, ERR(EINVAL), UINTMAX_MAX}, + + {"", "", 0, ERR(EINVAL), UINTMAX_MAX}, + {"+", "+", 0, ERR(EINVAL), UINTMAX_MAX}, + {"++3", "++3", 0, ERR(EINVAL), UINTMAX_MAX}, + {"-", "-", 0, ERR(EINVAL), UINTMAX_MAX}, + + {"42", "", 0, ERR(0), UMAX(42)}, + {"+42", "", 0, ERR(0), UMAX(42)}, + {"-42", "", 0, ERR(0), UMAX(-42)}, + {"042", "", 0, ERR(0), UMAX(042)}, + {"+042", "", 0, ERR(0), UMAX(042)}, + {"-042", "", 0, ERR(0), UMAX(-042)}, + {"0x42", "", 0, ERR(0), UMAX(0x42)}, + {"+0x42", "", 0, ERR(0), UMAX(0x42)}, + {"-0x42", "", 0, ERR(0), UMAX(-0x42)}, + + {"0", "", 0, ERR(0), UMAX(0)}, + {"1", "", 0, ERR(0), UMAX(1)}, + + {"42", "", 0, ERR(0), UMAX(42)}, + {" 42", "", 0, ERR(0), UMAX(42)}, + {"42 ", " ", 0, ERR(0), UMAX(42)}, + {"0x", "x", 0, ERR(0), UMAX(0)}, + {"42x", "x", 0, ERR(0), UMAX(42)}, + + {"07", "", 0, ERR(0), UMAX(7)}, + {"010", "", 0, ERR(0), UMAX(8)}, + {"08", "8", 0, ERR(0), UMAX(0)}, + {"0_", "_", 0, ERR(0), UMAX(0)}, + + {"0x", "x", 0, ERR(0), UMAX(0)}, + {"0X", "X", 0, ERR(0), UMAX(0)}, + {"0xg", "xg", 0, ERR(0), UMAX(0)}, + {"0XA", "", 0, ERR(0), UMAX(10)}, + + {"010", "", 10, ERR(0), UMAX(10)}, + {"0x3", "x3", 10, ERR(0), UMAX(0)}, + + {"12", "2", 2, ERR(0), UMAX(1)}, + {"78", 
"8", 8, ERR(0), UMAX(7)}, + {"9a", "a", 10, ERR(0), UMAX(9)}, + {"9A", "A", 10, ERR(0), UMAX(9)}, + {"fg", "g", 16, ERR(0), UMAX(15)}, + {"FG", "G", 16, ERR(0), UMAX(15)}, + {"0xfg", "g", 16, ERR(0), UMAX(15)}, + {"0XFG", "G", 16, ERR(0), UMAX(15)}, + {"z_", "_", 36, ERR(0), UMAX(35)}, + {"Z_", "_", 36, ERR(0), UMAX(35)} + }; +#undef ERR +#undef UMAX + unsigned i; + + for (i = 0; i < sizeof(tests)/sizeof(struct test_s); i++) { + struct test_s *test = &tests[i]; + int err; + uintmax_t result; + char *remainder; + + set_errno(0); + result = malloc_strtoumax(test->input, &remainder, test->base); + err = get_errno(); + assert_d_eq(err, test->expected_errno, + "Expected errno %s for \"%s\", base %d", + test->expected_errno_name, test->input, test->base); + assert_str_eq(remainder, test->expected_remainder, + "Unexpected remainder for \"%s\", base %d", + test->input, test->base); + if (err == 0) { + assert_ju_eq(result, test->expected_x, + "Unexpected result for \"%s\", base %d", + test->input, test->base); + } + } +} +TEST_END + +TEST_BEGIN(test_malloc_snprintf_truncated) +{ +#define BUFLEN 15 + char buf[BUFLEN]; + int result; + size_t len; +#define TEST(expected_str_untruncated, fmt...) do { \ + result = malloc_snprintf(buf, len, fmt); \ + assert_d_eq(strncmp(buf, expected_str_untruncated, len-1), 0, \ + "Unexpected string inequality (\"%s\" vs \"%s\")", \ + buf, expected_str_untruncated); \ + assert_d_eq(result, strlen(expected_str_untruncated), \ + "Unexpected result"); \ +} while (0) + + for (len = 1; len < BUFLEN; len++) { + TEST("012346789", "012346789"); + TEST("a0123b", "a%sb", "0123"); + TEST("a01234567", "a%s%s", "0123", "4567"); + TEST("a0123 ", "a%-6s", "0123"); + TEST("a 0123", "a%6s", "0123"); + TEST("a 012", "a%6.3s", "0123"); + TEST("a 012", "a%*.*s", 6, 3, "0123"); + TEST("a 123b", "a% db", 123); + TEST("a123b", "a%-db", 123); + TEST("a-123b", "a%-db", -123); + TEST("a+123b", "a%+db", 123); + } +#undef BUFLEN +#undef TEST +} +TEST_END + +TEST_BEGIN(test_malloc_snprintf) +{ +#define BUFLEN 128 + char buf[BUFLEN]; + int result; +#define TEST(expected_str, fmt...) 
do { \ + result = malloc_snprintf(buf, sizeof(buf), fmt); \ + assert_str_eq(buf, expected_str, "Unexpected output"); \ + assert_d_eq(result, strlen(expected_str), "Unexpected result"); \ +} while (0) + + TEST("hello", "hello"); + + TEST("50%, 100%", "50%%, %d%%", 100); + + TEST("a0123b", "a%sb", "0123"); + + TEST("a 0123b", "a%5sb", "0123"); + TEST("a 0123b", "a%*sb", 5, "0123"); + + TEST("a0123 b", "a%-5sb", "0123"); + TEST("a0123b", "a%*sb", -1, "0123"); + TEST("a0123 b", "a%*sb", -5, "0123"); + TEST("a0123 b", "a%-*sb", -5, "0123"); + + TEST("a012b", "a%.3sb", "0123"); + TEST("a012b", "a%.*sb", 3, "0123"); + TEST("a0123b", "a%.*sb", -3, "0123"); + + TEST("a 012b", "a%5.3sb", "0123"); + TEST("a 012b", "a%5.*sb", 3, "0123"); + TEST("a 012b", "a%*.3sb", 5, "0123"); + TEST("a 012b", "a%*.*sb", 5, 3, "0123"); + TEST("a 0123b", "a%*.*sb", 5, -3, "0123"); + + TEST("_abcd_", "_%x_", 0xabcd); + TEST("_0xabcd_", "_%#x_", 0xabcd); + TEST("_1234_", "_%o_", 01234); + TEST("_01234_", "_%#o_", 01234); + TEST("_1234_", "_%u_", 1234); + + TEST("_1234_", "_%d_", 1234); + TEST("_ 1234_", "_% d_", 1234); + TEST("_+1234_", "_%+d_", 1234); + TEST("_-1234_", "_%d_", -1234); + TEST("_-1234_", "_% d_", -1234); + TEST("_-1234_", "_%+d_", -1234); + + TEST("_-1234_", "_%d_", -1234); + TEST("_1234_", "_%d_", 1234); + TEST("_-1234_", "_%i_", -1234); + TEST("_1234_", "_%i_", 1234); + TEST("_01234_", "_%#o_", 01234); + TEST("_1234_", "_%u_", 1234); + TEST("_0x1234abc_", "_%#x_", 0x1234abc); + TEST("_0X1234ABC_", "_%#X_", 0x1234abc); + TEST("_c_", "_%c_", 'c'); + TEST("_string_", "_%s_", "string"); + TEST("_0x42_", "_%p_", ((void *)0x42)); + + TEST("_-1234_", "_%ld_", ((long)-1234)); + TEST("_1234_", "_%ld_", ((long)1234)); + TEST("_-1234_", "_%li_", ((long)-1234)); + TEST("_1234_", "_%li_", ((long)1234)); + TEST("_01234_", "_%#lo_", ((long)01234)); + TEST("_1234_", "_%lu_", ((long)1234)); + TEST("_0x1234abc_", "_%#lx_", ((long)0x1234abc)); + TEST("_0X1234ABC_", "_%#lX_", ((long)0x1234ABC)); + + TEST("_-1234_", "_%lld_", ((long long)-1234)); + TEST("_1234_", "_%lld_", ((long long)1234)); + TEST("_-1234_", "_%lli_", ((long long)-1234)); + TEST("_1234_", "_%lli_", ((long long)1234)); + TEST("_01234_", "_%#llo_", ((long long)01234)); + TEST("_1234_", "_%llu_", ((long long)1234)); + TEST("_0x1234abc_", "_%#llx_", ((long long)0x1234abc)); + TEST("_0X1234ABC_", "_%#llX_", ((long long)0x1234ABC)); + + TEST("_-1234_", "_%qd_", ((long long)-1234)); + TEST("_1234_", "_%qd_", ((long long)1234)); + TEST("_-1234_", "_%qi_", ((long long)-1234)); + TEST("_1234_", "_%qi_", ((long long)1234)); + TEST("_01234_", "_%#qo_", ((long long)01234)); + TEST("_1234_", "_%qu_", ((long long)1234)); + TEST("_0x1234abc_", "_%#qx_", ((long long)0x1234abc)); + TEST("_0X1234ABC_", "_%#qX_", ((long long)0x1234ABC)); + + TEST("_-1234_", "_%jd_", ((intmax_t)-1234)); + TEST("_1234_", "_%jd_", ((intmax_t)1234)); + TEST("_-1234_", "_%ji_", ((intmax_t)-1234)); + TEST("_1234_", "_%ji_", ((intmax_t)1234)); + TEST("_01234_", "_%#jo_", ((intmax_t)01234)); + TEST("_1234_", "_%ju_", ((intmax_t)1234)); + TEST("_0x1234abc_", "_%#jx_", ((intmax_t)0x1234abc)); + TEST("_0X1234ABC_", "_%#jX_", ((intmax_t)0x1234ABC)); + + TEST("_1234_", "_%td_", ((ptrdiff_t)1234)); + TEST("_-1234_", "_%td_", ((ptrdiff_t)-1234)); + TEST("_1234_", "_%ti_", ((ptrdiff_t)1234)); + TEST("_-1234_", "_%ti_", ((ptrdiff_t)-1234)); + + TEST("_-1234_", "_%zd_", ((ssize_t)-1234)); + TEST("_1234_", "_%zd_", ((ssize_t)1234)); + TEST("_-1234_", "_%zi_", ((ssize_t)-1234)); + TEST("_1234_", "_%zi_", 
((ssize_t)1234));
+	TEST("_01234_", "_%#zo_", ((ssize_t)01234));
+	TEST("_1234_", "_%zu_", ((ssize_t)1234));
+	TEST("_0x1234abc_", "_%#zx_", ((ssize_t)0x1234abc));
+	TEST("_0X1234ABC_", "_%#zX_", ((ssize_t)0x1234ABC));
+#undef BUFLEN
+}
+TEST_END
+
+int
+main(void)
+{
+
+	return (test(
+	    test_pow2_ceil,
+	    test_malloc_strtoumax_no_endptr,
+	    test_malloc_strtoumax,
+	    test_malloc_snprintf_truncated,
+	    test_malloc_snprintf));
+}
diff --git a/test/unit/zero.c b/test/unit/zero.c
new file mode 100644
index 00000000..2fdae2fd
--- /dev/null
+++ b/test/unit/zero.c
@@ -0,0 +1,78 @@
+#include "test/jemalloc_test.h"
+
+#ifdef JEMALLOC_FILL
+const char *malloc_conf =
+    "abort:false,junk:false,zero:true,redzone:false,quarantine:0";
+#endif
+
+static void
+test_zero(size_t sz_min, size_t sz_max)
+{
+	char *s;
+	size_t sz_prev, sz, i;
+
+	sz_prev = 0;
+	s = (char *)mallocx(sz_min, 0);
+	assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");
+
+	for (sz = sallocx(s, 0); sz <= sz_max;
+	    sz_prev = sz, sz = sallocx(s, 0)) {
+		if (sz_prev > 0) {
+			assert_c_eq(s[0], 'a',
+			    "Previously allocated byte %zu/%zu is corrupted",
+			    ZU(0), sz_prev);
+			assert_c_eq(s[sz_prev-1], 'a',
+			    "Previously allocated byte %zu/%zu is corrupted",
+			    sz_prev-1, sz_prev);
+		}
+
+		for (i = sz_prev; i < sz; i++) {
+			assert_c_eq(s[i], 0x0,
+			    "Newly allocated byte %zu/%zu isn't zero-filled",
+			    i, sz);
+			s[i] = 'a';
+		}
+
+		if (xallocx(s, sz+1, 0, 0) == sz) {
+			s = (char *)rallocx(s, sz+1, 0);
+			assert_ptr_not_null((void *)s,
+			    "Unexpected rallocx() failure");
+		}
+	}
+
+	dallocx(s, 0);
+}
+
+TEST_BEGIN(test_zero_small)
+{
+
+	test_skip_if(!config_fill);
+	test_zero(1, SMALL_MAXCLASS-1);
+}
+TEST_END
+
+TEST_BEGIN(test_zero_large)
+{
+
+	test_skip_if(!config_fill);
+	test_zero(SMALL_MAXCLASS+1, arena_maxclass);
+}
+TEST_END
+
+TEST_BEGIN(test_zero_huge)
+{
+
+	test_skip_if(!config_fill);
+	test_zero(arena_maxclass+1, chunksize*2);
+}
+TEST_END
+
+int
+main(void)
+{
+
+	return (test(
+	    test_zero_small,
+	    test_zero_large,
+	    test_zero_huge));
+}
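
All of the unit tests added above share one harness pattern: each test body is
bracketed by TEST_BEGIN()/TEST_END, tests that depend on a compile-time option
bail out via test_skip_if(), and main() hands the test functions to test(),
which runs each of them. A minimal sketch of a new test file in this style (the
test name and assertion below are illustrative only, not part of this change):

	#include "test/jemalloc_test.h"

	TEST_BEGIN(test_example)
	{
		/* Allocate and release one byte via the *allocx() API. */
		void *p = mallocx(1, 0);

		assert_ptr_not_null(p, "Unexpected mallocx() failure");
		dallocx(p, 0);
	}
	TEST_END

	int
	main(void)
	{

		return (test(
		    test_example));
	}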