diff --git a/.gitmodules b/.gitmodules
deleted file mode 100644
index 8461fca..0000000
--- a/.gitmodules
+++ /dev/null
@@ -1,10 +0,0 @@
-[submodule "lib/folly"]
-	path = lib/folly
-	url = https://github.com/facebook/folly.git
-	branch = master
-[submodule "lib/google-gflags"]
-	path = lib/google-gflags
-	url = https://code.google.com/p/gflags/
-[submodule "lib/double-conversion"]
-	path = lib/double-conversion
-	url = https://code.google.com/p/double-conversion/
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 82bd5b1..9da3425 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,30 +1,5 @@
 cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
-project(doorkeeper)
-
-include(ExternalProject)
-
-add_subdirectory(lib/google-gflags)
-add_subdirectory(lib/double-conversion)
-
-configure_file(lib/double-conversion/src/utils.h include/double-conversion/utils.h COPYONLY)
-configure_file(lib/double-conversion/src/double-conversion.h include/double-conversion/double-conversion.h COPYONLY)
-
-set(FollyCustomLibs "-L${CMAKE_BINARY_DIR}/lib/google-gflags/lib -L${CMAKE_BINARY_DIR}/lib/google-glog/lib -L${CMAKE_BINARY_DIR}/lib/double-conversion/src")
-set(FollyCustomIncs "-I${CMAKE_BINARY_DIR}/include -I${CMAKE_BINARY_DIR}/lib/google-gflags/include -I${CMAKE_BINARY_DIR}/lib/folly/src/folly-build")
-ExternalProject_Add(jemalloc-3-6-0
-  SOURCE_DIR ${CMAKE_SOURCE_DIR}/lib/jemalloc-3.6.0
-  CONFIGURE_COMMAND ${CMAKE_SOURCE_DIR}/lib/jemalloc-3.6.0/configure --prefix=
-  PREFIX ${CMAKE_BINARY_DIR}/lib/jemalloc-3.6.0
-  BUILD_IN_SOURCE 0
-  BUILD_COMMAND make
-)
-ExternalProject_Add(folly
-  SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/lib/folly
-  CONFIGURE_COMMAND autoreconf -ivf ${CMAKE_SOURCE_DIR}/lib/folly/folly && ${CMAKE_SOURCE_DIR}/lib/folly/folly/configure "LDFLAGS=${FollyCustomLibs}" "CPPFLAGS=${FollyCustomIncs}" --prefix=
-  PREFIX ${CMAKE_BINARY_DIR}/lib/folly
-  BUILD_IN_SOURCE 0
-)
-link_directories(${CMAKE_BINARY_DIR}/lib/jemalloc-3.6.0/src/jemalloc-3-6-0-build/lib)
+project(doorkeeper CXX)
 
 set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -std=c++11 -g -O0 -Wall -Wextra")
 set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -std=c++11 -O3 -Wall -Wextra")
@@ -34,18 +9,8 @@ include_directories(
   src/
   include/
 )
-include_directories(SYSTEM
-  lib/folly
-  {CMAKE_BINARY_DIR}/lib/jemalloc-3.6.0/include
-)
 
 add_executable(${PROJECT_NAME}
   src/main.cpp
   src/doorkeeper.cpp
 )
-
-add_dependencies(${PROJECT_NAME} jemalloc-3-6-0 folly)
-add_dependencies(folly gflags double-conversion)
-target_link_libraries(${PROJECT_NAME}
-  jemalloc
-)
diff --git a/lib/double-conversion b/lib/double-conversion
deleted file mode 160000
index 0d25506..0000000
--- a/lib/double-conversion
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 0d25506a2e7fb52928963313343e0237e890059e
diff --git a/lib/folly b/lib/folly
deleted file mode 160000
index 6e46d46..0000000
--- a/lib/folly
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 6e46d468cf2876dd59c7a4dddcb4e37abf070b7a
diff --git a/lib/google-gflags b/lib/google-gflags
deleted file mode 160000
index 05b155f..0000000
--- a/lib/google-gflags
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 05b155ff59114735ec8cd089f669c4c3d8f59029
diff --git a/lib/jemalloc-3.6.0/.gitignore b/lib/jemalloc-3.6.0/.gitignore
deleted file mode 100644
index 4c408ec..0000000
--- a/lib/jemalloc-3.6.0/.gitignore
+++ /dev/null
@@ -1,72 +0,0 @@
-/*.gcov.*
-
-/autom4te.cache/
-
-/bin/jemalloc.sh
-
-/config.stamp
-/config.log
-/config.status
-/configure
-
-/doc/html.xsl
-/doc/manpages.xsl
-/doc/jemalloc.xml
-/doc/jemalloc.html
-/doc/jemalloc.3 - -/lib/ - -/Makefile - -/include/jemalloc/internal/jemalloc_internal.h -/include/jemalloc/internal/jemalloc_internal_defs.h -/include/jemalloc/internal/private_namespace.h -/include/jemalloc/internal/private_unnamespace.h -/include/jemalloc/internal/public_namespace.h -/include/jemalloc/internal/public_symbols.txt -/include/jemalloc/internal/public_unnamespace.h -/include/jemalloc/internal/size_classes.h -/include/jemalloc/jemalloc.h -/include/jemalloc/jemalloc_defs.h -/include/jemalloc/jemalloc_macros.h -/include/jemalloc/jemalloc_mangle.h -/include/jemalloc/jemalloc_mangle_jet.h -/include/jemalloc/jemalloc_protos.h -/include/jemalloc/jemalloc_protos_jet.h -/include/jemalloc/jemalloc_rename.h - -/src/*.[od] -/src/*.gcda -/src/*.gcno - -/test/test.sh -test/include/test/jemalloc_test.h -test/include/test/jemalloc_test_defs.h - -/test/integration/[A-Za-z]* -!/test/integration/[A-Za-z]*.* -/test/integration/*.[od] -/test/integration/*.gcda -/test/integration/*.gcno -/test/integration/*.out - -/test/src/*.[od] -/test/src/*.gcda -/test/src/*.gcno - -/test/stress/[A-Za-z]* -!/test/stress/[A-Za-z]*.* -/test/stress/*.[od] -/test/stress/*.gcda -/test/stress/*.gcno -/test/stress/*.out - -/test/unit/[A-Za-z]* -!/test/unit/[A-Za-z]*.* -/test/unit/*.[od] -/test/unit/*.gcda -/test/unit/*.gcno -/test/unit/*.out - -/VERSION diff --git a/lib/jemalloc-3.6.0/COPYING b/lib/jemalloc-3.6.0/COPYING deleted file mode 100644 index bdda0fe..0000000 --- a/lib/jemalloc-3.6.0/COPYING +++ /dev/null @@ -1,27 +0,0 @@ -Unless otherwise specified, files in the jemalloc source distribution are -subject to the following license: --------------------------------------------------------------------------------- -Copyright (C) 2002-2014 Jason Evans . -All rights reserved. -Copyright (C) 2007-2012 Mozilla Foundation. All rights reserved. -Copyright (C) 2009-2014 Facebook, Inc. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: -1. Redistributions of source code must retain the above copyright notice(s), - this list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright notice(s), - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY EXPRESS -OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO -EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY DIRECT, INDIRECT, -INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE -OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF -ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. --------------------------------------------------------------------------------- diff --git a/lib/jemalloc-3.6.0/ChangeLog b/lib/jemalloc-3.6.0/ChangeLog deleted file mode 100644 index d56ee99..0000000 --- a/lib/jemalloc-3.6.0/ChangeLog +++ /dev/null @@ -1,548 +0,0 @@ -Following are change highlights associated with official releases. 
Important -bug fixes are all mentioned, but internal enhancements are omitted here for -brevity (even though they are more fun to write about). Much more detail can be -found in the git revision history: - - https://github.com/jemalloc/jemalloc - -* 3.6.0 (March 31, 2014) - - This version contains a critical bug fix for a regression present in 3.5.0 and - 3.5.1. - - Bug fixes: - - Fix a regression in arena_chunk_alloc() that caused crashes during - small/large allocation if chunk allocation failed. In the absence of this - bug, chunk allocation failure would result in allocation failure, e.g. NULL - return from malloc(). This regression was introduced in 3.5.0. - - Fix backtracing for gcc intrinsics-based backtracing by specifying - -fno-omit-frame-pointer to gcc. Note that the application (and all the - libraries it links to) must also be compiled with this option for - backtracing to be reliable. - - Use dss allocation precedence for huge allocations as well as small/large - allocations. - - Fix test assertion failure message formatting. This bug did not manifect on - x86_64 systems because of implementation subtleties in va_list. - - Fix inconsequential test failures for hash and SFMT code. - - New features: - - Support heap profiling on FreeBSD. This feature depends on the proc - filesystem being mounted during heap profile dumping. - -* 3.5.1 (February 25, 2014) - - This version primarily addresses minor bugs in test code. - - Bug fixes: - - Configure Solaris/Illumos to use MADV_FREE. - - Fix junk filling for mremap(2)-based huge reallocation. This is only - relevant if configuring with the --enable-mremap option specified. - - Avoid compilation failure if 'restrict' C99 keyword is not supported by the - compiler. - - Add a configure test for SSE2 rather than assuming it is usable on i686 - systems. This fixes test compilation errors, especially on 32-bit Linux - systems. - - Fix mallctl argument size mismatches (size_t vs. uint64_t) in the stats unit - test. - - Fix/remove flawed alignment-related overflow tests. - - Prevent compiler optimizations that could change backtraces in the - prof_accum unit test. - -* 3.5.0 (January 22, 2014) - - This version focuses on refactoring and automated testing, though it also - includes some non-trivial heap profiling optimizations not mentioned below. - - New features: - - Add the *allocx() API, which is a successor to the experimental *allocm() - API. The *allocx() functions are slightly simpler to use because they have - fewer parameters, they directly return the results of primary interest, and - mallocx()/rallocx() avoid the strict aliasing pitfall that - allocm()/rallocm() share with posix_memalign(). Note that *allocm() is - slated for removal in the next non-bugfix release. - - Add support for LinuxThreads. - - Bug fixes: - - Unless heap profiling is enabled, disable floating point code and don't link - with libm. This, in combination with e.g. EXTRA_CFLAGS=-mno-sse on x64 - systems, makes it possible to completely disable floating point register - use. Some versions of glibc neglect to save/restore caller-saved floating - point registers during dynamic lazy symbol loading, and the symbol loading - code uses whatever malloc the application happens to have linked/loaded - with, the result being potential floating point register corruption. - - Report ENOMEM rather than EINVAL if an OOM occurs during heap profiling - backtrace creation in imemalign(). This bug impacted posix_memalign() and - aligned_alloc(). 
- - Fix a file descriptor leak in a prof_dump_maps() error path. - - Fix prof_dump() to close the dump file descriptor for all relevant error - paths. - - Fix rallocm() to use the arena specified by the ALLOCM_ARENA(s) flag for - allocation, not just deallocation. - - Fix a data race for large allocation stats counters. - - Fix a potential infinite loop during thread exit. This bug occurred on - Solaris, and could affect other platforms with similar pthreads TSD - implementations. - - Don't junk-fill reallocations unless usable size changes. This fixes a - violation of the *allocx()/*allocm() semantics. - - Fix growing large reallocation to junk fill new space. - - Fix huge deallocation to junk fill when munmap is disabled. - - Change the default private namespace prefix from empty to je_, and change - --with-private-namespace-prefix so that it prepends an additional prefix - rather than replacing je_. This reduces the likelihood of applications - which statically link jemalloc experiencing symbol name collisions. - - Add missing private namespace mangling (relevant when - --with-private-namespace is specified). - - Add and use JEMALLOC_INLINE_C so that static inline functions are marked as - static even for debug builds. - - Add a missing mutex unlock in a malloc_init_hard() error path. In practice - this error path is never executed. - - Fix numerous bugs in malloc_strotumax() error handling/reporting. These - bugs had no impact except for malformed inputs. - - Fix numerous bugs in malloc_snprintf(). These bugs were not exercised by - existing calls, so they had no impact. - -* 3.4.1 (October 20, 2013) - - Bug fixes: - - Fix a race in the "arenas.extend" mallctl that could cause memory corruption - of internal data structures and subsequent crashes. - - Fix Valgrind integration flaws that caused Valgrind warnings about reads of - uninitialized memory in: - + arena chunk headers - + internal zero-initialized data structures (relevant to tcache and prof - code) - - Preserve errno during the first allocation. A readlink(2) call during - initialization fails unless /etc/malloc.conf exists, so errno was typically - set during the first allocation prior to this fix. - - Fix compilation warnings reported by gcc 4.8.1. - -* 3.4.0 (June 2, 2013) - - This version is essentially a small bugfix release, but the addition of - aarch64 support requires that the minor version be incremented. - - Bug fixes: - - Fix race-triggered deadlocks in chunk_record(). These deadlocks were - typically triggered by multiple threads concurrently deallocating huge - objects. - - New features: - - Add support for the aarch64 architecture. - -* 3.3.1 (March 6, 2013) - - This version fixes bugs that are typically encountered only when utilizing - custom run-time options. - - Bug fixes: - - Fix a locking order bug that could cause deadlock during fork if heap - profiling were enabled. - - Fix a chunk recycling bug that could cause the allocator to lose track of - whether a chunk was zeroed. On FreeBSD, NetBSD, and OS X, it could cause - corruption if allocating via sbrk(2) (unlikely unless running with the - "dss:primary" option specified). This was completely harmless on Linux - unless using mlockall(2) (and unlikely even then, unless the - --disable-munmap configure option or the "dss:primary" option was - specified). This regression was introduced in 3.1.0 by the - mlockall(2)/madvise(2) interaction fix. - - Fix TLS-related memory corruption that could occur during thread exit if the - thread never allocated memory. 
Only the quarantine and prof facilities were - susceptible. - - Fix two quarantine bugs: - + Internal reallocation of the quarantined object array leaked the old - array. - + Reallocation failure for internal reallocation of the quarantined object - array (very unlikely) resulted in memory corruption. - - Fix Valgrind integration to annotate all internally allocated memory in a - way that keeps Valgrind happy about internal data structure access. - - Fix building for s390 systems. - -* 3.3.0 (January 23, 2013) - - This version includes a few minor performance improvements in addition to the - listed new features and bug fixes. - - New features: - - Add clipping support to lg_chunk option processing. - - Add the --enable-ivsalloc option. - - Add the --without-export option. - - Add the --disable-zone-allocator option. - - Bug fixes: - - Fix "arenas.extend" mallctl to output the number of arenas. - - Fix chunk_recycle() to unconditionally inform Valgrind that returned memory - is undefined. - - Fix build break on FreeBSD related to alloca.h. - -* 3.2.0 (November 9, 2012) - - In addition to a couple of bug fixes, this version modifies page run - allocation and dirty page purging algorithms in order to better control - page-level virtual memory fragmentation. - - Incompatible changes: - - Change the "opt.lg_dirty_mult" default from 5 to 3 (32:1 to 8:1). - - Bug fixes: - - Fix dss/mmap allocation precedence code to use recyclable mmap memory only - after primary dss allocation fails. - - Fix deadlock in the "arenas.purge" mallctl. This regression was introduced - in 3.1.0 by the addition of the "arena..purge" mallctl. - -* 3.1.0 (October 16, 2012) - - New features: - - Auto-detect whether running inside Valgrind, thus removing the need to - manually specify MALLOC_CONF=valgrind:true. - - Add the "arenas.extend" mallctl, which allows applications to create - manually managed arenas. - - Add the ALLOCM_ARENA() flag for {,r,d}allocm(). - - Add the "opt.dss", "arena..dss", and "stats.arenas..dss" mallctls, - which provide control over dss/mmap precedence. - - Add the "arena..purge" mallctl, which obsoletes "arenas.purge". - - Define LG_QUANTUM for hppa. - - Incompatible changes: - - Disable tcache by default if running inside Valgrind, in order to avoid - making unallocated objects appear reachable to Valgrind. - - Drop const from malloc_usable_size() argument on Linux. - - Bug fixes: - - Fix heap profiling crash if sampled object is freed via realloc(p, 0). - - Remove const from __*_hook variable declarations, so that glibc can modify - them during process forking. - - Fix mlockall(2)/madvise(2) interaction. - - Fix fork(2)-related deadlocks. - - Fix error return value for "thread.tcache.enabled" mallctl. - -* 3.0.0 (May 11, 2012) - - Although this version adds some major new features, the primary focus is on - internal code cleanup that facilitates maintainability and portability, most - of which is not reflected in the ChangeLog. This is the first release to - incorporate substantial contributions from numerous other developers, and the - result is a more broadly useful allocator (see the git revision history for - contribution details). Note that the license has been unified, thanks to - Facebook granting a license under the same terms as the other copyright - holders (see COPYING). - - New features: - - Implement Valgrind support, redzones, and quarantine. 
- - Add support for additional platforms: - + FreeBSD - + Mac OS X Lion - + MinGW - + Windows (no support yet for replacing the system malloc) - - Add support for additional architectures: - + MIPS - + SH4 - + Tilera - - Add support for cross compiling. - - Add nallocm(), which rounds a request size up to the nearest size class - without actually allocating. - - Implement aligned_alloc() (blame C11). - - Add the "thread.tcache.enabled" mallctl. - - Add the "opt.prof_final" mallctl. - - Update pprof (from gperftools 2.0). - - Add the --with-mangling option. - - Add the --disable-experimental option. - - Add the --disable-munmap option, and make it the default on Linux. - - Add the --enable-mremap option, which disables use of mremap(2) by default. - - Incompatible changes: - - Enable stats by default. - - Enable fill by default. - - Disable lazy locking by default. - - Rename the "tcache.flush" mallctl to "thread.tcache.flush". - - Rename the "arenas.pagesize" mallctl to "arenas.page". - - Change the "opt.lg_prof_sample" default from 0 to 19 (1 B to 512 KiB). - - Change the "opt.prof_accum" default from true to false. - - Removed features: - - Remove the swap feature, including the "config.swap", "swap.avail", - "swap.prezeroed", "swap.nfds", and "swap.fds" mallctls. - - Remove highruns statistics, including the - "stats.arenas..bins..highruns" and - "stats.arenas..lruns..highruns" mallctls. - - As part of small size class refactoring, remove the "opt.lg_[qc]space_max", - "arenas.cacheline", "arenas.subpage", "arenas.[tqcs]space_{min,max}", and - "arenas.[tqcs]bins" mallctls. - - Remove the "arenas.chunksize" mallctl. - - Remove the "opt.lg_prof_tcmax" option. - - Remove the "opt.lg_prof_bt_max" option. - - Remove the "opt.lg_tcache_gc_sweep" option. - - Remove the --disable-tiny option, including the "config.tiny" mallctl. - - Remove the --enable-dynamic-page-shift configure option. - - Remove the --enable-sysv configure option. - - Bug fixes: - - Fix a statistics-related bug in the "thread.arena" mallctl that could cause - invalid statistics and crashes. - - Work around TLS deallocation via free() on Linux. This bug could cause - write-after-free memory corruption. - - Fix a potential deadlock that could occur during interval- and - growth-triggered heap profile dumps. - - Fix large calloc() zeroing bugs due to dropping chunk map unzeroed flags. - - Fix chunk_alloc_dss() to stop claiming memory is zeroed. This bug could - cause memory corruption and crashes with --enable-dss specified. - - Fix fork-related bugs that could cause deadlock in children between fork - and exec. - - Fix malloc_stats_print() to honor 'b' and 'l' in the opts parameter. - - Fix realloc(p, 0) to act like free(p). - - Do not enforce minimum alignment in memalign(). - - Check for NULL pointer in malloc_usable_size(). - - Fix an off-by-one heap profile statistics bug that could be observed in - interval- and growth-triggered heap profiles. - - Fix the "epoch" mallctl to update cached stats even if the passed in epoch - is 0. - - Fix bin->runcur management to fix a layout policy bug. This bug did not - affect correctness. - - Fix a bug in choose_arena_hard() that potentially caused more arenas to be - initialized than necessary. - - Add missing "opt.lg_tcache_max" mallctl implementation. - - Use glibc allocator hooks to make mixed allocator usage less likely. - - Fix build issues for --disable-tcache. - - Don't mangle pthread_create() when --with-private-namespace is specified. 
- -* 2.2.5 (November 14, 2011) - - Bug fixes: - - Fix huge_ralloc() race when using mremap(2). This is a serious bug that - could cause memory corruption and/or crashes. - - Fix huge_ralloc() to maintain chunk statistics. - - Fix malloc_stats_print(..., "a") output. - -* 2.2.4 (November 5, 2011) - - Bug fixes: - - Initialize arenas_tsd before using it. This bug existed for 2.2.[0-3], as - well as for --disable-tls builds in earlier releases. - - Do not assume a 4 KiB page size in test/rallocm.c. - -* 2.2.3 (August 31, 2011) - - This version fixes numerous bugs related to heap profiling. - - Bug fixes: - - Fix a prof-related race condition. This bug could cause memory corruption, - but only occurred in non-default configurations (prof_accum:false). - - Fix off-by-one backtracing issues (make sure that prof_alloc_prep() is - excluded from backtraces). - - Fix a prof-related bug in realloc() (only triggered by OOM errors). - - Fix prof-related bugs in allocm() and rallocm(). - - Fix prof_tdata_cleanup() for --disable-tls builds. - - Fix a relative include path, to fix objdir builds. - -* 2.2.2 (July 30, 2011) - - Bug fixes: - - Fix a build error for --disable-tcache. - - Fix assertions in arena_purge() (for real this time). - - Add the --with-private-namespace option. This is a workaround for symbol - conflicts that can inadvertently arise when using static libraries. - -* 2.2.1 (March 30, 2011) - - Bug fixes: - - Implement atomic operations for x86/x64. This fixes compilation failures - for versions of gcc that are still in wide use. - - Fix an assertion in arena_purge(). - -* 2.2.0 (March 22, 2011) - - This version incorporates several improvements to algorithms and data - structures that tend to reduce fragmentation and increase speed. - - New features: - - Add the "stats.cactive" mallctl. - - Update pprof (from google-perftools 1.7). - - Improve backtracing-related configuration logic, and add the - --disable-prof-libgcc option. - - Bug fixes: - - Change default symbol visibility from "internal", to "hidden", which - decreases the overhead of library-internal function calls. - - Fix symbol visibility so that it is also set on OS X. - - Fix a build dependency regression caused by the introduction of the .pic.o - suffix for PIC object files. - - Add missing checks for mutex initialization failures. - - Don't use libgcc-based backtracing except on x64, where it is known to work. - - Fix deadlocks on OS X that were due to memory allocation in - pthread_mutex_lock(). - - Heap profiling-specific fixes: - + Fix memory corruption due to integer overflow in small region index - computation, when using a small enough sample interval that profiling - context pointers are stored in small run headers. - + Fix a bootstrap ordering bug that only occurred with TLS disabled. - + Fix a rallocm() rsize bug. - + Fix error detection bugs for aligned memory allocation. - -* 2.1.3 (March 14, 2011) - - Bug fixes: - - Fix a cpp logic regression (due to the "thread.{de,}allocatedp" mallctl fix - for OS X in 2.1.2). - - Fix a "thread.arena" mallctl bug. - - Fix a thread cache stats merging bug. - -* 2.1.2 (March 2, 2011) - - Bug fixes: - - Fix "thread.{de,}allocatedp" mallctl for OS X. - - Add missing jemalloc.a to build system. - -* 2.1.1 (January 31, 2011) - - Bug fixes: - - Fix aligned huge reallocation (affected allocm()). - - Fix the ALLOCM_LG_ALIGN macro definition. - - Fix a heap dumping deadlock. - - Fix a "thread.arena" mallctl bug. 
- -* 2.1.0 (December 3, 2010) - - This version incorporates some optimizations that can't quite be considered - bug fixes. - - New features: - - Use Linux's mremap(2) for huge object reallocation when possible. - - Avoid locking in mallctl*() when possible. - - Add the "thread.[de]allocatedp" mallctl's. - - Convert the manual page source from roff to DocBook, and generate both roff - and HTML manuals. - - Bug fixes: - - Fix a crash due to incorrect bootstrap ordering. This only impacted - --enable-debug --enable-dss configurations. - - Fix a minor statistics bug for mallctl("swap.avail", ...). - -* 2.0.1 (October 29, 2010) - - Bug fixes: - - Fix a race condition in heap profiling that could cause undefined behavior - if "opt.prof_accum" were disabled. - - Add missing mutex unlocks for some OOM error paths in the heap profiling - code. - - Fix a compilation error for non-C99 builds. - -* 2.0.0 (October 24, 2010) - - This version focuses on the experimental *allocm() API, and on improved - run-time configuration/introspection. Nonetheless, numerous performance - improvements are also included. - - New features: - - Implement the experimental {,r,s,d}allocm() API, which provides a superset - of the functionality available via malloc(), calloc(), posix_memalign(), - realloc(), malloc_usable_size(), and free(). These functions can be used to - allocate/reallocate aligned zeroed memory, ask for optional extra memory - during reallocation, prevent object movement during reallocation, etc. - - Replace JEMALLOC_OPTIONS/JEMALLOC_PROF_PREFIX with MALLOC_CONF, which is - more human-readable, and more flexible. For example: - JEMALLOC_OPTIONS=AJP - is now: - MALLOC_CONF=abort:true,fill:true,stats_print:true - - Port to Apple OS X. Sponsored by Mozilla. - - Make it possible for the application to control thread-->arena mappings via - the "thread.arena" mallctl. - - Add compile-time support for all TLS-related functionality via pthreads TSD. - This is mainly of interest for OS X, which does not support TLS, but has a - TSD implementation with similar performance. - - Override memalign() and valloc() if they are provided by the system. - - Add the "arenas.purge" mallctl, which can be used to synchronously purge all - dirty unused pages. - - Make cumulative heap profiling data optional, so that it is possible to - limit the amount of memory consumed by heap profiling data structures. - - Add per thread allocation counters that can be accessed via the - "thread.allocated" and "thread.deallocated" mallctls. - - Incompatible changes: - - Remove JEMALLOC_OPTIONS and malloc_options (see MALLOC_CONF above). - - Increase default backtrace depth from 4 to 128 for heap profiling. - - Disable interval-based profile dumps by default. - - Bug fixes: - - Remove bad assertions in fork handler functions. These assertions could - cause aborts for some combinations of configure settings. - - Fix strerror_r() usage to deal with non-standard semantics in GNU libc. - - Fix leak context reporting. This bug tended to cause the number of contexts - to be underreported (though the reported number of objects and bytes were - correct). - - Fix a realloc() bug for large in-place growing reallocation. This bug could - cause memory corruption, but it was hard to trigger. - - Fix an allocation bug for small allocations that could be triggered if - multiple threads raced to create a new run of backing pages. - - Enhance the heap profiler to trigger samples based on usable size, rather - than request size. 
- - Fix a heap profiling bug due to sometimes losing track of requested object - size for sampled objects. - -* 1.0.3 (August 12, 2010) - - Bug fixes: - - Fix the libunwind-based implementation of stack backtracing (used for heap - profiling). This bug could cause zero-length backtraces to be reported. - - Add a missing mutex unlock in library initialization code. If multiple - threads raced to initialize malloc, some of them could end up permanently - blocked. - -* 1.0.2 (May 11, 2010) - - Bug fixes: - - Fix junk filling of large objects, which could cause memory corruption. - - Add MAP_NORESERVE support for chunk mapping, because otherwise virtual - memory limits could cause swap file configuration to fail. Contributed by - Jordan DeLong. - -* 1.0.1 (April 14, 2010) - - Bug fixes: - - Fix compilation when --enable-fill is specified. - - Fix threads-related profiling bugs that affected accuracy and caused memory - to be leaked during thread exit. - - Fix dirty page purging race conditions that could cause crashes. - - Fix crash in tcache flushing code during thread destruction. - -* 1.0.0 (April 11, 2010) - - This release focuses on speed and run-time introspection. Numerous - algorithmic improvements make this release substantially faster than its - predecessors. - - New features: - - Implement autoconf-based configuration system. - - Add mallctl*(), for the purposes of introspection and run-time - configuration. - - Make it possible for the application to manually flush a thread's cache, via - the "tcache.flush" mallctl. - - Base maximum dirty page count on proportion of active memory. - - Compute various addtional run-time statistics, including per size class - statistics for large objects. - - Expose malloc_stats_print(), which can be called repeatedly by the - application. - - Simplify the malloc_message() signature to only take one string argument, - and incorporate an opaque data pointer argument for use by the application - in combination with malloc_stats_print(). - - Add support for allocation backed by one or more swap files, and allow the - application to disable over-commit if swap files are in use. - - Implement allocation profiling and leak checking. - - Removed features: - - Remove the dynamic arena rebalancing code, since thread-specific caching - reduces its utility. - - Bug fixes: - - Modify chunk allocation to work when address space layout randomization - (ASLR) is in use. - - Fix thread cleanup bugs related to TLS destruction. - - Handle 0-size allocation requests in posix_memalign(). - - Fix a chunk leak. The leaked chunks were never touched, so this impacted - virtual memory usage, but not physical memory usage. - -* linux_2008082[78]a (August 27/28, 2008) - - These snapshot releases are the simple result of incorporating Linux-specific - support into the FreeBSD malloc sources. - --------------------------------------------------------------------------------- -vim:filetype=text:textwidth=80 diff --git a/lib/jemalloc-3.6.0/INSTALL b/lib/jemalloc-3.6.0/INSTALL deleted file mode 100644 index 841704d..0000000 --- a/lib/jemalloc-3.6.0/INSTALL +++ /dev/null @@ -1,306 +0,0 @@ -Building and installing jemalloc can be as simple as typing the following while -in the root directory of the source tree: - - ./configure - make - make install - -=== Advanced configuration ===================================================== - -The 'configure' script supports numerous options that allow control of which -functionality is enabled, where jemalloc is installed, etc. 
Optionally, pass -any of the following arguments (not a definitive list) to 'configure': - ---help - Print a definitive list of options. - ---prefix= - Set the base directory in which to install. For example: - - ./configure --prefix=/usr/local - - will cause files to be installed into /usr/local/include, /usr/local/lib, - and /usr/local/man. - ---with-rpath= - Embed one or more library paths, so that libjemalloc can find the libraries - it is linked to. This works only on ELF-based systems. - ---with-mangling= - Mangle public symbols specified in which is a comma-separated list of - name:mangled pairs. - - For example, to use ld's --wrap option as an alternative method for - overriding libc's malloc implementation, specify something like: - - --with-mangling=malloc:__wrap_malloc,free:__wrap_free[...] - - Note that mangling happens prior to application of the prefix specified by - --with-jemalloc-prefix, and mangled symbols are then ignored when applying - the prefix. - ---with-jemalloc-prefix= - Prefix all public APIs with . For example, if is - "prefix_", API changes like the following occur: - - malloc() --> prefix_malloc() - malloc_conf --> prefix_malloc_conf - /etc/malloc.conf --> /etc/prefix_malloc.conf - MALLOC_CONF --> PREFIX_MALLOC_CONF - - This makes it possible to use jemalloc at the same time as the system - allocator, or even to use multiple copies of jemalloc simultaneously. - - By default, the prefix is "", except on OS X, where it is "je_". On OS X, - jemalloc overlays the default malloc zone, but makes no attempt to actually - replace the "malloc", "calloc", etc. symbols. - ---without-export - Don't export public APIs. This can be useful when building jemalloc as a - static library, or to avoid exporting public APIs when using the zone - allocator on OSX. - ---with-private-namespace= - Prefix all library-private APIs with je_. For shared libraries, - symbol visibility mechanisms prevent these symbols from being exported, but - for static libraries, naming collisions are a real possibility. By - default, is empty, which results in a symbol prefix of je_ . - ---with-install-suffix= - Append to the base name of all installed files, such that multiple - versions of jemalloc can coexist in the same installation directory. For - example, libjemalloc.so.0 becomes libjemalloc.so.0. - ---enable-cc-silence - Enable code that silences non-useful compiler warnings. This is helpful - when trying to tell serious warnings from those due to compiler - limitations, but it potentially incurs a performance penalty. - ---enable-debug - Enable assertions and validation code. This incurs a substantial - performance hit, but is very useful during application development. - Implies --enable-ivsalloc. - ---enable-code-coverage - Enable code coverage support, for use during jemalloc test development. - Additional testing targets are available if this option is enabled: - - coverage - coverage_unit - coverage_integration - coverage_stress - - These targets do not clear code coverage results from previous runs, and - there are interactions between the various coverage targets, so it is - usually advisable to run 'make clean' between repeated code coverage runs. - ---enable-ivsalloc - Enable validation code, which verifies that pointers reside within - jemalloc-owned chunks before dereferencing them. This incurs a substantial - performance hit. - ---disable-stats - Disable statistics gathering functionality. See the "opt.stats_print" - option documentation for usage details. 
- ---enable-prof - Enable heap profiling and leak detection functionality. See the "opt.prof" - option documentation for usage details. When enabled, there are several - approaches to backtracing, and the configure script chooses the first one - in the following list that appears to function correctly: - - + libunwind (requires --enable-prof-libunwind) - + libgcc (unless --disable-prof-libgcc) - + gcc intrinsics (unless --disable-prof-gcc) - ---enable-prof-libunwind - Use the libunwind library (http://www.nongnu.org/libunwind/) for stack - backtracing. - ---disable-prof-libgcc - Disable the use of libgcc's backtracing functionality. - ---disable-prof-gcc - Disable the use of gcc intrinsics for backtracing. - ---with-static-libunwind= - Statically link against the specified libunwind.a rather than dynamically - linking with -lunwind. - ---disable-tcache - Disable thread-specific caches for small objects. Objects are cached and - released in bulk, thus reducing the total number of mutex operations. See - the "opt.tcache" option for usage details. - ---enable-mremap - Enable huge realloc() via mremap(2). mremap() is disabled by default - because the flavor used is specific to Linux, which has a quirk in its - virtual memory allocation algorithm that causes semi-permanent VM map holes - under normal jemalloc operation. - ---disable-munmap - Disable virtual memory deallocation via munmap(2); instead keep track of - the virtual memory for later use. munmap() is disabled by default (i.e. - --disable-munmap is implied) on Linux, which has a quirk in its virtual - memory allocation algorithm that causes semi-permanent VM map holes under - normal jemalloc operation. - ---enable-dss - Enable support for page allocation/deallocation via sbrk(2), in addition to - mmap(2). - ---disable-fill - Disable support for junk/zero filling of memory, quarantine, and redzones. - See the "opt.junk", "opt.zero", "opt.quarantine", and "opt.redzone" option - documentation for usage details. - ---disable-valgrind - Disable support for Valgrind. - ---disable-experimental - Disable support for the experimental API (*allocm()). - ---disable-zone-allocator - Disable zone allocator for Darwin. This means jemalloc won't be hooked as - the default allocator on OSX/iOS. - ---enable-utrace - Enable utrace(2)-based allocation tracing. This feature is not broadly - portable (FreeBSD has it, but Linux and OS X do not). - ---enable-xmalloc - Enable support for optional immediate termination due to out-of-memory - errors, as is commonly implemented by "xmalloc" wrapper function for malloc. - See the "opt.xmalloc" option documentation for usage details. - ---enable-lazy-lock - Enable code that wraps pthread_create() to detect when an application - switches from single-threaded to multi-threaded mode, so that it can avoid - mutex locking/unlocking operations while in single-threaded mode. In - practice, this feature usually has little impact on performance unless - thread-specific caching is disabled. - ---disable-tls - Disable thread-local storage (TLS), which allows for fast access to - thread-local variables via the __thread keyword. If TLS is available, - jemalloc uses it for several purposes. - ---with-xslroot= - Specify where to find DocBook XSL stylesheets when building the - documentation. - -The following environment variables (not a definitive list) impact configure's -behavior: - -CFLAGS="?" - Pass these flags to the compiler. You probably shouldn't define this unless - you know what you are doing. 
(Use EXTRA_CFLAGS instead.) - -EXTRA_CFLAGS="?" - Append these flags to CFLAGS. This makes it possible to add flags such as - -Werror, while allowing the configure script to determine what other flags - are appropriate for the specified configuration. - - The configure script specifically checks whether an optimization flag (-O*) - is specified in EXTRA_CFLAGS, and refrains from specifying an optimization - level if it finds that one has already been specified. - -CPPFLAGS="?" - Pass these flags to the C preprocessor. Note that CFLAGS is not passed to - 'cpp' when 'configure' is looking for include files, so you must use - CPPFLAGS instead if you need to help 'configure' find header files. - -LD_LIBRARY_PATH="?" - 'ld' uses this colon-separated list to find libraries. - -LDFLAGS="?" - Pass these flags when linking. - -PATH="?" - 'configure' uses this to find programs. - -=== Advanced compilation ======================================================= - -To build only parts of jemalloc, use the following targets: - - build_lib_shared - build_lib_static - build_lib - build_doc_html - build_doc_man - build_doc - -To install only parts of jemalloc, use the following targets: - - install_bin - install_include - install_lib_shared - install_lib_static - install_lib - install_doc_html - install_doc_man - install_doc - -To clean up build results to varying degrees, use the following make targets: - - clean - distclean - relclean - -=== Advanced installation ====================================================== - -Optionally, define make variables when invoking make, including (not -exclusively): - -INCLUDEDIR="?" - Use this as the installation prefix for header files. - -LIBDIR="?" - Use this as the installation prefix for libraries. - -MANDIR="?" - Use this as the installation prefix for man pages. - -DESTDIR="?" - Prepend DESTDIR to INCLUDEDIR, LIBDIR, DATADIR, and MANDIR. This is useful - when installing to a different path than was specified via --prefix. - -CC="?" - Use this to invoke the C compiler. - -CFLAGS="?" - Pass these flags to the compiler. - -CPPFLAGS="?" - Pass these flags to the C preprocessor. - -LDFLAGS="?" - Pass these flags when linking. - -PATH="?" - Use this to search for programs used during configuration and building. - -=== Development ================================================================ - -If you intend to make non-trivial changes to jemalloc, use the 'autogen.sh' -script rather than 'configure'. This re-generates 'configure', enables -configuration dependency rules, and enables re-generation of automatically -generated source files. - -The build system supports using an object directory separate from the source -tree. For example, you can create an 'obj' directory, and from within that -directory, issue configuration and build commands: - - autoconf - mkdir obj - cd obj - ../configure --enable-autogen - make - -=== Documentation ============================================================== - -The manual page is generated in both html and roff formats. Any web browser -can be used to view the html manual. The roff manual page can be formatted -prior to installation via the following command: - - nroff -man -t doc/jemalloc.3 diff --git a/lib/jemalloc-3.6.0/Makefile.in b/lib/jemalloc-3.6.0/Makefile.in deleted file mode 100644 index d6b7d6e..0000000 --- a/lib/jemalloc-3.6.0/Makefile.in +++ /dev/null @@ -1,438 +0,0 @@ -# Clear out all vpaths, then set just one (default vpath) for the main build -# directory. -vpath -vpath % . 
- -# Clear the default suffixes, so that built-in rules are not used. -.SUFFIXES : - -SHELL := /bin/sh - -CC := @CC@ - -# Configuration parameters. -DESTDIR = -BINDIR := $(DESTDIR)@BINDIR@ -INCLUDEDIR := $(DESTDIR)@INCLUDEDIR@ -LIBDIR := $(DESTDIR)@LIBDIR@ -DATADIR := $(DESTDIR)@DATADIR@ -MANDIR := $(DESTDIR)@MANDIR@ -srcroot := @srcroot@ -objroot := @objroot@ -abs_srcroot := @abs_srcroot@ -abs_objroot := @abs_objroot@ - -# Build parameters. -CPPFLAGS := @CPPFLAGS@ -I$(srcroot)include -I$(objroot)include -CFLAGS := @CFLAGS@ -LDFLAGS := @LDFLAGS@ -EXTRA_LDFLAGS := @EXTRA_LDFLAGS@ -LIBS := @LIBS@ -RPATH_EXTRA := @RPATH_EXTRA@ -SO := @so@ -IMPORTLIB := @importlib@ -O := @o@ -A := @a@ -EXE := @exe@ -LIBPREFIX := @libprefix@ -REV := @rev@ -install_suffix := @install_suffix@ -ABI := @abi@ -XSLTPROC := @XSLTPROC@ -AUTOCONF := @AUTOCONF@ -_RPATH = @RPATH@ -RPATH = $(if $(1),$(call _RPATH,$(1))) -cfghdrs_in := @cfghdrs_in@ -cfghdrs_out := @cfghdrs_out@ -cfgoutputs_in := @cfgoutputs_in@ -cfgoutputs_out := @cfgoutputs_out@ -enable_autogen := @enable_autogen@ -enable_code_coverage := @enable_code_coverage@ -enable_experimental := @enable_experimental@ -enable_zone_allocator := @enable_zone_allocator@ -DSO_LDFLAGS = @DSO_LDFLAGS@ -SOREV = @SOREV@ -PIC_CFLAGS = @PIC_CFLAGS@ -CTARGET = @CTARGET@ -LDTARGET = @LDTARGET@ -MKLIB = @MKLIB@ -AR = @AR@ -ARFLAGS = @ARFLAGS@ -CC_MM = @CC_MM@ - -ifeq (macho, $(ABI)) -TEST_LIBRARY_PATH := DYLD_FALLBACK_LIBRARY_PATH="$(objroot)lib" -else -ifeq (pecoff, $(ABI)) -TEST_LIBRARY_PATH := PATH="$(PATH):$(objroot)lib" -else -TEST_LIBRARY_PATH := -endif -endif - -LIBJEMALLOC := $(LIBPREFIX)jemalloc$(install_suffix) - -# Lists of files. -BINS := $(srcroot)bin/pprof $(objroot)bin/jemalloc.sh -C_HDRS := $(objroot)include/jemalloc/jemalloc$(install_suffix).h -C_SRCS := $(srcroot)src/jemalloc.c $(srcroot)src/arena.c \ - $(srcroot)src/atomic.c $(srcroot)src/base.c $(srcroot)src/bitmap.c \ - $(srcroot)src/chunk.c $(srcroot)src/chunk_dss.c \ - $(srcroot)src/chunk_mmap.c $(srcroot)src/ckh.c $(srcroot)src/ctl.c \ - $(srcroot)src/extent.c $(srcroot)src/hash.c $(srcroot)src/huge.c \ - $(srcroot)src/mb.c $(srcroot)src/mutex.c $(srcroot)src/prof.c \ - $(srcroot)src/quarantine.c $(srcroot)src/rtree.c $(srcroot)src/stats.c \ - $(srcroot)src/tcache.c $(srcroot)src/util.c $(srcroot)src/tsd.c -ifeq ($(enable_zone_allocator), 1) -C_SRCS += $(srcroot)src/zone.c -endif -ifeq ($(IMPORTLIB),$(SO)) -STATIC_LIBS := $(objroot)lib/$(LIBJEMALLOC).$(A) -endif -ifdef PIC_CFLAGS -STATIC_LIBS += $(objroot)lib/$(LIBJEMALLOC)_pic.$(A) -else -STATIC_LIBS += $(objroot)lib/$(LIBJEMALLOC)_s.$(A) -endif -DSOS := $(objroot)lib/$(LIBJEMALLOC).$(SOREV) -ifneq ($(SOREV),$(SO)) -DSOS += $(objroot)lib/$(LIBJEMALLOC).$(SO) -endif -MAN3 := $(objroot)doc/jemalloc$(install_suffix).3 -DOCS_XML := $(objroot)doc/jemalloc$(install_suffix).xml -DOCS_HTML := $(DOCS_XML:$(objroot)%.xml=$(srcroot)%.html) -DOCS_MAN3 := $(DOCS_XML:$(objroot)%.xml=$(srcroot)%.3) -DOCS := $(DOCS_HTML) $(DOCS_MAN3) -C_TESTLIB_SRCS := $(srcroot)test/src/math.c $(srcroot)test/src/mtx.c \ - $(srcroot)test/src/SFMT.c $(srcroot)test/src/test.c \ - $(srcroot)test/src/thd.c -C_UTIL_INTEGRATION_SRCS := $(srcroot)src/util.c -TESTS_UNIT := $(srcroot)test/unit/bitmap.c \ - $(srcroot)test/unit/ckh.c \ - $(srcroot)test/unit/hash.c \ - $(srcroot)test/unit/junk.c \ - $(srcroot)test/unit/mallctl.c \ - $(srcroot)test/unit/math.c \ - $(srcroot)test/unit/mq.c \ - $(srcroot)test/unit/mtx.c \ - $(srcroot)test/unit/prof_accum.c \ - $(srcroot)test/unit/prof_gdump.c \ - 
$(srcroot)test/unit/prof_idump.c \ - $(srcroot)test/unit/ql.c \ - $(srcroot)test/unit/qr.c \ - $(srcroot)test/unit/quarantine.c \ - $(srcroot)test/unit/rb.c \ - $(srcroot)test/unit/rtree.c \ - $(srcroot)test/unit/SFMT.c \ - $(srcroot)test/unit/stats.c \ - $(srcroot)test/unit/tsd.c \ - $(srcroot)test/unit/util.c \ - $(srcroot)test/unit/zero.c -TESTS_UNIT_AUX := $(srcroot)test/unit/prof_accum_a.c \ - $(srcroot)test/unit/prof_accum_b.c -TESTS_INTEGRATION := $(srcroot)test/integration/aligned_alloc.c \ - $(srcroot)test/integration/allocated.c \ - $(srcroot)test/integration/mallocx.c \ - $(srcroot)test/integration/mremap.c \ - $(srcroot)test/integration/posix_memalign.c \ - $(srcroot)test/integration/rallocx.c \ - $(srcroot)test/integration/thread_arena.c \ - $(srcroot)test/integration/thread_tcache_enabled.c \ - $(srcroot)test/integration/xallocx.c -ifeq ($(enable_experimental), 1) -TESTS_INTEGRATION += $(srcroot)test/integration/allocm.c \ - $(srcroot)test/integration/MALLOCX_ARENA.c \ - $(srcroot)test/integration/rallocm.c -endif -TESTS_STRESS := -TESTS := $(TESTS_UNIT) $(TESTS_INTEGRATION) $(TESTS_STRESS) - -C_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.$(O)) -C_PIC_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.pic.$(O)) -C_JET_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.jet.$(O)) -C_TESTLIB_UNIT_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.unit.$(O)) -C_TESTLIB_INTEGRATION_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.integration.$(O)) -C_UTIL_INTEGRATION_OBJS := $(C_UTIL_INTEGRATION_SRCS:$(srcroot)%.c=$(objroot)%.integration.$(O)) -C_TESTLIB_STRESS_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.stress.$(O)) -C_TESTLIB_OBJS := $(C_TESTLIB_UNIT_OBJS) $(C_TESTLIB_INTEGRATION_OBJS) $(C_UTIL_INTEGRATION_OBJS) $(C_TESTLIB_STRESS_OBJS) - -TESTS_UNIT_OBJS := $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%.$(O)) -TESTS_UNIT_AUX_OBJS := $(TESTS_UNIT_AUX:$(srcroot)%.c=$(objroot)%.$(O)) -TESTS_INTEGRATION_OBJS := $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%.$(O)) -TESTS_STRESS_OBJS := $(TESTS_STRESS:$(srcroot)%.c=$(objroot)%.$(O)) -TESTS_OBJS := $(TESTS_UNIT_OBJS) $(TESTS_UNIT_AUX_OBJS) $(TESTS_INTEGRATION_OBJS) $(TESTS_STRESS_OBJS) - -.PHONY: all dist build_doc_html build_doc_man build_doc -.PHONY: install_bin install_include install_lib -.PHONY: install_doc_html install_doc_man install_doc install -.PHONY: tests check clean distclean relclean - -.SECONDARY : $(TESTS_OBJS) - -# Default target. -all: build_lib - -dist: build_doc - -$(srcroot)doc/%.html : $(objroot)doc/%.xml $(srcroot)doc/stylesheet.xsl $(objroot)doc/html.xsl - $(XSLTPROC) -o $@ $(objroot)doc/html.xsl $< - -$(srcroot)doc/%.3 : $(objroot)doc/%.xml $(srcroot)doc/stylesheet.xsl $(objroot)doc/manpages.xsl - $(XSLTPROC) -o $@ $(objroot)doc/manpages.xsl $< - -build_doc_html: $(DOCS_HTML) -build_doc_man: $(DOCS_MAN3) -build_doc: $(DOCS) - -# -# Include generated dependency files. 
-# -ifdef CC_MM --include $(C_OBJS:%.$(O)=%.d) --include $(C_PIC_OBJS:%.$(O)=%.d) --include $(C_JET_OBJS:%.$(O)=%.d) --include $(C_TESTLIB_OBJS:%.$(O)=%.d) --include $(TESTS_OBJS:%.$(O)=%.d) -endif - -$(C_OBJS): $(objroot)src/%.$(O): $(srcroot)src/%.c -$(C_PIC_OBJS): $(objroot)src/%.pic.$(O): $(srcroot)src/%.c -$(C_PIC_OBJS): CFLAGS += $(PIC_CFLAGS) -$(C_JET_OBJS): $(objroot)src/%.jet.$(O): $(srcroot)src/%.c -$(C_JET_OBJS): CFLAGS += -DJEMALLOC_JET -$(C_TESTLIB_UNIT_OBJS): $(objroot)test/src/%.unit.$(O): $(srcroot)test/src/%.c -$(C_TESTLIB_UNIT_OBJS): CPPFLAGS += -DJEMALLOC_UNIT_TEST -$(C_TESTLIB_INTEGRATION_OBJS): $(objroot)test/src/%.integration.$(O): $(srcroot)test/src/%.c -$(C_TESTLIB_INTEGRATION_OBJS): CPPFLAGS += -DJEMALLOC_INTEGRATION_TEST -$(C_UTIL_INTEGRATION_OBJS): $(objroot)src/%.integration.$(O): $(srcroot)src/%.c -$(C_TESTLIB_STRESS_OBJS): $(objroot)test/src/%.stress.$(O): $(srcroot)test/src/%.c -$(C_TESTLIB_STRESS_OBJS): CPPFLAGS += -DJEMALLOC_STRESS_TEST -DJEMALLOC_STRESS_TESTLIB -$(C_TESTLIB_OBJS): CPPFLAGS += -I$(srcroot)test/include -I$(objroot)test/include -$(TESTS_UNIT_OBJS): CPPFLAGS += -DJEMALLOC_UNIT_TEST -$(TESTS_UNIT_AUX_OBJS): CPPFLAGS += -DJEMALLOC_UNIT_TEST -define make-unit-link-dep -$(1): TESTS_UNIT_LINK_OBJS += $(2) -$(1): $(2) -endef -$(foreach test, $(TESTS_UNIT:$(srcroot)test/unit/%.c=$(objroot)test/unit/%$(EXE)), $(eval $(call make-unit-link-dep,$(test),$(filter $(test:%=%_a.$(O)) $(test:%=%_b.$(O)),$(TESTS_UNIT_AUX_OBJS))))) -$(TESTS_INTEGRATION_OBJS): CPPFLAGS += -DJEMALLOC_INTEGRATION_TEST -$(TESTS_STRESS_OBJS): CPPFLAGS += -DJEMALLOC_STRESS_TEST -$(TESTS_OBJS): $(objroot)test/%.$(O): $(srcroot)test/%.c -$(TESTS_OBJS): CPPFLAGS += -I$(srcroot)test/include -I$(objroot)test/include -ifneq ($(IMPORTLIB),$(SO)) -$(C_OBJS): CPPFLAGS += -DDLLEXPORT -endif - -ifndef CC_MM -# Dependencies. -HEADER_DIRS = $(srcroot)include/jemalloc/internal \ - $(objroot)include/jemalloc $(objroot)include/jemalloc/internal -HEADERS = $(wildcard $(foreach dir,$(HEADER_DIRS),$(dir)/*.h)) -$(C_OBJS) $(C_PIC_OBJS) $(C_JET_OBJS) $(C_TESTLIB_OBJS) $(TESTS_OBJS): $(HEADERS) -$(TESTS_OBJS): $(objroot)test/unit/jemalloc_test.h -endif - -$(C_OBJS) $(C_PIC_OBJS) $(C_JET_OBJS) $(C_TESTLIB_OBJS) $(TESTS_OBJS): %.$(O): - @mkdir -p $(@D) - $(CC) $(CFLAGS) -c $(CPPFLAGS) $(CTARGET) $< -ifdef CC_MM - @$(CC) -MM $(CPPFLAGS) -MT $@ -o $(@:%.$(O)=%.d) $< -endif - -ifneq ($(SOREV),$(SO)) -%.$(SO) : %.$(SOREV) - @mkdir -p $(@D) - ln -sf $( $(srcroot)config.stamp.in - -$(objroot)config.stamp : $(cfgoutputs_in) $(cfghdrs_in) $(srcroot)configure - ./$(objroot)config.status - @touch $@ - -# There must be some action in order for make to re-read Makefile when it is -# out of date. -$(cfgoutputs_out) $(cfghdrs_out) : $(objroot)config.stamp - @true -endif diff --git a/lib/jemalloc-3.6.0/README b/lib/jemalloc-3.6.0/README deleted file mode 100644 index 9b268f4..0000000 --- a/lib/jemalloc-3.6.0/README +++ /dev/null @@ -1,20 +0,0 @@ -jemalloc is a general purpose malloc(3) implementation that emphasizes -fragmentation avoidance and scalable concurrency support. jemalloc first came -into use as the FreeBSD libc allocator in 2005, and since then it has found its -way into numerous applications that rely on its predictable behavior. In 2010 -jemalloc development efforts broadened to include developer support features -such as heap profiling, Valgrind integration, and extensive monitoring/tuning -hooks. 
Modern jemalloc releases continue to be integrated back into FreeBSD, -and therefore versatility remains critical. Ongoing development efforts trend -toward making jemalloc among the best allocators for a broad range of demanding -applications, and eliminating/mitigating weaknesses that have practical -repercussions for real world applications. - -The COPYING file contains copyright and licensing information. - -The INSTALL file contains information on how to configure, build, and install -jemalloc. - -The ChangeLog file contains a brief summary of changes for each release. - -URL: http://www.canonware.com/jemalloc/ diff --git a/lib/jemalloc-3.6.0/autogen.sh b/lib/jemalloc-3.6.0/autogen.sh deleted file mode 100755 index 75f32da..0000000 --- a/lib/jemalloc-3.6.0/autogen.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/sh - -for i in autoconf; do - echo "$i" - $i - if [ $? -ne 0 ]; then - echo "Error $? in $i" - exit 1 - fi -done - -echo "./configure --enable-autogen $@" -./configure --enable-autogen $@ -if [ $? -ne 0 ]; then - echo "Error $? in ./configure" - exit 1 -fi diff --git a/lib/jemalloc-3.6.0/bin/jemalloc.sh.in b/lib/jemalloc-3.6.0/bin/jemalloc.sh.in deleted file mode 100644 index cdf3673..0000000 --- a/lib/jemalloc-3.6.0/bin/jemalloc.sh.in +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/sh - -prefix=@prefix@ -exec_prefix=@exec_prefix@ -libdir=@libdir@ - -@LD_PRELOAD_VAR@=${libdir}/libjemalloc.@SOREV@ -export @LD_PRELOAD_VAR@ -exec "$@" diff --git a/lib/jemalloc-3.6.0/bin/pprof b/lib/jemalloc-3.6.0/bin/pprof deleted file mode 100755 index a309943..0000000 --- a/lib/jemalloc-3.6.0/bin/pprof +++ /dev/null @@ -1,5372 +0,0 @@ -#! /usr/bin/env perl - -# Copyright (c) 1998-2007, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -# --- -# Program for printing the profile generated by common/profiler.cc, -# or by the heap profiler (common/debugallocation.cc) -# -# The profile contains a sequence of entries of the form: -# -# This program parses the profile, and generates user-readable -# output. 
-# -# Examples: -# -# % tools/pprof "program" "profile" -# Enters "interactive" mode -# -# % tools/pprof --text "program" "profile" -# Generates one line per procedure -# -# % tools/pprof --gv "program" "profile" -# Generates annotated call-graph and displays via "gv" -# -# % tools/pprof --gv --focus=Mutex "program" "profile" -# Restrict to code paths that involve an entry that matches "Mutex" -# -# % tools/pprof --gv --focus=Mutex --ignore=string "program" "profile" -# Restrict to code paths that involve an entry that matches "Mutex" -# and does not match "string" -# -# % tools/pprof --list=IBF_CheckDocid "program" "profile" -# Generates disassembly listing of all routines with at least one -# sample that match the --list= pattern. The listing is -# annotated with the flat and cumulative sample counts at each line. -# -# % tools/pprof --disasm=IBF_CheckDocid "program" "profile" -# Generates disassembly listing of all routines with at least one -# sample that match the --disasm= pattern. The listing is -# annotated with the flat and cumulative sample counts at each PC value. -# -# TODO: Use color to indicate files? - -use strict; -use warnings; -use Getopt::Long; - -my $PPROF_VERSION = "2.0"; - -# These are the object tools we use which can come from a -# user-specified location using --tools, from the PPROF_TOOLS -# environment variable, or from the environment. -my %obj_tool_map = ( - "objdump" => "objdump", - "nm" => "nm", - "addr2line" => "addr2line", - "c++filt" => "c++filt", - ## ConfigureObjTools may add architecture-specific entries: - #"nm_pdb" => "nm-pdb", # for reading windows (PDB-format) executables - #"addr2line_pdb" => "addr2line-pdb", # ditto - #"otool" => "otool", # equivalent of objdump on OS X -); -# NOTE: these are lists, so you can put in commandline flags if you want. -my @DOT = ("dot"); # leave non-absolute, since it may be in /usr/local -my @GV = ("gv"); -my @EVINCE = ("evince"); # could also be xpdf or perhaps acroread -my @KCACHEGRIND = ("kcachegrind"); -my @PS2PDF = ("ps2pdf"); -# These are used for dynamic profiles -my @URL_FETCHER = ("curl", "-s"); - -# These are the web pages that servers need to support for dynamic profiles -my $HEAP_PAGE = "/pprof/heap"; -my $PROFILE_PAGE = "/pprof/profile"; # must support cgi-param "?seconds=#" -my $PMUPROFILE_PAGE = "/pprof/pmuprofile(?:\\?.*)?"; # must support cgi-param - # ?seconds=#&event=x&period=n -my $GROWTH_PAGE = "/pprof/growth"; -my $CONTENTION_PAGE = "/pprof/contention"; -my $WALL_PAGE = "/pprof/wall(?:\\?.*)?"; # accepts options like namefilter -my $FILTEREDPROFILE_PAGE = "/pprof/filteredprofile(?:\\?.*)?"; -my $CENSUSPROFILE_PAGE = "/pprof/censusprofile(?:\\?.*)?"; # must support cgi-param - # "?seconds=#", - # "?tags_regexp=#" and - # "?type=#". -my $SYMBOL_PAGE = "/pprof/symbol"; # must support symbol lookup via POST -my $PROGRAM_NAME_PAGE = "/pprof/cmdline"; - -# These are the web pages that can be named on the command line. -# All the alternatives must begin with /. -my $PROFILES = "($HEAP_PAGE|$PROFILE_PAGE|$PMUPROFILE_PAGE|" . - "$GROWTH_PAGE|$CONTENTION_PAGE|$WALL_PAGE|" . - "$FILTEREDPROFILE_PAGE|$CENSUSPROFILE_PAGE)"; - -# default binary name -my $UNKNOWN_BINARY = "(unknown)"; - -# There is a pervasive dependency on the length (in hex characters, -# i.e., nibbles) of an address, distinguishing between 32-bit and -# 64-bit profiles. To err on the safe size, default to 64-bit here: -my $address_length = 16; - -my $dev_null = "/dev/null"; -if (! 
-e $dev_null && $^O =~ /MSWin/) { # $^O is the OS perl was built for - $dev_null = "nul"; -} - -# A list of paths to search for shared object files -my @prefix_list = (); - -# Special routine name that should not have any symbols. -# Used as separator to parse "addr2line -i" output. -my $sep_symbol = '_fini'; -my $sep_address = undef; - -##### Argument parsing ##### - -sub usage_string { - return < - is a space separated list of profile names. -pprof [options] - is a list of profile files where each file contains - the necessary symbol mappings as well as profile data (likely generated - with --raw). -pprof [options] - is a remote form. Symbols are obtained from host:port$SYMBOL_PAGE - - Each name can be: - /path/to/profile - a path to a profile file - host:port[/] - a location of a service to get profile from - - The / can be $HEAP_PAGE, $PROFILE_PAGE, /pprof/pmuprofile, - $GROWTH_PAGE, $CONTENTION_PAGE, /pprof/wall, - $CENSUSPROFILE_PAGE, or /pprof/filteredprofile. - For instance: - pprof http://myserver.com:80$HEAP_PAGE - If / is omitted, the service defaults to $PROFILE_PAGE (cpu profiling). -pprof --symbols - Maps addresses to symbol names. In this mode, stdin should be a - list of library mappings, in the same format as is found in the heap- - and cpu-profile files (this loosely matches that of /proc/self/maps - on linux), followed by a list of hex addresses to map, one per line. - - For more help with querying remote servers, including how to add the - necessary server-side support code, see this filename (or one like it): - - /usr/doc/gperftools-$PPROF_VERSION/pprof_remote_servers.html - -Options: - --cum Sort by cumulative data - --base= Subtract from before display - --interactive Run in interactive mode (interactive "help" gives help) [default] - --seconds= Length of time for dynamic profiles [default=30 secs] - --add_lib= Read additional symbols and line info from the given library - --lib_prefix= Comma separated list of library path prefixes - -Reporting Granularity: - --addresses Report at address level - --lines Report at source line level - --functions Report at function level [default] - --files Report at source file level - -Output type: - --text Generate text report - --callgrind Generate callgrind format to stdout - --gv Generate Postscript and display - --evince Generate PDF and display - --web Generate SVG and display - --list= Generate source listing of matching routines - --disasm= Generate disassembly of matching routines - --symbols Print demangled symbol names found at given addresses - --dot Generate DOT file to stdout - --ps Generate Postcript to stdout - --pdf Generate PDF to stdout - --svg Generate SVG to stdout - --gif Generate GIF to stdout - --raw Generate symbolized pprof data (useful with remote fetch) - -Heap-Profile Options: - --inuse_space Display in-use (mega)bytes [default] - --inuse_objects Display in-use objects - --alloc_space Display allocated (mega)bytes - --alloc_objects Display allocated objects - --show_bytes Display space in bytes - --drop_negative Ignore negative differences - -Contention-profile options: - --total_delay Display total delay at each region [default] - --contentions Display number of delays at each region - --mean_delay Display mean delay at each region - -Call-graph Options: - --nodecount= Show at most so many nodes [default=80] - --nodefraction= Hide nodes below *total [default=.005] - --edgefraction= Hide edges below *total [default=.001] - --maxdegree= Max incoming/outgoing edges per node [default=8] - --focus= Focus on 
nodes matching - --ignore= Ignore nodes matching - --scale= Set GV scaling [default=0] - --heapcheck Make nodes with non-0 object counts - (i.e. direct leak generators) more visible - -Miscellaneous: - --tools=[,...] \$PATH for object tool pathnames - --test Run unit tests - --help This message - --version Version information - -Environment Variables: - PPROF_TMPDIR Profiles directory. Defaults to \$HOME/pprof - PPROF_TOOLS Prefix for object tools pathnames - -Examples: - -pprof /bin/ls ls.prof - Enters "interactive" mode -pprof --text /bin/ls ls.prof - Outputs one line per procedure -pprof --web /bin/ls ls.prof - Displays annotated call-graph in web browser -pprof --gv /bin/ls ls.prof - Displays annotated call-graph via 'gv' -pprof --gv --focus=Mutex /bin/ls ls.prof - Restricts to code paths including a .*Mutex.* entry -pprof --gv --focus=Mutex --ignore=string /bin/ls ls.prof - Code paths including Mutex but not string -pprof --list=getdir /bin/ls ls.prof - (Per-line) annotated source listing for getdir() -pprof --disasm=getdir /bin/ls ls.prof - (Per-PC) annotated disassembly for getdir() - -pprof http://localhost:1234/ - Enters "interactive" mode -pprof --text localhost:1234 - Outputs one line per procedure for localhost:1234 -pprof --raw localhost:1234 > ./local.raw -pprof --text ./local.raw - Fetches a remote profile for later analysis and then - analyzes it in text mode. -EOF -} - -sub version_string { - return < \$main::opt_help, - "version!" => \$main::opt_version, - "cum!" => \$main::opt_cum, - "base=s" => \$main::opt_base, - "seconds=i" => \$main::opt_seconds, - "add_lib=s" => \$main::opt_lib, - "lib_prefix=s" => \$main::opt_lib_prefix, - "functions!" => \$main::opt_functions, - "lines!" => \$main::opt_lines, - "addresses!" => \$main::opt_addresses, - "files!" => \$main::opt_files, - "text!" => \$main::opt_text, - "callgrind!" => \$main::opt_callgrind, - "list=s" => \$main::opt_list, - "disasm=s" => \$main::opt_disasm, - "symbols!" => \$main::opt_symbols, - "gv!" => \$main::opt_gv, - "evince!" => \$main::opt_evince, - "web!" => \$main::opt_web, - "dot!" => \$main::opt_dot, - "ps!" => \$main::opt_ps, - "pdf!" => \$main::opt_pdf, - "svg!" => \$main::opt_svg, - "gif!" => \$main::opt_gif, - "raw!" => \$main::opt_raw, - "interactive!" => \$main::opt_interactive, - "nodecount=i" => \$main::opt_nodecount, - "nodefraction=f" => \$main::opt_nodefraction, - "edgefraction=f" => \$main::opt_edgefraction, - "maxdegree=i" => \$main::opt_maxdegree, - "focus=s" => \$main::opt_focus, - "ignore=s" => \$main::opt_ignore, - "scale=i" => \$main::opt_scale, - "heapcheck" => \$main::opt_heapcheck, - "inuse_space!" => \$main::opt_inuse_space, - "inuse_objects!" => \$main::opt_inuse_objects, - "alloc_space!" => \$main::opt_alloc_space, - "alloc_objects!" => \$main::opt_alloc_objects, - "show_bytes!" => \$main::opt_show_bytes, - "drop_negative!" => \$main::opt_drop_negative, - "total_delay!" => \$main::opt_total_delay, - "contentions!" => \$main::opt_contentions, - "mean_delay!" => \$main::opt_mean_delay, - "tools=s" => \$main::opt_tools, - "test!" => \$main::opt_test, - "debug!" 
=> \$main::opt_debug, - # Undocumented flags used only by unittests: - "test_stride=i" => \$main::opt_test_stride, - ) || usage("Invalid option(s)"); - - # Deal with the standard --help and --version - if ($main::opt_help) { - print usage_string(); - exit(0); - } - - if ($main::opt_version) { - print version_string(); - exit(0); - } - - # Disassembly/listing/symbols mode requires address-level info - if ($main::opt_disasm || $main::opt_list || $main::opt_symbols) { - $main::opt_functions = 0; - $main::opt_lines = 0; - $main::opt_addresses = 1; - $main::opt_files = 0; - } - - # Check heap-profiling flags - if ($main::opt_inuse_space + - $main::opt_inuse_objects + - $main::opt_alloc_space + - $main::opt_alloc_objects > 1) { - usage("Specify at most on of --inuse/--alloc options"); - } - - # Check output granularities - my $grains = - $main::opt_functions + - $main::opt_lines + - $main::opt_addresses + - $main::opt_files + - 0; - if ($grains > 1) { - usage("Only specify one output granularity option"); - } - if ($grains == 0) { - $main::opt_functions = 1; - } - - # Check output modes - my $modes = - $main::opt_text + - $main::opt_callgrind + - ($main::opt_list eq '' ? 0 : 1) + - ($main::opt_disasm eq '' ? 0 : 1) + - ($main::opt_symbols == 0 ? 0 : 1) + - $main::opt_gv + - $main::opt_evince + - $main::opt_web + - $main::opt_dot + - $main::opt_ps + - $main::opt_pdf + - $main::opt_svg + - $main::opt_gif + - $main::opt_raw + - $main::opt_interactive + - 0; - if ($modes > 1) { - usage("Only specify one output mode"); - } - if ($modes == 0) { - if (-t STDOUT) { # If STDOUT is a tty, activate interactive mode - $main::opt_interactive = 1; - } else { - $main::opt_text = 1; - } - } - - if ($main::opt_test) { - RunUnitTests(); - # Should not return - exit(1); - } - - # Binary name and profile arguments list - $main::prog = ""; - @main::pfile_args = (); - - # Remote profiling without a binary (using $SYMBOL_PAGE instead) - if (@ARGV > 0) { - if (IsProfileURL($ARGV[0])) { - $main::use_symbol_page = 1; - } elsif (IsSymbolizedProfileFile($ARGV[0])) { - $main::use_symbolized_profile = 1; - $main::prog = $UNKNOWN_BINARY; # will be set later from the profile file - } - } - - if ($main::use_symbol_page || $main::use_symbolized_profile) { - # We don't need a binary! - my %disabled = ('--lines' => $main::opt_lines, - '--disasm' => $main::opt_disasm); - for my $option (keys %disabled) { - usage("$option cannot be used without a binary") if $disabled{$option}; - } - # Set $main::prog later... - scalar(@ARGV) || usage("Did not specify profile file"); - } elsif ($main::opt_symbols) { - # --symbols needs a binary-name (to run nm on, etc) but not profiles - $main::prog = shift(@ARGV) || usage("Did not specify program"); - } else { - $main::prog = shift(@ARGV) || usage("Did not specify program"); - scalar(@ARGV) || usage("Did not specify profile file"); - } - - # Parse profile file/location arguments - foreach my $farg (@ARGV) { - if ($farg =~ m/(.*)\@([0-9]+)(|\/.*)$/ ) { - my $machine = $1; - my $num_machines = $2; - my $path = $3; - for (my $i = 0; $i < $num_machines; $i++) { - unshift(@main::pfile_args, "$i.$machine$path"); - } - } else { - unshift(@main::pfile_args, $farg); - } - } - - if ($main::use_symbol_page) { - unless (IsProfileURL($main::pfile_args[0])) { - error("The first profile should be a remote form to use $SYMBOL_PAGE\n"); - } - CheckSymbolPage(); - $main::prog = FetchProgramName(); - } elsif (!$main::use_symbolized_profile) { # may not need objtools! 
- ConfigureObjTools($main::prog) - } - - # Break the opt_lib_prefix into the prefix_list array - @prefix_list = split (',', $main::opt_lib_prefix); - - # Remove trailing / from the prefixes, in the list to prevent - # searching things like /my/path//lib/mylib.so - foreach (@prefix_list) { - s|/+$||; - } -} - -sub Main() { - Init(); - $main::collected_profile = undef; - @main::profile_files = (); - $main::op_time = time(); - - # Printing symbols is special and requires a lot less info that most. - if ($main::opt_symbols) { - PrintSymbols(*STDIN); # Get /proc/maps and symbols output from stdin - return; - } - - # Fetch all profile data - FetchDynamicProfiles(); - - # this will hold symbols that we read from the profile files - my $symbol_map = {}; - - # Read one profile, pick the last item on the list - my $data = ReadProfile($main::prog, pop(@main::profile_files)); - my $profile = $data->{profile}; - my $pcs = $data->{pcs}; - my $libs = $data->{libs}; # Info about main program and shared libraries - $symbol_map = MergeSymbols($symbol_map, $data->{symbols}); - - # Add additional profiles, if available. - if (scalar(@main::profile_files) > 0) { - foreach my $pname (@main::profile_files) { - my $data2 = ReadProfile($main::prog, $pname); - $profile = AddProfile($profile, $data2->{profile}); - $pcs = AddPcs($pcs, $data2->{pcs}); - $symbol_map = MergeSymbols($symbol_map, $data2->{symbols}); - } - } - - # Subtract base from profile, if specified - if ($main::opt_base ne '') { - my $base = ReadProfile($main::prog, $main::opt_base); - $profile = SubtractProfile($profile, $base->{profile}); - $pcs = AddPcs($pcs, $base->{pcs}); - $symbol_map = MergeSymbols($symbol_map, $base->{symbols}); - } - - # Get total data in profile - my $total = TotalProfile($profile); - - # Collect symbols - my $symbols; - if ($main::use_symbolized_profile) { - $symbols = FetchSymbols($pcs, $symbol_map); - } elsif ($main::use_symbol_page) { - $symbols = FetchSymbols($pcs); - } else { - # TODO(csilvers): $libs uses the /proc/self/maps data from profile1, - # which may differ from the data from subsequent profiles, especially - # if they were run on different machines. Use appropriate libs for - # each pc somehow. - $symbols = ExtractSymbols($libs, $pcs); - } - - # Remove uniniteresting stack items - $profile = RemoveUninterestingFrames($symbols, $profile); - - # Focus? - if ($main::opt_focus ne '') { - $profile = FocusProfile($symbols, $profile, $main::opt_focus); - } - - # Ignore? - if ($main::opt_ignore ne '') { - $profile = IgnoreProfile($symbols, $profile, $main::opt_ignore); - } - - my $calls = ExtractCalls($symbols, $profile); - - # Reduce profiles to required output granularity, and also clean - # each stack trace so a given entry exists at most once. 
- my $reduced = ReduceProfile($symbols, $profile); - - # Get derived profiles - my $flat = FlatProfile($reduced); - my $cumulative = CumulativeProfile($reduced); - - # Print - if (!$main::opt_interactive) { - if ($main::opt_disasm) { - PrintDisassembly($libs, $flat, $cumulative, $main::opt_disasm); - } elsif ($main::opt_list) { - PrintListing($total, $libs, $flat, $cumulative, $main::opt_list, 0); - } elsif ($main::opt_text) { - # Make sure the output is empty when have nothing to report - # (only matters when --heapcheck is given but we must be - # compatible with old branches that did not pass --heapcheck always): - if ($total != 0) { - printf("Total: %s %s\n", Unparse($total), Units()); - } - PrintText($symbols, $flat, $cumulative, -1); - } elsif ($main::opt_raw) { - PrintSymbolizedProfile($symbols, $profile, $main::prog); - } elsif ($main::opt_callgrind) { - PrintCallgrind($calls); - } else { - if (PrintDot($main::prog, $symbols, $profile, $flat, $cumulative, $total)) { - if ($main::opt_gv) { - RunGV(TempName($main::next_tmpfile, "ps"), ""); - } elsif ($main::opt_evince) { - RunEvince(TempName($main::next_tmpfile, "pdf"), ""); - } elsif ($main::opt_web) { - my $tmp = TempName($main::next_tmpfile, "svg"); - RunWeb($tmp); - # The command we run might hand the file name off - # to an already running browser instance and then exit. - # Normally, we'd remove $tmp on exit (right now), - # but fork a child to remove $tmp a little later, so that the - # browser has time to load it first. - delete $main::tempnames{$tmp}; - if (fork() == 0) { - sleep 5; - unlink($tmp); - exit(0); - } - } - } else { - cleanup(); - exit(1); - } - } - } else { - InteractiveMode($profile, $symbols, $libs, $total); - } - - cleanup(); - exit(0); -} - -##### Entry Point ##### - -Main(); - -# Temporary code to detect if we're running on a Goobuntu system. -# These systems don't have the right stuff installed for the special -# Readline libraries to work, so as a temporary workaround, we default -# to using the normal stdio code, rather than the fancier readline-based -# code -sub ReadlineMightFail { - if (-e '/lib/libtermcap.so.2') { - return 0; # libtermcap exists, so readline should be okay - } else { - return 1; - } -} - -sub RunGV { - my $fname = shift; - my $bg = shift; # "" or " &" if we should run in background - if (!system(ShellEscape(@GV, "--version") . " >$dev_null 2>&1")) { - # Options using double dash are supported by this gv version. - # Also, turn on noantialias to better handle bug in gv for - # postscript files with large dimensions. - # TODO: Maybe we should not pass the --noantialias flag - # if the gv version is known to work properly without the flag. - system(ShellEscape(@GV, "--scale=$main::opt_scale", "--noantialias", $fname) - . $bg); - } else { - # Old gv version - only supports options that use single dash. - print STDERR ShellEscape(@GV, "-scale", $main::opt_scale) . "\n"; - system(ShellEscape(@GV, "-scale", "$main::opt_scale", $fname) . $bg); - } -} - -sub RunEvince { - my $fname = shift; - my $bg = shift; # "" or " &" if we should run in background - system(ShellEscape(@EVINCE, $fname) . $bg); -} - -sub RunWeb { - my $fname = shift; - print STDERR "Loading web page file:///$fname\n"; - - if (`uname` =~ /Darwin/) { - # OS X: open will use standard preference for SVG files. - system("/usr/bin/open", $fname); - return; - } - - # Some kind of Unix; try generic symlinks, then specific browsers. - # (Stop once we find one.) - # Works best if the browser is already running. 
- my @alt = ( - "/etc/alternatives/gnome-www-browser", - "/etc/alternatives/x-www-browser", - "google-chrome", - "firefox", - ); - foreach my $b (@alt) { - if (system($b, $fname) == 0) { - return; - } - } - - print STDERR "Could not load web browser.\n"; -} - -sub RunKcachegrind { - my $fname = shift; - my $bg = shift; # "" or " &" if we should run in background - print STDERR "Starting '@KCACHEGRIND " . $fname . $bg . "'\n"; - system(ShellEscape(@KCACHEGRIND, $fname) . $bg); -} - - -##### Interactive helper routines ##### - -sub InteractiveMode { - $| = 1; # Make output unbuffered for interactive mode - my ($orig_profile, $symbols, $libs, $total) = @_; - - print STDERR "Welcome to pprof! For help, type 'help'.\n"; - - # Use ReadLine if it's installed and input comes from a console. - if ( -t STDIN && - !ReadlineMightFail() && - defined(eval {require Term::ReadLine}) ) { - my $term = new Term::ReadLine 'pprof'; - while ( defined ($_ = $term->readline('(pprof) '))) { - $term->addhistory($_) if /\S/; - if (!InteractiveCommand($orig_profile, $symbols, $libs, $total, $_)) { - last; # exit when we get an interactive command to quit - } - } - } else { # don't have readline - while (1) { - print STDERR "(pprof) "; - $_ = ; - last if ! defined $_ ; - s/\r//g; # turn windows-looking lines into unix-looking lines - - # Save some flags that might be reset by InteractiveCommand() - my $save_opt_lines = $main::opt_lines; - - if (!InteractiveCommand($orig_profile, $symbols, $libs, $total, $_)) { - last; # exit when we get an interactive command to quit - } - - # Restore flags - $main::opt_lines = $save_opt_lines; - } - } -} - -# Takes two args: orig profile, and command to run. -# Returns 1 if we should keep going, or 0 if we were asked to quit -sub InteractiveCommand { - my($orig_profile, $symbols, $libs, $total, $command) = @_; - $_ = $command; # just to make future m//'s easier - if (!defined($_)) { - print STDERR "\n"; - return 0; - } - if (m/^\s*quit/) { - return 0; - } - if (m/^\s*help/) { - InteractiveHelpMessage(); - return 1; - } - # Clear all the mode options -- mode is controlled by "$command" - $main::opt_text = 0; - $main::opt_callgrind = 0; - $main::opt_disasm = 0; - $main::opt_list = 0; - $main::opt_gv = 0; - $main::opt_evince = 0; - $main::opt_cum = 0; - - if (m/^\s*(text|top)(\d*)\s*(.*)/) { - $main::opt_text = 1; - - my $line_limit = ($2 ne "") ? 
int($2) : 10; - - my $routine; - my $ignore; - ($routine, $ignore) = ParseInteractiveArgs($3); - - my $profile = ProcessProfile($total, $orig_profile, $symbols, "", $ignore); - my $reduced = ReduceProfile($symbols, $profile); - - # Get derived profiles - my $flat = FlatProfile($reduced); - my $cumulative = CumulativeProfile($reduced); - - PrintText($symbols, $flat, $cumulative, $line_limit); - return 1; - } - if (m/^\s*callgrind\s*([^ \n]*)/) { - $main::opt_callgrind = 1; - - # Get derived profiles - my $calls = ExtractCalls($symbols, $orig_profile); - my $filename = $1; - if ( $1 eq '' ) { - $filename = TempName($main::next_tmpfile, "callgrind"); - } - PrintCallgrind($calls, $filename); - if ( $1 eq '' ) { - RunKcachegrind($filename, " & "); - $main::next_tmpfile++; - } - - return 1; - } - if (m/^\s*(web)?list\s*(.+)/) { - my $html = (defined($1) && ($1 eq "web")); - $main::opt_list = 1; - - my $routine; - my $ignore; - ($routine, $ignore) = ParseInteractiveArgs($2); - - my $profile = ProcessProfile($total, $orig_profile, $symbols, "", $ignore); - my $reduced = ReduceProfile($symbols, $profile); - - # Get derived profiles - my $flat = FlatProfile($reduced); - my $cumulative = CumulativeProfile($reduced); - - PrintListing($total, $libs, $flat, $cumulative, $routine, $html); - return 1; - } - if (m/^\s*disasm\s*(.+)/) { - $main::opt_disasm = 1; - - my $routine; - my $ignore; - ($routine, $ignore) = ParseInteractiveArgs($1); - - # Process current profile to account for various settings - my $profile = ProcessProfile($total, $orig_profile, $symbols, "", $ignore); - my $reduced = ReduceProfile($symbols, $profile); - - # Get derived profiles - my $flat = FlatProfile($reduced); - my $cumulative = CumulativeProfile($reduced); - - PrintDisassembly($libs, $flat, $cumulative, $routine); - return 1; - } - if (m/^\s*(gv|web|evince)\s*(.*)/) { - $main::opt_gv = 0; - $main::opt_evince = 0; - $main::opt_web = 0; - if ($1 eq "gv") { - $main::opt_gv = 1; - } elsif ($1 eq "evince") { - $main::opt_evince = 1; - } elsif ($1 eq "web") { - $main::opt_web = 1; - } - - my $focus; - my $ignore; - ($focus, $ignore) = ParseInteractiveArgs($2); - - # Process current profile to account for various settings - my $profile = ProcessProfile($total, $orig_profile, $symbols, - $focus, $ignore); - my $reduced = ReduceProfile($symbols, $profile); - - # Get derived profiles - my $flat = FlatProfile($reduced); - my $cumulative = CumulativeProfile($reduced); - - if (PrintDot($main::prog, $symbols, $profile, $flat, $cumulative, $total)) { - if ($main::opt_gv) { - RunGV(TempName($main::next_tmpfile, "ps"), " &"); - } elsif ($main::opt_evince) { - RunEvince(TempName($main::next_tmpfile, "pdf"), " &"); - } elsif ($main::opt_web) { - RunWeb(TempName($main::next_tmpfile, "svg")); - } - $main::next_tmpfile++; - } - return 1; - } - if (m/^\s*$/) { - return 1; - } - print STDERR "Unknown command: try 'help'.\n"; - return 1; -} - - -sub ProcessProfile { - my $total_count = shift; - my $orig_profile = shift; - my $symbols = shift; - my $focus = shift; - my $ignore = shift; - - # Process current profile to account for various settings - my $profile = $orig_profile; - printf("Total: %s %s\n", Unparse($total_count), Units()); - if ($focus ne '') { - $profile = FocusProfile($symbols, $profile, $focus); - my $focus_count = TotalProfile($profile); - printf("After focusing on '%s': %s %s of %s (%0.1f%%)\n", - $focus, - Unparse($focus_count), Units(), - Unparse($total_count), ($focus_count*100.0) / $total_count); - } - if ($ignore ne '') { - 
$profile = IgnoreProfile($symbols, $profile, $ignore); - my $ignore_count = TotalProfile($profile); - printf("After ignoring '%s': %s %s of %s (%0.1f%%)\n", - $ignore, - Unparse($ignore_count), Units(), - Unparse($total_count), - ($ignore_count*100.0) / $total_count); - } - - return $profile; -} - -sub InteractiveHelpMessage { - print STDERR <{$k}; - my @addrs = split(/\n/, $k); - if ($#addrs >= 0) { - my $depth = $#addrs + 1; - # int(foo / 2**32) is the only reliable way to get rid of bottom - # 32 bits on both 32- and 64-bit systems. - print pack('L*', $count & 0xFFFFFFFF, int($count / 2**32)); - print pack('L*', $depth & 0xFFFFFFFF, int($depth / 2**32)); - - foreach my $full_addr (@addrs) { - my $addr = $full_addr; - $addr =~ s/0x0*//; # strip off leading 0x, zeroes - if (length($addr) > 16) { - print STDERR "Invalid address in profile: $full_addr\n"; - next; - } - my $low_addr = substr($addr, -8); # get last 8 hex chars - my $high_addr = substr($addr, -16, 8); # get up to 8 more hex chars - print pack('L*', hex('0x' . $low_addr), hex('0x' . $high_addr)); - } - } - } -} - -# Print symbols and profile data -sub PrintSymbolizedProfile { - my $symbols = shift; - my $profile = shift; - my $prog = shift; - - $SYMBOL_PAGE =~ m,[^/]+$,; # matches everything after the last slash - my $symbol_marker = $&; - - print '--- ', $symbol_marker, "\n"; - if (defined($prog)) { - print 'binary=', $prog, "\n"; - } - while (my ($pc, $name) = each(%{$symbols})) { - my $sep = ' '; - print '0x', $pc; - # We have a list of function names, which include the inlined - # calls. They are separated (and terminated) by --, which is - # illegal in function names. - for (my $j = 2; $j <= $#{$name}; $j += 3) { - print $sep, $name->[$j]; - $sep = '--'; - } - print "\n"; - } - print '---', "\n"; - - $PROFILE_PAGE =~ m,[^/]+$,; # matches everything after the last slash - my $profile_marker = $&; - print '--- ', $profile_marker, "\n"; - if (defined($main::collected_profile)) { - # if used with remote fetch, simply dump the collected profile to output. - open(SRC, "<$main::collected_profile"); - while () { - print $_; - } - close(SRC); - } else { - # dump a cpu-format profile to standard out - PrintProfileData($profile); - } -} - -# Print text output -sub PrintText { - my $symbols = shift; - my $flat = shift; - my $cumulative = shift; - my $line_limit = shift; - - my $total = TotalProfile($flat); - - # Which profile to sort by? - my $s = $main::opt_cum ? $cumulative : $flat; - - my $running_sum = 0; - my $lines = 0; - foreach my $k (sort { GetEntry($s, $b) <=> GetEntry($s, $a) || $a cmp $b } - keys(%{$cumulative})) { - my $f = GetEntry($flat, $k); - my $c = GetEntry($cumulative, $k); - $running_sum += $f; - - my $sym = $k; - if (exists($symbols->{$k})) { - $sym = $symbols->{$k}->[0] . " " . $symbols->{$k}->[1]; - if ($main::opt_addresses) { - $sym = $k . " " . $sym; - } - } - - if ($f != 0 || $c != 0) { - printf("%8s %6s %6s %8s %6s %s\n", - Unparse($f), - Percent($f, $total), - Percent($running_sum, $total), - Unparse($c), - Percent($c, $total), - $sym); - } - $lines++; - last if ($line_limit >= 0 && $lines >= $line_limit); - } -} - -# Callgrind format has a compression for repeated function and file -# names. You show the name the first time, and just use its number -# subsequently. This can cut down the file to about a third or a -# quarter of its uncompressed size. $key and $val are the key/value -# pair that would normally be printed by callgrind; $map is a map from -# value to number. 
-sub CompressedCGName { - my($key, $val, $map) = @_; - my $idx = $map->{$val}; - # For very short keys, providing an index hurts rather than helps. - if (length($val) <= 3) { - return "$key=$val\n"; - } elsif (defined($idx)) { - return "$key=($idx)\n"; - } else { - # scalar(keys $map) gives the number of items in the map. - $idx = scalar(keys(%{$map})) + 1; - $map->{$val} = $idx; - return "$key=($idx) $val\n"; - } -} - -# Print the call graph in a way that's suiteable for callgrind. -sub PrintCallgrind { - my $calls = shift; - my $filename; - my %filename_to_index_map; - my %fnname_to_index_map; - - if ($main::opt_interactive) { - $filename = shift; - print STDERR "Writing callgrind file to '$filename'.\n" - } else { - $filename = "&STDOUT"; - } - open(CG, ">$filename"); - printf CG ("events: Hits\n\n"); - foreach my $call ( map { $_->[0] } - sort { $a->[1] cmp $b ->[1] || - $a->[2] <=> $b->[2] } - map { /([^:]+):(\d+):([^ ]+)( -> ([^:]+):(\d+):(.+))?/; - [$_, $1, $2] } - keys %$calls ) { - my $count = int($calls->{$call}); - $call =~ /([^:]+):(\d+):([^ ]+)( -> ([^:]+):(\d+):(.+))?/; - my ( $caller_file, $caller_line, $caller_function, - $callee_file, $callee_line, $callee_function ) = - ( $1, $2, $3, $5, $6, $7 ); - - # TODO(csilvers): for better compression, collect all the - # caller/callee_files and functions first, before printing - # anything, and only compress those referenced more than once. - printf CG CompressedCGName("fl", $caller_file, \%filename_to_index_map); - printf CG CompressedCGName("fn", $caller_function, \%fnname_to_index_map); - if (defined $6) { - printf CG CompressedCGName("cfl", $callee_file, \%filename_to_index_map); - printf CG CompressedCGName("cfn", $callee_function, \%fnname_to_index_map); - printf CG ("calls=$count $callee_line\n"); - } - printf CG ("$caller_line $count\n\n"); - } -} - -# Print disassembly for all all routines that match $main::opt_disasm -sub PrintDisassembly { - my $libs = shift; - my $flat = shift; - my $cumulative = shift; - my $disasm_opts = shift; - - my $total = TotalProfile($flat); - - foreach my $lib (@{$libs}) { - my $symbol_table = GetProcedureBoundaries($lib->[0], $disasm_opts); - my $offset = AddressSub($lib->[1], $lib->[3]); - foreach my $routine (sort ByName keys(%{$symbol_table})) { - my $start_addr = $symbol_table->{$routine}->[0]; - my $end_addr = $symbol_table->{$routine}->[1]; - # See if there are any samples in this routine - my $length = hex(AddressSub($end_addr, $start_addr)); - my $addr = AddressAdd($start_addr, $offset); - for (my $i = 0; $i < $length; $i++) { - if (defined($cumulative->{$addr})) { - PrintDisassembledFunction($lib->[0], $offset, - $routine, $flat, $cumulative, - $start_addr, $end_addr, $total); - last; - } - $addr = AddressInc($addr); - } - } - } -} - -# Return reference to array of tuples of the form: -# [start_address, filename, linenumber, instruction, limit_address] -# E.g., -# ["0x806c43d", "/foo/bar.cc", 131, "ret", "0x806c440"] -sub Disassemble { - my $prog = shift; - my $offset = shift; - my $start_addr = shift; - my $end_addr = shift; - - my $objdump = $obj_tool_map{"objdump"}; - my $cmd = ShellEscape($objdump, "-C", "-d", "-l", "--no-show-raw-insn", - "--start-address=0x$start_addr", - "--stop-address=0x$end_addr", $prog); - open(OBJDUMP, "$cmd |") || error("$cmd: $!\n"); - my @result = (); - my $filename = ""; - my $linenumber = -1; - my $last = ["", "", "", ""]; - while () { - s/\r//g; # turn windows-looking lines into unix-looking lines - chop; - if (m|\s*([^:\s]+):(\d+)\s*$|) { - # 
Location line of the form: - # : - $filename = $1; - $linenumber = $2; - } elsif (m/^ +([0-9a-f]+):\s*(.*)/) { - # Disassembly line -- zero-extend address to full length - my $addr = HexExtend($1); - my $k = AddressAdd($addr, $offset); - $last->[4] = $k; # Store ending address for previous instruction - $last = [$k, $filename, $linenumber, $2, $end_addr]; - push(@result, $last); - } - } - close(OBJDUMP); - return @result; -} - -# The input file should contain lines of the form /proc/maps-like -# output (same format as expected from the profiles) or that looks -# like hex addresses (like "0xDEADBEEF"). We will parse all -# /proc/maps output, and for all the hex addresses, we will output -# "short" symbol names, one per line, in the same order as the input. -sub PrintSymbols { - my $maps_and_symbols_file = shift; - - # ParseLibraries expects pcs to be in a set. Fine by us... - my @pclist = (); # pcs in sorted order - my $pcs = {}; - my $map = ""; - foreach my $line (<$maps_and_symbols_file>) { - $line =~ s/\r//g; # turn windows-looking lines into unix-looking lines - if ($line =~ /\b(0x[0-9a-f]+)\b/i) { - push(@pclist, HexExtend($1)); - $pcs->{$pclist[-1]} = 1; - } else { - $map .= $line; - } - } - - my $libs = ParseLibraries($main::prog, $map, $pcs); - my $symbols = ExtractSymbols($libs, $pcs); - - foreach my $pc (@pclist) { - # ->[0] is the shortname, ->[2] is the full name - print(($symbols->{$pc}->[0] || "??") . "\n"); - } -} - - -# For sorting functions by name -sub ByName { - return ShortFunctionName($a) cmp ShortFunctionName($b); -} - -# Print source-listing for all all routines that match $list_opts -sub PrintListing { - my $total = shift; - my $libs = shift; - my $flat = shift; - my $cumulative = shift; - my $list_opts = shift; - my $html = shift; - - my $output = \*STDOUT; - my $fname = ""; - - if ($html) { - # Arrange to write the output to a temporary file - $fname = TempName($main::next_tmpfile, "html"); - $main::next_tmpfile++; - if (!open(TEMP, ">$fname")) { - print STDERR "$fname: $!\n"; - return; - } - $output = \*TEMP; - print $output HtmlListingHeader(); - printf $output ("
<div class=\"legend\">%s<br>Total: %s %s</div>
\n", - $main::prog, Unparse($total), Units()); - } - - my $listed = 0; - foreach my $lib (@{$libs}) { - my $symbol_table = GetProcedureBoundaries($lib->[0], $list_opts); - my $offset = AddressSub($lib->[1], $lib->[3]); - foreach my $routine (sort ByName keys(%{$symbol_table})) { - # Print if there are any samples in this routine - my $start_addr = $symbol_table->{$routine}->[0]; - my $end_addr = $symbol_table->{$routine}->[1]; - my $length = hex(AddressSub($end_addr, $start_addr)); - my $addr = AddressAdd($start_addr, $offset); - for (my $i = 0; $i < $length; $i++) { - if (defined($cumulative->{$addr})) { - $listed += PrintSource( - $lib->[0], $offset, - $routine, $flat, $cumulative, - $start_addr, $end_addr, - $html, - $output); - last; - } - $addr = AddressInc($addr); - } - } - } - - if ($html) { - if ($listed > 0) { - print $output HtmlListingFooter(); - close($output); - RunWeb($fname); - } else { - close($output); - unlink($fname); - } - } -} - -sub HtmlListingHeader { - return <<'EOF'; - - - -Pprof listing - - - - -EOF -} - -sub HtmlListingFooter { - return <<'EOF'; - - -EOF -} - -sub HtmlEscape { - my $text = shift; - $text =~ s/&/&/g; - $text =~ s//>/g; - return $text; -} - -# Returns the indentation of the line, if it has any non-whitespace -# characters. Otherwise, returns -1. -sub Indentation { - my $line = shift; - if (m/^(\s*)\S/) { - return length($1); - } else { - return -1; - } -} - -# If the symbol table contains inlining info, Disassemble() may tag an -# instruction with a location inside an inlined function. But for -# source listings, we prefer to use the location in the function we -# are listing. So use MapToSymbols() to fetch full location -# information for each instruction and then pick out the first -# location from a location list (location list contains callers before -# callees in case of inlining). 
-# -# After this routine has run, each entry in $instructions contains: -# [0] start address -# [1] filename for function we are listing -# [2] line number for function we are listing -# [3] disassembly -# [4] limit address -# [5] most specific filename (may be different from [1] due to inlining) -# [6] most specific line number (may be different from [2] due to inlining) -sub GetTopLevelLineNumbers { - my ($lib, $offset, $instructions) = @_; - my $pcs = []; - for (my $i = 0; $i <= $#{$instructions}; $i++) { - push(@{$pcs}, $instructions->[$i]->[0]); - } - my $symbols = {}; - MapToSymbols($lib, $offset, $pcs, $symbols); - for (my $i = 0; $i <= $#{$instructions}; $i++) { - my $e = $instructions->[$i]; - push(@{$e}, $e->[1]); - push(@{$e}, $e->[2]); - my $addr = $e->[0]; - my $sym = $symbols->{$addr}; - if (defined($sym)) { - if ($#{$sym} >= 2 && $sym->[1] =~ m/^(.*):(\d+)$/) { - $e->[1] = $1; # File name - $e->[2] = $2; # Line number - } - } - } -} - -# Print source-listing for one routine -sub PrintSource { - my $prog = shift; - my $offset = shift; - my $routine = shift; - my $flat = shift; - my $cumulative = shift; - my $start_addr = shift; - my $end_addr = shift; - my $html = shift; - my $output = shift; - - # Disassemble all instructions (just to get line numbers) - my @instructions = Disassemble($prog, $offset, $start_addr, $end_addr); - GetTopLevelLineNumbers($prog, $offset, \@instructions); - - # Hack 1: assume that the first source file encountered in the - # disassembly contains the routine - my $filename = undef; - for (my $i = 0; $i <= $#instructions; $i++) { - if ($instructions[$i]->[2] >= 0) { - $filename = $instructions[$i]->[1]; - last; - } - } - if (!defined($filename)) { - print STDERR "no filename found in $routine\n"; - return 0; - } - - # Hack 2: assume that the largest line number from $filename is the - # end of the procedure. This is typically safe since if P1 contains - # an inlined call to P2, then P2 usually occurs earlier in the - # source file. If this does not work, we might have to compute a - # density profile or just print all regions we find. - my $lastline = 0; - for (my $i = 0; $i <= $#instructions; $i++) { - my $f = $instructions[$i]->[1]; - my $l = $instructions[$i]->[2]; - if (($f eq $filename) && ($l > $lastline)) { - $lastline = $l; - } - } - - # Hack 3: assume the first source location from "filename" is the start of - # the source code. - my $firstline = 1; - for (my $i = 0; $i <= $#instructions; $i++) { - if ($instructions[$i]->[1] eq $filename) { - $firstline = $instructions[$i]->[2]; - last; - } - } - - # Hack 4: Extend last line forward until its indentation is less than - # the indentation we saw on $firstline - my $oldlastline = $lastline; - { - if (!open(FILE, "<$filename")) { - print STDERR "$filename: $!\n"; - return 0; - } - my $l = 0; - my $first_indentation = -1; - while () { - s/\r//g; # turn windows-looking lines into unix-looking lines - $l++; - my $indent = Indentation($_); - if ($l >= $firstline) { - if ($first_indentation < 0 && $indent >= 0) { - $first_indentation = $indent; - last if ($first_indentation == 0); - } - } - if ($l >= $lastline && $indent >= 0) { - if ($indent >= $first_indentation) { - $lastline = $l+1; - } else { - last; - } - } - } - close(FILE); - } - - # Assign all samples to the range $firstline,$lastline, - # Hack 4: If an instruction does not occur in the range, its samples - # are moved to the next instruction that occurs in the range. 
- my $samples1 = {}; # Map from line number to flat count - my $samples2 = {}; # Map from line number to cumulative count - my $running1 = 0; # Unassigned flat counts - my $running2 = 0; # Unassigned cumulative counts - my $total1 = 0; # Total flat counts - my $total2 = 0; # Total cumulative counts - my %disasm = (); # Map from line number to disassembly - my $running_disasm = ""; # Unassigned disassembly - my $skip_marker = "---\n"; - if ($html) { - $skip_marker = ""; - for (my $l = $firstline; $l <= $lastline; $l++) { - $disasm{$l} = ""; - } - } - my $last_dis_filename = ''; - my $last_dis_linenum = -1; - my $last_touched_line = -1; # To detect gaps in disassembly for a line - foreach my $e (@instructions) { - # Add up counts for all address that fall inside this instruction - my $c1 = 0; - my $c2 = 0; - for (my $a = $e->[0]; $a lt $e->[4]; $a = AddressInc($a)) { - $c1 += GetEntry($flat, $a); - $c2 += GetEntry($cumulative, $a); - } - - if ($html) { - my $dis = sprintf(" %6s %6s \t\t%8s: %s ", - HtmlPrintNumber($c1), - HtmlPrintNumber($c2), - UnparseAddress($offset, $e->[0]), - CleanDisassembly($e->[3])); - - # Append the most specific source line associated with this instruction - if (length($dis) < 80) { $dis .= (' ' x (80 - length($dis))) }; - $dis = HtmlEscape($dis); - my $f = $e->[5]; - my $l = $e->[6]; - if ($f ne $last_dis_filename) { - $dis .= sprintf("%s:%d", - HtmlEscape(CleanFileName($f)), $l); - } elsif ($l ne $last_dis_linenum) { - # De-emphasize the unchanged file name portion - $dis .= sprintf("%s" . - ":%d", - HtmlEscape(CleanFileName($f)), $l); - } else { - # De-emphasize the entire location - $dis .= sprintf("%s:%d", - HtmlEscape(CleanFileName($f)), $l); - } - $last_dis_filename = $f; - $last_dis_linenum = $l; - $running_disasm .= $dis; - $running_disasm .= "\n"; - } - - $running1 += $c1; - $running2 += $c2; - $total1 += $c1; - $total2 += $c2; - my $file = $e->[1]; - my $line = $e->[2]; - if (($file eq $filename) && - ($line >= $firstline) && - ($line <= $lastline)) { - # Assign all accumulated samples to this line - AddEntry($samples1, $line, $running1); - AddEntry($samples2, $line, $running2); - $running1 = 0; - $running2 = 0; - if ($html) { - if ($line != $last_touched_line && $disasm{$line} ne '') { - $disasm{$line} .= "\n"; - } - $disasm{$line} .= $running_disasm; - $running_disasm = ''; - $last_touched_line = $line; - } - } - } - - # Assign any leftover samples to $lastline - AddEntry($samples1, $lastline, $running1); - AddEntry($samples2, $lastline, $running2); - if ($html) { - if ($lastline != $last_touched_line && $disasm{$lastline} ne '') { - $disasm{$lastline} .= "\n"; - } - $disasm{$lastline} .= $running_disasm; - } - - if ($html) { - printf $output ( - "

<h1>%s</h1>%s\n<pre>\n" .
-      "Total:%6s %6s (flat / cumulative %s)\n",
-      HtmlEscape(ShortFunctionName($routine)),
-      HtmlEscape(CleanFileName($filename)),
-      Unparse($total1),
-      Unparse($total2),
-      Units());
-  } else {
-    printf $output (
-      "ROUTINE ====================== %s in %s\n" .
-      "%6s %6s Total %s (flat / cumulative)\n",
-      ShortFunctionName($routine),
-      CleanFileName($filename),
-      Unparse($total1),
-      Unparse($total2),
-      Units());
-  }
-  if (!open(FILE, "<$filename")) {
-    print STDERR "$filename: $!\n";
-    return 0;
-  }
-  my $l = 0;
-  while (<FILE>) {
-    s/\r//g;         # turn windows-looking lines into unix-looking lines
-    $l++;
-    if ($l >= $firstline - 5 &&
-        (($l <= $oldlastline + 5) || ($l <= $lastline))) {
-      chop;
-      my $text = $_;
-      if ($l == $firstline) { print $output $skip_marker; }
-      my $n1 = GetEntry($samples1, $l);
-      my $n2 = GetEntry($samples2, $l);
-      if ($html) {
-        # Emit a span that has one of the following classes:
-        #    livesrc -- has samples
-        #    deadsrc -- has disassembly, but with no samples
-        #    nop     -- has no matching disassembly
-        # Also emit an optional span containing disassembly.
-        my $dis = $disasm{$l};
-        my $asm = "";
-        if (defined($dis) && $dis ne '') {
-          $asm = "<span class=\"asm\">" . $dis . "</span>";
-        }
-        my $source_class = (($n1 + $n2 > 0) 
-                            ? "livesrc" 
-                            : (($asm ne "") ? "deadsrc" : "nop"));
-        printf $output (
-          "%5d " .
-          "%6s %6s %s%s\n",
-          $l, $source_class,
-          HtmlPrintNumber($n1),
-          HtmlPrintNumber($n2),
-          HtmlEscape($text),
-          $asm);
-      } else {
-        printf $output(
-          "%6s %6s %4d: %s\n",
-          UnparseAlt($n1),
-          UnparseAlt($n2),
-          $l,
-          $text);
-      }
-      if ($l == $lastline)  { print $output $skip_marker; }
-    };
-  }
-  close(FILE);
-  if ($html) {
-    print $output "</pre>
\n"; - } - return 1; -} - -# Return the source line for the specified file/linenumber. -# Returns undef if not found. -sub SourceLine { - my $file = shift; - my $line = shift; - - # Look in cache - if (!defined($main::source_cache{$file})) { - if (100 < scalar keys(%main::source_cache)) { - # Clear the cache when it gets too big - $main::source_cache = (); - } - - # Read all lines from the file - if (!open(FILE, "<$file")) { - print STDERR "$file: $!\n"; - $main::source_cache{$file} = []; # Cache the negative result - return undef; - } - my $lines = []; - push(@{$lines}, ""); # So we can use 1-based line numbers as indices - while () { - push(@{$lines}, $_); - } - close(FILE); - - # Save the lines in the cache - $main::source_cache{$file} = $lines; - } - - my $lines = $main::source_cache{$file}; - if (($line < 0) || ($line > $#{$lines})) { - return undef; - } else { - return $lines->[$line]; - } -} - -# Print disassembly for one routine with interspersed source if available -sub PrintDisassembledFunction { - my $prog = shift; - my $offset = shift; - my $routine = shift; - my $flat = shift; - my $cumulative = shift; - my $start_addr = shift; - my $end_addr = shift; - my $total = shift; - - # Disassemble all instructions - my @instructions = Disassemble($prog, $offset, $start_addr, $end_addr); - - # Make array of counts per instruction - my @flat_count = (); - my @cum_count = (); - my $flat_total = 0; - my $cum_total = 0; - foreach my $e (@instructions) { - # Add up counts for all address that fall inside this instruction - my $c1 = 0; - my $c2 = 0; - for (my $a = $e->[0]; $a lt $e->[4]; $a = AddressInc($a)) { - $c1 += GetEntry($flat, $a); - $c2 += GetEntry($cumulative, $a); - } - push(@flat_count, $c1); - push(@cum_count, $c2); - $flat_total += $c1; - $cum_total += $c2; - } - - # Print header with total counts - printf("ROUTINE ====================== %s\n" . - "%6s %6s %s (flat, cumulative) %.1f%% of total\n", - ShortFunctionName($routine), - Unparse($flat_total), - Unparse($cum_total), - Units(), - ($cum_total * 100.0) / $total); - - # Process instructions in order - my $current_file = ""; - for (my $i = 0; $i <= $#instructions; ) { - my $e = $instructions[$i]; - - # Print the new file name whenever we switch files - if ($e->[1] ne $current_file) { - $current_file = $e->[1]; - my $fname = $current_file; - $fname =~ s|^\./||; # Trim leading "./" - - # Shorten long file names - if (length($fname) >= 58) { - $fname = "..." . substr($fname, -55); - } - printf("-------------------- %s\n", $fname); - } - - # TODO: Compute range of lines to print together to deal with - # small reorderings. 
- my $first_line = $e->[2]; - my $last_line = $first_line; - my %flat_sum = (); - my %cum_sum = (); - for (my $l = $first_line; $l <= $last_line; $l++) { - $flat_sum{$l} = 0; - $cum_sum{$l} = 0; - } - - # Find run of instructions for this range of source lines - my $first_inst = $i; - while (($i <= $#instructions) && - ($instructions[$i]->[2] >= $first_line) && - ($instructions[$i]->[2] <= $last_line)) { - $e = $instructions[$i]; - $flat_sum{$e->[2]} += $flat_count[$i]; - $cum_sum{$e->[2]} += $cum_count[$i]; - $i++; - } - my $last_inst = $i - 1; - - # Print source lines - for (my $l = $first_line; $l <= $last_line; $l++) { - my $line = SourceLine($current_file, $l); - if (!defined($line)) { - $line = "?\n"; - next; - } else { - $line =~ s/^\s+//; - } - printf("%6s %6s %5d: %s", - UnparseAlt($flat_sum{$l}), - UnparseAlt($cum_sum{$l}), - $l, - $line); - } - - # Print disassembly - for (my $x = $first_inst; $x <= $last_inst; $x++) { - my $e = $instructions[$x]; - printf("%6s %6s %8s: %6s\n", - UnparseAlt($flat_count[$x]), - UnparseAlt($cum_count[$x]), - UnparseAddress($offset, $e->[0]), - CleanDisassembly($e->[3])); - } - } -} - -# Print DOT graph -sub PrintDot { - my $prog = shift; - my $symbols = shift; - my $raw = shift; - my $flat = shift; - my $cumulative = shift; - my $overall_total = shift; - - # Get total - my $local_total = TotalProfile($flat); - my $nodelimit = int($main::opt_nodefraction * $local_total); - my $edgelimit = int($main::opt_edgefraction * $local_total); - my $nodecount = $main::opt_nodecount; - - # Find nodes to include - my @list = (sort { abs(GetEntry($cumulative, $b)) <=> - abs(GetEntry($cumulative, $a)) - || $a cmp $b } - keys(%{$cumulative})); - my $last = $nodecount - 1; - if ($last > $#list) { - $last = $#list; - } - while (($last >= 0) && - (abs(GetEntry($cumulative, $list[$last])) <= $nodelimit)) { - $last--; - } - if ($last < 0) { - print STDERR "No nodes to print\n"; - return 0; - } - - if ($nodelimit > 0 || $edgelimit > 0) { - printf STDERR ("Dropping nodes with <= %s %s; edges with <= %s abs(%s)\n", - Unparse($nodelimit), Units(), - Unparse($edgelimit), Units()); - } - - # Open DOT output file - my $output; - my $escaped_dot = ShellEscape(@DOT); - my $escaped_ps2pdf = ShellEscape(@PS2PDF); - if ($main::opt_gv) { - my $escaped_outfile = ShellEscape(TempName($main::next_tmpfile, "ps")); - $output = "| $escaped_dot -Tps2 >$escaped_outfile"; - } elsif ($main::opt_evince) { - my $escaped_outfile = ShellEscape(TempName($main::next_tmpfile, "pdf")); - $output = "| $escaped_dot -Tps2 | $escaped_ps2pdf - $escaped_outfile"; - } elsif ($main::opt_ps) { - $output = "| $escaped_dot -Tps2"; - } elsif ($main::opt_pdf) { - $output = "| $escaped_dot -Tps2 | $escaped_ps2pdf - -"; - } elsif ($main::opt_web || $main::opt_svg) { - # We need to post-process the SVG, so write to a temporary file always. - my $escaped_outfile = ShellEscape(TempName($main::next_tmpfile, "svg")); - $output = "| $escaped_dot -Tsvg >$escaped_outfile"; - } elsif ($main::opt_gif) { - $output = "| $escaped_dot -Tgif"; - } else { - $output = ">&STDOUT"; - } - open(DOT, $output) || error("$output: $!\n"); - - # Title - printf DOT ("digraph \"%s; %s %s\" {\n", - $prog, - Unparse($overall_total), - Units()); - if ($main::opt_pdf) { - # The output is more printable if we set the page size for dot. - printf DOT ("size=\"8,11\"\n"); - } - printf DOT ("node [width=0.375,height=0.25];\n"); - - # Print legend - printf DOT ("Legend [shape=box,fontsize=24,shape=plaintext," . 
- "label=\"%s\\l%s\\l%s\\l%s\\l%s\\l\"];\n", - $prog, - sprintf("Total %s: %s", Units(), Unparse($overall_total)), - sprintf("Focusing on: %s", Unparse($local_total)), - sprintf("Dropped nodes with <= %s abs(%s)", - Unparse($nodelimit), Units()), - sprintf("Dropped edges with <= %s %s", - Unparse($edgelimit), Units()) - ); - - # Print nodes - my %node = (); - my $nextnode = 1; - foreach my $a (@list[0..$last]) { - # Pick font size - my $f = GetEntry($flat, $a); - my $c = GetEntry($cumulative, $a); - - my $fs = 8; - if ($local_total > 0) { - $fs = 8 + (50.0 * sqrt(abs($f * 1.0 / $local_total))); - } - - $node{$a} = $nextnode++; - my $sym = $a; - $sym =~ s/\s+/\\n/g; - $sym =~ s/::/\\n/g; - - # Extra cumulative info to print for non-leaves - my $extra = ""; - if ($f != $c) { - $extra = sprintf("\\rof %s (%s)", - Unparse($c), - Percent($c, $local_total)); - } - my $style = ""; - if ($main::opt_heapcheck) { - if ($f > 0) { - # make leak-causing nodes more visible (add a background) - $style = ",style=filled,fillcolor=gray" - } elsif ($f < 0) { - # make anti-leak-causing nodes (which almost never occur) - # stand out as well (triple border) - $style = ",peripheries=3" - } - } - - printf DOT ("N%d [label=\"%s\\n%s (%s)%s\\r" . - "\",shape=box,fontsize=%.1f%s];\n", - $node{$a}, - $sym, - Unparse($f), - Percent($f, $local_total), - $extra, - $fs, - $style, - ); - } - - # Get edges and counts per edge - my %edge = (); - my $n; - my $fullname_to_shortname_map = {}; - FillFullnameToShortnameMap($symbols, $fullname_to_shortname_map); - foreach my $k (keys(%{$raw})) { - # TODO: omit low %age edges - $n = $raw->{$k}; - my @translated = TranslateStack($symbols, $fullname_to_shortname_map, $k); - for (my $i = 1; $i <= $#translated; $i++) { - my $src = $translated[$i]; - my $dst = $translated[$i-1]; - #next if ($src eq $dst); # Avoid self-edges? - if (exists($node{$src}) && exists($node{$dst})) { - my $edge_label = "$src\001$dst"; - if (!exists($edge{$edge_label})) { - $edge{$edge_label} = 0; - } - $edge{$edge_label} += $n; - } - } - } - - # Print edges (process in order of decreasing counts) - my %indegree = (); # Number of incoming edges added per node so far - my %outdegree = (); # Number of outgoing edges added per node so far - foreach my $e (sort { $edge{$b} <=> $edge{$a} } keys(%edge)) { - my @x = split(/\001/, $e); - $n = $edge{$e}; - - # Initialize degree of kept incoming and outgoing edges if necessary - my $src = $x[0]; - my $dst = $x[1]; - if (!exists($outdegree{$src})) { $outdegree{$src} = 0; } - if (!exists($indegree{$dst})) { $indegree{$dst} = 0; } - - my $keep; - if ($indegree{$dst} == 0) { - # Keep edge if needed for reachability - $keep = 1; - } elsif (abs($n) <= $edgelimit) { - # Drop if we are below --edgefraction - $keep = 0; - } elsif ($outdegree{$src} >= $main::opt_maxdegree || - $indegree{$dst} >= $main::opt_maxdegree) { - # Keep limited number of in/out edges per node - $keep = 0; - } else { - $keep = 1; - } - - if ($keep) { - $outdegree{$src}++; - $indegree{$dst}++; - - # Compute line width based on edge count - my $fraction = abs($local_total ? (3 * ($n / $local_total)) : 0); - if ($fraction > 1) { $fraction = 1; } - my $w = $fraction * 2; - if ($w < 1 && ($main::opt_web || $main::opt_svg)) { - # SVG output treats line widths < 1 poorly. 
- $w = 1; - } - - # Dot sometimes segfaults if given edge weights that are too large, so - # we cap the weights at a large value - my $edgeweight = abs($n) ** 0.7; - if ($edgeweight > 100000) { $edgeweight = 100000; } - $edgeweight = int($edgeweight); - - my $style = sprintf("setlinewidth(%f)", $w); - if ($x[1] =~ m/\(inline\)/) { - $style .= ",dashed"; - } - - # Use a slightly squashed function of the edge count as the weight - printf DOT ("N%s -> N%s [label=%s, weight=%d, style=\"%s\"];\n", - $node{$x[0]}, - $node{$x[1]}, - Unparse($n), - $edgeweight, - $style); - } - } - - print DOT ("}\n"); - close(DOT); - - if ($main::opt_web || $main::opt_svg) { - # Rewrite SVG to be more usable inside web browser. - RewriteSvg(TempName($main::next_tmpfile, "svg")); - } - - return 1; -} - -sub RewriteSvg { - my $svgfile = shift; - - open(SVG, $svgfile) || die "open temp svg: $!"; - my @svg = ; - close(SVG); - unlink $svgfile; - my $svg = join('', @svg); - - # Dot's SVG output is - # - # - # - # ... - # - # - # - # Change it to - # - # - # $svg_javascript - # - # - # ... - # - # - # - - # Fix width, height; drop viewBox. - $svg =~ s/(?s) above first - my $svg_javascript = SvgJavascript(); - my $viewport = "\n"; - $svg =~ s/ above . - $svg =~ s/(.*)(<\/svg>)/$1<\/g>$2/; - $svg =~ s/$svgfile") || die "open $svgfile: $!"; - print SVG $svg; - close(SVG); - } -} - -sub SvgJavascript { - return <<'EOF'; - -EOF -} - -# Provides a map from fullname to shortname for cases where the -# shortname is ambiguous. The symlist has both the fullname and -# shortname for all symbols, which is usually fine, but sometimes -- -# such as overloaded functions -- two different fullnames can map to -# the same shortname. In that case, we use the address of the -# function to disambiguate the two. This function fills in a map that -# maps fullnames to modified shortnames in such cases. If a fullname -# is not present in the map, the 'normal' shortname provided by the -# symlist is the appropriate one to use. -sub FillFullnameToShortnameMap { - my $symbols = shift; - my $fullname_to_shortname_map = shift; - my $shortnames_seen_once = {}; - my $shortnames_seen_more_than_once = {}; - - foreach my $symlist (values(%{$symbols})) { - # TODO(csilvers): deal with inlined symbols too. - my $shortname = $symlist->[0]; - my $fullname = $symlist->[2]; - if ($fullname !~ /<[0-9a-fA-F]+>$/) { # fullname doesn't end in an address - next; # the only collisions we care about are when addresses differ - } - if (defined($shortnames_seen_once->{$shortname}) && - $shortnames_seen_once->{$shortname} ne $fullname) { - $shortnames_seen_more_than_once->{$shortname} = 1; - } else { - $shortnames_seen_once->{$shortname} = $fullname; - } - } - - foreach my $symlist (values(%{$symbols})) { - my $shortname = $symlist->[0]; - my $fullname = $symlist->[2]; - # TODO(csilvers): take in a list of addresses we care about, and only - # store in the map if $symlist->[1] is in that list. Saves space. - next if defined($fullname_to_shortname_map->{$fullname}); - if (defined($shortnames_seen_more_than_once->{$shortname})) { - if ($fullname =~ /<0*([^>]*)>$/) { # fullname has address at end of it - $fullname_to_shortname_map->{$fullname} = "$shortname\@$1"; - } - } - } -} - -# Return a small number that identifies the argument. -# Multiple calls with the same argument will return the same number. -# Calls with different arguments will return different numbers. 
-sub ShortIdFor { - my $key = shift; - my $id = $main::uniqueid{$key}; - if (!defined($id)) { - $id = keys(%main::uniqueid) + 1; - $main::uniqueid{$key} = $id; - } - return $id; -} - -# Translate a stack of addresses into a stack of symbols -sub TranslateStack { - my $symbols = shift; - my $fullname_to_shortname_map = shift; - my $k = shift; - - my @addrs = split(/\n/, $k); - my @result = (); - for (my $i = 0; $i <= $#addrs; $i++) { - my $a = $addrs[$i]; - - # Skip large addresses since they sometimes show up as fake entries on RH9 - if (length($a) > 8 && $a gt "7fffffffffffffff") { - next; - } - - if ($main::opt_disasm || $main::opt_list) { - # We want just the address for the key - push(@result, $a); - next; - } - - my $symlist = $symbols->{$a}; - if (!defined($symlist)) { - $symlist = [$a, "", $a]; - } - - # We can have a sequence of symbols for a particular entry - # (more than one symbol in the case of inlining). Callers - # come before callees in symlist, so walk backwards since - # the translated stack should contain callees before callers. - for (my $j = $#{$symlist}; $j >= 2; $j -= 3) { - my $func = $symlist->[$j-2]; - my $fileline = $symlist->[$j-1]; - my $fullfunc = $symlist->[$j]; - if (defined($fullname_to_shortname_map->{$fullfunc})) { - $func = $fullname_to_shortname_map->{$fullfunc}; - } - if ($j > 2) { - $func = "$func (inline)"; - } - - # Do not merge nodes corresponding to Callback::Run since that - # causes confusing cycles in dot display. Instead, we synthesize - # a unique name for this frame per caller. - if ($func =~ m/Callback.*::Run$/) { - my $caller = ($i > 0) ? $addrs[$i-1] : 0; - $func = "Run#" . ShortIdFor($caller); - } - - if ($main::opt_addresses) { - push(@result, "$a $func $fileline"); - } elsif ($main::opt_lines) { - if ($func eq '??' && $fileline eq '??:0') { - push(@result, "$a"); - } else { - push(@result, "$func $fileline"); - } - } elsif ($main::opt_functions) { - if ($func eq '??') { - push(@result, "$a"); - } else { - push(@result, $func); - } - } elsif ($main::opt_files) { - if ($fileline eq '??:0' || $fileline eq '') { - push(@result, "$a"); - } else { - my $f = $fileline; - $f =~ s/:\d+$//; - push(@result, $f); - } - } else { - push(@result, $a); - last; # Do not print inlined info - } - } - } - - # print join(",", @addrs), " => ", join(",", @result), "\n"; - return @result; -} - -# Generate percent string for a number and a total -sub Percent { - my $num = shift; - my $tot = shift; - if ($tot != 0) { - return sprintf("%.1f%%", $num * 100.0 / $tot); - } else { - return ($num == 0) ? "nan" : (($num > 0) ? "+inf" : "-inf"); - } -} - -# Generate pretty-printed form of number -sub Unparse { - my $num = shift; - if ($main::profile_type eq 'heap' || $main::profile_type eq 'growth') { - if ($main::opt_inuse_objects || $main::opt_alloc_objects) { - return sprintf("%d", $num); - } else { - if ($main::opt_show_bytes) { - return sprintf("%d", $num); - } else { - return sprintf("%.1f", $num / 1048576.0); - } - } - } elsif ($main::profile_type eq 'contention' && !$main::opt_contentions) { - return sprintf("%.3f", $num / 1e9); # Convert nanoseconds to seconds - } else { - return sprintf("%d", $num); - } -} - -# Alternate pretty-printed form: 0 maps to "." 
-sub UnparseAlt { - my $num = shift; - if ($num == 0) { - return "."; - } else { - return Unparse($num); - } -} - -# Alternate pretty-printed form: 0 maps to "" -sub HtmlPrintNumber { - my $num = shift; - if ($num == 0) { - return ""; - } else { - return Unparse($num); - } -} - -# Return output units -sub Units { - if ($main::profile_type eq 'heap' || $main::profile_type eq 'growth') { - if ($main::opt_inuse_objects || $main::opt_alloc_objects) { - return "objects"; - } else { - if ($main::opt_show_bytes) { - return "B"; - } else { - return "MB"; - } - } - } elsif ($main::profile_type eq 'contention' && !$main::opt_contentions) { - return "seconds"; - } else { - return "samples"; - } -} - -##### Profile manipulation code ##### - -# Generate flattened profile: -# If count is charged to stack [a,b,c,d], in generated profile, -# it will be charged to [a] -sub FlatProfile { - my $profile = shift; - my $result = {}; - foreach my $k (keys(%{$profile})) { - my $count = $profile->{$k}; - my @addrs = split(/\n/, $k); - if ($#addrs >= 0) { - AddEntry($result, $addrs[0], $count); - } - } - return $result; -} - -# Generate cumulative profile: -# If count is charged to stack [a,b,c,d], in generated profile, -# it will be charged to [a], [b], [c], [d] -sub CumulativeProfile { - my $profile = shift; - my $result = {}; - foreach my $k (keys(%{$profile})) { - my $count = $profile->{$k}; - my @addrs = split(/\n/, $k); - foreach my $a (@addrs) { - AddEntry($result, $a, $count); - } - } - return $result; -} - -# If the second-youngest PC on the stack is always the same, returns -# that pc. Otherwise, returns undef. -sub IsSecondPcAlwaysTheSame { - my $profile = shift; - - my $second_pc = undef; - foreach my $k (keys(%{$profile})) { - my @addrs = split(/\n/, $k); - if ($#addrs < 1) { - return undef; - } - if (not defined $second_pc) { - $second_pc = $addrs[1]; - } else { - if ($second_pc ne $addrs[1]) { - return undef; - } - } - } - return $second_pc; -} - -sub ExtractSymbolLocation { - my $symbols = shift; - my $address = shift; - # 'addr2line' outputs "??:0" for unknown locations; we do the - # same to be consistent. - my $location = "??:0:unknown"; - if (exists $symbols->{$address}) { - my $file = $symbols->{$address}->[1]; - if ($file eq "?") { - $file = "??:0" - } - $location = $file . ":" . $symbols->{$address}->[0]; - } - return $location; -} - -# Extracts a graph of calls. 
-sub ExtractCalls { - my $symbols = shift; - my $profile = shift; - - my $calls = {}; - while( my ($stack_trace, $count) = each %$profile ) { - my @address = split(/\n/, $stack_trace); - my $destination = ExtractSymbolLocation($symbols, $address[0]); - AddEntry($calls, $destination, $count); - for (my $i = 1; $i <= $#address; $i++) { - my $source = ExtractSymbolLocation($symbols, $address[$i]); - my $call = "$source -> $destination"; - AddEntry($calls, $call, $count); - $destination = $source; - } - } - - return $calls; -} - -sub RemoveUninterestingFrames { - my $symbols = shift; - my $profile = shift; - - # List of function names to skip - my %skip = (); - my $skip_regexp = 'NOMATCH'; - if ($main::profile_type eq 'heap' || $main::profile_type eq 'growth') { - foreach my $name ('calloc', - 'cfree', - 'malloc', - 'free', - 'memalign', - 'posix_memalign', - 'pvalloc', - 'valloc', - 'realloc', - 'tc_calloc', - 'tc_cfree', - 'tc_malloc', - 'tc_free', - 'tc_memalign', - 'tc_posix_memalign', - 'tc_pvalloc', - 'tc_valloc', - 'tc_realloc', - 'tc_new', - 'tc_delete', - 'tc_newarray', - 'tc_deletearray', - 'tc_new_nothrow', - 'tc_newarray_nothrow', - 'do_malloc', - '::do_malloc', # new name -- got moved to an unnamed ns - '::do_malloc_or_cpp_alloc', - 'DoSampledAllocation', - 'simple_alloc::allocate', - '__malloc_alloc_template::allocate', - '__builtin_delete', - '__builtin_new', - '__builtin_vec_delete', - '__builtin_vec_new', - 'operator new', - 'operator new[]', - # The entry to our memory-allocation routines on OS X - 'malloc_zone_malloc', - 'malloc_zone_calloc', - 'malloc_zone_valloc', - 'malloc_zone_realloc', - 'malloc_zone_memalign', - 'malloc_zone_free', - # These mark the beginning/end of our custom sections - '__start_google_malloc', - '__stop_google_malloc', - '__start_malloc_hook', - '__stop_malloc_hook') { - $skip{$name} = 1; - $skip{"_" . $name} = 1; # Mach (OS X) adds a _ prefix to everything - } - # TODO: Remove TCMalloc once everything has been - # moved into the tcmalloc:: namespace and we have flushed - # old code out of the system. - $skip_regexp = "TCMalloc|^tcmalloc::"; - } elsif ($main::profile_type eq 'contention') { - foreach my $vname ('base::RecordLockProfileData', - 'base::SubmitMutexProfileData', - 'base::SubmitSpinLockProfileData', - 'Mutex::Unlock', - 'Mutex::UnlockSlow', - 'Mutex::ReaderUnlock', - 'MutexLock::~MutexLock', - 'SpinLock::Unlock', - 'SpinLock::SlowUnlock', - 'SpinLockHolder::~SpinLockHolder') { - $skip{$vname} = 1; - } - } elsif ($main::profile_type eq 'cpu') { - # Drop signal handlers used for CPU profile collection - # TODO(dpeng): this should not be necessary; it's taken - # care of by the general 2nd-pc mechanism below. - foreach my $name ('ProfileData::Add', # historical - 'ProfileData::prof_handler', # historical - 'CpuProfiler::prof_handler', - '__FRAME_END__', - '__pthread_sighandler', - '__restore') { - $skip{$name} = 1; - } - } else { - # Nothing skipped for unknown types - } - - if ($main::profile_type eq 'cpu') { - # If all the second-youngest program counters are the same, - # this STRONGLY suggests that it is an artifact of measurement, - # i.e., stack frames pushed by the CPU profiler signal handler. - # Hence, we delete them. - # (The topmost PC is read from the signal structure, not from - # the stack, so it does not get involved.) 
- while (my $second_pc = IsSecondPcAlwaysTheSame($profile)) { - my $result = {}; - my $func = ''; - if (exists($symbols->{$second_pc})) { - $second_pc = $symbols->{$second_pc}->[0]; - } - print STDERR "Removing $second_pc from all stack traces.\n"; - foreach my $k (keys(%{$profile})) { - my $count = $profile->{$k}; - my @addrs = split(/\n/, $k); - splice @addrs, 1, 1; - my $reduced_path = join("\n", @addrs); - AddEntry($result, $reduced_path, $count); - } - $profile = $result; - } - } - - my $result = {}; - foreach my $k (keys(%{$profile})) { - my $count = $profile->{$k}; - my @addrs = split(/\n/, $k); - my @path = (); - foreach my $a (@addrs) { - if (exists($symbols->{$a})) { - my $func = $symbols->{$a}->[0]; - if ($skip{$func} || ($func =~ m/$skip_regexp/)) { - next; - } - } - push(@path, $a); - } - my $reduced_path = join("\n", @path); - AddEntry($result, $reduced_path, $count); - } - return $result; -} - -# Reduce profile to granularity given by user -sub ReduceProfile { - my $symbols = shift; - my $profile = shift; - my $result = {}; - my $fullname_to_shortname_map = {}; - FillFullnameToShortnameMap($symbols, $fullname_to_shortname_map); - foreach my $k (keys(%{$profile})) { - my $count = $profile->{$k}; - my @translated = TranslateStack($symbols, $fullname_to_shortname_map, $k); - my @path = (); - my %seen = (); - $seen{''} = 1; # So that empty keys are skipped - foreach my $e (@translated) { - # To avoid double-counting due to recursion, skip a stack-trace - # entry if it has already been seen - if (!$seen{$e}) { - $seen{$e} = 1; - push(@path, $e); - } - } - my $reduced_path = join("\n", @path); - AddEntry($result, $reduced_path, $count); - } - return $result; -} - -# Does the specified symbol array match the regexp? -sub SymbolMatches { - my $sym = shift; - my $re = shift; - if (defined($sym)) { - for (my $i = 0; $i < $#{$sym}; $i += 3) { - if ($sym->[$i] =~ m/$re/ || $sym->[$i+1] =~ m/$re/) { - return 1; - } - } - } - return 0; -} - -# Focus only on paths involving specified regexps -sub FocusProfile { - my $symbols = shift; - my $profile = shift; - my $focus = shift; - my $result = {}; - foreach my $k (keys(%{$profile})) { - my $count = $profile->{$k}; - my @addrs = split(/\n/, $k); - foreach my $a (@addrs) { - # Reply if it matches either the address/shortname/fileline - if (($a =~ m/$focus/) || SymbolMatches($symbols->{$a}, $focus)) { - AddEntry($result, $k, $count); - last; - } - } - } - return $result; -} - -# Focus only on paths not involving specified regexps -sub IgnoreProfile { - my $symbols = shift; - my $profile = shift; - my $ignore = shift; - my $result = {}; - foreach my $k (keys(%{$profile})) { - my $count = $profile->{$k}; - my @addrs = split(/\n/, $k); - my $matched = 0; - foreach my $a (@addrs) { - # Reply if it matches either the address/shortname/fileline - if (($a =~ m/$ignore/) || SymbolMatches($symbols->{$a}, $ignore)) { - $matched = 1; - last; - } - } - if (!$matched) { - AddEntry($result, $k, $count); - } - } - return $result; -} - -# Get total count in profile -sub TotalProfile { - my $profile = shift; - my $result = 0; - foreach my $k (keys(%{$profile})) { - $result += $profile->{$k}; - } - return $result; -} - -# Add A to B -sub AddProfile { - my $A = shift; - my $B = shift; - - my $R = {}; - # add all keys in A - foreach my $k (keys(%{$A})) { - my $v = $A->{$k}; - AddEntry($R, $k, $v); - } - # add all keys in B - foreach my $k (keys(%{$B})) { - my $v = $B->{$k}; - AddEntry($R, $k, $v); - } - return $R; -} - -# Merges symbol maps -sub MergeSymbols { - 
my $A = shift; - my $B = shift; - - my $R = {}; - foreach my $k (keys(%{$A})) { - $R->{$k} = $A->{$k}; - } - if (defined($B)) { - foreach my $k (keys(%{$B})) { - $R->{$k} = $B->{$k}; - } - } - return $R; -} - - -# Add A to B -sub AddPcs { - my $A = shift; - my $B = shift; - - my $R = {}; - # add all keys in A - foreach my $k (keys(%{$A})) { - $R->{$k} = 1 - } - # add all keys in B - foreach my $k (keys(%{$B})) { - $R->{$k} = 1 - } - return $R; -} - -# Subtract B from A -sub SubtractProfile { - my $A = shift; - my $B = shift; - - my $R = {}; - foreach my $k (keys(%{$A})) { - my $v = $A->{$k} - GetEntry($B, $k); - if ($v < 0 && $main::opt_drop_negative) { - $v = 0; - } - AddEntry($R, $k, $v); - } - if (!$main::opt_drop_negative) { - # Take care of when subtracted profile has more entries - foreach my $k (keys(%{$B})) { - if (!exists($A->{$k})) { - AddEntry($R, $k, 0 - $B->{$k}); - } - } - } - return $R; -} - -# Get entry from profile; zero if not present -sub GetEntry { - my $profile = shift; - my $k = shift; - if (exists($profile->{$k})) { - return $profile->{$k}; - } else { - return 0; - } -} - -# Add entry to specified profile -sub AddEntry { - my $profile = shift; - my $k = shift; - my $n = shift; - if (!exists($profile->{$k})) { - $profile->{$k} = 0; - } - $profile->{$k} += $n; -} - -# Add a stack of entries to specified profile, and add them to the $pcs -# list. -sub AddEntries { - my $profile = shift; - my $pcs = shift; - my $stack = shift; - my $count = shift; - my @k = (); - - foreach my $e (split(/\s+/, $stack)) { - my $pc = HexExtend($e); - $pcs->{$pc} = 1; - push @k, $pc; - } - AddEntry($profile, (join "\n", @k), $count); -} - -##### Code to profile a server dynamically ##### - -sub CheckSymbolPage { - my $url = SymbolPageURL(); - my $command = ShellEscape(@URL_FETCHER, $url); - open(SYMBOL, "$command |") or error($command); - my $line = ; - $line =~ s/\r//g; # turn windows-looking lines into unix-looking lines - close(SYMBOL); - unless (defined($line)) { - error("$url doesn't exist\n"); - } - - if ($line =~ /^num_symbols:\s+(\d+)$/) { - if ($1 == 0) { - error("Stripped binary. No symbols available.\n"); - } - } else { - error("Failed to get the number of symbols from $url\n"); - } -} - -sub IsProfileURL { - my $profile_name = shift; - if (-f $profile_name) { - printf STDERR "Using local file $profile_name.\n"; - return 0; - } - return 1; -} - -sub ParseProfileURL { - my $profile_name = shift; - - if (!defined($profile_name) || $profile_name eq "") { - return (); - } - - # Split profile URL - matches all non-empty strings, so no test. - $profile_name =~ m,^(https?://)?([^/]+)(.*?)(/|$PROFILES)?$,; - - my $proto = $1 || "http://"; - my $hostport = $2; - my $prefix = $3; - my $profile = $4 || "/"; - - my $host = $hostport; - $host =~ s/:.*//; - - my $baseurl = "$proto$hostport$prefix"; - return ($host, $baseurl, $profile); -} - -# We fetch symbols from the first profile argument. 
-sub SymbolPageURL { - my ($host, $baseURL, $path) = ParseProfileURL($main::pfile_args[0]); - return "$baseURL$SYMBOL_PAGE"; -} - -sub FetchProgramName() { - my ($host, $baseURL, $path) = ParseProfileURL($main::pfile_args[0]); - my $url = "$baseURL$PROGRAM_NAME_PAGE"; - my $command_line = ShellEscape(@URL_FETCHER, $url); - open(CMDLINE, "$command_line |") or error($command_line); - my $cmdline = ; - $cmdline =~ s/\r//g; # turn windows-looking lines into unix-looking lines - close(CMDLINE); - error("Failed to get program name from $url\n") unless defined($cmdline); - $cmdline =~ s/\x00.+//; # Remove argv[1] and latters. - $cmdline =~ s!\n!!g; # Remove LFs. - return $cmdline; -} - -# Gee, curl's -L (--location) option isn't reliable at least -# with its 7.12.3 version. Curl will forget to post data if -# there is a redirection. This function is a workaround for -# curl. Redirection happens on borg hosts. -sub ResolveRedirectionForCurl { - my $url = shift; - my $command_line = ShellEscape(@URL_FETCHER, "--head", $url); - open(CMDLINE, "$command_line |") or error($command_line); - while () { - s/\r//g; # turn windows-looking lines into unix-looking lines - if (/^Location: (.*)/) { - $url = $1; - } - } - close(CMDLINE); - return $url; -} - -# Add a timeout flat to URL_FETCHER. Returns a new list. -sub AddFetchTimeout { - my $timeout = shift; - my @fetcher = shift; - if (defined($timeout)) { - if (join(" ", @fetcher) =~ m/\bcurl -s/) { - push(@fetcher, "--max-time", sprintf("%d", $timeout)); - } elsif (join(" ", @fetcher) =~ m/\brpcget\b/) { - push(@fetcher, sprintf("--deadline=%d", $timeout)); - } - } - return @fetcher; -} - -# Reads a symbol map from the file handle name given as $1, returning -# the resulting symbol map. Also processes variables relating to symbols. -# Currently, the only variable processed is 'binary=' which updates -# $main::prog to have the correct program name. -sub ReadSymbols { - my $in = shift; - my $map = {}; - while (<$in>) { - s/\r//g; # turn windows-looking lines into unix-looking lines - # Removes all the leading zeroes from the symbols, see comment below. - if (m/^0x0*([0-9a-f]+)\s+(.+)/) { - $map->{$1} = $2; - } elsif (m/^---/) { - last; - } elsif (m/^([a-z][^=]*)=(.*)$/ ) { - my ($variable, $value) = ($1, $2); - for ($variable, $value) { - s/^\s+//; - s/\s+$//; - } - if ($variable eq "binary") { - if ($main::prog ne $UNKNOWN_BINARY && $main::prog ne $value) { - printf STDERR ("Warning: Mismatched binary name '%s', using '%s'.\n", - $main::prog, $value); - } - $main::prog = $value; - } else { - printf STDERR ("Ignoring unknown variable in symbols list: " . - "'%s' = '%s'\n", $variable, $value); - } - } - } - return $map; -} - -# Fetches and processes symbols to prepare them for use in the profile output -# code. If the optional 'symbol_map' arg is not given, fetches symbols from -# $SYMBOL_PAGE for all PC values found in profile. Otherwise, the raw symbols -# are assumed to have already been fetched into 'symbol_map' and are simply -# extracted and processed. -sub FetchSymbols { - my $pcset = shift; - my $symbol_map = shift; - - my %seen = (); - my @pcs = grep { !$seen{$_}++ } keys(%$pcset); # uniq - - if (!defined($symbol_map)) { - my $post_data = join("+", sort((map {"0x" . 
"$_"} @pcs))); - - open(POSTFILE, ">$main::tmpfile_sym"); - print POSTFILE $post_data; - close(POSTFILE); - - my $url = SymbolPageURL(); - - my $command_line; - if (join(" ", @URL_FETCHER) =~ m/\bcurl -s/) { - $url = ResolveRedirectionForCurl($url); - $command_line = ShellEscape(@URL_FETCHER, "-d", "\@$main::tmpfile_sym", - $url); - } else { - $command_line = (ShellEscape(@URL_FETCHER, "--post", $url) - . " < " . ShellEscape($main::tmpfile_sym)); - } - # We use c++filt in case $SYMBOL_PAGE gives us mangled symbols. - my $escaped_cppfilt = ShellEscape($obj_tool_map{"c++filt"}); - open(SYMBOL, "$command_line | $escaped_cppfilt |") or error($command_line); - $symbol_map = ReadSymbols(*SYMBOL{IO}); - close(SYMBOL); - } - - my $symbols = {}; - foreach my $pc (@pcs) { - my $fullname; - # For 64 bits binaries, symbols are extracted with 8 leading zeroes. - # Then /symbol reads the long symbols in as uint64, and outputs - # the result with a "0x%08llx" format which get rid of the zeroes. - # By removing all the leading zeroes in both $pc and the symbols from - # /symbol, the symbols match and are retrievable from the map. - my $shortpc = $pc; - $shortpc =~ s/^0*//; - # Each line may have a list of names, which includes the function - # and also other functions it has inlined. They are separated (in - # PrintSymbolizedProfile), by --, which is illegal in function names. - my $fullnames; - if (defined($symbol_map->{$shortpc})) { - $fullnames = $symbol_map->{$shortpc}; - } else { - $fullnames = "0x" . $pc; # Just use addresses - } - my $sym = []; - $symbols->{$pc} = $sym; - foreach my $fullname (split("--", $fullnames)) { - my $name = ShortFunctionName($fullname); - push(@{$sym}, $name, "?", $fullname); - } - } - return $symbols; -} - -sub BaseName { - my $file_name = shift; - $file_name =~ s!^.*/!!; # Remove directory name - return $file_name; -} - -sub MakeProfileBaseName { - my ($binary_name, $profile_name) = @_; - my ($host, $baseURL, $path) = ParseProfileURL($profile_name); - my $binary_shortname = BaseName($binary_name); - return sprintf("%s.%s.%s", - $binary_shortname, $main::op_time, $host); -} - -sub FetchDynamicProfile { - my $binary_name = shift; - my $profile_name = shift; - my $fetch_name_only = shift; - my $encourage_patience = shift; - - if (!IsProfileURL($profile_name)) { - return $profile_name; - } else { - my ($host, $baseURL, $path) = ParseProfileURL($profile_name); - if ($path eq "" || $path eq "/") { - # Missing type specifier defaults to cpu-profile - $path = $PROFILE_PAGE; - } - - my $profile_file = MakeProfileBaseName($binary_name, $profile_name); - - my $url = "$baseURL$path"; - my $fetch_timeout = undef; - if ($path =~ m/$PROFILE_PAGE|$PMUPROFILE_PAGE/) { - if ($path =~ m/[?]/) { - $url .= "&"; - } else { - $url .= "?"; - } - $url .= sprintf("seconds=%d", $main::opt_seconds); - $fetch_timeout = $main::opt_seconds * 1.01 + 60; - } else { - # For non-CPU profiles, we add a type-extension to - # the target profile file name. - my $suffix = $path; - $suffix =~ s,/,.,g; - $profile_file .= $suffix; - } - - my $profile_dir = $ENV{"PPROF_TMPDIR"} || ($ENV{HOME} . "/pprof"); - if (! -d $profile_dir) { - mkdir($profile_dir) - || die("Unable to create profile directory $profile_dir: $!\n"); - } - my $tmp_profile = "$profile_dir/.tmp.$profile_file"; - my $real_profile = "$profile_dir/$profile_file"; - - if ($fetch_name_only > 0) { - return $real_profile; - } - - my @fetcher = AddFetchTimeout($fetch_timeout, @URL_FETCHER); - my $cmd = ShellEscape(@fetcher, $url) . " > " . 
ShellEscape($tmp_profile); - if ($path =~ m/$PROFILE_PAGE|$PMUPROFILE_PAGE|$CENSUSPROFILE_PAGE/){ - print STDERR "Gathering CPU profile from $url for $main::opt_seconds seconds to\n ${real_profile}\n"; - if ($encourage_patience) { - print STDERR "Be patient...\n"; - } - } else { - print STDERR "Fetching $path profile from $url to\n ${real_profile}\n"; - } - - (system($cmd) == 0) || error("Failed to get profile: $cmd: $!\n"); - (system("mv", $tmp_profile, $real_profile) == 0) || error("Unable to rename profile\n"); - print STDERR "Wrote profile to $real_profile\n"; - $main::collected_profile = $real_profile; - return $main::collected_profile; - } -} - -# Collect profiles in parallel -sub FetchDynamicProfiles { - my $items = scalar(@main::pfile_args); - my $levels = log($items) / log(2); - - if ($items == 1) { - $main::profile_files[0] = FetchDynamicProfile($main::prog, $main::pfile_args[0], 0, 1); - } else { - # math rounding issues - if ((2 ** $levels) < $items) { - $levels++; - } - my $count = scalar(@main::pfile_args); - for (my $i = 0; $i < $count; $i++) { - $main::profile_files[$i] = FetchDynamicProfile($main::prog, $main::pfile_args[$i], 1, 0); - } - print STDERR "Fetching $count profiles, Be patient...\n"; - FetchDynamicProfilesRecurse($levels, 0, 0); - $main::collected_profile = join(" \\\n ", @main::profile_files); - } -} - -# Recursively fork a process to get enough processes -# collecting profiles -sub FetchDynamicProfilesRecurse { - my $maxlevel = shift; - my $level = shift; - my $position = shift; - - if (my $pid = fork()) { - $position = 0 | ($position << 1); - TryCollectProfile($maxlevel, $level, $position); - wait; - } else { - $position = 1 | ($position << 1); - TryCollectProfile($maxlevel, $level, $position); - cleanup(); - exit(0); - } -} - -# Collect a single profile -sub TryCollectProfile { - my $maxlevel = shift; - my $level = shift; - my $position = shift; - - if ($level >= ($maxlevel - 1)) { - if ($position < scalar(@main::pfile_args)) { - FetchDynamicProfile($main::prog, $main::pfile_args[$position], 0, 0); - } - } else { - FetchDynamicProfilesRecurse($maxlevel, $level+1, $position); - } -} - -##### Parsing code ##### - -# Provide a small streaming-read module to handle very large -# cpu-profile files. Stream in chunks along a sliding window. -# Provides an interface to get one 'slot', correctly handling -# endian-ness differences. A slot is one 32-bit or 64-bit word -# (depending on the input profile). We tell endianness and bit-size -# for the profile by looking at the first 8 bytes: in cpu profiles, -# the second slot is always 3 (we'll accept anything that's not 0). -BEGIN { - package CpuProfileStream; - - sub new { - my ($class, $file, $fname) = @_; - my $self = { file => $file, - base => 0, - stride => 512 * 1024, # must be a multiple of bitsize/8 - slots => [], - unpack_code => "", # N for big-endian, V for little - perl_is_64bit => 1, # matters if profile is 64-bit - }; - bless $self, $class; - # Let unittests adjust the stride - if ($main::opt_test_stride > 0) { - $self->{stride} = $main::opt_test_stride; - } - # Read the first two slots to figure out bitsize and endianness. - my $slots = $self->{slots}; - my $str; - read($self->{file}, $str, 8); - # Set the global $address_length based on what we see here. - # 8 is 32-bit (8 hexadecimal chars); 16 is 64-bit (16 hexadecimal chars). - $address_length = ($str eq (chr(0)x8)) ? 16 : 8; - if ($address_length == 8) { - if (substr($str, 6, 2) eq chr(0)x2) { - $self->{unpack_code} = 'V'; # Little-endian. 
- } elsif (substr($str, 4, 2) eq chr(0)x2) { - $self->{unpack_code} = 'N'; # Big-endian - } else { - ::error("$fname: header size >= 2**16\n"); - } - @$slots = unpack($self->{unpack_code} . "*", $str); - } else { - # If we're a 64-bit profile, check if we're a 64-bit-capable - # perl. Otherwise, each slot will be represented as a float - # instead of an int64, losing precision and making all the - # 64-bit addresses wrong. We won't complain yet, but will - # later if we ever see a value that doesn't fit in 32 bits. - my $has_q = 0; - eval { $has_q = pack("Q", "1") ? 1 : 1; }; - if (!$has_q) { - $self->{perl_is_64bit} = 0; - } - read($self->{file}, $str, 8); - if (substr($str, 4, 4) eq chr(0)x4) { - # We'd love to use 'Q', but it's a) not universal, b) not endian-proof. - $self->{unpack_code} = 'V'; # Little-endian. - } elsif (substr($str, 0, 4) eq chr(0)x4) { - $self->{unpack_code} = 'N'; # Big-endian - } else { - ::error("$fname: header size >= 2**32\n"); - } - my @pair = unpack($self->{unpack_code} . "*", $str); - # Since we know one of the pair is 0, it's fine to just add them. - @$slots = (0, $pair[0] + $pair[1]); - } - return $self; - } - - # Load more data when we access slots->get(X) which is not yet in memory. - sub overflow { - my ($self) = @_; - my $slots = $self->{slots}; - $self->{base} += $#$slots + 1; # skip over data we're replacing - my $str; - read($self->{file}, $str, $self->{stride}); - if ($address_length == 8) { # the 32-bit case - # This is the easy case: unpack provides 32-bit unpacking primitives. - @$slots = unpack($self->{unpack_code} . "*", $str); - } else { - # We need to unpack 32 bits at a time and combine. - my @b32_values = unpack($self->{unpack_code} . "*", $str); - my @b64_values = (); - for (my $i = 0; $i < $#b32_values; $i += 2) { - # TODO(csilvers): if this is a 32-bit perl, the math below - # could end up in a too-large int, which perl will promote - # to a double, losing necessary precision. Deal with that. - # Right now, we just die. - my ($lo, $hi) = ($b32_values[$i], $b32_values[$i+1]); - if ($self->{unpack_code} eq 'N') { # big-endian - ($lo, $hi) = ($hi, $lo); - } - my $value = $lo + $hi * (2**32); - if (!$self->{perl_is_64bit} && # check value is exactly represented - (($value % (2**32)) != $lo || int($value / (2**32)) != $hi)) { - ::error("Need a 64-bit perl to process this 64-bit profile.\n"); - } - push(@b64_values, $value); - } - @$slots = @b64_values; - } - } - - # Access the i-th long in the file (logically), or -1 at EOF. - sub get { - my ($self, $idx) = @_; - my $slots = $self->{slots}; - while ($#$slots >= 0) { - if ($idx < $self->{base}) { - # The only time we expect a reference to $slots[$i - something] - # after referencing $slots[$i] is reading the very first header. - # Since $stride > |header|, that shouldn't cause any lookback - # errors. And everything after the header is sequential. - print STDERR "Unexpected look-back reading CPU profile"; - return -1; # shrug, don't know what better to return - } elsif ($idx > $self->{base} + $#$slots) { - $self->overflow(); - } else { - return $slots->[$idx - $self->{base}]; - } - } - # If we get here, $slots is [], which means we've reached EOF - return -1; # unique since slots is supposed to hold unsigned numbers - } -} - -# Reads the top, 'header' section of a profile, and returns the last -# line of the header, commonly called a 'header line'. 
The header -# section of a profile consists of zero or more 'command' lines that -# are instructions to pprof, which pprof executes when reading the -# header. All 'command' lines start with a %. After the command -# lines is the 'header line', which is a profile-specific line that -# indicates what type of profile it is, and perhaps other global -# information about the profile. For instance, here's a header line -# for a heap profile: -# heap profile: 53: 38236 [ 5525: 1284029] @ heapprofile -# For historical reasons, the CPU profile does not contain a text- -# readable header line. If the profile looks like a CPU profile, -# this function returns "". If no header line could be found, this -# function returns undef. -# -# The following commands are recognized: -# %warn -- emit the rest of this line to stderr, prefixed by 'WARNING:' -# -# The input file should be in binmode. -sub ReadProfileHeader { - local *PROFILE = shift; - my $firstchar = ""; - my $line = ""; - read(PROFILE, $firstchar, 1); - seek(PROFILE, -1, 1); # unread the firstchar - if ($firstchar !~ /[[:print:]]/) { # is not a text character - return ""; - } - while (defined($line = )) { - $line =~ s/\r//g; # turn windows-looking lines into unix-looking lines - if ($line =~ /^%warn\s+(.*)/) { # 'warn' command - # Note this matches both '%warn blah\n' and '%warn\n'. - print STDERR "WARNING: $1\n"; # print the rest of the line - } elsif ($line =~ /^%/) { - print STDERR "Ignoring unknown command from profile header: $line"; - } else { - # End of commands, must be the header line. - return $line; - } - } - return undef; # got to EOF without seeing a header line -} - -sub IsSymbolizedProfileFile { - my $file_name = shift; - if (!(-e $file_name) || !(-r $file_name)) { - return 0; - } - # Check if the file contains a symbol-section marker. - open(TFILE, "<$file_name"); - binmode TFILE; - my $firstline = ReadProfileHeader(*TFILE); - close(TFILE); - if (!$firstline) { - return 0; - } - $SYMBOL_PAGE =~ m,[^/]+$,; # matches everything after the last slash - my $symbol_marker = $&; - return $firstline =~ /^--- *$symbol_marker/; -} - -# Parse profile generated by common/profiler.cc and return a reference -# to a map: -# $result->{version} Version number of profile file -# $result->{period} Sampling period (in microseconds) -# $result->{profile} Profile object -# $result->{map} Memory map info from profile -# $result->{pcs} Hash of all PC values seen, key is hex address -sub ReadProfile { - my $prog = shift; - my $fname = shift; - my $result; # return value - - $CONTENTION_PAGE =~ m,[^/]+$,; # matches everything after the last slash - my $contention_marker = $&; - $GROWTH_PAGE =~ m,[^/]+$,; # matches everything after the last slash - my $growth_marker = $&; - $SYMBOL_PAGE =~ m,[^/]+$,; # matches everything after the last slash - my $symbol_marker = $&; - $PROFILE_PAGE =~ m,[^/]+$,; # matches everything after the last slash - my $profile_marker = $&; - - # Look at first line to see if it is a heap or a CPU profile. - # CPU profile may start with no header at all, and just binary data - # (starting with \0\0\0\0) -- in that case, don't try to read the - # whole firstline, since it may be gigabytes(!) of data. 
- open(PROFILE, "<$fname") || error("$fname: $!\n"); - binmode PROFILE; # New perls do UTF-8 processing - my $header = ReadProfileHeader(*PROFILE); - if (!defined($header)) { # means "at EOF" - error("Profile is empty.\n"); - } - - my $symbols; - if ($header =~ m/^--- *$symbol_marker/o) { - # Verify that the user asked for a symbolized profile - if (!$main::use_symbolized_profile) { - # we have both a binary and symbolized profiles, abort - error("FATAL ERROR: Symbolized profile\n $fname\ncannot be used with " . - "a binary arg. Try again without passing\n $prog\n"); - } - # Read the symbol section of the symbolized profile file. - $symbols = ReadSymbols(*PROFILE{IO}); - # Read the next line to get the header for the remaining profile. - $header = ReadProfileHeader(*PROFILE) || ""; - } - - $main::profile_type = ''; - if ($header =~ m/^heap profile:.*$growth_marker/o) { - $main::profile_type = 'growth'; - $result = ReadHeapProfile($prog, *PROFILE, $header); - } elsif ($header =~ m/^heap profile:/) { - $main::profile_type = 'heap'; - $result = ReadHeapProfile($prog, *PROFILE, $header); - } elsif ($header =~ m/^--- *$contention_marker/o) { - $main::profile_type = 'contention'; - $result = ReadSynchProfile($prog, *PROFILE); - } elsif ($header =~ m/^--- *Stacks:/) { - print STDERR - "Old format contention profile: mistakenly reports " . - "condition variable signals as lock contentions.\n"; - $main::profile_type = 'contention'; - $result = ReadSynchProfile($prog, *PROFILE); - } elsif ($header =~ m/^--- *$profile_marker/) { - # the binary cpu profile data starts immediately after this line - $main::profile_type = 'cpu'; - $result = ReadCPUProfile($prog, $fname, *PROFILE); - } else { - if (defined($symbols)) { - # a symbolized profile contains a format we don't recognize, bail out - error("$fname: Cannot recognize profile section after symbols.\n"); - } - # no ascii header present -- must be a CPU profile - $main::profile_type = 'cpu'; - $result = ReadCPUProfile($prog, $fname, *PROFILE); - } - - close(PROFILE); - - # if we got symbols along with the profile, return those as well - if (defined($symbols)) { - $result->{symbols} = $symbols; - } - - return $result; -} - -# Subtract one from caller pc so we map back to call instr. -# However, don't do this if we're reading a symbolized profile -# file, in which case the subtract-one was done when the file -# was written. -# -# We apply the same logic to all readers, though ReadCPUProfile uses an -# independent implementation. -sub FixCallerAddresses { - my $stack = shift; - if ($main::use_symbolized_profile) { - return $stack; - } else { - $stack =~ /(\s)/; - my $delimiter = $1; - my @addrs = split(' ', $stack); - my @fixedaddrs; - $#fixedaddrs = $#addrs; - if ($#addrs >= 0) { - $fixedaddrs[0] = $addrs[0]; - } - for (my $i = 1; $i <= $#addrs; $i++) { - $fixedaddrs[$i] = AddressSub($addrs[$i], "0x1"); - } - return join $delimiter, @fixedaddrs; - } -} - -# CPU profile reader -sub ReadCPUProfile { - my $prog = shift; - my $fname = shift; # just used for logging - local *PROFILE = shift; - my $version; - my $period; - my $i; - my $profile = {}; - my $pcs = {}; - - # Parse string into array of slots. - my $slots = CpuProfileStream->new(*PROFILE, $fname); - - # Read header. 
The current header version is a 5-element structure - # containing: - # 0: header count (always 0) - # 1: header "words" (after this one: 3) - # 2: format version (0) - # 3: sampling period (usec) - # 4: unused padding (always 0) - if ($slots->get(0) != 0 ) { - error("$fname: not a profile file, or old format profile file\n"); - } - $i = 2 + $slots->get(1); - $version = $slots->get(2); - $period = $slots->get(3); - # Do some sanity checking on these header values. - if ($version > (2**32) || $period > (2**32) || $i > (2**32) || $i < 5) { - error("$fname: not a profile file, or corrupted profile file\n"); - } - - # Parse profile - while ($slots->get($i) != -1) { - my $n = $slots->get($i++); - my $d = $slots->get($i++); - if ($d > (2**16)) { # TODO(csilvers): what's a reasonable max-stack-depth? - my $addr = sprintf("0%o", $i * ($address_length == 8 ? 4 : 8)); - print STDERR "At index $i (address $addr):\n"; - error("$fname: stack trace depth >= 2**32\n"); - } - if ($slots->get($i) == 0) { - # End of profile data marker - $i += $d; - last; - } - - # Make key out of the stack entries - my @k = (); - for (my $j = 0; $j < $d; $j++) { - my $pc = $slots->get($i+$j); - # Subtract one from caller pc so we map back to call instr. - # However, don't do this if we're reading a symbolized profile - # file, in which case the subtract-one was done when the file - # was written. - if ($j > 0 && !$main::use_symbolized_profile) { - $pc--; - } - $pc = sprintf("%0*x", $address_length, $pc); - $pcs->{$pc} = 1; - push @k, $pc; - } - - AddEntry($profile, (join "\n", @k), $n); - $i += $d; - } - - # Parse map - my $map = ''; - seek(PROFILE, $i * 4, 0); - read(PROFILE, $map, (stat PROFILE)[7]); - - my $r = {}; - $r->{version} = $version; - $r->{period} = $period; - $r->{profile} = $profile; - $r->{libs} = ParseLibraries($prog, $map, $pcs); - $r->{pcs} = $pcs; - - return $r; -} - -sub ReadHeapProfile { - my $prog = shift; - local *PROFILE = shift; - my $header = shift; - - my $index = 1; - if ($main::opt_inuse_space) { - $index = 1; - } elsif ($main::opt_inuse_objects) { - $index = 0; - } elsif ($main::opt_alloc_space) { - $index = 3; - } elsif ($main::opt_alloc_objects) { - $index = 2; - } - - # Find the type of this profile. The header line looks like: - # heap profile: 1246: 8800744 [ 1246: 8800744] @ /266053 - # There are two pairs , the first inuse objects/space, and the - # second allocated objects/space. This is followed optionally by a profile - # type, and if that is present, optionally by a sampling frequency. - # For remote heap profiles (v1): - # The interpretation of the sampling frequency is that the profiler, for - # each sample, calculates a uniformly distributed random integer less than - # the given value, and records the next sample after that many bytes have - # been allocated. Therefore, the expected sample interval is half of the - # given frequency. By default, if not specified, the expected sample - # interval is 128KB. Only remote-heap-page profiles are adjusted for - # sample size. - # For remote heap profiles (v2): - # The sampling frequency is the rate of a Poisson process. This means that - # the probability of sampling an allocation of size X with sampling rate Y - # is 1 - exp(-X/Y) - # For version 2, a typical header line might look like this: - # heap profile: 1922: 127792360 [ 1922: 127792360] @ _v2/524288 - # the trailing number (524288) is the sampling rate. 
(Version 1 showed - # double the 'rate' here) - my $sampling_algorithm = 0; - my $sample_adjustment = 0; - chomp($header); - my $type = "unknown"; - if ($header =~ m"^heap profile:\s*(\d+):\s+(\d+)\s+\[\s*(\d+):\s+(\d+)\](\s*@\s*([^/]*)(/(\d+))?)?") { - if (defined($6) && ($6 ne '')) { - $type = $6; - my $sample_period = $8; - # $type is "heapprofile" for profiles generated by the - # heap-profiler, and either "heap" or "heap_v2" for profiles - # generated by sampling directly within tcmalloc. It can also - # be "growth" for heap-growth profiles. The first is typically - # found for profiles generated locally, and the others for - # remote profiles. - if (($type eq "heapprofile") || ($type !~ /heap/) ) { - # No need to adjust for the sampling rate with heap-profiler-derived data - $sampling_algorithm = 0; - } elsif ($type =~ /_v2/) { - $sampling_algorithm = 2; # version 2 sampling - if (defined($sample_period) && ($sample_period ne '')) { - $sample_adjustment = int($sample_period); - } - } else { - $sampling_algorithm = 1; # version 1 sampling - if (defined($sample_period) && ($sample_period ne '')) { - $sample_adjustment = int($sample_period)/2; - } - } - } else { - # We detect whether or not this is a remote-heap profile by checking - # that the total-allocated stats ($n2,$s2) are exactly the - # same as the in-use stats ($n1,$s1). It is remotely conceivable - # that a non-remote-heap profile may pass this check, but it is hard - # to imagine how that could happen. - # In this case it's so old it's guaranteed to be remote-heap version 1. - my ($n1, $s1, $n2, $s2) = ($1, $2, $3, $4); - if (($n1 == $n2) && ($s1 == $s2)) { - # This is likely to be a remote-heap based sample profile - $sampling_algorithm = 1; - } - } - } - - if ($sampling_algorithm > 0) { - # For remote-heap generated profiles, adjust the counts and sizes to - # account for the sample rate (we sample once every 128KB by default). - if ($sample_adjustment == 0) { - # Turn on profile adjustment. - $sample_adjustment = 128*1024; - print STDERR "Adjusting heap profiles for 1-in-128KB sampling rate\n"; - } else { - printf STDERR ("Adjusting heap profiles for 1-in-%d sampling rate\n", - $sample_adjustment); - } - if ($sampling_algorithm > 1) { - # We don't bother printing anything for the original version (version 1) - printf STDERR "Heap version $sampling_algorithm\n"; - } - } - - my $profile = {}; - my $pcs = {}; - my $map = ""; - - while () { - s/\r//g; # turn windows-looking lines into unix-looking lines - if (/^MAPPED_LIBRARIES:/) { - # Read the /proc/self/maps data - while () { - s/\r//g; # turn windows-looking lines into unix-looking lines - $map .= $_; - } - last; - } - - if (/^--- Memory map:/) { - # Read /proc/self/maps data as formatted by DumpAddressMap() - my $buildvar = ""; - while () { - s/\r//g; # turn windows-looking lines into unix-looking lines - # Parse "build=" specification if supplied - if (m/^\s*build=(.*)\n/) { - $buildvar = $1; - } - - # Expand "$build" variable if available - $_ =~ s/\$build\b/$buildvar/g; - - $map .= $_; - } - last; - } - - # Read entry of the form: - # : [: ] @ a1 a2 a3 ... an - s/^\s*//; - s/\s*$//; - if (m/^\s*(\d+):\s+(\d+)\s+\[\s*(\d+):\s+(\d+)\]\s+@\s+(.*)$/) { - my $stack = $5; - my ($n1, $s1, $n2, $s2) = ($1, $2, $3, $4); - - if ($sample_adjustment) { - if ($sampling_algorithm == 2) { - # Remote-heap version 2 - # The sampling frequency is the rate of a Poisson process. 
- # This means that the probability of sampling an allocation of - # size X with sampling rate Y is 1 - exp(-X/Y) - if ($n1 != 0) { - my $ratio = (($s1*1.0)/$n1)/($sample_adjustment); - my $scale_factor = 1/(1 - exp(-$ratio)); - $n1 *= $scale_factor; - $s1 *= $scale_factor; - } - if ($n2 != 0) { - my $ratio = (($s2*1.0)/$n2)/($sample_adjustment); - my $scale_factor = 1/(1 - exp(-$ratio)); - $n2 *= $scale_factor; - $s2 *= $scale_factor; - } - } else { - # Remote-heap version 1 - my $ratio; - $ratio = (($s1*1.0)/$n1)/($sample_adjustment); - if ($ratio < 1) { - $n1 /= $ratio; - $s1 /= $ratio; - } - $ratio = (($s2*1.0)/$n2)/($sample_adjustment); - if ($ratio < 1) { - $n2 /= $ratio; - $s2 /= $ratio; - } - } - } - - my @counts = ($n1, $s1, $n2, $s2); - AddEntries($profile, $pcs, FixCallerAddresses($stack), $counts[$index]); - } - } - - my $r = {}; - $r->{version} = "heap"; - $r->{period} = 1; - $r->{profile} = $profile; - $r->{libs} = ParseLibraries($prog, $map, $pcs); - $r->{pcs} = $pcs; - return $r; -} - -sub ReadSynchProfile { - my $prog = shift; - local *PROFILE = shift; - my $header = shift; - - my $map = ''; - my $profile = {}; - my $pcs = {}; - my $sampling_period = 1; - my $cyclespernanosec = 2.8; # Default assumption for old binaries - my $seen_clockrate = 0; - my $line; - - my $index = 0; - if ($main::opt_total_delay) { - $index = 0; - } elsif ($main::opt_contentions) { - $index = 1; - } elsif ($main::opt_mean_delay) { - $index = 2; - } - - while ( $line = ) { - $line =~ s/\r//g; # turn windows-looking lines into unix-looking lines - if ( $line =~ /^\s*(\d+)\s+(\d+) \@\s*(.*?)\s*$/ ) { - my ($cycles, $count, $stack) = ($1, $2, $3); - - # Convert cycles to nanoseconds - $cycles /= $cyclespernanosec; - - # Adjust for sampling done by application - $cycles *= $sampling_period; - $count *= $sampling_period; - - my @values = ($cycles, $count, $cycles / $count); - AddEntries($profile, $pcs, FixCallerAddresses($stack), $values[$index]); - - } elsif ( $line =~ /^(slow release).*thread \d+ \@\s*(.*?)\s*$/ || - $line =~ /^\s*(\d+) \@\s*(.*?)\s*$/ ) { - my ($cycles, $stack) = ($1, $2); - if ($cycles !~ /^\d+$/) { - next; - } - - # Convert cycles to nanoseconds - $cycles /= $cyclespernanosec; - - # Adjust for sampling done by application - $cycles *= $sampling_period; - - AddEntries($profile, $pcs, FixCallerAddresses($stack), $cycles); - - } elsif ( $line =~ m/^([a-z][^=]*)=(.*)$/ ) { - my ($variable, $value) = ($1,$2); - for ($variable, $value) { - s/^\s+//; - s/\s+$//; - } - if ($variable eq "cycles/second") { - $cyclespernanosec = $value / 1e9; - $seen_clockrate = 1; - } elsif ($variable eq "sampling period") { - $sampling_period = $value; - } elsif ($variable eq "ms since reset") { - # Currently nothing is done with this value in pprof - # So we just silently ignore it for now - } elsif ($variable eq "discarded samples") { - # Currently nothing is done with this value in pprof - # So we just silently ignore it for now - } else { - printf STDERR ("Ignoring unnknown variable in /contention output: " . 
- "'%s' = '%s'\n",$variable,$value); - } - } else { - # Memory map entry - $map .= $line; - } - } - - if (!$seen_clockrate) { - printf STDERR ("No cycles/second entry in profile; Guessing %.1f GHz\n", - $cyclespernanosec); - } - - my $r = {}; - $r->{version} = 0; - $r->{period} = $sampling_period; - $r->{profile} = $profile; - $r->{libs} = ParseLibraries($prog, $map, $pcs); - $r->{pcs} = $pcs; - return $r; -} - -# Given a hex value in the form "0x1abcd" or "1abcd", return either -# "0001abcd" or "000000000001abcd", depending on the current (global) -# address length. -sub HexExtend { - my $addr = shift; - - $addr =~ s/^(0x)?0*//; - my $zeros_needed = $address_length - length($addr); - if ($zeros_needed < 0) { - printf STDERR "Warning: address $addr is longer than address length $address_length\n"; - return $addr; - } - return ("0" x $zeros_needed) . $addr; -} - -##### Symbol extraction ##### - -# Aggressively search the lib_prefix values for the given library -# If all else fails, just return the name of the library unmodified. -# If the lib_prefix is "/my/path,/other/path" and $file is "/lib/dir/mylib.so" -# it will search the following locations in this order, until it finds a file: -# /my/path/lib/dir/mylib.so -# /other/path/lib/dir/mylib.so -# /my/path/dir/mylib.so -# /other/path/dir/mylib.so -# /my/path/mylib.so -# /other/path/mylib.so -# /lib/dir/mylib.so (returned as last resort) -sub FindLibrary { - my $file = shift; - my $suffix = $file; - - # Search for the library as described above - do { - foreach my $prefix (@prefix_list) { - my $fullpath = $prefix . $suffix; - if (-e $fullpath) { - return $fullpath; - } - } - } while ($suffix =~ s|^/[^/]+/|/|); - return $file; -} - -# Return path to library with debugging symbols. -# For libc libraries, the copy in /usr/lib/debug contains debugging symbols -sub DebuggingLibrary { - my $file = shift; - if ($file =~ m|^/|) { - if (-f "/usr/lib/debug$file") { - return "/usr/lib/debug$file"; - } elsif (-f "/usr/lib/debug$file.debug") { - return "/usr/lib/debug$file.debug"; - } - } - return undef; -} - -# Parse text section header of a library using objdump -sub ParseTextSectionHeaderFromObjdump { - my $lib = shift; - - my $size = undef; - my $vma; - my $file_offset; - # Get objdump output from the library file to figure out how to - # map between mapped addresses and addresses in the library. - my $cmd = ShellEscape($obj_tool_map{"objdump"}, "-h", $lib); - open(OBJDUMP, "$cmd |") || error("$cmd: $!\n"); - while () { - s/\r//g; # turn windows-looking lines into unix-looking lines - # Idx Name Size VMA LMA File off Algn - # 10 .text 00104b2c 420156f0 420156f0 000156f0 2**4 - # For 64-bit objects, VMA and LMA will be 16 hex digits, size and file - # offset may still be 8. But AddressSub below will still handle that. - my @x = split; - if (($#x >= 6) && ($x[1] eq '.text')) { - $size = $x[2]; - $vma = $x[3]; - $file_offset = $x[5]; - last; - } - } - close(OBJDUMP); - - if (!defined($size)) { - return undef; - } - - my $r = {}; - $r->{size} = $size; - $r->{vma} = $vma; - $r->{file_offset} = $file_offset; - - return $r; -} - -# Parse text section header of a library using otool (on OS X) -sub ParseTextSectionHeaderFromOtool { - my $lib = shift; - - my $size = undef; - my $vma = undef; - my $file_offset = undef; - # Get otool output from the library file to figure out how to - # map between mapped addresses and addresses in the library. 
- my $command = ShellEscape($obj_tool_map{"otool"}, "-l", $lib); - open(OTOOL, "$command |") || error("$command: $!\n"); - my $cmd = ""; - my $sectname = ""; - my $segname = ""; - foreach my $line () { - $line =~ s/\r//g; # turn windows-looking lines into unix-looking lines - # Load command <#> - # cmd LC_SEGMENT - # [...] - # Section - # sectname __text - # segname __TEXT - # addr 0x000009f8 - # size 0x00018b9e - # offset 2552 - # align 2^2 (4) - # We will need to strip off the leading 0x from the hex addresses, - # and convert the offset into hex. - if ($line =~ /Load command/) { - $cmd = ""; - $sectname = ""; - $segname = ""; - } elsif ($line =~ /Section/) { - $sectname = ""; - $segname = ""; - } elsif ($line =~ /cmd (\w+)/) { - $cmd = $1; - } elsif ($line =~ /sectname (\w+)/) { - $sectname = $1; - } elsif ($line =~ /segname (\w+)/) { - $segname = $1; - } elsif (!(($cmd eq "LC_SEGMENT" || $cmd eq "LC_SEGMENT_64") && - $sectname eq "__text" && - $segname eq "__TEXT")) { - next; - } elsif ($line =~ /\baddr 0x([0-9a-fA-F]+)/) { - $vma = $1; - } elsif ($line =~ /\bsize 0x([0-9a-fA-F]+)/) { - $size = $1; - } elsif ($line =~ /\boffset ([0-9]+)/) { - $file_offset = sprintf("%016x", $1); - } - if (defined($vma) && defined($size) && defined($file_offset)) { - last; - } - } - close(OTOOL); - - if (!defined($vma) || !defined($size) || !defined($file_offset)) { - return undef; - } - - my $r = {}; - $r->{size} = $size; - $r->{vma} = $vma; - $r->{file_offset} = $file_offset; - - return $r; -} - -sub ParseTextSectionHeader { - # obj_tool_map("otool") is only defined if we're in a Mach-O environment - if (defined($obj_tool_map{"otool"})) { - my $r = ParseTextSectionHeaderFromOtool(@_); - if (defined($r)){ - return $r; - } - } - # If otool doesn't work, or we don't have it, fall back to objdump - return ParseTextSectionHeaderFromObjdump(@_); -} - -# Split /proc/pid/maps dump into a list of libraries -sub ParseLibraries { - return if $main::use_symbol_page; # We don't need libraries info. - my $prog = shift; - my $map = shift; - my $pcs = shift; - - my $result = []; - my $h = "[a-f0-9]+"; - my $zero_offset = HexExtend("0"); - - my $buildvar = ""; - foreach my $l (split("\n", $map)) { - if ($l =~ m/^\s*build=(.*)$/) { - $buildvar = $1; - } - - my $start; - my $finish; - my $offset; - my $lib; - if ($l =~ /^($h)-($h)\s+..x.\s+($h)\s+\S+:\S+\s+\d+\s+(\S+\.(so|dll|dylib|bundle)((\.\d+)+\w*(\.\d+){0,3})?)$/i) { - # Full line from /proc/self/maps. Example: - # 40000000-40015000 r-xp 00000000 03:01 12845071 /lib/ld-2.3.2.so - $start = HexExtend($1); - $finish = HexExtend($2); - $offset = HexExtend($3); - $lib = $4; - $lib =~ s|\\|/|g; # turn windows-style paths into unix-style paths - } elsif ($l =~ /^\s*($h)-($h):\s*(\S+\.so(\.\d+)*)/) { - # Cooked line from DumpAddressMap. 
Example: - # 40000000-40015000: /lib/ld-2.3.2.so - $start = HexExtend($1); - $finish = HexExtend($2); - $offset = $zero_offset; - $lib = $3; - } - # FreeBSD 10.0 virtual memory map /proc/curproc/map as defined in - # function procfs_doprocmap (sys/fs/procfs/procfs_map.c) - # - # Example: - # 0x800600000 0x80061a000 26 0 0xfffff800035a0000 r-x 75 33 0x1004 COW NC vnode /libexec/ld-elf.s - # o.1 NCH -1 - elsif ($l =~ /^(0x$h)\s(0x$h)\s\d+\s\d+\s0x$h\sr-x\s\d+\s\d+\s0x\d+\s(COW|NCO)\s(NC|NNC)\svnode\s(\S+\.so(\.\d+)*)/) { - $start = HexExtend($1); - $finish = HexExtend($2); - $offset = $zero_offset; - $lib = FindLibrary($5); - - } else { - next; - } - - # Expand "$build" variable if available - $lib =~ s/\$build\b/$buildvar/g; - - $lib = FindLibrary($lib); - - # Check for pre-relocated libraries, which use pre-relocated symbol tables - # and thus require adjusting the offset that we'll use to translate - # VM addresses into symbol table addresses. - # Only do this if we're not going to fetch the symbol table from a - # debugging copy of the library. - if (!DebuggingLibrary($lib)) { - my $text = ParseTextSectionHeader($lib); - if (defined($text)) { - my $vma_offset = AddressSub($text->{vma}, $text->{file_offset}); - $offset = AddressAdd($offset, $vma_offset); - } - } - - if($main::opt_debug) { printf STDERR "$start:$finish ($offset) $lib\n"; } - push(@{$result}, [$lib, $start, $finish, $offset]); - } - - # Append special entry for additional library (not relocated) - if ($main::opt_lib ne "") { - my $text = ParseTextSectionHeader($main::opt_lib); - if (defined($text)) { - my $start = $text->{vma}; - my $finish = AddressAdd($start, $text->{size}); - - push(@{$result}, [$main::opt_lib, $start, $finish, $start]); - } - } - - # Append special entry for the main program. This covers - # 0..max_pc_value_seen, so that we assume pc values not found in one - # of the library ranges will be treated as coming from the main - # program binary. - my $min_pc = HexExtend("0"); - my $max_pc = $min_pc; # find the maximal PC value in any sample - foreach my $pc (keys(%{$pcs})) { - if (HexExtend($pc) gt $max_pc) { $max_pc = HexExtend($pc); } - } - push(@{$result}, [$prog, $min_pc, $max_pc, $zero_offset]); - - return $result; -} - -# Add two hex addresses of length $address_length. -# Run pprof --test for unit test if this is changed. -sub AddressAdd { - my $addr1 = shift; - my $addr2 = shift; - my $sum; - - if ($address_length == 8) { - # Perl doesn't cope with wraparound arithmetic, so do it explicitly: - $sum = (hex($addr1)+hex($addr2)) % (0x10000000 * 16); - return sprintf("%08x", $sum); - - } else { - # Do the addition in 7-nibble chunks to trivialize carry handling. - - if ($main::opt_debug and $main::opt_test) { - print STDERR "AddressAdd $addr1 + $addr2 = "; - } - - my $a1 = substr($addr1,-7); - $addr1 = substr($addr1,0,-7); - my $a2 = substr($addr2,-7); - $addr2 = substr($addr2,0,-7); - $sum = hex($a1) + hex($a2); - my $c = 0; - if ($sum > 0xfffffff) { - $c = 1; - $sum -= 0x10000000; - } - my $r = sprintf("%07x", $sum); - - $a1 = substr($addr1,-7); - $addr1 = substr($addr1,0,-7); - $a2 = substr($addr2,-7); - $addr2 = substr($addr2,0,-7); - $sum = hex($a1) + hex($a2) + $c; - $c = 0; - if ($sum > 0xfffffff) { - $c = 1; - $sum -= 0x10000000; - } - $r = sprintf("%07x", $sum) . $r; - - $sum = hex($addr1) + hex($addr2) + $c; - if ($sum > 0xff) { $sum -= 0x100; } - $r = sprintf("%02x", $sum) . 
$r; - - if ($main::opt_debug and $main::opt_test) { print STDERR "$r\n"; } - - return $r; - } -} - - -# Subtract two hex addresses of length $address_length. -# Run pprof --test for unit test if this is changed. -sub AddressSub { - my $addr1 = shift; - my $addr2 = shift; - my $diff; - - if ($address_length == 8) { - # Perl doesn't cope with wraparound arithmetic, so do it explicitly: - $diff = (hex($addr1)-hex($addr2)) % (0x10000000 * 16); - return sprintf("%08x", $diff); - - } else { - # Do the addition in 7-nibble chunks to trivialize borrow handling. - # if ($main::opt_debug) { print STDERR "AddressSub $addr1 - $addr2 = "; } - - my $a1 = hex(substr($addr1,-7)); - $addr1 = substr($addr1,0,-7); - my $a2 = hex(substr($addr2,-7)); - $addr2 = substr($addr2,0,-7); - my $b = 0; - if ($a2 > $a1) { - $b = 1; - $a1 += 0x10000000; - } - $diff = $a1 - $a2; - my $r = sprintf("%07x", $diff); - - $a1 = hex(substr($addr1,-7)); - $addr1 = substr($addr1,0,-7); - $a2 = hex(substr($addr2,-7)) + $b; - $addr2 = substr($addr2,0,-7); - $b = 0; - if ($a2 > $a1) { - $b = 1; - $a1 += 0x10000000; - } - $diff = $a1 - $a2; - $r = sprintf("%07x", $diff) . $r; - - $a1 = hex($addr1); - $a2 = hex($addr2) + $b; - if ($a2 > $a1) { $a1 += 0x100; } - $diff = $a1 - $a2; - $r = sprintf("%02x", $diff) . $r; - - # if ($main::opt_debug) { print STDERR "$r\n"; } - - return $r; - } -} - -# Increment a hex addresses of length $address_length. -# Run pprof --test for unit test if this is changed. -sub AddressInc { - my $addr = shift; - my $sum; - - if ($address_length == 8) { - # Perl doesn't cope with wraparound arithmetic, so do it explicitly: - $sum = (hex($addr)+1) % (0x10000000 * 16); - return sprintf("%08x", $sum); - - } else { - # Do the addition in 7-nibble chunks to trivialize carry handling. - # We are always doing this to step through the addresses in a function, - # and will almost never overflow the first chunk, so we check for this - # case and exit early. - - # if ($main::opt_debug) { print STDERR "AddressInc $addr1 = "; } - - my $a1 = substr($addr,-7); - $addr = substr($addr,0,-7); - $sum = hex($a1) + 1; - my $r = sprintf("%07x", $sum); - if ($sum <= 0xfffffff) { - $r = $addr . $r; - # if ($main::opt_debug) { print STDERR "$r\n"; } - return HexExtend($r); - } else { - $r = "0000000"; - } - - $a1 = substr($addr,-7); - $addr = substr($addr,0,-7); - $sum = hex($a1) + 1; - $r = sprintf("%07x", $sum) . $r; - if ($sum <= 0xfffffff) { - $r = $addr . $r; - # if ($main::opt_debug) { print STDERR "$r\n"; } - return HexExtend($r); - } else { - $r = "00000000000000"; - } - - $sum = hex($addr) + 1; - if ($sum > 0xff) { $sum -= 0x100; } - $r = sprintf("%02x", $sum) . $r; - - # if ($main::opt_debug) { print STDERR "$r\n"; } - return $r; - } -} - -# Extract symbols for all PC values found in profile -sub ExtractSymbols { - my $libs = shift; - my $pcset = shift; - - my $symbols = {}; - - # Map each PC value to the containing library. To make this faster, - # we sort libraries by their starting pc value (highest first), and - # advance through the libraries as we advance the pc. Sometimes the - # addresses of libraries may overlap with the addresses of the main - # binary, so to make sure the libraries 'win', we iterate over the - # libraries in reverse order (which assumes the binary doesn't start - # in the middle of a library, which seems a fair assumption). 
- my @pcs = (sort { $a cmp $b } keys(%{$pcset})); # pcset is 0-extended strings - foreach my $lib (sort {$b->[1] cmp $a->[1]} @{$libs}) { - my $libname = $lib->[0]; - my $start = $lib->[1]; - my $finish = $lib->[2]; - my $offset = $lib->[3]; - - # Use debug library if it exists - my $debug_libname = DebuggingLibrary($libname); - if ($debug_libname) { - $libname = $debug_libname; - } - - # Get list of pcs that belong in this library. - my $contained = []; - my ($start_pc_index, $finish_pc_index); - # Find smallest finish_pc_index such that $finish < $pc[$finish_pc_index]. - for ($finish_pc_index = $#pcs + 1; $finish_pc_index > 0; - $finish_pc_index--) { - last if $pcs[$finish_pc_index - 1] le $finish; - } - # Find smallest start_pc_index such that $start <= $pc[$start_pc_index]. - for ($start_pc_index = $finish_pc_index; $start_pc_index > 0; - $start_pc_index--) { - last if $pcs[$start_pc_index - 1] lt $start; - } - # This keeps PC values higher than $pc[$finish_pc_index] in @pcs, - # in case there are overlaps in libraries and the main binary. - @{$contained} = splice(@pcs, $start_pc_index, - $finish_pc_index - $start_pc_index); - # Map to symbols - MapToSymbols($libname, AddressSub($start, $offset), $contained, $symbols); - } - - return $symbols; -} - -# Map list of PC values to symbols for a given image -sub MapToSymbols { - my $image = shift; - my $offset = shift; - my $pclist = shift; - my $symbols = shift; - - my $debug = 0; - - # Ignore empty binaries - if ($#{$pclist} < 0) { return; } - - # Figure out the addr2line command to use - my $addr2line = $obj_tool_map{"addr2line"}; - my $cmd = ShellEscape($addr2line, "-f", "-C", "-e", $image); - if (exists $obj_tool_map{"addr2line_pdb"}) { - $addr2line = $obj_tool_map{"addr2line_pdb"}; - $cmd = ShellEscape($addr2line, "--demangle", "-f", "-C", "-e", $image); - } - - # If "addr2line" isn't installed on the system at all, just use - # nm to get what info we can (function names, but not line numbers). - if (system(ShellEscape($addr2line, "--help") . " >$dev_null 2>&1") != 0) { - MapSymbolsWithNM($image, $offset, $pclist, $symbols); - return; - } - - # "addr2line -i" can produce a variable number of lines per input - # address, with no separator that allows us to tell when data for - # the next address starts. So we find the address for a special - # symbol (_fini) and interleave this address between all real - # addresses passed to addr2line. The name of this special symbol - # can then be used as a separator. - $sep_address = undef; # May be filled in by MapSymbolsWithNM() - my $nm_symbols = {}; - MapSymbolsWithNM($image, $offset, $pclist, $nm_symbols); - if (defined($sep_address)) { - # Only add " -i" to addr2line if the binary supports it. - # addr2line --help returns 0, but not if it sees an unknown flag first. - if (system("$cmd -i --help >$dev_null 2>&1") == 0) { - $cmd .= " -i"; - } else { - $sep_address = undef; # no need for sep_address if we don't support -i - } - } - - # Make file with all PC values with intervening 'sep_address' so - # that we can reliably detect the end of inlined function list - open(ADDRESSES, ">$main::tmpfile_sym") || error("$main::tmpfile_sym: $!\n"); - if ($debug) { print("---- $image ---\n"); } - for (my $i = 0; $i <= $#{$pclist}; $i++) { - # addr2line always reads hex addresses, and does not need '0x' prefix. 
- if ($debug) { printf STDERR ("%s\n", $pclist->[$i]); } - printf ADDRESSES ("%s\n", AddressSub($pclist->[$i], $offset)); - if (defined($sep_address)) { - printf ADDRESSES ("%s\n", $sep_address); - } - } - close(ADDRESSES); - if ($debug) { - print("----\n"); - system("cat", $main::tmpfile_sym); - print("----\n"); - system("$cmd < " . ShellEscape($main::tmpfile_sym)); - print("----\n"); - } - - open(SYMBOLS, "$cmd <" . ShellEscape($main::tmpfile_sym) . " |") - || error("$cmd: $!\n"); - my $count = 0; # Index in pclist - while (<SYMBOLS>) { - # Read fullfunction and filelineinfo from next pair of lines - s/\r?\n$//g; - my $fullfunction = $_; - $_ = <SYMBOLS>; - s/\r?\n$//g; - my $filelinenum = $_; - - if (defined($sep_address) && $fullfunction eq $sep_symbol) { - # Terminating marker for data for this address - $count++; - next; - } - - $filelinenum =~ s|\\|/|g; # turn windows-style paths into unix-style paths - - my $pcstr = $pclist->[$count]; - my $function = ShortFunctionName($fullfunction); - my $nms = $nm_symbols->{$pcstr}; - if (defined($nms)) { - if ($fullfunction eq '??') { - # nm found a symbol for us. - $function = $nms->[0]; - $fullfunction = $nms->[2]; - } else { - # MapSymbolsWithNM tags each routine with its starting address, - # useful in case the image has multiple occurrences of this - # routine. (It uses a syntax that resembles template paramters, - # that are automatically stripped out by ShortFunctionName().) - # addr2line does not provide the same information. So we check - # if nm disambiguated our symbol, and if so take the annotated - # (nm) version of the routine-name. TODO(csilvers): this won't - # catch overloaded, inlined symbols, which nm doesn't see. - # Better would be to do a check similar to nm's, in this fn. - if ($nms->[2] =~ m/^\Q$function\E/) { # sanity check it's the right fn - $function = $nms->[0]; - $fullfunction = $nms->[2]; - } - } - } - - # Prepend to accumulated symbols for pcstr - # (so that caller comes before callee) - my $sym = $symbols->{$pcstr}; - if (!defined($sym)) { - $sym = []; - $symbols->{$pcstr} = $sym; - } - unshift(@{$sym}, $function, $filelinenum, $fullfunction); - if ($debug) { printf STDERR ("%s => [%s]\n", $pcstr, join(" ", @{$sym})); } - if (!defined($sep_address)) { - # Inlining is off, so this entry ends immediately - $count++; - } - } - close(SYMBOLS); -} - -# Use nm to map the list of referenced PCs to symbols. Return true iff we -# are able to read procedure information via nm. -sub MapSymbolsWithNM { - my $image = shift; - my $offset = shift; - my $pclist = shift; - my $symbols = shift; - - # Get nm output sorted by increasing address - my $symbol_table = GetProcedureBoundaries($image, "."); - if (!%{$symbol_table}) { - return 0; - } - # Start addresses are already the right length (8 or 16 hex digits). - my @names = sort { $symbol_table->{$a}->[0] cmp $symbol_table->{$b}->[0] } - keys(%{$symbol_table}); - - if ($#names < 0) { - # No symbols: just use addresses - foreach my $pc (@{$pclist}) { - my $pcstr = "0x" . 
$pc; - $symbols->{$pc} = [$pcstr, "?", $pcstr]; - } - return 0; - } - - # Sort addresses so we can do a join against nm output - my $index = 0; - my $fullname = $names[0]; - my $name = ShortFunctionName($fullname); - foreach my $pc (sort { $a cmp $b } @{$pclist}) { - # Adjust for mapped offset - my $mpc = AddressSub($pc, $offset); - while (($index < $#names) && ($mpc ge $symbol_table->{$fullname}->[1])){ - $index++; - $fullname = $names[$index]; - $name = ShortFunctionName($fullname); - } - if ($mpc lt $symbol_table->{$fullname}->[1]) { - $symbols->{$pc} = [$name, "?", $fullname]; - } else { - my $pcstr = "0x" . $pc; - $symbols->{$pc} = [$pcstr, "?", $pcstr]; - } - } - return 1; -} - -sub ShortFunctionName { - my $function = shift; - while ($function =~ s/\([^()]*\)(\s*const)?//g) { } # Argument types - while ($function =~ s/<[^<>]*>//g) { } # Remove template arguments - $function =~ s/^.*\s+(\w+::)/$1/; # Remove leading type - return $function; -} - -# Trim overly long symbols found in disassembler output -sub CleanDisassembly { - my $d = shift; - while ($d =~ s/\([^()%]*\)(\s*const)?//g) { } # Argument types, not (%rax) - while ($d =~ s/(\w+)<[^<>]*>/$1/g) { } # Remove template arguments - return $d; -} - -# Clean file name for display -sub CleanFileName { - my ($f) = @_; - $f =~ s|^/proc/self/cwd/||; - $f =~ s|^\./||; - return $f; -} - -# Make address relative to section and clean up for display -sub UnparseAddress { - my ($offset, $address) = @_; - $address = AddressSub($address, $offset); - $address =~ s/^0x//; - $address =~ s/^0*//; - return $address; -} - -##### Miscellaneous ##### - -# Find the right versions of the above object tools to use. The -# argument is the program file being analyzed, and should be an ELF -# 32-bit or ELF 64-bit executable file. The location of the tools -# is determined by considering the following options in this order: -# 1) --tools option, if set -# 2) PPROF_TOOLS environment variable, if set -# 3) the environment -sub ConfigureObjTools { - my $prog_file = shift; - - # Check for the existence of $prog_file because /usr/bin/file does not - # predictably return error status in prod. - (-e $prog_file) || error("$prog_file does not exist.\n"); - - my $file_type = undef; - if (-e "/usr/bin/file") { - # Follow symlinks (at least for systems where "file" supports that). - my $escaped_prog_file = ShellEscape($prog_file); - $file_type = `/usr/bin/file -L $escaped_prog_file 2>$dev_null || - /usr/bin/file $escaped_prog_file`; - } elsif ($^O == "MSWin32") { - $file_type = "MS Windows"; - } else { - print STDERR "WARNING: Can't determine the file type of $prog_file"; - } - - if ($file_type =~ /64-bit/) { - # Change $address_length to 16 if the program file is ELF 64-bit. - # We can't detect this from many (most?) heap or lock contention - # profiles, since the actual addresses referenced are generally in low - # memory even for 64-bit programs. - $address_length = 16; - } - - if ($file_type =~ /MS Windows/) { - # For windows, we provide a version of nm and addr2line as part of - # the opensource release, which is capable of parsing - # Windows-style PDB executables. It should live in the path, or - # in the same directory as pprof. - $obj_tool_map{"nm_pdb"} = "nm-pdb"; - $obj_tool_map{"addr2line_pdb"} = "addr2line-pdb"; - } - - if ($file_type =~ /Mach-O/) { - # OS X uses otool to examine Mach-O files, rather than objdump. 
- $obj_tool_map{"otool"} = "otool"; - $obj_tool_map{"addr2line"} = "false"; # no addr2line - $obj_tool_map{"objdump"} = "false"; # no objdump - } - - # Go fill in %obj_tool_map with the pathnames to use: - foreach my $tool (keys %obj_tool_map) { - $obj_tool_map{$tool} = ConfigureTool($obj_tool_map{$tool}); - } -} - -# Returns the path of a caller-specified object tool. If --tools or -# PPROF_TOOLS are specified, then returns the full path to the tool -# with that prefix. Otherwise, returns the path unmodified (which -# means we will look for it on PATH). -sub ConfigureTool { - my $tool = shift; - my $path; - - # --tools (or $PPROF_TOOLS) is a comma separated list, where each - # item is either a) a pathname prefix, or b) a map of the form - # <tool>:<path>. First we look for an entry of type (b) for our - # tool. If one is found, we use it. Otherwise, we consider all the - # pathname prefixes in turn, until one yields an existing file. If - # none does, we use a default path. - my $tools = $main::opt_tools || $ENV{"PPROF_TOOLS"} || ""; - if ($tools =~ m/(,|^)\Q$tool\E:([^,]*)/) { - $path = $2; - # TODO(csilvers): sanity-check that $path exists? Hard if it's relative. - } elsif ($tools ne '') { - foreach my $prefix (split(',', $tools)) { - next if ($prefix =~ /:/); # ignore "tool:fullpath" entries in the list - if (-x $prefix . $tool) { - $path = $prefix . $tool; - last; - } - } - if (!$path) { - error("No '$tool' found with prefix specified by " . - "--tools (or \$PPROF_TOOLS) '$tools'\n"); - } - } else { - # ... otherwise use the version that exists in the same directory as - # pprof. If there's nothing there, use $PATH. - $0 =~ m,[^/]*$,; # this is everything after the last slash - my $dirname = $`; # this is everything up to and including the last slash - if (-x "$dirname$tool") { - $path = "$dirname$tool"; - } else { - $path = $tool; - } - } - if ($main::opt_debug) { print STDERR "Using '$path' for '$tool'.\n"; } - return $path; -} - -sub ShellEscape { - my @escaped_words = (); - foreach my $word (@_) { - my $escaped_word = $word; - if ($word =~ m![^a-zA-Z0-9/.,_=-]!) { # check for anything not in whitelist - $escaped_word =~ s/'/'\\''/; - $escaped_word = "'$escaped_word'"; - } - push(@escaped_words, $escaped_word); - } - return join(" ", @escaped_words); -} - -sub cleanup { - unlink($main::tmpfile_sym); - unlink(keys %main::tempnames); - - # We leave any collected profiles in $HOME/pprof in case the user wants - # to look at them later. We print a message informing them of this. 
- if ((scalar(@main::profile_files) > 0) && - defined($main::collected_profile)) { - if (scalar(@main::profile_files) == 1) { - print STDERR "Dynamically gathered profile is in $main::collected_profile\n"; - } - print STDERR "If you want to investigate this profile further, you can do:\n"; - print STDERR "\n"; - print STDERR " pprof \\\n"; - print STDERR " $main::prog \\\n"; - print STDERR " $main::collected_profile\n"; - print STDERR "\n"; - } -} - -sub sighandler { - cleanup(); - exit(1); -} - -sub error { - my $msg = shift; - print STDERR $msg; - cleanup(); - exit(1); -} - - -# Run $nm_command and get all the resulting procedure boundaries whose -# names match "$regexp" and returns them in a hashtable mapping from -# procedure name to a two-element vector of [start address, end address] -sub GetProcedureBoundariesViaNm { - my $escaped_nm_command = shift; # shell-escaped - my $regexp = shift; - - my $symbol_table = {}; - open(NM, "$escaped_nm_command |") || error("$escaped_nm_command: $!\n"); - my $last_start = "0"; - my $routine = ""; - while (<NM>) { - s/\r//g; # turn windows-looking lines into unix-looking lines - if (m/^\s*([0-9a-f]+) (.) (..*)/) { - my $start_val = $1; - my $type = $2; - my $this_routine = $3; - - # It's possible for two symbols to share the same address, if - # one is a zero-length variable (like __start_google_malloc) or - # one symbol is a weak alias to another (like __libc_malloc). - # In such cases, we want to ignore all values except for the - # actual symbol, which in nm-speak has type "T". The logic - # below does this, though it's a bit tricky: what happens when - # we have a series of lines with the same address, is the first - # one gets queued up to be processed. However, it won't - # *actually* be processed until later, when we read a line with - # a different address. That means that as long as we're reading - # lines with the same address, we have a chance to replace that - # item in the queue, which we do whenever we see a 'T' entry -- - # that is, a line with type 'T'. If we never see a 'T' entry, - # we'll just go ahead and process the first entry (which never - # got touched in the queue), and ignore the others. - if ($start_val eq $last_start && $type =~ /t/i) { - # We are the 'T' symbol at this address, replace previous symbol. - $routine = $this_routine; - next; - } elsif ($start_val eq $last_start) { - # We're not the 'T' symbol at this address, so ignore us. - next; - } - - if ($this_routine eq $sep_symbol) { - $sep_address = HexExtend($start_val); - } - - # Tag this routine with the starting address in case the image - # has multiple occurrences of this routine. We use a syntax - # that resembles template parameters that are automatically - # stripped out by ShortFunctionName() - $this_routine .= "<$start_val>"; - - if (defined($routine) && $routine =~ m/$regexp/) { - $symbol_table->{$routine} = [HexExtend($last_start), - HexExtend($start_val)]; - } - $last_start = $start_val; - $routine = $this_routine; - } elsif (m/^Loaded image name: (.+)/) { - # The win32 nm workalike emits information about the binary it is using. - if ($main::opt_debug) { print STDERR "Using Image $1\n"; } - } elsif (m/^PDB file name: (.+)/) { - # The win32 nm workalike emits information about the pdb it is using. - if ($main::opt_debug) { print STDERR "Using PDB $1\n"; } - } - } - close(NM); - # Handle the last line in the nm output. Unfortunately, we don't know - # how big this last symbol is, because we don't know how big the file - # is. 
For now, we just give it a size of 0. - # TODO(csilvers): do better here. - if (defined($routine) && $routine =~ m/$regexp/) { - $symbol_table->{$routine} = [HexExtend($last_start), - HexExtend($last_start)]; - } - return $symbol_table; -} - -# Gets the procedure boundaries for all routines in "$image" whose names -# match "$regexp" and returns them in a hashtable mapping from procedure -# name to a two-element vector of [start address, end address]. -# Will return an empty map if nm is not installed or not working properly. -sub GetProcedureBoundaries { - my $image = shift; - my $regexp = shift; - - # If $image doesn't start with /, then put ./ in front of it. This works - # around an obnoxious bug in our probing of nm -f behavior. - # "nm -f $image" is supposed to fail on GNU nm, but if: - # - # a. $image starts with [BbSsPp] (for example, bin/foo/bar), AND - # b. you have a.out in your current directory (a not uncommon occurence) - # - # then "nm -f $image" succeeds because -f only looks at the first letter of - # the argument, which looks valid because it's [BbSsPp], and then since - # there's no image provided, it looks for a.out and finds it. - # - # This regex makes sure that $image starts with . or /, forcing the -f - # parsing to fail since . and / are not valid formats. - $image =~ s#^[^/]#./$&#; - - # For libc libraries, the copy in /usr/lib/debug contains debugging symbols - my $debugging = DebuggingLibrary($image); - if ($debugging) { - $image = $debugging; - } - - my $nm = $obj_tool_map{"nm"}; - my $cppfilt = $obj_tool_map{"c++filt"}; - - # nm can fail for two reasons: 1) $image isn't a debug library; 2) nm - # binary doesn't support --demangle. In addition, for OS X we need - # to use the -f flag to get 'flat' nm output (otherwise we don't sort - # properly and get incorrect results). Unfortunately, GNU nm uses -f - # in an incompatible way. So first we test whether our nm supports - # --demangle and -f. - my $demangle_flag = ""; - my $cppfilt_flag = ""; - my $to_devnull = ">$dev_null 2>&1"; - if (system(ShellEscape($nm, "--demangle", "image") . $to_devnull) == 0) { - # In this mode, we do "nm --demangle " - $demangle_flag = "--demangle"; - $cppfilt_flag = ""; - } elsif (system(ShellEscape($cppfilt, $image) . $to_devnull) == 0) { - # In this mode, we do "nm | c++filt" - $cppfilt_flag = " | " . ShellEscape($cppfilt); - }; - my $flatten_flag = ""; - if (system(ShellEscape($nm, "-f", $image) . $to_devnull) == 0) { - $flatten_flag = "-f"; - } - - # Finally, in the case $imagie isn't a debug library, we try again with - # -D to at least get *exported* symbols. If we can't use --demangle, - # we use c++filt instead, if it exists on this system. - my @nm_commands = (ShellEscape($nm, "-n", $flatten_flag, $demangle_flag, - $image) . " 2>$dev_null $cppfilt_flag", - ShellEscape($nm, "-D", "-n", $flatten_flag, $demangle_flag, - $image) . " 2>$dev_null $cppfilt_flag", - # 6nm is for Go binaries - ShellEscape("6nm", "$image") . " 2>$dev_null | sort", - ); - - # If the executable is an MS Windows PDB-format executable, we'll - # have set up obj_tool_map("nm_pdb"). In this case, we actually - # want to use both unix nm and windows-specific nm_pdb, since - # PDB-format executables can apparently include dwarf .o files. - if (exists $obj_tool_map{"nm_pdb"}) { - push(@nm_commands, - ShellEscape($obj_tool_map{"nm_pdb"}, "--demangle", $image) - . 
" 2>$dev_null"); - } - - foreach my $nm_command (@nm_commands) { - my $symbol_table = GetProcedureBoundariesViaNm($nm_command, $regexp); - return $symbol_table if (%{$symbol_table}); - } - my $symbol_table = {}; - return $symbol_table; -} - - -# The test vectors for AddressAdd/Sub/Inc are 8-16-nibble hex strings. -# To make them more readable, we add underscores at interesting places. -# This routine removes the underscores, producing the canonical representation -# used by pprof to represent addresses, particularly in the tested routines. -sub CanonicalHex { - my $arg = shift; - return join '', (split '_',$arg); -} - - -# Unit test for AddressAdd: -sub AddressAddUnitTest { - my $test_data_8 = shift; - my $test_data_16 = shift; - my $error_count = 0; - my $fail_count = 0; - my $pass_count = 0; - # print STDERR "AddressAddUnitTest: ", 1+$#{$test_data_8}, " tests\n"; - - # First a few 8-nibble addresses. Note that this implementation uses - # plain old arithmetic, so a quick sanity check along with verifying what - # happens to overflow (we want it to wrap): - $address_length = 8; - foreach my $row (@{$test_data_8}) { - if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; } - my $sum = AddressAdd ($row->[0], $row->[1]); - if ($sum ne $row->[2]) { - printf STDERR "ERROR: %s != %s + %s = %s\n", $sum, - $row->[0], $row->[1], $row->[2]; - ++$fail_count; - } else { - ++$pass_count; - } - } - printf STDERR "AddressAdd 32-bit tests: %d passes, %d failures\n", - $pass_count, $fail_count; - $error_count = $fail_count; - $fail_count = 0; - $pass_count = 0; - - # Now 16-nibble addresses. - $address_length = 16; - foreach my $row (@{$test_data_16}) { - if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; } - my $sum = AddressAdd (CanonicalHex($row->[0]), CanonicalHex($row->[1])); - my $expected = join '', (split '_',$row->[2]); - if ($sum ne CanonicalHex($row->[2])) { - printf STDERR "ERROR: %s != %s + %s = %s\n", $sum, - $row->[0], $row->[1], $row->[2]; - ++$fail_count; - } else { - ++$pass_count; - } - } - printf STDERR "AddressAdd 64-bit tests: %d passes, %d failures\n", - $pass_count, $fail_count; - $error_count += $fail_count; - - return $error_count; -} - - -# Unit test for AddressSub: -sub AddressSubUnitTest { - my $test_data_8 = shift; - my $test_data_16 = shift; - my $error_count = 0; - my $fail_count = 0; - my $pass_count = 0; - # print STDERR "AddressSubUnitTest: ", 1+$#{$test_data_8}, " tests\n"; - - # First a few 8-nibble addresses. Note that this implementation uses - # plain old arithmetic, so a quick sanity check along with verifying what - # happens to overflow (we want it to wrap): - $address_length = 8; - foreach my $row (@{$test_data_8}) { - if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; } - my $sum = AddressSub ($row->[0], $row->[1]); - if ($sum ne $row->[3]) { - printf STDERR "ERROR: %s != %s - %s = %s\n", $sum, - $row->[0], $row->[1], $row->[3]; - ++$fail_count; - } else { - ++$pass_count; - } - } - printf STDERR "AddressSub 32-bit tests: %d passes, %d failures\n", - $pass_count, $fail_count; - $error_count = $fail_count; - $fail_count = 0; - $pass_count = 0; - - # Now 16-nibble addresses. 
- $address_length = 16; - foreach my $row (@{$test_data_16}) { - if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; } - my $sum = AddressSub (CanonicalHex($row->[0]), CanonicalHex($row->[1])); - if ($sum ne CanonicalHex($row->[3])) { - printf STDERR "ERROR: %s != %s - %s = %s\n", $sum, - $row->[0], $row->[1], $row->[3]; - ++$fail_count; - } else { - ++$pass_count; - } - } - printf STDERR "AddressSub 64-bit tests: %d passes, %d failures\n", - $pass_count, $fail_count; - $error_count += $fail_count; - - return $error_count; -} - - -# Unit test for AddressInc: -sub AddressIncUnitTest { - my $test_data_8 = shift; - my $test_data_16 = shift; - my $error_count = 0; - my $fail_count = 0; - my $pass_count = 0; - # print STDERR "AddressIncUnitTest: ", 1+$#{$test_data_8}, " tests\n"; - - # First a few 8-nibble addresses. Note that this implementation uses - # plain old arithmetic, so a quick sanity check along with verifying what - # happens to overflow (we want it to wrap): - $address_length = 8; - foreach my $row (@{$test_data_8}) { - if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; } - my $sum = AddressInc ($row->[0]); - if ($sum ne $row->[4]) { - printf STDERR "ERROR: %s != %s + 1 = %s\n", $sum, - $row->[0], $row->[4]; - ++$fail_count; - } else { - ++$pass_count; - } - } - printf STDERR "AddressInc 32-bit tests: %d passes, %d failures\n", - $pass_count, $fail_count; - $error_count = $fail_count; - $fail_count = 0; - $pass_count = 0; - - # Now 16-nibble addresses. - $address_length = 16; - foreach my $row (@{$test_data_16}) { - if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; } - my $sum = AddressInc (CanonicalHex($row->[0])); - if ($sum ne CanonicalHex($row->[4])) { - printf STDERR "ERROR: %s != %s + 1 = %s\n", $sum, - $row->[0], $row->[4]; - ++$fail_count; - } else { - ++$pass_count; - } - } - printf STDERR "AddressInc 64-bit tests: %d passes, %d failures\n", - $pass_count, $fail_count; - $error_count += $fail_count; - - return $error_count; -} - - -# Driver for unit tests. -# Currently just the address add/subtract/increment routines for 64-bit. -sub RunUnitTests { - my $error_count = 0; - - # This is a list of tuples [a, b, a+b, a-b, a+1] - my $unit_test_data_8 = [ - [qw(aaaaaaaa 50505050 fafafafa 5a5a5a5a aaaaaaab)], - [qw(50505050 aaaaaaaa fafafafa a5a5a5a6 50505051)], - [qw(ffffffff aaaaaaaa aaaaaaa9 55555555 00000000)], - [qw(00000001 ffffffff 00000000 00000002 00000002)], - [qw(00000001 fffffff0 fffffff1 00000011 00000002)], - ]; - my $unit_test_data_16 = [ - # The implementation handles data in 7-nibble chunks, so those are the - # interesting boundaries. 
- [qw(aaaaaaaa 50505050 - 00_000000f_afafafa 00_0000005_a5a5a5a 00_000000a_aaaaaab)], - [qw(50505050 aaaaaaaa - 00_000000f_afafafa ff_ffffffa_5a5a5a6 00_0000005_0505051)], - [qw(ffffffff aaaaaaaa - 00_000001a_aaaaaa9 00_0000005_5555555 00_0000010_0000000)], - [qw(00000001 ffffffff - 00_0000010_0000000 ff_ffffff0_0000002 00_0000000_0000002)], - [qw(00000001 fffffff0 - 00_000000f_ffffff1 ff_ffffff0_0000011 00_0000000_0000002)], - - [qw(00_a00000a_aaaaaaa 50505050 - 00_a00000f_afafafa 00_a000005_a5a5a5a 00_a00000a_aaaaaab)], - [qw(0f_fff0005_0505050 aaaaaaaa - 0f_fff000f_afafafa 0f_ffefffa_5a5a5a6 0f_fff0005_0505051)], - [qw(00_000000f_fffffff 01_800000a_aaaaaaa - 01_800001a_aaaaaa9 fe_8000005_5555555 00_0000010_0000000)], - [qw(00_0000000_0000001 ff_fffffff_fffffff - 00_0000000_0000000 00_0000000_0000002 00_0000000_0000002)], - [qw(00_0000000_0000001 ff_fffffff_ffffff0 - ff_fffffff_ffffff1 00_0000000_0000011 00_0000000_0000002)], - ]; - - $error_count += AddressAddUnitTest($unit_test_data_8, $unit_test_data_16); - $error_count += AddressSubUnitTest($unit_test_data_8, $unit_test_data_16); - $error_count += AddressIncUnitTest($unit_test_data_8, $unit_test_data_16); - if ($error_count > 0) { - print STDERR $error_count, " errors: FAILED\n"; - } else { - print STDERR "PASS\n"; - } - exit ($error_count); -} diff --git a/lib/jemalloc-3.6.0/config.guess b/lib/jemalloc-3.6.0/config.guess deleted file mode 100755 index b79252d..0000000 --- a/lib/jemalloc-3.6.0/config.guess +++ /dev/null @@ -1,1558 +0,0 @@ -#! /bin/sh -# Attempt to guess a canonical system name. -# Copyright 1992-2013 Free Software Foundation, Inc. - -timestamp='2013-06-10' - -# This file is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, see . -# -# As a special exception to the GNU General Public License, if you -# distribute this file as part of a program that contains a -# configuration script generated by Autoconf, you may include it under -# the same distribution terms that you use for the rest of that -# program. This Exception is an additional permission under section 7 -# of the GNU General Public License, version 3 ("GPLv3"). -# -# Originally written by Per Bothner. -# -# You can get the latest version of this script from: -# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD -# -# Please send patches with a ChangeLog entry to config-patches@gnu.org. - - -me=`echo "$0" | sed -e 's,.*/,,'` - -usage="\ -Usage: $0 [OPTION] - -Output the configuration name of the system \`$me' is run on. - -Operation modes: - -h, --help print this help, then exit - -t, --time-stamp print date of last modification, then exit - -v, --version print version number, then exit - -Report bugs and patches to ." - -version="\ -GNU config.guess ($timestamp) - -Originally written by Per Bothner. -Copyright 1992-2013 Free Software Foundation, Inc. - -This is free software; see the source for copying conditions. 
There is NO -warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." - -help=" -Try \`$me --help' for more information." - -# Parse command line -while test $# -gt 0 ; do - case $1 in - --time-stamp | --time* | -t ) - echo "$timestamp" ; exit ;; - --version | -v ) - echo "$version" ; exit ;; - --help | --h* | -h ) - echo "$usage"; exit ;; - -- ) # Stop option processing - shift; break ;; - - ) # Use stdin as input. - break ;; - -* ) - echo "$me: invalid option $1$help" >&2 - exit 1 ;; - * ) - break ;; - esac -done - -if test $# != 0; then - echo "$me: too many arguments$help" >&2 - exit 1 -fi - -trap 'exit 1' 1 2 15 - -# CC_FOR_BUILD -- compiler used by this script. Note that the use of a -# compiler to aid in system detection is discouraged as it requires -# temporary files to be created and, as you can see below, it is a -# headache to deal with in a portable fashion. - -# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still -# use `HOST_CC' if defined, but it is deprecated. - -# Portable tmp directory creation inspired by the Autoconf team. - -set_cc_for_build=' -trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ; -trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ; -: ${TMPDIR=/tmp} ; - { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } || - { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } || - { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } || - { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ; -dummy=$tmp/dummy ; -tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ; -case $CC_FOR_BUILD,$HOST_CC,$CC in - ,,) echo "int x;" > $dummy.c ; - for c in cc gcc c89 c99 ; do - if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then - CC_FOR_BUILD="$c"; break ; - fi ; - done ; - if test x"$CC_FOR_BUILD" = x ; then - CC_FOR_BUILD=no_compiler_found ; - fi - ;; - ,,*) CC_FOR_BUILD=$CC ;; - ,*,*) CC_FOR_BUILD=$HOST_CC ;; -esac ; set_cc_for_build= ;' - -# This is needed to find uname on a Pyramid OSx when run in the BSD universe. -# (ghazi@noc.rutgers.edu 1994-08-24) -if (test -f /.attbin/uname) >/dev/null 2>&1 ; then - PATH=$PATH:/.attbin ; export PATH -fi - -UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown -UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown -UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown -UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown - -case "${UNAME_SYSTEM}" in -Linux|GNU|GNU/*) - # If the system lacks a compiler, then just pick glibc. - # We could probably try harder. - LIBC=gnu - - eval $set_cc_for_build - cat <<-EOF > $dummy.c - #include - #if defined(__UCLIBC__) - LIBC=uclibc - #elif defined(__dietlibc__) - LIBC=dietlibc - #else - LIBC=gnu - #endif - EOF - eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC'` - ;; -esac - -# Note: order is significant - the case branches are not exclusive. - -case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in - *:NetBSD:*:*) - # NetBSD (nbsd) targets should (where applicable) match one or - # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*, - # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently - # switched to ELF, *-*-netbsd* would select the old - # object file format. 
This provides both forward - # compatibility and a consistent mechanism for selecting the - # object file format. - # - # Note: NetBSD doesn't particularly care about the vendor - # portion of the name. We always set it to "unknown". - sysctl="sysctl -n hw.machine_arch" - UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \ - /usr/sbin/$sysctl 2>/dev/null || echo unknown)` - case "${UNAME_MACHINE_ARCH}" in - armeb) machine=armeb-unknown ;; - arm*) machine=arm-unknown ;; - sh3el) machine=shl-unknown ;; - sh3eb) machine=sh-unknown ;; - sh5el) machine=sh5le-unknown ;; - *) machine=${UNAME_MACHINE_ARCH}-unknown ;; - esac - # The Operating System including object format, if it has switched - # to ELF recently, or will in the future. - case "${UNAME_MACHINE_ARCH}" in - arm*|i386|m68k|ns32k|sh3*|sparc|vax) - eval $set_cc_for_build - if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \ - | grep -q __ELF__ - then - # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout). - # Return netbsd for either. FIX? - os=netbsd - else - os=netbsdelf - fi - ;; - *) - os=netbsd - ;; - esac - # The OS release - # Debian GNU/NetBSD machines have a different userland, and - # thus, need a distinct triplet. However, they do not need - # kernel version information, so it can be replaced with a - # suitable tag, in the style of linux-gnu. - case "${UNAME_VERSION}" in - Debian*) - release='-gnu' - ;; - *) - release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'` - ;; - esac - # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM: - # contains redundant information, the shorter form: - # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used. - echo "${machine}-${os}${release}" - exit ;; - *:Bitrig:*:*) - UNAME_MACHINE_ARCH=`arch | sed 's/Bitrig.//'` - echo ${UNAME_MACHINE_ARCH}-unknown-bitrig${UNAME_RELEASE} - exit ;; - *:OpenBSD:*:*) - UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'` - echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE} - exit ;; - *:ekkoBSD:*:*) - echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE} - exit ;; - *:SolidBSD:*:*) - echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE} - exit ;; - macppc:MirBSD:*:*) - echo powerpc-unknown-mirbsd${UNAME_RELEASE} - exit ;; - *:MirBSD:*:*) - echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE} - exit ;; - alpha:OSF1:*:*) - case $UNAME_RELEASE in - *4.0) - UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'` - ;; - *5.*) - UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'` - ;; - esac - # According to Compaq, /usr/sbin/psrinfo has been available on - # OSF/1 and Tru64 systems produced since 1995. I hope that - # covers most systems running today. This code pipes the CPU - # types through head -n 1, so we only detect the type of CPU 0. 
- ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1` - case "$ALPHA_CPU_TYPE" in - "EV4 (21064)") - UNAME_MACHINE="alpha" ;; - "EV4.5 (21064)") - UNAME_MACHINE="alpha" ;; - "LCA4 (21066/21068)") - UNAME_MACHINE="alpha" ;; - "EV5 (21164)") - UNAME_MACHINE="alphaev5" ;; - "EV5.6 (21164A)") - UNAME_MACHINE="alphaev56" ;; - "EV5.6 (21164PC)") - UNAME_MACHINE="alphapca56" ;; - "EV5.7 (21164PC)") - UNAME_MACHINE="alphapca57" ;; - "EV6 (21264)") - UNAME_MACHINE="alphaev6" ;; - "EV6.7 (21264A)") - UNAME_MACHINE="alphaev67" ;; - "EV6.8CB (21264C)") - UNAME_MACHINE="alphaev68" ;; - "EV6.8AL (21264B)") - UNAME_MACHINE="alphaev68" ;; - "EV6.8CX (21264D)") - UNAME_MACHINE="alphaev68" ;; - "EV6.9A (21264/EV69A)") - UNAME_MACHINE="alphaev69" ;; - "EV7 (21364)") - UNAME_MACHINE="alphaev7" ;; - "EV7.9 (21364A)") - UNAME_MACHINE="alphaev79" ;; - esac - # A Pn.n version is a patched version. - # A Vn.n version is a released version. - # A Tn.n version is a released field test version. - # A Xn.n version is an unreleased experimental baselevel. - # 1.2 uses "1.2" for uname -r. - echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` - # Reset EXIT trap before exiting to avoid spurious non-zero exit code. - exitcode=$? - trap '' 0 - exit $exitcode ;; - Alpha\ *:Windows_NT*:*) - # How do we know it's Interix rather than the generic POSIX subsystem? - # Should we change UNAME_MACHINE based on the output of uname instead - # of the specific Alpha model? - echo alpha-pc-interix - exit ;; - 21064:Windows_NT:50:3) - echo alpha-dec-winnt3.5 - exit ;; - Amiga*:UNIX_System_V:4.0:*) - echo m68k-unknown-sysv4 - exit ;; - *:[Aa]miga[Oo][Ss]:*:*) - echo ${UNAME_MACHINE}-unknown-amigaos - exit ;; - *:[Mm]orph[Oo][Ss]:*:*) - echo ${UNAME_MACHINE}-unknown-morphos - exit ;; - *:OS/390:*:*) - echo i370-ibm-openedition - exit ;; - *:z/VM:*:*) - echo s390-ibm-zvmoe - exit ;; - *:OS400:*:*) - echo powerpc-ibm-os400 - exit ;; - arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*) - echo arm-acorn-riscix${UNAME_RELEASE} - exit ;; - arm*:riscos:*:*|arm*:RISCOS:*:*) - echo arm-unknown-riscos - exit ;; - SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*) - echo hppa1.1-hitachi-hiuxmpp - exit ;; - Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*) - # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE. - if test "`(/bin/universe) 2>/dev/null`" = att ; then - echo pyramid-pyramid-sysv3 - else - echo pyramid-pyramid-bsd - fi - exit ;; - NILE*:*:*:dcosx) - echo pyramid-pyramid-svr4 - exit ;; - DRS?6000:unix:4.0:6*) - echo sparc-icl-nx6 - exit ;; - DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*) - case `/usr/bin/uname -p` in - sparc) echo sparc-icl-nx7; exit ;; - esac ;; - s390x:SunOS:*:*) - echo ${UNAME_MACHINE}-ibm-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` - exit ;; - sun4H:SunOS:5.*:*) - echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` - exit ;; - sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*) - echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` - exit ;; - i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*) - echo i386-pc-auroraux${UNAME_RELEASE} - exit ;; - i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*) - eval $set_cc_for_build - SUN_ARCH="i386" - # If there is a compiler, see if it is configured for 64-bit objects. - # Note that the Sun cc does not turn __LP64__ into 1 like gcc does. - # This test works for both compilers. 
- if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then - if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \ - (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \ - grep IS_64BIT_ARCH >/dev/null - then - SUN_ARCH="x86_64" - fi - fi - echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` - exit ;; - sun4*:SunOS:6*:*) - # According to config.sub, this is the proper way to canonicalize - # SunOS6. Hard to guess exactly what SunOS6 will be like, but - # it's likely to be more like Solaris than SunOS4. - echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` - exit ;; - sun4*:SunOS:*:*) - case "`/usr/bin/arch -k`" in - Series*|S4*) - UNAME_RELEASE=`uname -v` - ;; - esac - # Japanese Language versions have a version number like `4.1.3-JL'. - echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'` - exit ;; - sun3*:SunOS:*:*) - echo m68k-sun-sunos${UNAME_RELEASE} - exit ;; - sun*:*:4.2BSD:*) - UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null` - test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3 - case "`/bin/arch`" in - sun3) - echo m68k-sun-sunos${UNAME_RELEASE} - ;; - sun4) - echo sparc-sun-sunos${UNAME_RELEASE} - ;; - esac - exit ;; - aushp:SunOS:*:*) - echo sparc-auspex-sunos${UNAME_RELEASE} - exit ;; - # The situation for MiNT is a little confusing. The machine name - # can be virtually everything (everything which is not - # "atarist" or "atariste" at least should have a processor - # > m68000). The system name ranges from "MiNT" over "FreeMiNT" - # to the lowercase version "mint" (or "freemint"). Finally - # the system name "TOS" denotes a system which is actually not - # MiNT. But MiNT is downward compatible to TOS, so this should - # be no problem. - atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*) - echo m68k-atari-mint${UNAME_RELEASE} - exit ;; - atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*) - echo m68k-atari-mint${UNAME_RELEASE} - exit ;; - *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*) - echo m68k-atari-mint${UNAME_RELEASE} - exit ;; - milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*) - echo m68k-milan-mint${UNAME_RELEASE} - exit ;; - hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*) - echo m68k-hades-mint${UNAME_RELEASE} - exit ;; - *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*) - echo m68k-unknown-mint${UNAME_RELEASE} - exit ;; - m68k:machten:*:*) - echo m68k-apple-machten${UNAME_RELEASE} - exit ;; - powerpc:machten:*:*) - echo powerpc-apple-machten${UNAME_RELEASE} - exit ;; - RISC*:Mach:*:*) - echo mips-dec-mach_bsd4.3 - exit ;; - RISC*:ULTRIX:*:*) - echo mips-dec-ultrix${UNAME_RELEASE} - exit ;; - VAX*:ULTRIX*:*:*) - echo vax-dec-ultrix${UNAME_RELEASE} - exit ;; - 2020:CLIX:*:* | 2430:CLIX:*:*) - echo clipper-intergraph-clix${UNAME_RELEASE} - exit ;; - mips:*:*:UMIPS | mips:*:*:RISCos) - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c -#ifdef __cplusplus -#include /* for printf() prototype */ - int main (int argc, char *argv[]) { -#else - int main (argc, argv) int argc; char *argv[]; { -#endif - #if defined (host_mips) && defined (MIPSEB) - #if defined (SYSTYPE_SYSV) - printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0); - #endif - #if defined (SYSTYPE_SVR4) - printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0); - #endif - #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD) - printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0); - #endif - #endif - exit (-1); - } -EOF - $CC_FOR_BUILD -o $dummy $dummy.c && - dummyarg=`echo "${UNAME_RELEASE}" | sed -n 
's/\([0-9]*\).*/\1/p'` && - SYSTEM_NAME=`$dummy $dummyarg` && - { echo "$SYSTEM_NAME"; exit; } - echo mips-mips-riscos${UNAME_RELEASE} - exit ;; - Motorola:PowerMAX_OS:*:*) - echo powerpc-motorola-powermax - exit ;; - Motorola:*:4.3:PL8-*) - echo powerpc-harris-powermax - exit ;; - Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*) - echo powerpc-harris-powermax - exit ;; - Night_Hawk:Power_UNIX:*:*) - echo powerpc-harris-powerunix - exit ;; - m88k:CX/UX:7*:*) - echo m88k-harris-cxux7 - exit ;; - m88k:*:4*:R4*) - echo m88k-motorola-sysv4 - exit ;; - m88k:*:3*:R3*) - echo m88k-motorola-sysv3 - exit ;; - AViiON:dgux:*:*) - # DG/UX returns AViiON for all architectures - UNAME_PROCESSOR=`/usr/bin/uname -p` - if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ] - then - if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \ - [ ${TARGET_BINARY_INTERFACE}x = x ] - then - echo m88k-dg-dgux${UNAME_RELEASE} - else - echo m88k-dg-dguxbcs${UNAME_RELEASE} - fi - else - echo i586-dg-dgux${UNAME_RELEASE} - fi - exit ;; - M88*:DolphinOS:*:*) # DolphinOS (SVR3) - echo m88k-dolphin-sysv3 - exit ;; - M88*:*:R3*:*) - # Delta 88k system running SVR3 - echo m88k-motorola-sysv3 - exit ;; - XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3) - echo m88k-tektronix-sysv3 - exit ;; - Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD) - echo m68k-tektronix-bsd - exit ;; - *:IRIX*:*:*) - echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'` - exit ;; - ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX. - echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id - exit ;; # Note that: echo "'`uname -s`'" gives 'AIX ' - i*86:AIX:*:*) - echo i386-ibm-aix - exit ;; - ia64:AIX:*:*) - if [ -x /usr/bin/oslevel ] ; then - IBM_REV=`/usr/bin/oslevel` - else - IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} - fi - echo ${UNAME_MACHINE}-ibm-aix${IBM_REV} - exit ;; - *:AIX:2:3) - if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c - #include - - main() - { - if (!__power_pc()) - exit(1); - puts("powerpc-ibm-aix3.2.5"); - exit(0); - } -EOF - if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` - then - echo "$SYSTEM_NAME" - else - echo rs6000-ibm-aix3.2.5 - fi - elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then - echo rs6000-ibm-aix3.2.4 - else - echo rs6000-ibm-aix3.2 - fi - exit ;; - *:AIX:*:[4567]) - IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'` - if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then - IBM_ARCH=rs6000 - else - IBM_ARCH=powerpc - fi - if [ -x /usr/bin/oslevel ] ; then - IBM_REV=`/usr/bin/oslevel` - else - IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} - fi - echo ${IBM_ARCH}-ibm-aix${IBM_REV} - exit ;; - *:AIX:*:*) - echo rs6000-ibm-aix - exit ;; - ibmrt:4.4BSD:*|romp-ibm:BSD:*) - echo romp-ibm-bsd4.4 - exit ;; - ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and - echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to - exit ;; # report: romp-ibm BSD 4.3 - *:BOSX:*:*) - echo rs6000-bull-bosx - exit ;; - DPX/2?00:B.O.S.:*:*) - echo m68k-bull-sysv3 - exit ;; - 9000/[34]??:4.3bsd:1.*:*) - echo m68k-hp-bsd - exit ;; - hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*) - echo m68k-hp-bsd4.4 - exit ;; - 9000/[34678]??:HP-UX:*:*) - HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` - case "${UNAME_MACHINE}" in - 9000/31? ) HP_ARCH=m68000 ;; - 9000/[34]?? 
) HP_ARCH=m68k ;; - 9000/[678][0-9][0-9]) - if [ -x /usr/bin/getconf ]; then - sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null` - sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null` - case "${sc_cpu_version}" in - 523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0 - 528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1 - 532) # CPU_PA_RISC2_0 - case "${sc_kernel_bits}" in - 32) HP_ARCH="hppa2.0n" ;; - 64) HP_ARCH="hppa2.0w" ;; - '') HP_ARCH="hppa2.0" ;; # HP-UX 10.20 - esac ;; - esac - fi - if [ "${HP_ARCH}" = "" ]; then - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c - - #define _HPUX_SOURCE - #include - #include - - int main () - { - #if defined(_SC_KERNEL_BITS) - long bits = sysconf(_SC_KERNEL_BITS); - #endif - long cpu = sysconf (_SC_CPU_VERSION); - - switch (cpu) - { - case CPU_PA_RISC1_0: puts ("hppa1.0"); break; - case CPU_PA_RISC1_1: puts ("hppa1.1"); break; - case CPU_PA_RISC2_0: - #if defined(_SC_KERNEL_BITS) - switch (bits) - { - case 64: puts ("hppa2.0w"); break; - case 32: puts ("hppa2.0n"); break; - default: puts ("hppa2.0"); break; - } break; - #else /* !defined(_SC_KERNEL_BITS) */ - puts ("hppa2.0"); break; - #endif - default: puts ("hppa1.0"); break; - } - exit (0); - } -EOF - (CCOPTS= $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy` - test -z "$HP_ARCH" && HP_ARCH=hppa - fi ;; - esac - if [ ${HP_ARCH} = "hppa2.0w" ] - then - eval $set_cc_for_build - - # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating - # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler - # generating 64-bit code. GNU and HP use different nomenclature: - # - # $ CC_FOR_BUILD=cc ./config.guess - # => hppa2.0w-hp-hpux11.23 - # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess - # => hppa64-hp-hpux11.23 - - if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | - grep -q __LP64__ - then - HP_ARCH="hppa2.0w" - else - HP_ARCH="hppa64" - fi - fi - echo ${HP_ARCH}-hp-hpux${HPUX_REV} - exit ;; - ia64:HP-UX:*:*) - HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` - echo ia64-hp-hpux${HPUX_REV} - exit ;; - 3050*:HI-UX:*:*) - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c - #include - int - main () - { - long cpu = sysconf (_SC_CPU_VERSION); - /* The order matters, because CPU_IS_HP_MC68K erroneously returns - true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct - results, however. 
*/ - if (CPU_IS_PA_RISC (cpu)) - { - switch (cpu) - { - case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break; - case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break; - case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break; - default: puts ("hppa-hitachi-hiuxwe2"); break; - } - } - else if (CPU_IS_HP_MC68K (cpu)) - puts ("m68k-hitachi-hiuxwe2"); - else puts ("unknown-hitachi-hiuxwe2"); - exit (0); - } -EOF - $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` && - { echo "$SYSTEM_NAME"; exit; } - echo unknown-hitachi-hiuxwe2 - exit ;; - 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* ) - echo hppa1.1-hp-bsd - exit ;; - 9000/8??:4.3bsd:*:*) - echo hppa1.0-hp-bsd - exit ;; - *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*) - echo hppa1.0-hp-mpeix - exit ;; - hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* ) - echo hppa1.1-hp-osf - exit ;; - hp8??:OSF1:*:*) - echo hppa1.0-hp-osf - exit ;; - i*86:OSF1:*:*) - if [ -x /usr/sbin/sysversion ] ; then - echo ${UNAME_MACHINE}-unknown-osf1mk - else - echo ${UNAME_MACHINE}-unknown-osf1 - fi - exit ;; - parisc*:Lites*:*:*) - echo hppa1.1-hp-lites - exit ;; - C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*) - echo c1-convex-bsd - exit ;; - C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*) - if getsysinfo -f scalar_acc - then echo c32-convex-bsd - else echo c2-convex-bsd - fi - exit ;; - C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*) - echo c34-convex-bsd - exit ;; - C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*) - echo c38-convex-bsd - exit ;; - C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*) - echo c4-convex-bsd - exit ;; - CRAY*Y-MP:*:*:*) - echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' - exit ;; - CRAY*[A-Z]90:*:*:*) - echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \ - | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \ - -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \ - -e 's/\.[^.]*$/.X/' - exit ;; - CRAY*TS:*:*:*) - echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' - exit ;; - CRAY*T3E:*:*:*) - echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' - exit ;; - CRAY*SV1:*:*:*) - echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' - exit ;; - *:UNICOS/mp:*:*) - echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' - exit ;; - F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*) - FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` - FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'` - FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'` - echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" - exit ;; - 5000:UNIX_System_V:4.*:*) - FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'` - FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'` - echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" - exit ;; - i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*) - echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE} - exit ;; - sparc*:BSD/OS:*:*) - echo sparc-unknown-bsdi${UNAME_RELEASE} - exit ;; - *:BSD/OS:*:*) - echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE} - exit ;; - *:FreeBSD:*:*) - UNAME_PROCESSOR=`/usr/bin/uname -p` - case ${UNAME_PROCESSOR} in - amd64) - echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; - *) - echo ${UNAME_PROCESSOR}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; - esac - exit ;; - i*:CYGWIN*:*) - echo ${UNAME_MACHINE}-pc-cygwin - exit ;; 
- *:MINGW64*:*) - echo ${UNAME_MACHINE}-pc-mingw64 - exit ;; - *:MINGW*:*) - echo ${UNAME_MACHINE}-pc-mingw32 - exit ;; - i*:MSYS*:*) - echo ${UNAME_MACHINE}-pc-msys - exit ;; - i*:windows32*:*) - # uname -m includes "-pc" on this system. - echo ${UNAME_MACHINE}-mingw32 - exit ;; - i*:PW*:*) - echo ${UNAME_MACHINE}-pc-pw32 - exit ;; - *:Interix*:*) - case ${UNAME_MACHINE} in - x86) - echo i586-pc-interix${UNAME_RELEASE} - exit ;; - authenticamd | genuineintel | EM64T) - echo x86_64-unknown-interix${UNAME_RELEASE} - exit ;; - IA64) - echo ia64-unknown-interix${UNAME_RELEASE} - exit ;; - esac ;; - [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*) - echo i${UNAME_MACHINE}-pc-mks - exit ;; - 8664:Windows_NT:*) - echo x86_64-pc-mks - exit ;; - i*:Windows_NT*:* | Pentium*:Windows_NT*:*) - # How do we know it's Interix rather than the generic POSIX subsystem? - # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we - # UNAME_MACHINE based on the output of uname instead of i386? - echo i586-pc-interix - exit ;; - i*:UWIN*:*) - echo ${UNAME_MACHINE}-pc-uwin - exit ;; - amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*) - echo x86_64-unknown-cygwin - exit ;; - p*:CYGWIN*:*) - echo powerpcle-unknown-cygwin - exit ;; - prep*:SunOS:5.*:*) - echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` - exit ;; - *:GNU:*:*) - # the GNU system - echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-${LIBC}`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'` - exit ;; - *:GNU/*:*:*) - # other systems with GNU libc and userland - echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-${LIBC} - exit ;; - i*86:Minix:*:*) - echo ${UNAME_MACHINE}-pc-minix - exit ;; - aarch64:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - aarch64_be:Linux:*:*) - UNAME_MACHINE=aarch64_be - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - alpha:Linux:*:*) - case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in - EV5) UNAME_MACHINE=alphaev5 ;; - EV56) UNAME_MACHINE=alphaev56 ;; - PCA56) UNAME_MACHINE=alphapca56 ;; - PCA57) UNAME_MACHINE=alphapca56 ;; - EV6) UNAME_MACHINE=alphaev6 ;; - EV67) UNAME_MACHINE=alphaev67 ;; - EV68*) UNAME_MACHINE=alphaev68 ;; - esac - objdump --private-headers /bin/sh | grep -q ld.so.1 - if test "$?" 
= 0 ; then LIBC="gnulibc1" ; fi - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - arc:Linux:*:* | arceb:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - arm*:Linux:*:*) - eval $set_cc_for_build - if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \ - | grep -q __ARM_EABI__ - then - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - else - if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \ - | grep -q __ARM_PCS_VFP - then - echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabi - else - echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabihf - fi - fi - exit ;; - avr32*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - cris:Linux:*:*) - echo ${UNAME_MACHINE}-axis-linux-${LIBC} - exit ;; - crisv32:Linux:*:*) - echo ${UNAME_MACHINE}-axis-linux-${LIBC} - exit ;; - frv:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - hexagon:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - i*86:Linux:*:*) - echo ${UNAME_MACHINE}-pc-linux-${LIBC} - exit ;; - ia64:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - m32r*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - m68*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - mips:Linux:*:* | mips64:Linux:*:*) - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c - #undef CPU - #undef ${UNAME_MACHINE} - #undef ${UNAME_MACHINE}el - #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) - CPU=${UNAME_MACHINE}el - #else - #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB) - CPU=${UNAME_MACHINE} - #else - CPU= - #endif - #endif -EOF - eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'` - test x"${CPU}" != x && { echo "${CPU}-unknown-linux-${LIBC}"; exit; } - ;; - or1k:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - or32:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - padre:Linux:*:*) - echo sparc-unknown-linux-${LIBC} - exit ;; - parisc64:Linux:*:* | hppa64:Linux:*:*) - echo hppa64-unknown-linux-${LIBC} - exit ;; - parisc:Linux:*:* | hppa:Linux:*:*) - # Look for CPU level - case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in - PA7*) echo hppa1.1-unknown-linux-${LIBC} ;; - PA8*) echo hppa2.0-unknown-linux-${LIBC} ;; - *) echo hppa-unknown-linux-${LIBC} ;; - esac - exit ;; - ppc64:Linux:*:*) - echo powerpc64-unknown-linux-${LIBC} - exit ;; - ppc:Linux:*:*) - echo powerpc-unknown-linux-${LIBC} - exit ;; - ppc64le:Linux:*:*) - echo powerpc64le-unknown-linux-${LIBC} - exit ;; - ppcle:Linux:*:*) - echo powerpcle-unknown-linux-${LIBC} - exit ;; - s390:Linux:*:* | s390x:Linux:*:*) - echo ${UNAME_MACHINE}-ibm-linux-${LIBC} - exit ;; - sh64*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - sh*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - sparc:Linux:*:* | sparc64:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - tile*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - vax:Linux:*:*) - echo ${UNAME_MACHINE}-dec-linux-${LIBC} - exit ;; - x86_64:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - xtensa*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - i*86:DYNIX/ptx:4*:*) - # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there. - # earlier versions are messed up and put the nodename in both - # sysname and nodename. 
- echo i386-sequent-sysv4 - exit ;; - i*86:UNIX_SV:4.2MP:2.*) - # Unixware is an offshoot of SVR4, but it has its own version - # number series starting with 2... - # I am not positive that other SVR4 systems won't match this, - # I just have to hope. -- rms. - # Use sysv4.2uw... so that sysv4* matches it. - echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION} - exit ;; - i*86:OS/2:*:*) - # If we were able to find `uname', then EMX Unix compatibility - # is probably installed. - echo ${UNAME_MACHINE}-pc-os2-emx - exit ;; - i*86:XTS-300:*:STOP) - echo ${UNAME_MACHINE}-unknown-stop - exit ;; - i*86:atheos:*:*) - echo ${UNAME_MACHINE}-unknown-atheos - exit ;; - i*86:syllable:*:*) - echo ${UNAME_MACHINE}-pc-syllable - exit ;; - i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*) - echo i386-unknown-lynxos${UNAME_RELEASE} - exit ;; - i*86:*DOS:*:*) - echo ${UNAME_MACHINE}-pc-msdosdjgpp - exit ;; - i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*) - UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'` - if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then - echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL} - else - echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL} - fi - exit ;; - i*86:*:5:[678]*) - # UnixWare 7.x, OpenUNIX and OpenServer 6. - case `/bin/uname -X | grep "^Machine"` in - *486*) UNAME_MACHINE=i486 ;; - *Pentium) UNAME_MACHINE=i586 ;; - *Pent*|*Celeron) UNAME_MACHINE=i686 ;; - esac - echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION} - exit ;; - i*86:*:3.2:*) - if test -f /usr/options/cb.name; then - UNAME_REL=`sed -n 's/.*Version //p' /dev/null >/dev/null ; then - UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')` - (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486 - (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \ - && UNAME_MACHINE=i586 - (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \ - && UNAME_MACHINE=i686 - (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \ - && UNAME_MACHINE=i686 - echo ${UNAME_MACHINE}-pc-sco$UNAME_REL - else - echo ${UNAME_MACHINE}-pc-sysv32 - fi - exit ;; - pc:*:*:*) - # Left here for compatibility: - # uname -m prints for DJGPP always 'pc', but it prints nothing about - # the processor, so we play safe by assuming i586. - # Note: whatever this is, it MUST be the same as what config.sub - # prints for the "djgpp" host, or else GDB configury will decide that - # this is a cross-build. - echo i586-pc-msdosdjgpp - exit ;; - Intel:Mach:3*:*) - echo i386-pc-mach3 - exit ;; - paragon:*:*:*) - echo i860-intel-osf1 - exit ;; - i860:*:4.*:*) # i860-SVR4 - if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then - echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4 - else # Add other i860-SVR4 vendors below as they are discovered. 
- echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4 - fi - exit ;; - mini*:CTIX:SYS*5:*) - # "miniframe" - echo m68010-convergent-sysv - exit ;; - mc68k:UNIX:SYSTEM5:3.51m) - echo m68k-convergent-sysv - exit ;; - M680?0:D-NIX:5.3:*) - echo m68k-diab-dnix - exit ;; - M68*:*:R3V[5678]*:*) - test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;; - 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0) - OS_REL='' - test -r /etc/.relid \ - && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` - /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ - && { echo i486-ncr-sysv4.3${OS_REL}; exit; } - /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ - && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; - 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*) - /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ - && { echo i486-ncr-sysv4; exit; } ;; - NCR*:*:4.2:* | MPRAS*:*:4.2:*) - OS_REL='.3' - test -r /etc/.relid \ - && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` - /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ - && { echo i486-ncr-sysv4.3${OS_REL}; exit; } - /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ - && { echo i586-ncr-sysv4.3${OS_REL}; exit; } - /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \ - && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; - m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*) - echo m68k-unknown-lynxos${UNAME_RELEASE} - exit ;; - mc68030:UNIX_System_V:4.*:*) - echo m68k-atari-sysv4 - exit ;; - TSUNAMI:LynxOS:2.*:*) - echo sparc-unknown-lynxos${UNAME_RELEASE} - exit ;; - rs6000:LynxOS:2.*:*) - echo rs6000-unknown-lynxos${UNAME_RELEASE} - exit ;; - PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*) - echo powerpc-unknown-lynxos${UNAME_RELEASE} - exit ;; - SM[BE]S:UNIX_SV:*:*) - echo mips-dde-sysv${UNAME_RELEASE} - exit ;; - RM*:ReliantUNIX-*:*:*) - echo mips-sni-sysv4 - exit ;; - RM*:SINIX-*:*:*) - echo mips-sni-sysv4 - exit ;; - *:SINIX-*:*:*) - if uname -p 2>/dev/null >/dev/null ; then - UNAME_MACHINE=`(uname -p) 2>/dev/null` - echo ${UNAME_MACHINE}-sni-sysv4 - else - echo ns32k-sni-sysv - fi - exit ;; - PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort - # says - echo i586-unisys-sysv4 - exit ;; - *:UNIX_System_V:4*:FTX*) - # From Gerald Hewes . - # How about differentiating between stratus architectures? -djm - echo hppa1.1-stratus-sysv4 - exit ;; - *:*:*:FTX*) - # From seanf@swdc.stratus.com. - echo i860-stratus-sysv4 - exit ;; - i*86:VOS:*:*) - # From Paul.Green@stratus.com. - echo ${UNAME_MACHINE}-stratus-vos - exit ;; - *:VOS:*:*) - # From Paul.Green@stratus.com. - echo hppa1.1-stratus-vos - exit ;; - mc68*:A/UX:*:*) - echo m68k-apple-aux${UNAME_RELEASE} - exit ;; - news*:NEWS-OS:6*:*) - echo mips-sony-newsos6 - exit ;; - R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*) - if [ -d /usr/nec ]; then - echo mips-nec-sysv${UNAME_RELEASE} - else - echo mips-unknown-sysv${UNAME_RELEASE} - fi - exit ;; - BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only. - echo powerpc-be-beos - exit ;; - BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only. - echo powerpc-apple-beos - exit ;; - BePC:BeOS:*:*) # BeOS running on Intel PC compatible. - echo i586-pc-beos - exit ;; - BePC:Haiku:*:*) # Haiku running on Intel PC compatible. 
- echo i586-pc-haiku - exit ;; - x86_64:Haiku:*:*) - echo x86_64-unknown-haiku - exit ;; - SX-4:SUPER-UX:*:*) - echo sx4-nec-superux${UNAME_RELEASE} - exit ;; - SX-5:SUPER-UX:*:*) - echo sx5-nec-superux${UNAME_RELEASE} - exit ;; - SX-6:SUPER-UX:*:*) - echo sx6-nec-superux${UNAME_RELEASE} - exit ;; - SX-7:SUPER-UX:*:*) - echo sx7-nec-superux${UNAME_RELEASE} - exit ;; - SX-8:SUPER-UX:*:*) - echo sx8-nec-superux${UNAME_RELEASE} - exit ;; - SX-8R:SUPER-UX:*:*) - echo sx8r-nec-superux${UNAME_RELEASE} - exit ;; - Power*:Rhapsody:*:*) - echo powerpc-apple-rhapsody${UNAME_RELEASE} - exit ;; - *:Rhapsody:*:*) - echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE} - exit ;; - *:Darwin:*:*) - UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown - eval $set_cc_for_build - if test "$UNAME_PROCESSOR" = unknown ; then - UNAME_PROCESSOR=powerpc - fi - if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then - if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \ - (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \ - grep IS_64BIT_ARCH >/dev/null - then - case $UNAME_PROCESSOR in - i386) UNAME_PROCESSOR=x86_64 ;; - powerpc) UNAME_PROCESSOR=powerpc64 ;; - esac - fi - fi - echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE} - exit ;; - *:procnto*:*:* | *:QNX:[0123456789]*:*) - UNAME_PROCESSOR=`uname -p` - if test "$UNAME_PROCESSOR" = "x86"; then - UNAME_PROCESSOR=i386 - UNAME_MACHINE=pc - fi - echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE} - exit ;; - *:QNX:*:4*) - echo i386-pc-qnx - exit ;; - NEO-?:NONSTOP_KERNEL:*:*) - echo neo-tandem-nsk${UNAME_RELEASE} - exit ;; - NSE-*:NONSTOP_KERNEL:*:*) - echo nse-tandem-nsk${UNAME_RELEASE} - exit ;; - NSR-?:NONSTOP_KERNEL:*:*) - echo nsr-tandem-nsk${UNAME_RELEASE} - exit ;; - *:NonStop-UX:*:*) - echo mips-compaq-nonstopux - exit ;; - BS2000:POSIX*:*:*) - echo bs2000-siemens-sysv - exit ;; - DS/*:UNIX_System_V:*:*) - echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE} - exit ;; - *:Plan9:*:*) - # "uname -m" is not consistent, so use $cputype instead. 386 - # is converted to i386 for consistency with other x86 - # operating systems. - if test "$cputype" = "386"; then - UNAME_MACHINE=i386 - else - UNAME_MACHINE="$cputype" - fi - echo ${UNAME_MACHINE}-unknown-plan9 - exit ;; - *:TOPS-10:*:*) - echo pdp10-unknown-tops10 - exit ;; - *:TENEX:*:*) - echo pdp10-unknown-tenex - exit ;; - KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*) - echo pdp10-dec-tops20 - exit ;; - XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*) - echo pdp10-xkl-tops20 - exit ;; - *:TOPS-20:*:*) - echo pdp10-unknown-tops20 - exit ;; - *:ITS:*:*) - echo pdp10-unknown-its - exit ;; - SEI:*:*:SEIUX) - echo mips-sei-seiux${UNAME_RELEASE} - exit ;; - *:DragonFly:*:*) - echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` - exit ;; - *:*VMS:*:*) - UNAME_MACHINE=`(uname -p) 2>/dev/null` - case "${UNAME_MACHINE}" in - A*) echo alpha-dec-vms ; exit ;; - I*) echo ia64-dec-vms ; exit ;; - V*) echo vax-dec-vms ; exit ;; - esac ;; - *:XENIX:*:SysV) - echo i386-pc-xenix - exit ;; - i*86:skyos:*:*) - echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE}` | sed -e 's/ .*$//' - exit ;; - i*86:rdos:*:*) - echo ${UNAME_MACHINE}-pc-rdos - exit ;; - i*86:AROS:*:*) - echo ${UNAME_MACHINE}-pc-aros - exit ;; - x86_64:VMkernel:*:*) - echo ${UNAME_MACHINE}-unknown-esx - exit ;; -esac - -eval $set_cc_for_build -cat >$dummy.c < -# include -#endif -main () -{ -#if defined (sony) -#if defined (MIPSEB) - /* BFD wants "bsd" instead of "newsos". 
Perhaps BFD should be changed, - I don't know.... */ - printf ("mips-sony-bsd\n"); exit (0); -#else -#include - printf ("m68k-sony-newsos%s\n", -#ifdef NEWSOS4 - "4" -#else - "" -#endif - ); exit (0); -#endif -#endif - -#if defined (__arm) && defined (__acorn) && defined (__unix) - printf ("arm-acorn-riscix\n"); exit (0); -#endif - -#if defined (hp300) && !defined (hpux) - printf ("m68k-hp-bsd\n"); exit (0); -#endif - -#if defined (NeXT) -#if !defined (__ARCHITECTURE__) -#define __ARCHITECTURE__ "m68k" -#endif - int version; - version=`(hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null`; - if (version < 4) - printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version); - else - printf ("%s-next-openstep%d\n", __ARCHITECTURE__, version); - exit (0); -#endif - -#if defined (MULTIMAX) || defined (n16) -#if defined (UMAXV) - printf ("ns32k-encore-sysv\n"); exit (0); -#else -#if defined (CMU) - printf ("ns32k-encore-mach\n"); exit (0); -#else - printf ("ns32k-encore-bsd\n"); exit (0); -#endif -#endif -#endif - -#if defined (__386BSD__) - printf ("i386-pc-bsd\n"); exit (0); -#endif - -#if defined (sequent) -#if defined (i386) - printf ("i386-sequent-dynix\n"); exit (0); -#endif -#if defined (ns32000) - printf ("ns32k-sequent-dynix\n"); exit (0); -#endif -#endif - -#if defined (_SEQUENT_) - struct utsname un; - - uname(&un); - - if (strncmp(un.version, "V2", 2) == 0) { - printf ("i386-sequent-ptx2\n"); exit (0); - } - if (strncmp(un.version, "V1", 2) == 0) { /* XXX is V1 correct? */ - printf ("i386-sequent-ptx1\n"); exit (0); - } - printf ("i386-sequent-ptx\n"); exit (0); - -#endif - -#if defined (vax) -# if !defined (ultrix) -# include -# if defined (BSD) -# if BSD == 43 - printf ("vax-dec-bsd4.3\n"); exit (0); -# else -# if BSD == 199006 - printf ("vax-dec-bsd4.3reno\n"); exit (0); -# else - printf ("vax-dec-bsd\n"); exit (0); -# endif -# endif -# else - printf ("vax-dec-bsd\n"); exit (0); -# endif -# else - printf ("vax-dec-ultrix\n"); exit (0); -# endif -#endif - -#if defined (alliant) && defined (i860) - printf ("i860-alliant-bsd\n"); exit (0); -#endif - - exit (1); -} -EOF - -$CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null && SYSTEM_NAME=`$dummy` && - { echo "$SYSTEM_NAME"; exit; } - -# Apollos put the system type in the environment. - -test -d /usr/apollo && { echo ${ISP}-apollo-${SYSTYPE}; exit; } - -# Convex versions that predate uname can use getsysinfo(1) - -if [ -x /usr/convex/getsysinfo ] -then - case `getsysinfo -f cpu_type` in - c1*) - echo c1-convex-bsd - exit ;; - c2*) - if getsysinfo -f scalar_acc - then echo c32-convex-bsd - else echo c2-convex-bsd - fi - exit ;; - c34*) - echo c34-convex-bsd - exit ;; - c38*) - echo c38-convex-bsd - exit ;; - c4*) - echo c4-convex-bsd - exit ;; - esac -fi - -cat >&2 < in order to provide the needed -information to handle your system. 
- -config.guess timestamp = $timestamp - -uname -m = `(uname -m) 2>/dev/null || echo unknown` -uname -r = `(uname -r) 2>/dev/null || echo unknown` -uname -s = `(uname -s) 2>/dev/null || echo unknown` -uname -v = `(uname -v) 2>/dev/null || echo unknown` - -/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null` -/bin/uname -X = `(/bin/uname -X) 2>/dev/null` - -hostinfo = `(hostinfo) 2>/dev/null` -/bin/universe = `(/bin/universe) 2>/dev/null` -/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null` -/bin/arch = `(/bin/arch) 2>/dev/null` -/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null` -/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null` - -UNAME_MACHINE = ${UNAME_MACHINE} -UNAME_RELEASE = ${UNAME_RELEASE} -UNAME_SYSTEM = ${UNAME_SYSTEM} -UNAME_VERSION = ${UNAME_VERSION} -EOF - -exit 1 - -# Local variables: -# eval: (add-hook 'write-file-hooks 'time-stamp) -# time-stamp-start: "timestamp='" -# time-stamp-format: "%:y-%02m-%02d" -# time-stamp-end: "'" -# End: diff --git a/lib/jemalloc-3.6.0/config.stamp.in b/lib/jemalloc-3.6.0/config.stamp.in deleted file mode 100644 index e69de29..0000000 diff --git a/lib/jemalloc-3.6.0/config.sub b/lib/jemalloc-3.6.0/config.sub deleted file mode 100755 index 61cb4bc..0000000 --- a/lib/jemalloc-3.6.0/config.sub +++ /dev/null @@ -1,1793 +0,0 @@ -#! /bin/sh -# Configuration validation subroutine script. -# Copyright 1992-2013 Free Software Foundation, Inc. - -timestamp='2013-10-01' - -# This file is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, see . -# -# As a special exception to the GNU General Public License, if you -# distribute this file as part of a program that contains a -# configuration script generated by Autoconf, you may include it under -# the same distribution terms that you use for the rest of that -# program. This Exception is an additional permission under section 7 -# of the GNU General Public License, version 3 ("GPLv3"). - - -# Please send patches with a ChangeLog entry to config-patches@gnu.org. -# -# Configuration subroutine to validate and canonicalize a configuration type. -# Supply the specified configuration type as an argument. -# If it is invalid, we print an error message on stderr and exit with code 1. -# Otherwise, we print the canonical config type on stdout and succeed. - -# You can get the latest version of this script from: -# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD - -# This file is supposed to be the same for all GNU packages -# and recognize all the CPU types, system types and aliases -# that are meaningful with *any* GNU software. -# Each package is responsible for reporting which valid configurations -# it does not support. The user should be able to distinguish -# a failure to support a valid configuration from a meaningless -# configuration. 
- -# The goal of this file is to map all the various variations of a given -# machine specification into a single specification in the form: -# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM -# or in some cases, the newer four-part form: -# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM -# It is wrong to echo any other type of specification. - -me=`echo "$0" | sed -e 's,.*/,,'` - -usage="\ -Usage: $0 [OPTION] CPU-MFR-OPSYS - $0 [OPTION] ALIAS - -Canonicalize a configuration name. - -Operation modes: - -h, --help print this help, then exit - -t, --time-stamp print date of last modification, then exit - -v, --version print version number, then exit - -Report bugs and patches to ." - -version="\ -GNU config.sub ($timestamp) - -Copyright 1992-2013 Free Software Foundation, Inc. - -This is free software; see the source for copying conditions. There is NO -warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." - -help=" -Try \`$me --help' for more information." - -# Parse command line -while test $# -gt 0 ; do - case $1 in - --time-stamp | --time* | -t ) - echo "$timestamp" ; exit ;; - --version | -v ) - echo "$version" ; exit ;; - --help | --h* | -h ) - echo "$usage"; exit ;; - -- ) # Stop option processing - shift; break ;; - - ) # Use stdin as input. - break ;; - -* ) - echo "$me: invalid option $1$help" - exit 1 ;; - - *local*) - # First pass through any local machine types. - echo $1 - exit ;; - - * ) - break ;; - esac -done - -case $# in - 0) echo "$me: missing argument$help" >&2 - exit 1;; - 1) ;; - *) echo "$me: too many arguments$help" >&2 - exit 1;; -esac - -# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any). -# Here we must recognize all the valid KERNEL-OS combinations. -maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'` -case $maybe_os in - nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \ - linux-musl* | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \ - knetbsd*-gnu* | netbsd*-gnu* | \ - kopensolaris*-gnu* | \ - storm-chaos* | os2-emx* | rtmk-nova*) - os=-$maybe_os - basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'` - ;; - android-linux) - os=-linux-android - basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`-unknown - ;; - *) - basic_machine=`echo $1 | sed 's/-[^-]*$//'` - if [ $basic_machine != $1 ] - then os=`echo $1 | sed 's/.*-/-/'` - else os=; fi - ;; -esac - -### Let's recognize common machines as not being operating systems so -### that things like config.sub decstation-3100 work. We also -### recognize some manufacturers as not being operating systems, so we -### can provide default operating systems below. -case $os in - -sun*os*) - # Prevent following clause from handling this invalid input. 
- ;; - -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \ - -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \ - -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \ - -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\ - -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \ - -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \ - -apple | -axis | -knuth | -cray | -microblaze*) - os= - basic_machine=$1 - ;; - -bluegene*) - os=-cnk - ;; - -sim | -cisco | -oki | -wec | -winbond) - os= - basic_machine=$1 - ;; - -scout) - ;; - -wrs) - os=-vxworks - basic_machine=$1 - ;; - -chorusos*) - os=-chorusos - basic_machine=$1 - ;; - -chorusrdb) - os=-chorusrdb - basic_machine=$1 - ;; - -hiux*) - os=-hiuxwe2 - ;; - -sco6) - os=-sco5v6 - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -sco5) - os=-sco3.2v5 - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -sco4) - os=-sco3.2v4 - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -sco3.2.[4-9]*) - os=`echo $os | sed -e 's/sco3.2./sco3.2v/'` - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -sco3.2v[4-9]*) - # Don't forget version if it is 3.2v4 or newer. - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -sco5v6*) - # Don't forget version if it is 3.2v4 or newer. - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -sco*) - os=-sco3.2v2 - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -udk*) - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -isc) - os=-isc2.2 - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -clix*) - basic_machine=clipper-intergraph - ;; - -isc*) - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -lynx*178) - os=-lynxos178 - ;; - -lynx*5) - os=-lynxos5 - ;; - -lynx*) - os=-lynxos - ;; - -ptx*) - basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'` - ;; - -windowsnt*) - os=`echo $os | sed -e 's/windowsnt/winnt/'` - ;; - -psos*) - os=-psos - ;; - -mint | -mint[0-9]*) - basic_machine=m68k-atari - os=-mint - ;; -esac - -# Decode aliases for certain CPU-COMPANY combinations. -case $basic_machine in - # Recognize the basic CPU types without company name. - # Some are omitted here because they have special meanings below. 
- 1750a | 580 \ - | a29k \ - | aarch64 | aarch64_be \ - | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \ - | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \ - | am33_2.0 \ - | arc | arceb \ - | arm | arm[bl]e | arme[lb] | armv[2-8] | armv[3-8][lb] | armv7[arm] \ - | avr | avr32 \ - | be32 | be64 \ - | bfin \ - | c4x | c8051 | clipper \ - | d10v | d30v | dlx | dsp16xx \ - | epiphany \ - | fido | fr30 | frv \ - | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \ - | hexagon \ - | i370 | i860 | i960 | ia64 \ - | ip2k | iq2000 \ - | k1om \ - | le32 | le64 \ - | lm32 \ - | m32c | m32r | m32rle | m68000 | m68k | m88k \ - | maxq | mb | microblaze | microblazeel | mcore | mep | metag \ - | mips | mipsbe | mipseb | mipsel | mipsle \ - | mips16 \ - | mips64 | mips64el \ - | mips64octeon | mips64octeonel \ - | mips64orion | mips64orionel \ - | mips64r5900 | mips64r5900el \ - | mips64vr | mips64vrel \ - | mips64vr4100 | mips64vr4100el \ - | mips64vr4300 | mips64vr4300el \ - | mips64vr5000 | mips64vr5000el \ - | mips64vr5900 | mips64vr5900el \ - | mipsisa32 | mipsisa32el \ - | mipsisa32r2 | mipsisa32r2el \ - | mipsisa64 | mipsisa64el \ - | mipsisa64r2 | mipsisa64r2el \ - | mipsisa64sb1 | mipsisa64sb1el \ - | mipsisa64sr71k | mipsisa64sr71kel \ - | mipsr5900 | mipsr5900el \ - | mipstx39 | mipstx39el \ - | mn10200 | mn10300 \ - | moxie \ - | mt \ - | msp430 \ - | nds32 | nds32le | nds32be \ - | nios | nios2 | nios2eb | nios2el \ - | ns16k | ns32k \ - | open8 \ - | or1k | or32 \ - | pdp10 | pdp11 | pj | pjl \ - | powerpc | powerpc64 | powerpc64le | powerpcle \ - | pyramid \ - | rl78 | rx \ - | score \ - | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[34]eb | sheb | shbe | shle | sh[1234]le | sh3ele \ - | sh64 | sh64le \ - | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \ - | sparcv8 | sparcv9 | sparcv9b | sparcv9v \ - | spu \ - | tahoe | tic4x | tic54x | tic55x | tic6x | tic80 | tron \ - | ubicom32 \ - | v850 | v850e | v850e1 | v850e2 | v850es | v850e2v3 \ - | we32k \ - | x86 | xc16x | xstormy16 | xtensa \ - | z8k | z80) - basic_machine=$basic_machine-unknown - ;; - c54x) - basic_machine=tic54x-unknown - ;; - c55x) - basic_machine=tic55x-unknown - ;; - c6x) - basic_machine=tic6x-unknown - ;; - m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | nvptx | picochip) - basic_machine=$basic_machine-unknown - os=-none - ;; - m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k) - ;; - ms1) - basic_machine=mt-unknown - ;; - - strongarm | thumb | xscale) - basic_machine=arm-unknown - ;; - xgate) - basic_machine=$basic_machine-unknown - os=-none - ;; - xscaleeb) - basic_machine=armeb-unknown - ;; - - xscaleel) - basic_machine=armel-unknown - ;; - - # We use `pc' rather than `unknown' - # because (1) that's what they normally are, and - # (2) the word "unknown" tends to confuse beginning users. - i*86 | x86_64) - basic_machine=$basic_machine-pc - ;; - # Object if more than one company name word. - *-*-*) - echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 - exit 1 - ;; - # Recognize the basic CPU types with company name. 
- 580-* \ - | a29k-* \ - | aarch64-* | aarch64_be-* \ - | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \ - | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \ - | alphapca5[67]-* | alpha64pca5[67]-* | arc-* | arceb-* \ - | arm-* | armbe-* | armle-* | armeb-* | armv*-* \ - | avr-* | avr32-* \ - | be32-* | be64-* \ - | bfin-* | bs2000-* \ - | c[123]* | c30-* | [cjt]90-* | c4x-* \ - | c8051-* | clipper-* | craynv-* | cydra-* \ - | d10v-* | d30v-* | dlx-* \ - | elxsi-* \ - | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \ - | h8300-* | h8500-* \ - | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \ - | hexagon-* \ - | i*86-* | i860-* | i960-* | ia64-* \ - | ip2k-* | iq2000-* \ - | k1om-* \ - | le32-* | le64-* \ - | lm32-* \ - | m32c-* | m32r-* | m32rle-* \ - | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \ - | m88110-* | m88k-* | maxq-* | mcore-* | metag-* \ - | microblaze-* | microblazeel-* \ - | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \ - | mips16-* \ - | mips64-* | mips64el-* \ - | mips64octeon-* | mips64octeonel-* \ - | mips64orion-* | mips64orionel-* \ - | mips64r5900-* | mips64r5900el-* \ - | mips64vr-* | mips64vrel-* \ - | mips64vr4100-* | mips64vr4100el-* \ - | mips64vr4300-* | mips64vr4300el-* \ - | mips64vr5000-* | mips64vr5000el-* \ - | mips64vr5900-* | mips64vr5900el-* \ - | mipsisa32-* | mipsisa32el-* \ - | mipsisa32r2-* | mipsisa32r2el-* \ - | mipsisa64-* | mipsisa64el-* \ - | mipsisa64r2-* | mipsisa64r2el-* \ - | mipsisa64sb1-* | mipsisa64sb1el-* \ - | mipsisa64sr71k-* | mipsisa64sr71kel-* \ - | mipsr5900-* | mipsr5900el-* \ - | mipstx39-* | mipstx39el-* \ - | mmix-* \ - | mt-* \ - | msp430-* \ - | nds32-* | nds32le-* | nds32be-* \ - | nios-* | nios2-* | nios2eb-* | nios2el-* \ - | none-* | np1-* | ns16k-* | ns32k-* \ - | open8-* \ - | orion-* \ - | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \ - | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \ - | pyramid-* \ - | rl78-* | romp-* | rs6000-* | rx-* \ - | sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \ - | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \ - | sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \ - | sparclite-* \ - | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx?-* \ - | tahoe-* \ - | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \ - | tile*-* \ - | tron-* \ - | ubicom32-* \ - | v850-* | v850e-* | v850e1-* | v850es-* | v850e2-* | v850e2v3-* \ - | vax-* \ - | we32k-* \ - | x86-* | x86_64-* | xc16x-* | xps100-* \ - | xstormy16-* | xtensa*-* \ - | ymp-* \ - | z8k-* | z80-*) - ;; - # Recognize the basic CPU types without company name, with glob match. - xtensa*) - basic_machine=$basic_machine-unknown - ;; - # Recognize the various machine names and aliases which stand - # for a CPU type and a company and sometimes even an OS. 
- 386bsd) - basic_machine=i386-unknown - os=-bsd - ;; - 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc) - basic_machine=m68000-att - ;; - 3b*) - basic_machine=we32k-att - ;; - a29khif) - basic_machine=a29k-amd - os=-udi - ;; - abacus) - basic_machine=abacus-unknown - ;; - adobe68k) - basic_machine=m68010-adobe - os=-scout - ;; - alliant | fx80) - basic_machine=fx80-alliant - ;; - altos | altos3068) - basic_machine=m68k-altos - ;; - am29k) - basic_machine=a29k-none - os=-bsd - ;; - amd64) - basic_machine=x86_64-pc - ;; - amd64-*) - basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - amdahl) - basic_machine=580-amdahl - os=-sysv - ;; - amiga | amiga-*) - basic_machine=m68k-unknown - ;; - amigaos | amigados) - basic_machine=m68k-unknown - os=-amigaos - ;; - amigaunix | amix) - basic_machine=m68k-unknown - os=-sysv4 - ;; - apollo68) - basic_machine=m68k-apollo - os=-sysv - ;; - apollo68bsd) - basic_machine=m68k-apollo - os=-bsd - ;; - aros) - basic_machine=i386-pc - os=-aros - ;; - aux) - basic_machine=m68k-apple - os=-aux - ;; - balance) - basic_machine=ns32k-sequent - os=-dynix - ;; - blackfin) - basic_machine=bfin-unknown - os=-linux - ;; - blackfin-*) - basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'` - os=-linux - ;; - bluegene*) - basic_machine=powerpc-ibm - os=-cnk - ;; - c54x-*) - basic_machine=tic54x-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - c55x-*) - basic_machine=tic55x-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - c6x-*) - basic_machine=tic6x-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - c90) - basic_machine=c90-cray - os=-unicos - ;; - cegcc) - basic_machine=arm-unknown - os=-cegcc - ;; - convex-c1) - basic_machine=c1-convex - os=-bsd - ;; - convex-c2) - basic_machine=c2-convex - os=-bsd - ;; - convex-c32) - basic_machine=c32-convex - os=-bsd - ;; - convex-c34) - basic_machine=c34-convex - os=-bsd - ;; - convex-c38) - basic_machine=c38-convex - os=-bsd - ;; - cray | j90) - basic_machine=j90-cray - os=-unicos - ;; - craynv) - basic_machine=craynv-cray - os=-unicosmp - ;; - cr16 | cr16-*) - basic_machine=cr16-unknown - os=-elf - ;; - crds | unos) - basic_machine=m68k-crds - ;; - crisv32 | crisv32-* | etraxfs*) - basic_machine=crisv32-axis - ;; - cris | cris-* | etrax*) - basic_machine=cris-axis - ;; - crx) - basic_machine=crx-unknown - os=-elf - ;; - da30 | da30-*) - basic_machine=m68k-da30 - ;; - decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn) - basic_machine=mips-dec - ;; - decsystem10* | dec10*) - basic_machine=pdp10-dec - os=-tops10 - ;; - decsystem20* | dec20*) - basic_machine=pdp10-dec - os=-tops20 - ;; - delta | 3300 | motorola-3300 | motorola-delta \ - | 3300-motorola | delta-motorola) - basic_machine=m68k-motorola - ;; - delta88) - basic_machine=m88k-motorola - os=-sysv3 - ;; - dicos) - basic_machine=i686-pc - os=-dicos - ;; - djgpp) - basic_machine=i586-pc - os=-msdosdjgpp - ;; - dpx20 | dpx20-*) - basic_machine=rs6000-bull - os=-bosx - ;; - dpx2* | dpx2*-bull) - basic_machine=m68k-bull - os=-sysv3 - ;; - ebmon29k) - basic_machine=a29k-amd - os=-ebmon - ;; - elxsi) - basic_machine=elxsi-elxsi - os=-bsd - ;; - encore | umax | mmax) - basic_machine=ns32k-encore - ;; - es1800 | OSE68k | ose68k | ose | OSE) - basic_machine=m68k-ericsson - os=-ose - ;; - fx2800) - basic_machine=i860-alliant - ;; - genix) - basic_machine=ns32k-ns - ;; - gmicro) - basic_machine=tron-gmicro - os=-sysv - ;; - go32) - basic_machine=i386-pc - os=-go32 - ;; - h3050r* | hiux*) - basic_machine=hppa1.1-hitachi - os=-hiuxwe2 
- ;; - h8300hms) - basic_machine=h8300-hitachi - os=-hms - ;; - h8300xray) - basic_machine=h8300-hitachi - os=-xray - ;; - h8500hms) - basic_machine=h8500-hitachi - os=-hms - ;; - harris) - basic_machine=m88k-harris - os=-sysv3 - ;; - hp300-*) - basic_machine=m68k-hp - ;; - hp300bsd) - basic_machine=m68k-hp - os=-bsd - ;; - hp300hpux) - basic_machine=m68k-hp - os=-hpux - ;; - hp3k9[0-9][0-9] | hp9[0-9][0-9]) - basic_machine=hppa1.0-hp - ;; - hp9k2[0-9][0-9] | hp9k31[0-9]) - basic_machine=m68000-hp - ;; - hp9k3[2-9][0-9]) - basic_machine=m68k-hp - ;; - hp9k6[0-9][0-9] | hp6[0-9][0-9]) - basic_machine=hppa1.0-hp - ;; - hp9k7[0-79][0-9] | hp7[0-79][0-9]) - basic_machine=hppa1.1-hp - ;; - hp9k78[0-9] | hp78[0-9]) - # FIXME: really hppa2.0-hp - basic_machine=hppa1.1-hp - ;; - hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893) - # FIXME: really hppa2.0-hp - basic_machine=hppa1.1-hp - ;; - hp9k8[0-9][13679] | hp8[0-9][13679]) - basic_machine=hppa1.1-hp - ;; - hp9k8[0-9][0-9] | hp8[0-9][0-9]) - basic_machine=hppa1.0-hp - ;; - hppa-next) - os=-nextstep3 - ;; - hppaosf) - basic_machine=hppa1.1-hp - os=-osf - ;; - hppro) - basic_machine=hppa1.1-hp - os=-proelf - ;; - i370-ibm* | ibm*) - basic_machine=i370-ibm - ;; - i*86v32) - basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` - os=-sysv32 - ;; - i*86v4*) - basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` - os=-sysv4 - ;; - i*86v) - basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` - os=-sysv - ;; - i*86sol2) - basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` - os=-solaris2 - ;; - i386mach) - basic_machine=i386-mach - os=-mach - ;; - i386-vsta | vsta) - basic_machine=i386-unknown - os=-vsta - ;; - iris | iris4d) - basic_machine=mips-sgi - case $os in - -irix*) - ;; - *) - os=-irix4 - ;; - esac - ;; - isi68 | isi) - basic_machine=m68k-isi - os=-sysv - ;; - m68knommu) - basic_machine=m68k-unknown - os=-linux - ;; - m68knommu-*) - basic_machine=m68k-`echo $basic_machine | sed 's/^[^-]*-//'` - os=-linux - ;; - m88k-omron*) - basic_machine=m88k-omron - ;; - magnum | m3230) - basic_machine=mips-mips - os=-sysv - ;; - merlin) - basic_machine=ns32k-utek - os=-sysv - ;; - microblaze*) - basic_machine=microblaze-xilinx - ;; - mingw64) - basic_machine=x86_64-pc - os=-mingw64 - ;; - mingw32) - basic_machine=i686-pc - os=-mingw32 - ;; - mingw32ce) - basic_machine=arm-unknown - os=-mingw32ce - ;; - miniframe) - basic_machine=m68000-convergent - ;; - *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*) - basic_machine=m68k-atari - os=-mint - ;; - mips3*-*) - basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'` - ;; - mips3*) - basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown - ;; - monitor) - basic_machine=m68k-rom68k - os=-coff - ;; - morphos) - basic_machine=powerpc-unknown - os=-morphos - ;; - msdos) - basic_machine=i386-pc - os=-msdos - ;; - ms1-*) - basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'` - ;; - msys) - basic_machine=i686-pc - os=-msys - ;; - mvs) - basic_machine=i370-ibm - os=-mvs - ;; - nacl) - basic_machine=le32-unknown - os=-nacl - ;; - ncr3000) - basic_machine=i486-ncr - os=-sysv4 - ;; - netbsd386) - basic_machine=i386-unknown - os=-netbsd - ;; - netwinder) - basic_machine=armv4l-rebel - os=-linux - ;; - news | news700 | news800 | news900) - basic_machine=m68k-sony - os=-newsos - ;; - news1000) - basic_machine=m68030-sony - os=-newsos - ;; - news-3600 | risc-news) - basic_machine=mips-sony - os=-newsos - ;; - necv70) - basic_machine=v70-nec - os=-sysv - ;; - next | m*-next ) - 
basic_machine=m68k-next - case $os in - -nextstep* ) - ;; - -ns2*) - os=-nextstep2 - ;; - *) - os=-nextstep3 - ;; - esac - ;; - nh3000) - basic_machine=m68k-harris - os=-cxux - ;; - nh[45]000) - basic_machine=m88k-harris - os=-cxux - ;; - nindy960) - basic_machine=i960-intel - os=-nindy - ;; - mon960) - basic_machine=i960-intel - os=-mon960 - ;; - nonstopux) - basic_machine=mips-compaq - os=-nonstopux - ;; - np1) - basic_machine=np1-gould - ;; - neo-tandem) - basic_machine=neo-tandem - ;; - nse-tandem) - basic_machine=nse-tandem - ;; - nsr-tandem) - basic_machine=nsr-tandem - ;; - op50n-* | op60c-*) - basic_machine=hppa1.1-oki - os=-proelf - ;; - openrisc | openrisc-*) - basic_machine=or32-unknown - ;; - os400) - basic_machine=powerpc-ibm - os=-os400 - ;; - OSE68000 | ose68000) - basic_machine=m68000-ericsson - os=-ose - ;; - os68k) - basic_machine=m68k-none - os=-os68k - ;; - pa-hitachi) - basic_machine=hppa1.1-hitachi - os=-hiuxwe2 - ;; - paragon) - basic_machine=i860-intel - os=-osf - ;; - parisc) - basic_machine=hppa-unknown - os=-linux - ;; - parisc-*) - basic_machine=hppa-`echo $basic_machine | sed 's/^[^-]*-//'` - os=-linux - ;; - pbd) - basic_machine=sparc-tti - ;; - pbb) - basic_machine=m68k-tti - ;; - pc532 | pc532-*) - basic_machine=ns32k-pc532 - ;; - pc98) - basic_machine=i386-pc - ;; - pc98-*) - basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - pentium | p5 | k5 | k6 | nexgen | viac3) - basic_machine=i586-pc - ;; - pentiumpro | p6 | 6x86 | athlon | athlon_*) - basic_machine=i686-pc - ;; - pentiumii | pentium2 | pentiumiii | pentium3) - basic_machine=i686-pc - ;; - pentium4) - basic_machine=i786-pc - ;; - pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*) - basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - pentiumpro-* | p6-* | 6x86-* | athlon-*) - basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*) - basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - pentium4-*) - basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - pn) - basic_machine=pn-gould - ;; - power) basic_machine=power-ibm - ;; - ppc | ppcbe) basic_machine=powerpc-unknown - ;; - ppc-* | ppcbe-*) - basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - ppcle | powerpclittle | ppc-le | powerpc-little) - basic_machine=powerpcle-unknown - ;; - ppcle-* | powerpclittle-*) - basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - ppc64) basic_machine=powerpc64-unknown - ;; - ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - ppc64le | powerpc64little | ppc64-le | powerpc64-little) - basic_machine=powerpc64le-unknown - ;; - ppc64le-* | powerpc64little-*) - basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - ps2) - basic_machine=i386-ibm - ;; - pw32) - basic_machine=i586-unknown - os=-pw32 - ;; - rdos | rdos64) - basic_machine=x86_64-pc - os=-rdos - ;; - rdos32) - basic_machine=i386-pc - os=-rdos - ;; - rom68k) - basic_machine=m68k-rom68k - os=-coff - ;; - rm[46]00) - basic_machine=mips-siemens - ;; - rtpc | rtpc-*) - basic_machine=romp-ibm - ;; - s390 | s390-*) - basic_machine=s390-ibm - ;; - s390x | s390x-*) - basic_machine=s390x-ibm - ;; - sa29200) - basic_machine=a29k-amd - os=-udi - ;; - sb1) - basic_machine=mipsisa64sb1-unknown - ;; - sb1el) - basic_machine=mipsisa64sb1el-unknown - ;; - sde) - basic_machine=mipsisa32-sde - os=-elf - ;; - sei) - basic_machine=mips-sei - os=-seiux - ;; - sequent) - 
basic_machine=i386-sequent - ;; - sh) - basic_machine=sh-hitachi - os=-hms - ;; - sh5el) - basic_machine=sh5le-unknown - ;; - sh64) - basic_machine=sh64-unknown - ;; - sparclite-wrs | simso-wrs) - basic_machine=sparclite-wrs - os=-vxworks - ;; - sps7) - basic_machine=m68k-bull - os=-sysv2 - ;; - spur) - basic_machine=spur-unknown - ;; - st2000) - basic_machine=m68k-tandem - ;; - stratus) - basic_machine=i860-stratus - os=-sysv4 - ;; - strongarm-* | thumb-*) - basic_machine=arm-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - sun2) - basic_machine=m68000-sun - ;; - sun2os3) - basic_machine=m68000-sun - os=-sunos3 - ;; - sun2os4) - basic_machine=m68000-sun - os=-sunos4 - ;; - sun3os3) - basic_machine=m68k-sun - os=-sunos3 - ;; - sun3os4) - basic_machine=m68k-sun - os=-sunos4 - ;; - sun4os3) - basic_machine=sparc-sun - os=-sunos3 - ;; - sun4os4) - basic_machine=sparc-sun - os=-sunos4 - ;; - sun4sol2) - basic_machine=sparc-sun - os=-solaris2 - ;; - sun3 | sun3-*) - basic_machine=m68k-sun - ;; - sun4) - basic_machine=sparc-sun - ;; - sun386 | sun386i | roadrunner) - basic_machine=i386-sun - ;; - sv1) - basic_machine=sv1-cray - os=-unicos - ;; - symmetry) - basic_machine=i386-sequent - os=-dynix - ;; - t3e) - basic_machine=alphaev5-cray - os=-unicos - ;; - t90) - basic_machine=t90-cray - os=-unicos - ;; - tile*) - basic_machine=$basic_machine-unknown - os=-linux-gnu - ;; - tx39) - basic_machine=mipstx39-unknown - ;; - tx39el) - basic_machine=mipstx39el-unknown - ;; - toad1) - basic_machine=pdp10-xkl - os=-tops20 - ;; - tower | tower-32) - basic_machine=m68k-ncr - ;; - tpf) - basic_machine=s390x-ibm - os=-tpf - ;; - udi29k) - basic_machine=a29k-amd - os=-udi - ;; - ultra3) - basic_machine=a29k-nyu - os=-sym1 - ;; - v810 | necv810) - basic_machine=v810-nec - os=-none - ;; - vaxv) - basic_machine=vax-dec - os=-sysv - ;; - vms) - basic_machine=vax-dec - os=-vms - ;; - vpp*|vx|vx-*) - basic_machine=f301-fujitsu - ;; - vxworks960) - basic_machine=i960-wrs - os=-vxworks - ;; - vxworks68) - basic_machine=m68k-wrs - os=-vxworks - ;; - vxworks29k) - basic_machine=a29k-wrs - os=-vxworks - ;; - w65*) - basic_machine=w65-wdc - os=-none - ;; - w89k-*) - basic_machine=hppa1.1-winbond - os=-proelf - ;; - xbox) - basic_machine=i686-pc - os=-mingw32 - ;; - xps | xps100) - basic_machine=xps100-honeywell - ;; - xscale-* | xscalee[bl]-*) - basic_machine=`echo $basic_machine | sed 's/^xscale/arm/'` - ;; - ymp) - basic_machine=ymp-cray - os=-unicos - ;; - z8k-*-coff) - basic_machine=z8k-unknown - os=-sim - ;; - z80-*-coff) - basic_machine=z80-unknown - os=-sim - ;; - none) - basic_machine=none-none - os=-none - ;; - -# Here we handle the default manufacturer of certain CPU types. It is in -# some cases the only manufacturer, in others, it is the most popular. 
- w89k) - basic_machine=hppa1.1-winbond - ;; - op50n) - basic_machine=hppa1.1-oki - ;; - op60c) - basic_machine=hppa1.1-oki - ;; - romp) - basic_machine=romp-ibm - ;; - mmix) - basic_machine=mmix-knuth - ;; - rs6000) - basic_machine=rs6000-ibm - ;; - vax) - basic_machine=vax-dec - ;; - pdp10) - # there are many clones, so DEC is not a safe bet - basic_machine=pdp10-unknown - ;; - pdp11) - basic_machine=pdp11-dec - ;; - we32k) - basic_machine=we32k-att - ;; - sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele) - basic_machine=sh-unknown - ;; - sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v) - basic_machine=sparc-sun - ;; - cydra) - basic_machine=cydra-cydrome - ;; - orion) - basic_machine=orion-highlevel - ;; - orion105) - basic_machine=clipper-highlevel - ;; - mac | mpw | mac-mpw) - basic_machine=m68k-apple - ;; - pmac | pmac-mpw) - basic_machine=powerpc-apple - ;; - *-unknown) - # Make sure to match an already-canonicalized machine name. - ;; - *) - echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 - exit 1 - ;; -esac - -# Here we canonicalize certain aliases for manufacturers. -case $basic_machine in - *-digital*) - basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'` - ;; - *-commodore*) - basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'` - ;; - *) - ;; -esac - -# Decode manufacturer-specific aliases for certain operating systems. - -if [ x"$os" != x"" ] -then -case $os in - # First match some system type aliases - # that might get confused with valid system types. - # -solaris* is a basic system type, with this one exception. - -auroraux) - os=-auroraux - ;; - -solaris1 | -solaris1.*) - os=`echo $os | sed -e 's|solaris1|sunos4|'` - ;; - -solaris) - os=-solaris2 - ;; - -svr4*) - os=-sysv4 - ;; - -unixware*) - os=-sysv4.2uw - ;; - -gnu/linux*) - os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'` - ;; - # First accept the basic system types. - # The portable systems comes first. - # Each alternative MUST END IN A *, to match a version number. - # -sysv* is not here because it comes later, after sysvr4. 
- -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \ - | -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\ - | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \ - | -sym* | -kopensolaris* | -plan9* \ - | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \ - | -aos* | -aros* \ - | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \ - | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \ - | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \ - | -bitrig* | -openbsd* | -solidbsd* \ - | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \ - | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \ - | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \ - | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \ - | -chorusos* | -chorusrdb* | -cegcc* \ - | -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \ - | -mingw32* | -mingw64* | -linux-gnu* | -linux-android* \ - | -linux-newlib* | -linux-musl* | -linux-uclibc* \ - | -uxpv* | -beos* | -mpeix* | -udk* \ - | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \ - | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \ - | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \ - | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \ - | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \ - | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \ - | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es*) - # Remember, each alternative MUST END IN *, to match a version number. - ;; - -qnx*) - case $basic_machine in - x86-* | i*86-*) - ;; - *) - os=-nto$os - ;; - esac - ;; - -nto-qnx*) - ;; - -nto*) - os=`echo $os | sed -e 's|nto|nto-qnx|'` - ;; - -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \ - | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \ - | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*) - ;; - -mac*) - os=`echo $os | sed -e 's|mac|macos|'` - ;; - -linux-dietlibc) - os=-linux-dietlibc - ;; - -linux*) - os=`echo $os | sed -e 's|linux|linux-gnu|'` - ;; - -sunos5*) - os=`echo $os | sed -e 's|sunos5|solaris2|'` - ;; - -sunos6*) - os=`echo $os | sed -e 's|sunos6|solaris3|'` - ;; - -opened*) - os=-openedition - ;; - -os400*) - os=-os400 - ;; - -wince*) - os=-wince - ;; - -osfrose*) - os=-osfrose - ;; - -osf*) - os=-osf - ;; - -utek*) - os=-bsd - ;; - -dynix*) - os=-bsd - ;; - -acis*) - os=-aos - ;; - -atheos*) - os=-atheos - ;; - -syllable*) - os=-syllable - ;; - -386bsd) - os=-bsd - ;; - -ctix* | -uts*) - os=-sysv - ;; - -nova*) - os=-rtmk-nova - ;; - -ns2 ) - os=-nextstep2 - ;; - -nsk*) - os=-nsk - ;; - # Preserve the version number of sinix5. - -sinix5.*) - os=`echo $os | sed -e 's|sinix|sysv|'` - ;; - -sinix*) - os=-sysv4 - ;; - -tpf*) - os=-tpf - ;; - -triton*) - os=-sysv3 - ;; - -oss*) - os=-sysv3 - ;; - -svr4) - os=-sysv4 - ;; - -svr3) - os=-sysv3 - ;; - -sysvr4) - os=-sysv4 - ;; - # This must come after -sysvr4. - -sysv*) - ;; - -ose*) - os=-ose - ;; - -es1800*) - os=-ose - ;; - -xenix) - os=-xenix - ;; - -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) - os=-mint - ;; - -aros*) - os=-aros - ;; - -zvmoe) - os=-zvmoe - ;; - -dicos*) - os=-dicos - ;; - -nacl*) - ;; - -none) - ;; - *) - # Get rid of the `-' at the beginning of $os. - os=`echo $os | sed 's/[^-]*-//'` - echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2 - exit 1 - ;; -esac -else - -# Here we handle the default operating systems that come with various machines. 
-# The value should be what the vendor currently ships out the door with their -# machine or put another way, the most popular os provided with the machine. - -# Note that if you're going to try to match "-MANUFACTURER" here (say, -# "-sun"), then you have to tell the case statement up towards the top -# that MANUFACTURER isn't an operating system. Otherwise, code above -# will signal an error saying that MANUFACTURER isn't an operating -# system, and we'll never get to this point. - -case $basic_machine in - score-*) - os=-elf - ;; - spu-*) - os=-elf - ;; - *-acorn) - os=-riscix1.2 - ;; - arm*-rebel) - os=-linux - ;; - arm*-semi) - os=-aout - ;; - c4x-* | tic4x-*) - os=-coff - ;; - c8051-*) - os=-elf - ;; - hexagon-*) - os=-elf - ;; - tic54x-*) - os=-coff - ;; - tic55x-*) - os=-coff - ;; - tic6x-*) - os=-coff - ;; - # This must come before the *-dec entry. - pdp10-*) - os=-tops20 - ;; - pdp11-*) - os=-none - ;; - *-dec | vax-*) - os=-ultrix4.2 - ;; - m68*-apollo) - os=-domain - ;; - i386-sun) - os=-sunos4.0.2 - ;; - m68000-sun) - os=-sunos3 - ;; - m68*-cisco) - os=-aout - ;; - mep-*) - os=-elf - ;; - mips*-cisco) - os=-elf - ;; - mips*-*) - os=-elf - ;; - or1k-*) - os=-elf - ;; - or32-*) - os=-coff - ;; - *-tti) # must be before sparc entry or we get the wrong os. - os=-sysv3 - ;; - sparc-* | *-sun) - os=-sunos4.1.1 - ;; - *-be) - os=-beos - ;; - *-haiku) - os=-haiku - ;; - *-ibm) - os=-aix - ;; - *-knuth) - os=-mmixware - ;; - *-wec) - os=-proelf - ;; - *-winbond) - os=-proelf - ;; - *-oki) - os=-proelf - ;; - *-hp) - os=-hpux - ;; - *-hitachi) - os=-hiux - ;; - i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent) - os=-sysv - ;; - *-cbm) - os=-amigaos - ;; - *-dg) - os=-dgux - ;; - *-dolphin) - os=-sysv3 - ;; - m68k-ccur) - os=-rtu - ;; - m88k-omron*) - os=-luna - ;; - *-next ) - os=-nextstep - ;; - *-sequent) - os=-ptx - ;; - *-crds) - os=-unos - ;; - *-ns) - os=-genix - ;; - i370-*) - os=-mvs - ;; - *-next) - os=-nextstep3 - ;; - *-gould) - os=-sysv - ;; - *-highlevel) - os=-bsd - ;; - *-encore) - os=-bsd - ;; - *-sgi) - os=-irix - ;; - *-siemens) - os=-sysv4 - ;; - *-masscomp) - os=-rtu - ;; - f30[01]-fujitsu | f700-fujitsu) - os=-uxpv - ;; - *-rom68k) - os=-coff - ;; - *-*bug) - os=-coff - ;; - *-apple) - os=-macos - ;; - *-atari*) - os=-mint - ;; - *) - os=-none - ;; -esac -fi - -# Here we handle the case where we know the os, and the CPU type, but not the -# manufacturer. We pick the logical manufacturer. 
-vendor=unknown -case $basic_machine in - *-unknown) - case $os in - -riscix*) - vendor=acorn - ;; - -sunos*) - vendor=sun - ;; - -cnk*|-aix*) - vendor=ibm - ;; - -beos*) - vendor=be - ;; - -hpux*) - vendor=hp - ;; - -mpeix*) - vendor=hp - ;; - -hiux*) - vendor=hitachi - ;; - -unos*) - vendor=crds - ;; - -dgux*) - vendor=dg - ;; - -luna*) - vendor=omron - ;; - -genix*) - vendor=ns - ;; - -mvs* | -opened*) - vendor=ibm - ;; - -os400*) - vendor=ibm - ;; - -ptx*) - vendor=sequent - ;; - -tpf*) - vendor=ibm - ;; - -vxsim* | -vxworks* | -windiss*) - vendor=wrs - ;; - -aux*) - vendor=apple - ;; - -hms*) - vendor=hitachi - ;; - -mpw* | -macos*) - vendor=apple - ;; - -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) - vendor=atari - ;; - -vos*) - vendor=stratus - ;; - esac - basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"` - ;; -esac - -echo $basic_machine$os -exit - -# Local variables: -# eval: (add-hook 'write-file-hooks 'time-stamp) -# time-stamp-start: "timestamp='" -# time-stamp-format: "%:y-%02m-%02d" -# time-stamp-end: "'" -# End: diff --git a/lib/jemalloc-3.6.0/configure.ac b/lib/jemalloc-3.6.0/configure.ac deleted file mode 100644 index 4de81dc..0000000 --- a/lib/jemalloc-3.6.0/configure.ac +++ /dev/null @@ -1,1487 +0,0 @@ -dnl Process this file with autoconf to produce a configure script. -AC_INIT([Makefile.in]) - -dnl ============================================================================ -dnl Custom macro definitions. - -dnl JE_CFLAGS_APPEND(cflag) -AC_DEFUN([JE_CFLAGS_APPEND], -[ -AC_MSG_CHECKING([whether compiler supports $1]) -TCFLAGS="${CFLAGS}" -if test "x${CFLAGS}" = "x" ; then - CFLAGS="$1" -else - CFLAGS="${CFLAGS} $1" -fi -AC_COMPILE_IFELSE([AC_LANG_PROGRAM( -[[ -]], [[ - return 0; -]])], - [je_cv_cflags_appended=$1] - AC_MSG_RESULT([yes]), - [je_cv_cflags_appended=] - AC_MSG_RESULT([no]) - [CFLAGS="${TCFLAGS}"] -) -]) - -dnl JE_COMPILABLE(label, hcode, mcode, rvar) -dnl -dnl Use AC_LINK_IFELSE() rather than AC_COMPILE_IFELSE() so that linker errors -dnl cause failure. -AC_DEFUN([JE_COMPILABLE], -[ -AC_CACHE_CHECK([whether $1 is compilable], - [$4], - [AC_LINK_IFELSE([AC_LANG_PROGRAM([$2], - [$3])], - [$4=yes], - [$4=no])]) -]) - -dnl ============================================================================ - -dnl Library revision. -rev=1 -AC_SUBST([rev]) - -srcroot=$srcdir -if test "x${srcroot}" = "x." ; then - srcroot="" -else - srcroot="${srcroot}/" -fi -AC_SUBST([srcroot]) -abs_srcroot="`cd \"${srcdir}\"; pwd`/" -AC_SUBST([abs_srcroot]) - -objroot="" -AC_SUBST([objroot]) -abs_objroot="`pwd`/" -AC_SUBST([abs_objroot]) - -dnl Munge install path variables. -if test "x$prefix" = "xNONE" ; then - prefix="/usr/local" -fi -if test "x$exec_prefix" = "xNONE" ; then - exec_prefix=$prefix -fi -PREFIX=$prefix -AC_SUBST([PREFIX]) -BINDIR=`eval echo $bindir` -BINDIR=`eval echo $BINDIR` -AC_SUBST([BINDIR]) -INCLUDEDIR=`eval echo $includedir` -INCLUDEDIR=`eval echo $INCLUDEDIR` -AC_SUBST([INCLUDEDIR]) -LIBDIR=`eval echo $libdir` -LIBDIR=`eval echo $LIBDIR` -AC_SUBST([LIBDIR]) -DATADIR=`eval echo $datadir` -DATADIR=`eval echo $DATADIR` -AC_SUBST([DATADIR]) -MANDIR=`eval echo $mandir` -MANDIR=`eval echo $MANDIR` -AC_SUBST([MANDIR]) - -dnl Support for building documentation. 
-AC_PATH_PROG([XSLTPROC], [xsltproc], [false], [$PATH]) -if test -d "/usr/share/xml/docbook/stylesheet/docbook-xsl" ; then - DEFAULT_XSLROOT="/usr/share/xml/docbook/stylesheet/docbook-xsl" -elif test -d "/usr/share/sgml/docbook/xsl-stylesheets" ; then - DEFAULT_XSLROOT="/usr/share/sgml/docbook/xsl-stylesheets" -else - dnl Documentation building will fail if this default gets used. - DEFAULT_XSLROOT="" -fi -AC_ARG_WITH([xslroot], - [AS_HELP_STRING([--with-xslroot=], [XSL stylesheet root path])], [ -if test "x$with_xslroot" = "xno" ; then - XSLROOT="${DEFAULT_XSLROOT}" -else - XSLROOT="${with_xslroot}" -fi -], - XSLROOT="${DEFAULT_XSLROOT}" -) -AC_SUBST([XSLROOT]) - -dnl If CFLAGS isn't defined, set CFLAGS to something reasonable. Otherwise, -dnl just prevent autoconf from molesting CFLAGS. -CFLAGS=$CFLAGS -AC_PROG_CC -if test "x$GCC" != "xyes" ; then - AC_CACHE_CHECK([whether compiler is MSVC], - [je_cv_msvc], - [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], - [ -#ifndef _MSC_VER - int fail[-1]; -#endif -])], - [je_cv_msvc=yes], - [je_cv_msvc=no])]) -fi - -if test "x$CFLAGS" = "x" ; then - no_CFLAGS="yes" - if test "x$GCC" = "xyes" ; then - JE_CFLAGS_APPEND([-std=gnu99]) - if test "x$je_cv_cflags_appended" = "x-std=gnu99" ; then - AC_DEFINE_UNQUOTED([JEMALLOC_HAS_RESTRICT]) - fi - JE_CFLAGS_APPEND([-Wall]) - JE_CFLAGS_APPEND([-pipe]) - JE_CFLAGS_APPEND([-g3]) - elif test "x$je_cv_msvc" = "xyes" ; then - CC="$CC -nologo" - JE_CFLAGS_APPEND([-Zi]) - JE_CFLAGS_APPEND([-MT]) - JE_CFLAGS_APPEND([-W3]) - CPPFLAGS="$CPPFLAGS -I${srcroot}/include/msvc_compat" - fi -fi -dnl Append EXTRA_CFLAGS to CFLAGS, if defined. -if test "x$EXTRA_CFLAGS" != "x" ; then - JE_CFLAGS_APPEND([$EXTRA_CFLAGS]) -fi -AC_PROG_CPP - -AC_C_BIGENDIAN([ac_cv_big_endian=1], [ac_cv_big_endian=0]) -if test "x${ac_cv_big_endian}" = "x1" ; then - AC_DEFINE_UNQUOTED([JEMALLOC_BIG_ENDIAN], [ ]) -fi - -AC_CHECK_SIZEOF([void *]) -if test "x${ac_cv_sizeof_void_p}" = "x8" ; then - LG_SIZEOF_PTR=3 -elif test "x${ac_cv_sizeof_void_p}" = "x4" ; then - LG_SIZEOF_PTR=2 -else - AC_MSG_ERROR([Unsupported pointer size: ${ac_cv_sizeof_void_p}]) -fi -AC_DEFINE_UNQUOTED([LG_SIZEOF_PTR], [$LG_SIZEOF_PTR]) - -AC_CHECK_SIZEOF([int]) -if test "x${ac_cv_sizeof_int}" = "x8" ; then - LG_SIZEOF_INT=3 -elif test "x${ac_cv_sizeof_int}" = "x4" ; then - LG_SIZEOF_INT=2 -else - AC_MSG_ERROR([Unsupported int size: ${ac_cv_sizeof_int}]) -fi -AC_DEFINE_UNQUOTED([LG_SIZEOF_INT], [$LG_SIZEOF_INT]) - -AC_CHECK_SIZEOF([long]) -if test "x${ac_cv_sizeof_long}" = "x8" ; then - LG_SIZEOF_LONG=3 -elif test "x${ac_cv_sizeof_long}" = "x4" ; then - LG_SIZEOF_LONG=2 -else - AC_MSG_ERROR([Unsupported long size: ${ac_cv_sizeof_long}]) -fi -AC_DEFINE_UNQUOTED([LG_SIZEOF_LONG], [$LG_SIZEOF_LONG]) - -AC_CHECK_SIZEOF([intmax_t]) -if test "x${ac_cv_sizeof_intmax_t}" = "x16" ; then - LG_SIZEOF_INTMAX_T=4 -elif test "x${ac_cv_sizeof_intmax_t}" = "x8" ; then - LG_SIZEOF_INTMAX_T=3 -elif test "x${ac_cv_sizeof_intmax_t}" = "x4" ; then - LG_SIZEOF_INTMAX_T=2 -else - AC_MSG_ERROR([Unsupported intmax_t size: ${ac_cv_sizeof_intmax_t}]) -fi -AC_DEFINE_UNQUOTED([LG_SIZEOF_INTMAX_T], [$LG_SIZEOF_INTMAX_T]) - -AC_CANONICAL_HOST -dnl CPU-specific settings. -CPU_SPINWAIT="" -case "${host_cpu}" in - i[[345]]86) - ;; - i686|x86_64) - JE_COMPILABLE([pause instruction], [], - [[__asm__ volatile("pause"); return 0;]], - [je_cv_pause]) - if test "x${je_cv_pause}" = "xyes" ; then - CPU_SPINWAIT='__asm__ volatile("pause")' - fi - dnl emmintrin.h fails to compile unless MMX, SSE, and SSE2 are - dnl supported. 
- JE_COMPILABLE([SSE2 intrinsics], [ -#include -], [], [je_cv_sse2]) - if test "x${je_cv_sse2}" = "xyes" ; then - AC_DEFINE_UNQUOTED([HAVE_SSE2], [ ]) - fi - ;; - powerpc) - AC_DEFINE_UNQUOTED([HAVE_ALTIVEC], [ ]) - ;; - *) - ;; -esac -AC_DEFINE_UNQUOTED([CPU_SPINWAIT], [$CPU_SPINWAIT]) - -LD_PRELOAD_VAR="LD_PRELOAD" -so="so" -importlib="${so}" -o="$ac_objext" -a="a" -exe="$ac_exeext" -libprefix="lib" -DSO_LDFLAGS='-shared -Wl,-soname,$(@F)' -RPATH='-Wl,-rpath,$(1)' -SOREV="${so}.${rev}" -PIC_CFLAGS='-fPIC -DPIC' -CTARGET='-o $@' -LDTARGET='-o $@' -EXTRA_LDFLAGS= -ARFLAGS='crus' -AROUT=' $@' -CC_MM=1 - -AN_MAKEVAR([AR], [AC_PROG_AR]) -AN_PROGRAM([ar], [AC_PROG_AR]) -AC_DEFUN([AC_PROG_AR], [AC_CHECK_TOOL(AR, ar, :)]) -AC_PROG_AR - -dnl Platform-specific settings. abi and RPATH can probably be determined -dnl programmatically, but doing so is error-prone, which makes it generally -dnl not worth the trouble. -dnl -dnl Define cpp macros in CPPFLAGS, rather than doing AC_DEFINE(macro), since the -dnl definitions need to be seen before any headers are included, which is a pain -dnl to make happen otherwise. -default_munmap="1" -JEMALLOC_USABLE_SIZE_CONST="const" -case "${host}" in - *-*-darwin*) - CFLAGS="$CFLAGS" - abi="macho" - AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) - RPATH="" - LD_PRELOAD_VAR="DYLD_INSERT_LIBRARIES" - so="dylib" - importlib="${so}" - force_tls="0" - DSO_LDFLAGS='-shared -Wl,-dylib_install_name,$(@F)' - SOREV="${rev}.${so}" - sbrk_deprecated="1" - ;; - *-*-freebsd*) - CFLAGS="$CFLAGS" - abi="elf" - AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) - force_lazy_lock="1" - ;; - *-*-linux*) - CFLAGS="$CFLAGS" - CPPFLAGS="$CPPFLAGS -D_GNU_SOURCE" - abi="elf" - AC_DEFINE([JEMALLOC_HAS_ALLOCA_H]) - AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED], [ ]) - AC_DEFINE([JEMALLOC_THREADED_INIT], [ ]) - JEMALLOC_USABLE_SIZE_CONST="" - default_munmap="0" - ;; - *-*-netbsd*) - AC_MSG_CHECKING([ABI]) - AC_COMPILE_IFELSE([AC_LANG_PROGRAM( -[[#ifdef __ELF__ -/* ELF */ -#else -#error aout -#endif -]])], - [CFLAGS="$CFLAGS"; abi="elf"], - [abi="aout"]) - AC_MSG_RESULT([$abi]) - AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) - ;; - *-*-solaris2*) - CFLAGS="$CFLAGS" - abi="elf" - AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) - RPATH='-Wl,-R,$(1)' - dnl Solaris needs this for sigwait(). 
- CPPFLAGS="$CPPFLAGS -D_POSIX_PTHREAD_SEMANTICS" - LIBS="$LIBS -lposix4 -lsocket -lnsl" - ;; - *-ibm-aix*) - if "$LG_SIZEOF_PTR" = "8"; then - dnl 64bit AIX - LD_PRELOAD_VAR="LDR_PRELOAD64" - else - dnl 32bit AIX - LD_PRELOAD_VAR="LDR_PRELOAD" - fi - abi="xcoff" - ;; - *-*-mingw*) - abi="pecoff" - force_tls="0" - RPATH="" - so="dll" - if test "x$je_cv_msvc" = "xyes" ; then - importlib="lib" - DSO_LDFLAGS="-LD" - EXTRA_LDFLAGS="-link -DEBUG" - CTARGET='-Fo$@' - LDTARGET='-Fe$@' - AR='lib' - ARFLAGS='-nologo -out:' - AROUT='$@' - CC_MM= - else - importlib="${so}" - DSO_LDFLAGS="-shared" - fi - a="lib" - libprefix="" - SOREV="${so}" - PIC_CFLAGS="" - ;; - *) - AC_MSG_RESULT([Unsupported operating system: ${host}]) - abi="elf" - ;; -esac -AC_DEFINE_UNQUOTED([JEMALLOC_USABLE_SIZE_CONST], [$JEMALLOC_USABLE_SIZE_CONST]) -AC_SUBST([abi]) -AC_SUBST([RPATH]) -AC_SUBST([LD_PRELOAD_VAR]) -AC_SUBST([so]) -AC_SUBST([importlib]) -AC_SUBST([o]) -AC_SUBST([a]) -AC_SUBST([exe]) -AC_SUBST([libprefix]) -AC_SUBST([DSO_LDFLAGS]) -AC_SUBST([EXTRA_LDFLAGS]) -AC_SUBST([SOREV]) -AC_SUBST([PIC_CFLAGS]) -AC_SUBST([CTARGET]) -AC_SUBST([LDTARGET]) -AC_SUBST([MKLIB]) -AC_SUBST([ARFLAGS]) -AC_SUBST([AROUT]) -AC_SUBST([CC_MM]) - -JE_COMPILABLE([__attribute__ syntax], - [static __attribute__((unused)) void foo(void){}], - [], - [je_cv_attribute]) -if test "x${je_cv_attribute}" = "xyes" ; then - AC_DEFINE([JEMALLOC_HAVE_ATTR], [ ]) - if test "x${GCC}" = "xyes" -a "x${abi}" = "xelf"; then - JE_CFLAGS_APPEND([-fvisibility=hidden]) - fi -fi -dnl Check for tls_model attribute support (clang 3.0 still lacks support). -SAVED_CFLAGS="${CFLAGS}" -JE_CFLAGS_APPEND([-Werror]) -JE_COMPILABLE([tls_model attribute], [], - [static __thread int - __attribute__((tls_model("initial-exec"))) foo; - foo = 0;], - [je_cv_tls_model]) -CFLAGS="${SAVED_CFLAGS}" -if test "x${je_cv_tls_model}" = "xyes" ; then - AC_DEFINE([JEMALLOC_TLS_MODEL], - [__attribute__((tls_model("initial-exec")))]) -else - AC_DEFINE([JEMALLOC_TLS_MODEL], [ ]) -fi - -dnl Support optional additions to rpath. -AC_ARG_WITH([rpath], - [AS_HELP_STRING([--with-rpath=], [Colon-separated rpath (ELF systems only)])], -if test "x$with_rpath" = "xno" ; then - RPATH_EXTRA= -else - RPATH_EXTRA="`echo $with_rpath | tr \":\" \" \"`" -fi, - RPATH_EXTRA= -) -AC_SUBST([RPATH_EXTRA]) - -dnl Disable rules that do automatic regeneration of configure output by default. -AC_ARG_ENABLE([autogen], - [AS_HELP_STRING([--enable-autogen], [Automatically regenerate configure output])], -if test "x$enable_autogen" = "xno" ; then - enable_autogen="0" -else - enable_autogen="1" -fi -, -enable_autogen="0" -) -AC_SUBST([enable_autogen]) - -AC_PROG_INSTALL -AC_PROG_RANLIB -AC_PATH_PROG([LD], [ld], [false], [$PATH]) -AC_PATH_PROG([AUTOCONF], [autoconf], [false], [$PATH]) - -public_syms="malloc_conf malloc_message malloc calloc posix_memalign aligned_alloc realloc free mallocx rallocx xallocx sallocx dallocx nallocx mallctl mallctlnametomib mallctlbymib malloc_stats_print malloc_usable_size" - -dnl Check for allocator-related functions that should be wrapped. -AC_CHECK_FUNC([memalign], - [AC_DEFINE([JEMALLOC_OVERRIDE_MEMALIGN], [ ]) - public_syms="${public_syms} memalign"]) -AC_CHECK_FUNC([valloc], - [AC_DEFINE([JEMALLOC_OVERRIDE_VALLOC], [ ]) - public_syms="${public_syms} valloc"]) - -dnl Support the experimental API by default. 
-AC_ARG_ENABLE([experimental], - [AS_HELP_STRING([--disable-experimental], - [Disable support for the experimental API])], -[if test "x$enable_experimental" = "xno" ; then - enable_experimental="0" -else - enable_experimental="1" -fi -], -[enable_experimental="1"] -) -if test "x$enable_experimental" = "x1" ; then - AC_DEFINE([JEMALLOC_EXPERIMENTAL], [ ]) - public_syms="${public_syms} allocm dallocm nallocm rallocm sallocm" -fi -AC_SUBST([enable_experimental]) - -dnl Do not compute test code coverage by default. -GCOV_FLAGS= -AC_ARG_ENABLE([code-coverage], - [AS_HELP_STRING([--enable-code-coverage], - [Enable code coverage])], -[if test "x$enable_code_coverage" = "xno" ; then - enable_code_coverage="0" -else - enable_code_coverage="1" -fi -], -[enable_code_coverage="0"] -) -if test "x$enable_code_coverage" = "x1" ; then - deoptimize="no" - echo "$CFLAGS $EXTRA_CFLAGS" | grep '\-O' >/dev/null || deoptimize="yes" - if test "x${deoptimize}" = "xyes" ; then - JE_CFLAGS_APPEND([-O0]) - fi - JE_CFLAGS_APPEND([-fprofile-arcs -ftest-coverage]) - EXTRA_LDFLAGS="$EXTRA_LDFLAGS -fprofile-arcs -ftest-coverage" - AC_DEFINE([JEMALLOC_CODE_COVERAGE], [ ]) -fi -AC_SUBST([enable_code_coverage]) - -dnl Perform no name mangling by default. -AC_ARG_WITH([mangling], - [AS_HELP_STRING([--with-mangling=], [Mangle symbols in ])], - [mangling_map="$with_mangling"], [mangling_map=""]) - -dnl Do not prefix public APIs by default. -AC_ARG_WITH([jemalloc_prefix], - [AS_HELP_STRING([--with-jemalloc-prefix=], [Prefix to prepend to all public APIs])], - [JEMALLOC_PREFIX="$with_jemalloc_prefix"], - [if test "x$abi" != "xmacho" -a "x$abi" != "xpecoff"; then - JEMALLOC_PREFIX="" -else - JEMALLOC_PREFIX="je_" -fi] -) -if test "x$JEMALLOC_PREFIX" != "x" ; then - JEMALLOC_CPREFIX=`echo ${JEMALLOC_PREFIX} | tr "a-z" "A-Z"` - AC_DEFINE_UNQUOTED([JEMALLOC_PREFIX], ["$JEMALLOC_PREFIX"]) - AC_DEFINE_UNQUOTED([JEMALLOC_CPREFIX], ["$JEMALLOC_CPREFIX"]) -fi - -AC_ARG_WITH([export], - [AS_HELP_STRING([--without-export], [disable exporting jemalloc public APIs])], - [if test "x$with_export" = "xno"; then - AC_DEFINE([JEMALLOC_EXPORT],[]) -fi] -) - -dnl Mangle library-private APIs. -AC_ARG_WITH([private_namespace], - [AS_HELP_STRING([--with-private-namespace=], [Prefix to prepend to all library-private APIs])], - [JEMALLOC_PRIVATE_NAMESPACE="${with_private_namespace}je_"], - [JEMALLOC_PRIVATE_NAMESPACE="je_"] -) -AC_DEFINE_UNQUOTED([JEMALLOC_PRIVATE_NAMESPACE], [$JEMALLOC_PRIVATE_NAMESPACE]) -private_namespace="$JEMALLOC_PRIVATE_NAMESPACE" -AC_SUBST([private_namespace]) - -dnl Do not add suffix to installed files by default. -AC_ARG_WITH([install_suffix], - [AS_HELP_STRING([--with-install-suffix=], [Suffix to append to all installed files])], - [INSTALL_SUFFIX="$with_install_suffix"], - [INSTALL_SUFFIX=] -) -install_suffix="$INSTALL_SUFFIX" -AC_SUBST([install_suffix]) - -dnl Substitute @je_@ in jemalloc_protos.h.in, primarily to make generation of -dnl jemalloc_protos_jet.h easy. 
-je_="je_" -AC_SUBST([je_]) - -cfgoutputs_in="${srcroot}Makefile.in" -cfgoutputs_in="${cfgoutputs_in} ${srcroot}doc/html.xsl.in" -cfgoutputs_in="${cfgoutputs_in} ${srcroot}doc/manpages.xsl.in" -cfgoutputs_in="${cfgoutputs_in} ${srcroot}doc/jemalloc.xml.in" -cfgoutputs_in="${cfgoutputs_in} ${srcroot}include/jemalloc/jemalloc_macros.h.in" -cfgoutputs_in="${cfgoutputs_in} ${srcroot}include/jemalloc/jemalloc_protos.h.in" -cfgoutputs_in="${cfgoutputs_in} ${srcroot}include/jemalloc/internal/jemalloc_internal.h.in" -cfgoutputs_in="${cfgoutputs_in} ${srcroot}test/test.sh.in" -cfgoutputs_in="${cfgoutputs_in} ${srcroot}test/include/test/jemalloc_test.h.in" - -cfgoutputs_out="Makefile" -cfgoutputs_out="${cfgoutputs_out} doc/html.xsl" -cfgoutputs_out="${cfgoutputs_out} doc/manpages.xsl" -cfgoutputs_out="${cfgoutputs_out} doc/jemalloc.xml" -cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_macros.h" -cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_protos.h" -cfgoutputs_out="${cfgoutputs_out} include/jemalloc/internal/jemalloc_internal.h" -cfgoutputs_out="${cfgoutputs_out} test/test.sh" -cfgoutputs_out="${cfgoutputs_out} test/include/test/jemalloc_test.h" - -cfgoutputs_tup="Makefile" -cfgoutputs_tup="${cfgoutputs_tup} doc/html.xsl:doc/html.xsl.in" -cfgoutputs_tup="${cfgoutputs_tup} doc/manpages.xsl:doc/manpages.xsl.in" -cfgoutputs_tup="${cfgoutputs_tup} doc/jemalloc.xml:doc/jemalloc.xml.in" -cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_macros.h:include/jemalloc/jemalloc_macros.h.in" -cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_protos.h:include/jemalloc/jemalloc_protos.h.in" -cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/internal/jemalloc_internal.h" -cfgoutputs_tup="${cfgoutputs_tup} test/test.sh:test/test.sh.in" -cfgoutputs_tup="${cfgoutputs_tup} test/include/test/jemalloc_test.h:test/include/test/jemalloc_test.h.in" - -cfghdrs_in="${srcroot}include/jemalloc/jemalloc_defs.h.in" -cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/internal/jemalloc_internal_defs.h.in" -cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/internal/private_namespace.sh" -cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/internal/private_unnamespace.sh" -cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/internal/private_symbols.txt" -cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/internal/public_namespace.sh" -cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/internal/public_unnamespace.sh" -cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/internal/size_classes.sh" -cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/jemalloc_rename.sh" -cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/jemalloc_mangle.sh" -cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/jemalloc.sh" -cfghdrs_in="${cfghdrs_in} ${srcroot}test/include/test/jemalloc_test_defs.h.in" - -cfghdrs_out="include/jemalloc/jemalloc_defs.h" -cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc${install_suffix}.h" -cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/private_namespace.h" -cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/private_unnamespace.h" -cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_symbols.txt" -cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_namespace.h" -cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_unnamespace.h" -cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/size_classes.h" -cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_protos_jet.h" -cfghdrs_out="${cfghdrs_out} 
include/jemalloc/jemalloc_rename.h" -cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_mangle.h" -cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_mangle_jet.h" -cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/jemalloc_internal_defs.h" -cfghdrs_out="${cfghdrs_out} test/include/test/jemalloc_test_defs.h" - -cfghdrs_tup="include/jemalloc/jemalloc_defs.h:include/jemalloc/jemalloc_defs.h.in" -cfghdrs_tup="${cfghdrs_tup} include/jemalloc/internal/jemalloc_internal_defs.h:${srcroot}include/jemalloc/internal/jemalloc_internal_defs.h.in" -cfghdrs_tup="${cfghdrs_tup} test/include/test/jemalloc_test_defs.h:${srcroot}test/include/test/jemalloc_test_defs.h.in" - -dnl Do not silence irrelevant compiler warnings by default, since enabling this -dnl option incurs a performance penalty. -AC_ARG_ENABLE([cc-silence], - [AS_HELP_STRING([--enable-cc-silence], - [Silence irrelevant compiler warnings])], -[if test "x$enable_cc_silence" = "xno" ; then - enable_cc_silence="0" -else - enable_cc_silence="1" -fi -], -[enable_cc_silence="0"] -) -if test "x$enable_cc_silence" = "x1" ; then - AC_DEFINE([JEMALLOC_CC_SILENCE], [ ]) -fi - -dnl Do not compile with debugging by default. -AC_ARG_ENABLE([debug], - [AS_HELP_STRING([--enable-debug], [Build debugging code (implies --enable-ivsalloc)])], -[if test "x$enable_debug" = "xno" ; then - enable_debug="0" -else - enable_debug="1" -fi -], -[enable_debug="0"] -) -if test "x$enable_debug" = "x1" ; then - AC_DEFINE([JEMALLOC_DEBUG], [ ]) - enable_ivsalloc="1" -fi -AC_SUBST([enable_debug]) - -dnl Do not validate pointers by default. -AC_ARG_ENABLE([ivsalloc], - [AS_HELP_STRING([--enable-ivsalloc], [Validate pointers passed through the public API])], -[if test "x$enable_ivsalloc" = "xno" ; then - enable_ivsalloc="0" -else - enable_ivsalloc="1" -fi -], -[enable_ivsalloc="0"] -) -if test "x$enable_ivsalloc" = "x1" ; then - AC_DEFINE([JEMALLOC_IVSALLOC], [ ]) -fi - -dnl Only optimize if not debugging. -if test "x$enable_debug" = "x0" -a "x$no_CFLAGS" = "xyes" ; then - dnl Make sure that an optimization flag was not specified in EXTRA_CFLAGS. - optimize="no" - echo "$CFLAGS $EXTRA_CFLAGS" | grep '\-O' >/dev/null || optimize="yes" - if test "x${optimize}" = "xyes" ; then - if test "x$GCC" = "xyes" ; then - JE_CFLAGS_APPEND([-O3]) - JE_CFLAGS_APPEND([-funroll-loops]) - elif test "x$je_cv_msvc" = "xyes" ; then - JE_CFLAGS_APPEND([-O2]) - else - JE_CFLAGS_APPEND([-O]) - fi - fi -fi - -dnl Enable statistics calculation by default. -AC_ARG_ENABLE([stats], - [AS_HELP_STRING([--disable-stats], - [Disable statistics calculation/reporting])], -[if test "x$enable_stats" = "xno" ; then - enable_stats="0" -else - enable_stats="1" -fi -], -[enable_stats="1"] -) -if test "x$enable_stats" = "x1" ; then - AC_DEFINE([JEMALLOC_STATS], [ ]) -fi -AC_SUBST([enable_stats]) - -dnl Do not enable profiling by default. 
-AC_ARG_ENABLE([prof], - [AS_HELP_STRING([--enable-prof], [Enable allocation profiling])], -[if test "x$enable_prof" = "xno" ; then - enable_prof="0" -else - enable_prof="1" -fi -], -[enable_prof="0"] -) -if test "x$enable_prof" = "x1" ; then - backtrace_method="" -else - backtrace_method="N/A" -fi - -AC_ARG_ENABLE([prof-libunwind], - [AS_HELP_STRING([--enable-prof-libunwind], [Use libunwind for backtracing])], -[if test "x$enable_prof_libunwind" = "xno" ; then - enable_prof_libunwind="0" -else - enable_prof_libunwind="1" -fi -], -[enable_prof_libunwind="0"] -) -AC_ARG_WITH([static_libunwind], - [AS_HELP_STRING([--with-static-libunwind=], - [Path to static libunwind library; use rather than dynamically linking])], -if test "x$with_static_libunwind" = "xno" ; then - LUNWIND="-lunwind" -else - if test ! -f "$with_static_libunwind" ; then - AC_MSG_ERROR([Static libunwind not found: $with_static_libunwind]) - fi - LUNWIND="$with_static_libunwind" -fi, - LUNWIND="-lunwind" -) -if test "x$backtrace_method" = "x" -a "x$enable_prof_libunwind" = "x1" ; then - AC_CHECK_HEADERS([libunwind.h], , [enable_prof_libunwind="0"]) - if test "x$LUNWIND" = "x-lunwind" ; then - AC_CHECK_LIB([unwind], [backtrace], [LIBS="$LIBS $LUNWIND"], - [enable_prof_libunwind="0"]) - else - LIBS="$LIBS $LUNWIND" - fi - if test "x${enable_prof_libunwind}" = "x1" ; then - backtrace_method="libunwind" - AC_DEFINE([JEMALLOC_PROF_LIBUNWIND], [ ]) - fi -fi - -AC_ARG_ENABLE([prof-libgcc], - [AS_HELP_STRING([--disable-prof-libgcc], - [Do not use libgcc for backtracing])], -[if test "x$enable_prof_libgcc" = "xno" ; then - enable_prof_libgcc="0" -else - enable_prof_libgcc="1" -fi -], -[enable_prof_libgcc="1"] -) -if test "x$backtrace_method" = "x" -a "x$enable_prof_libgcc" = "x1" \ - -a "x$GCC" = "xyes" ; then - AC_CHECK_HEADERS([unwind.h], , [enable_prof_libgcc="0"]) - AC_CHECK_LIB([gcc], [_Unwind_Backtrace], [LIBS="$LIBS -lgcc"], [enable_prof_libgcc="0"]) - if test "x${enable_prof_libgcc}" = "x1" ; then - backtrace_method="libgcc" - AC_DEFINE([JEMALLOC_PROF_LIBGCC], [ ]) - fi -else - enable_prof_libgcc="0" -fi - -AC_ARG_ENABLE([prof-gcc], - [AS_HELP_STRING([--disable-prof-gcc], - [Do not use gcc intrinsics for backtracing])], -[if test "x$enable_prof_gcc" = "xno" ; then - enable_prof_gcc="0" -else - enable_prof_gcc="1" -fi -], -[enable_prof_gcc="1"] -) -if test "x$backtrace_method" = "x" -a "x$enable_prof_gcc" = "x1" \ - -a "x$GCC" = "xyes" ; then - JE_CFLAGS_APPEND([-fno-omit-frame-pointer]) - backtrace_method="gcc intrinsics" - AC_DEFINE([JEMALLOC_PROF_GCC], [ ]) -else - enable_prof_gcc="0" -fi - -if test "x$backtrace_method" = "x" ; then - backtrace_method="none (disabling profiling)" - enable_prof="0" -fi -AC_MSG_CHECKING([configured backtracing method]) -AC_MSG_RESULT([$backtrace_method]) -if test "x$enable_prof" = "x1" ; then - if test "x${force_tls}" = "x0" ; then - AC_MSG_ERROR([Heap profiling requires TLS]); - fi - force_tls="1" - - if test "x$abi" != "xpecoff"; then - dnl Heap profiling uses the log(3) function. - LIBS="$LIBS -lm" - fi - - AC_DEFINE([JEMALLOC_PROF], [ ]) -fi -AC_SUBST([enable_prof]) - -dnl Enable thread-specific caching by default. -AC_ARG_ENABLE([tcache], - [AS_HELP_STRING([--disable-tcache], [Disable per thread caches])], -[if test "x$enable_tcache" = "xno" ; then - enable_tcache="0" -else - enable_tcache="1" -fi -], -[enable_tcache="1"] -) -if test "x$enable_tcache" = "x1" ; then - AC_DEFINE([JEMALLOC_TCACHE], [ ]) -fi -AC_SUBST([enable_tcache]) - -dnl Disable mremap() for huge realloc() by default. 
-AC_ARG_ENABLE([mremap], - [AS_HELP_STRING([--enable-mremap], [Enable mremap(2) for huge realloc()])], -[if test "x$enable_mremap" = "xno" ; then - enable_mremap="0" -else - enable_mremap="1" -fi -], -[enable_mremap="0"] -) -if test "x$enable_mremap" = "x1" ; then - JE_COMPILABLE([mremap(...MREMAP_FIXED...)], [ -#define _GNU_SOURCE -#include -], [ -void *p = mremap((void *)0, 0, 0, MREMAP_MAYMOVE|MREMAP_FIXED, (void *)0); -], [je_cv_mremap_fixed]) - if test "x${je_cv_mremap_fixed}" = "xno" ; then - enable_mremap="0" - fi -fi -if test "x$enable_mremap" = "x1" ; then - AC_DEFINE([JEMALLOC_MREMAP], [ ]) -fi -AC_SUBST([enable_mremap]) - -dnl Enable VM deallocation via munmap() by default. -AC_ARG_ENABLE([munmap], - [AS_HELP_STRING([--disable-munmap], [Disable VM deallocation via munmap(2)])], -[if test "x$enable_munmap" = "xno" ; then - enable_munmap="0" -else - enable_munmap="1" -fi -], -[enable_munmap="${default_munmap}"] -) -if test "x$enable_munmap" = "x1" ; then - AC_DEFINE([JEMALLOC_MUNMAP], [ ]) -fi -AC_SUBST([enable_munmap]) - -dnl Do not enable allocation from DSS by default. -AC_ARG_ENABLE([dss], - [AS_HELP_STRING([--enable-dss], [Enable allocation from DSS])], -[if test "x$enable_dss" = "xno" ; then - enable_dss="0" -else - enable_dss="1" -fi -], -[enable_dss="0"] -) -dnl Check whether the BSD/SUSv1 sbrk() exists. If not, disable DSS support. -AC_CHECK_FUNC([sbrk], [have_sbrk="1"], [have_sbrk="0"]) -if test "x$have_sbrk" = "x1" ; then - if test "x$sbrk_deprecated" == "x1" ; then - AC_MSG_RESULT([Disabling dss allocation because sbrk is deprecated]) - enable_dss="0" - else - AC_DEFINE([JEMALLOC_HAVE_SBRK], [ ]) - fi -else - enable_dss="0" -fi - -if test "x$enable_dss" = "x1" ; then - AC_DEFINE([JEMALLOC_DSS], [ ]) -fi -AC_SUBST([enable_dss]) - -dnl Support the junk/zero filling option by default. -AC_ARG_ENABLE([fill], - [AS_HELP_STRING([--disable-fill], - [Disable support for junk/zero filling, quarantine, and redzones])], -[if test "x$enable_fill" = "xno" ; then - enable_fill="0" -else - enable_fill="1" -fi -], -[enable_fill="1"] -) -if test "x$enable_fill" = "x1" ; then - AC_DEFINE([JEMALLOC_FILL], [ ]) -fi -AC_SUBST([enable_fill]) - -dnl Disable utrace(2)-based tracing by default. -AC_ARG_ENABLE([utrace], - [AS_HELP_STRING([--enable-utrace], [Enable utrace(2)-based tracing])], -[if test "x$enable_utrace" = "xno" ; then - enable_utrace="0" -else - enable_utrace="1" -fi -], -[enable_utrace="0"] -) -JE_COMPILABLE([utrace(2)], [ -#include -#include -#include -#include -#include -], [ - utrace((void *)0, 0); -], [je_cv_utrace]) -if test "x${je_cv_utrace}" = "xno" ; then - enable_utrace="0" -fi -if test "x$enable_utrace" = "x1" ; then - AC_DEFINE([JEMALLOC_UTRACE], [ ]) -fi -AC_SUBST([enable_utrace]) - -dnl Support Valgrind by default. -AC_ARG_ENABLE([valgrind], - [AS_HELP_STRING([--disable-valgrind], [Disable support for Valgrind])], -[if test "x$enable_valgrind" = "xno" ; then - enable_valgrind="0" -else - enable_valgrind="1" -fi -], -[enable_valgrind="1"] -) -if test "x$enable_valgrind" = "x1" ; then - JE_COMPILABLE([valgrind], [ -#include -#include - -#if !defined(VALGRIND_RESIZEINPLACE_BLOCK) -# error "Incompatible Valgrind version" -#endif -], [], [je_cv_valgrind]) - if test "x${je_cv_valgrind}" = "xno" ; then - enable_valgrind="0" - fi - if test "x$enable_valgrind" = "x1" ; then - AC_DEFINE([JEMALLOC_VALGRIND], [ ]) - fi -fi -AC_SUBST([enable_valgrind]) - -dnl Do not support the xmalloc option by default. 
-AC_ARG_ENABLE([xmalloc], - [AS_HELP_STRING([--enable-xmalloc], [Support xmalloc option])], -[if test "x$enable_xmalloc" = "xno" ; then - enable_xmalloc="0" -else - enable_xmalloc="1" -fi -], -[enable_xmalloc="0"] -) -if test "x$enable_xmalloc" = "x1" ; then - AC_DEFINE([JEMALLOC_XMALLOC], [ ]) -fi -AC_SUBST([enable_xmalloc]) - -AC_CACHE_CHECK([STATIC_PAGE_SHIFT], - [je_cv_static_page_shift], - AC_RUN_IFELSE([AC_LANG_PROGRAM( -[[ -#include -#ifdef _WIN32 -#include -#else -#include -#endif -#include -]], -[[ - int result; - FILE *f; - -#ifdef _WIN32 - SYSTEM_INFO si; - GetSystemInfo(&si); - result = si.dwPageSize; -#else - result = sysconf(_SC_PAGESIZE); -#endif - if (result == -1) { - return 1; - } - result = ffsl(result) - 1; - - f = fopen("conftest.out", "w"); - if (f == NULL) { - return 1; - } - fprintf(f, "%d\n", result); - fclose(f); - - return 0; -]])], - [je_cv_static_page_shift=`cat conftest.out`], - [je_cv_static_page_shift=undefined])) - -if test "x$je_cv_static_page_shift" != "xundefined"; then - AC_DEFINE_UNQUOTED([STATIC_PAGE_SHIFT], [$je_cv_static_page_shift]) -else - AC_MSG_ERROR([cannot determine value for STATIC_PAGE_SHIFT]) -fi - -dnl ============================================================================ -dnl jemalloc configuration. -dnl - -dnl Set VERSION if source directory has an embedded git repository. -if test -d "${srcroot}.git" ; then - git describe --long --abbrev=40 > ${srcroot}VERSION -fi -jemalloc_version=`cat ${srcroot}VERSION` -jemalloc_version_major=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]1}'` -jemalloc_version_minor=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]2}'` -jemalloc_version_bugfix=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]3}'` -jemalloc_version_nrev=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]4}'` -jemalloc_version_gid=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]5}'` -AC_SUBST([jemalloc_version]) -AC_SUBST([jemalloc_version_major]) -AC_SUBST([jemalloc_version_minor]) -AC_SUBST([jemalloc_version_bugfix]) -AC_SUBST([jemalloc_version_nrev]) -AC_SUBST([jemalloc_version_gid]) - -dnl ============================================================================ -dnl Configure pthreads. - -if test "x$abi" != "xpecoff" ; then - AC_CHECK_HEADERS([pthread.h], , [AC_MSG_ERROR([pthread.h is missing])]) - dnl Some systems may embed pthreads functionality in libc; check for libpthread - dnl first, but try libc too before failing. - AC_CHECK_LIB([pthread], [pthread_create], [LIBS="$LIBS -lpthread"], - [AC_SEARCH_LIBS([pthread_create], , , - AC_MSG_ERROR([libpthread is missing]))]) -fi - -CPPFLAGS="$CPPFLAGS -D_REENTRANT" - -dnl Check whether the BSD-specific _malloc_thread_cleanup() exists. If so, use -dnl it rather than pthreads TSD cleanup functions to support cleanup during -dnl thread exit, in order to avoid pthreads library recursion during -dnl bootstrapping. -AC_CHECK_FUNC([_malloc_thread_cleanup], - [have__malloc_thread_cleanup="1"], - [have__malloc_thread_cleanup="0"] - ) -if test "x$have__malloc_thread_cleanup" = "x1" ; then - AC_DEFINE([JEMALLOC_MALLOC_THREAD_CLEANUP], [ ]) - force_tls="1" -fi - -dnl Check whether the BSD-specific _pthread_mutex_init_calloc_cb() exists. If -dnl so, mutex initialization causes allocation, and we need to implement this -dnl callback function in order to prevent recursive allocation. 
-AC_CHECK_FUNC([_pthread_mutex_init_calloc_cb], - [have__pthread_mutex_init_calloc_cb="1"], - [have__pthread_mutex_init_calloc_cb="0"] - ) -if test "x$have__pthread_mutex_init_calloc_cb" = "x1" ; then - AC_DEFINE([JEMALLOC_MUTEX_INIT_CB]) -fi - -dnl Disable lazy locking by default. -AC_ARG_ENABLE([lazy_lock], - [AS_HELP_STRING([--enable-lazy-lock], - [Enable lazy locking (only lock when multi-threaded)])], -[if test "x$enable_lazy_lock" = "xno" ; then - enable_lazy_lock="0" -else - enable_lazy_lock="1" -fi -], -[enable_lazy_lock="0"] -) -if test "x$enable_lazy_lock" = "x0" -a "x${force_lazy_lock}" = "x1" ; then - AC_MSG_RESULT([Forcing lazy-lock to avoid allocator/threading bootstrap issues]) - enable_lazy_lock="1" -fi -if test "x$enable_lazy_lock" = "x1" ; then - if test "x$abi" != "xpecoff" ; then - AC_CHECK_HEADERS([dlfcn.h], , [AC_MSG_ERROR([dlfcn.h is missing])]) - AC_CHECK_FUNC([dlsym], [], - [AC_CHECK_LIB([dl], [dlsym], [LIBS="$LIBS -ldl"], - [AC_MSG_ERROR([libdl is missing])]) - ]) - fi - AC_DEFINE([JEMALLOC_LAZY_LOCK], [ ]) -fi -AC_SUBST([enable_lazy_lock]) - -AC_ARG_ENABLE([tls], - [AS_HELP_STRING([--disable-tls], [Disable thread-local storage (__thread keyword)])], -if test "x$enable_tls" = "xno" ; then - enable_tls="0" -else - enable_tls="1" -fi -, -enable_tls="1" -) -if test "x${enable_tls}" = "x0" -a "x${force_tls}" = "x1" ; then - AC_MSG_RESULT([Forcing TLS to avoid allocator/threading bootstrap issues]) - enable_tls="1" -fi -if test "x${enable_tls}" = "x1" -a "x${force_tls}" = "x0" ; then - AC_MSG_RESULT([Forcing no TLS to avoid allocator/threading bootstrap issues]) - enable_tls="0" -fi -if test "x${enable_tls}" = "x1" ; then -AC_MSG_CHECKING([for TLS]) -AC_COMPILE_IFELSE([AC_LANG_PROGRAM( -[[ - __thread int x; -]], [[ - x = 42; - - return 0; -]])], - AC_MSG_RESULT([yes]), - AC_MSG_RESULT([no]) - enable_tls="0") -fi -AC_SUBST([enable_tls]) -if test "x${enable_tls}" = "x1" ; then - AC_DEFINE_UNQUOTED([JEMALLOC_TLS], [ ]) -elif test "x${force_tls}" = "x1" ; then - AC_MSG_ERROR([Failed to configure TLS, which is mandatory for correct function]) -fi - -dnl ============================================================================ -dnl Check for ffsl(3), and fail if not found. This function exists on all -dnl platforms that jemalloc currently has a chance of functioning on without -dnl modification. -JE_COMPILABLE([a program using ffsl], [ -#include -#include -#include -], [ - { - int rv = ffsl(0x08); - printf("%d\n", rv); - } -], [je_cv_function_ffsl]) -if test "x${je_cv_function_ffsl}" != "xyes" ; then - AC_MSG_ERROR([Cannot build without ffsl(3)]) -fi - -dnl ============================================================================ -dnl Check for atomic(9) operations as provided on FreeBSD. - -JE_COMPILABLE([atomic(9)], [ -#include -#include -#include -], [ - { - uint32_t x32 = 0; - volatile uint32_t *x32p = &x32; - atomic_fetchadd_32(x32p, 1); - } - { - unsigned long xlong = 0; - volatile unsigned long *xlongp = &xlong; - atomic_fetchadd_long(xlongp, 1); - } -], [je_cv_atomic9]) -if test "x${je_cv_atomic9}" = "xyes" ; then - AC_DEFINE([JEMALLOC_ATOMIC9]) -fi - -dnl ============================================================================ -dnl Check for atomic(3) operations as provided on Darwin. 
- -JE_COMPILABLE([Darwin OSAtomic*()], [ -#include -#include -], [ - { - int32_t x32 = 0; - volatile int32_t *x32p = &x32; - OSAtomicAdd32(1, x32p); - } - { - int64_t x64 = 0; - volatile int64_t *x64p = &x64; - OSAtomicAdd64(1, x64p); - } -], [je_cv_osatomic]) -if test "x${je_cv_osatomic}" = "xyes" ; then - AC_DEFINE([JEMALLOC_OSATOMIC], [ ]) -fi - -dnl ============================================================================ -dnl Check whether __sync_{add,sub}_and_fetch() are available despite -dnl __GCC_HAVE_SYNC_COMPARE_AND_SWAP_n macros being undefined. - -AC_DEFUN([JE_SYNC_COMPARE_AND_SWAP_CHECK],[ - AC_CACHE_CHECK([whether to force $1-bit __sync_{add,sub}_and_fetch()], - [je_cv_sync_compare_and_swap_$2], - [AC_LINK_IFELSE([AC_LANG_PROGRAM([ - #include - ], - [ - #ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_$2 - { - uint$1_t x$1 = 0; - __sync_add_and_fetch(&x$1, 42); - __sync_sub_and_fetch(&x$1, 1); - } - #else - #error __GCC_HAVE_SYNC_COMPARE_AND_SWAP_$2 is defined, no need to force - #endif - ])], - [je_cv_sync_compare_and_swap_$2=yes], - [je_cv_sync_compare_and_swap_$2=no])]) - - if test "x${je_cv_sync_compare_and_swap_$2}" = "xyes" ; then - AC_DEFINE([JE_FORCE_SYNC_COMPARE_AND_SWAP_$2], [ ]) - fi -]) - -if test "x${je_cv_atomic9}" != "xyes" -a "x${je_cv_osatomic}" != "xyes" ; then - JE_SYNC_COMPARE_AND_SWAP_CHECK(32, 4) - JE_SYNC_COMPARE_AND_SWAP_CHECK(64, 8) -fi - -dnl ============================================================================ -dnl Check for spinlock(3) operations as provided on Darwin. - -JE_COMPILABLE([Darwin OSSpin*()], [ -#include -#include -], [ - OSSpinLock lock = 0; - OSSpinLockLock(&lock); - OSSpinLockUnlock(&lock); -], [je_cv_osspin]) -if test "x${je_cv_osspin}" = "xyes" ; then - AC_DEFINE([JEMALLOC_OSSPIN], [ ]) -fi - -dnl ============================================================================ -dnl Darwin-related configuration. - -AC_ARG_ENABLE([zone-allocator], - [AS_HELP_STRING([--disable-zone-allocator], - [Disable zone allocator for Darwin])], -[if test "x$enable_zone_allocator" = "xno" ; then - enable_zone_allocator="0" -else - enable_zone_allocator="1" -fi -], -[if test "x${abi}" = "xmacho"; then - enable_zone_allocator="1" -fi -] -) -AC_SUBST([enable_zone_allocator]) - -if test "x${enable_zone_allocator}" = "x1" ; then - if test "x${abi}" != "xmacho"; then - AC_MSG_ERROR([--enable-zone-allocator is only supported on Darwin]) - fi - AC_DEFINE([JEMALLOC_IVSALLOC], [ ]) - AC_DEFINE([JEMALLOC_ZONE], [ ]) - - dnl The szone version jumped from 3 to 6 between the OS X 10.5.x and 10.6 - dnl releases. malloc_zone_t and malloc_introspection_t have new fields in - dnl 10.6, which is the only source-level indication of the change. - AC_MSG_CHECKING([malloc zone version]) - AC_DEFUN([JE_ZONE_PROGRAM], - [AC_LANG_PROGRAM( - [#include ], - [static foo[[sizeof($1) $2 sizeof(void *) * $3 ? 
1 : -1]]] - )]) - - AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_zone_t,==,14)],[JEMALLOC_ZONE_VERSION=3],[ - AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_zone_t,==,15)],[JEMALLOC_ZONE_VERSION=5],[ - AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_zone_t,==,16)],[ - AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_introspection_t,==,9)],[JEMALLOC_ZONE_VERSION=6],[ - AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_introspection_t,==,13)],[JEMALLOC_ZONE_VERSION=7],[JEMALLOC_ZONE_VERSION=] - )])],[ - AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_zone_t,==,17)],[JEMALLOC_ZONE_VERSION=8],[ - AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_zone_t,>,17)],[JEMALLOC_ZONE_VERSION=9],[JEMALLOC_ZONE_VERSION=] - )])])])]) - if test "x${JEMALLOC_ZONE_VERSION}" = "x"; then - AC_MSG_RESULT([unsupported]) - AC_MSG_ERROR([Unsupported malloc zone version]) - fi - if test "${JEMALLOC_ZONE_VERSION}" = 9; then - JEMALLOC_ZONE_VERSION=8 - AC_MSG_RESULT([> 8]) - else - AC_MSG_RESULT([$JEMALLOC_ZONE_VERSION]) - fi - AC_DEFINE_UNQUOTED(JEMALLOC_ZONE_VERSION, [$JEMALLOC_ZONE_VERSION]) -fi - -dnl ============================================================================ -dnl Check for typedefs, structures, and compiler characteristics. -AC_HEADER_STDBOOL - -dnl ============================================================================ -dnl Define commands that generate output files. - -AC_CONFIG_COMMANDS([include/jemalloc/internal/private_namespace.h], [ - mkdir -p "${objroot}include/jemalloc/internal" - "${srcdir}/include/jemalloc/internal/private_namespace.sh" "${srcdir}/include/jemalloc/internal/private_symbols.txt" > "${objroot}include/jemalloc/internal/private_namespace.h" -], [ - srcdir="${srcdir}" - objroot="${objroot}" -]) -AC_CONFIG_COMMANDS([include/jemalloc/internal/private_unnamespace.h], [ - mkdir -p "${objroot}include/jemalloc/internal" - "${srcdir}/include/jemalloc/internal/private_unnamespace.sh" "${srcdir}/include/jemalloc/internal/private_symbols.txt" > "${objroot}include/jemalloc/internal/private_unnamespace.h" -], [ - srcdir="${srcdir}" - objroot="${objroot}" -]) -AC_CONFIG_COMMANDS([include/jemalloc/internal/public_symbols.txt], [ - f="${objroot}include/jemalloc/internal/public_symbols.txt" - mkdir -p "${objroot}include/jemalloc/internal" - cp /dev/null "${f}" - for nm in `echo ${mangling_map} |tr ',' ' '` ; do - n=`echo ${nm} |tr ':' ' ' |awk '{print $[]1}'` - m=`echo ${nm} |tr ':' ' ' |awk '{print $[]2}'` - echo "${n}:${m}" >> "${f}" - dnl Remove name from public_syms so that it isn't redefined later. 
- public_syms=`for sym in ${public_syms}; do echo "${sym}"; done |grep -v "^${n}\$" |tr '\n' ' '` - done - for sym in ${public_syms} ; do - n="${sym}" - m="${JEMALLOC_PREFIX}${sym}" - echo "${n}:${m}" >> "${f}" - done -], [ - srcdir="${srcdir}" - objroot="${objroot}" - mangling_map="${mangling_map}" - public_syms="${public_syms}" - JEMALLOC_PREFIX="${JEMALLOC_PREFIX}" -]) -AC_CONFIG_COMMANDS([include/jemalloc/internal/public_namespace.h], [ - mkdir -p "${objroot}include/jemalloc/internal" - "${srcdir}/include/jemalloc/internal/public_namespace.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/internal/public_namespace.h" -], [ - srcdir="${srcdir}" - objroot="${objroot}" -]) -AC_CONFIG_COMMANDS([include/jemalloc/internal/public_unnamespace.h], [ - mkdir -p "${objroot}include/jemalloc/internal" - "${srcdir}/include/jemalloc/internal/public_unnamespace.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/internal/public_unnamespace.h" -], [ - srcdir="${srcdir}" - objroot="${objroot}" -]) -AC_CONFIG_COMMANDS([include/jemalloc/internal/size_classes.h], [ - mkdir -p "${objroot}include/jemalloc/internal" - "${srcdir}/include/jemalloc/internal/size_classes.sh" > "${objroot}include/jemalloc/internal/size_classes.h" -], [ - srcdir="${srcdir}" - objroot="${objroot}" -]) -AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_protos_jet.h], [ - mkdir -p "${objroot}include/jemalloc" - cat "${srcdir}/include/jemalloc/jemalloc_protos.h.in" | sed -e 's/@je_@/jet_/g' > "${objroot}include/jemalloc/jemalloc_protos_jet.h" -], [ - srcdir="${srcdir}" - objroot="${objroot}" -]) -AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_rename.h], [ - mkdir -p "${objroot}include/jemalloc" - "${srcdir}/include/jemalloc/jemalloc_rename.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/jemalloc_rename.h" -], [ - srcdir="${srcdir}" - objroot="${objroot}" -]) -AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_mangle.h], [ - mkdir -p "${objroot}include/jemalloc" - "${srcdir}/include/jemalloc/jemalloc_mangle.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" je_ > "${objroot}include/jemalloc/jemalloc_mangle.h" -], [ - srcdir="${srcdir}" - objroot="${objroot}" -]) -AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_mangle_jet.h], [ - mkdir -p "${objroot}include/jemalloc" - "${srcdir}/include/jemalloc/jemalloc_mangle.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" jet_ > "${objroot}include/jemalloc/jemalloc_mangle_jet.h" -], [ - srcdir="${srcdir}" - objroot="${objroot}" -]) -AC_CONFIG_COMMANDS([include/jemalloc/jemalloc.h], [ - mkdir -p "${objroot}include/jemalloc" - "${srcdir}/include/jemalloc/jemalloc.sh" "${objroot}" > "${objroot}include/jemalloc/jemalloc${install_suffix}.h" -], [ - srcdir="${srcdir}" - objroot="${objroot}" - install_suffix="${install_suffix}" -]) - -dnl Process .in files. -AC_SUBST([cfghdrs_in]) -AC_SUBST([cfghdrs_out]) -AC_CONFIG_HEADERS([$cfghdrs_tup]) - -dnl ============================================================================ -dnl Generate outputs. - -AC_CONFIG_FILES([$cfgoutputs_tup config.stamp bin/jemalloc.sh]) -AC_SUBST([cfgoutputs_in]) -AC_SUBST([cfgoutputs_out]) -AC_OUTPUT - -dnl ============================================================================ -dnl Print out the results of configuration. 
-AC_MSG_RESULT([===============================================================================]) -AC_MSG_RESULT([jemalloc version : ${jemalloc_version}]) -AC_MSG_RESULT([library revision : ${rev}]) -AC_MSG_RESULT([]) -AC_MSG_RESULT([CC : ${CC}]) -AC_MSG_RESULT([CPPFLAGS : ${CPPFLAGS}]) -AC_MSG_RESULT([CFLAGS : ${CFLAGS}]) -AC_MSG_RESULT([LDFLAGS : ${LDFLAGS}]) -AC_MSG_RESULT([EXTRA_LDFLAGS : ${EXTRA_LDFLAGS}]) -AC_MSG_RESULT([LIBS : ${LIBS}]) -AC_MSG_RESULT([RPATH_EXTRA : ${RPATH_EXTRA}]) -AC_MSG_RESULT([]) -AC_MSG_RESULT([XSLTPROC : ${XSLTPROC}]) -AC_MSG_RESULT([XSLROOT : ${XSLROOT}]) -AC_MSG_RESULT([]) -AC_MSG_RESULT([PREFIX : ${PREFIX}]) -AC_MSG_RESULT([BINDIR : ${BINDIR}]) -AC_MSG_RESULT([INCLUDEDIR : ${INCLUDEDIR}]) -AC_MSG_RESULT([LIBDIR : ${LIBDIR}]) -AC_MSG_RESULT([DATADIR : ${DATADIR}]) -AC_MSG_RESULT([MANDIR : ${MANDIR}]) -AC_MSG_RESULT([]) -AC_MSG_RESULT([srcroot : ${srcroot}]) -AC_MSG_RESULT([abs_srcroot : ${abs_srcroot}]) -AC_MSG_RESULT([objroot : ${objroot}]) -AC_MSG_RESULT([abs_objroot : ${abs_objroot}]) -AC_MSG_RESULT([]) -AC_MSG_RESULT([JEMALLOC_PREFIX : ${JEMALLOC_PREFIX}]) -AC_MSG_RESULT([JEMALLOC_PRIVATE_NAMESPACE]) -AC_MSG_RESULT([ : ${JEMALLOC_PRIVATE_NAMESPACE}]) -AC_MSG_RESULT([install_suffix : ${install_suffix}]) -AC_MSG_RESULT([autogen : ${enable_autogen}]) -AC_MSG_RESULT([experimental : ${enable_experimental}]) -AC_MSG_RESULT([cc-silence : ${enable_cc_silence}]) -AC_MSG_RESULT([debug : ${enable_debug}]) -AC_MSG_RESULT([code-coverage : ${enable_code_coverage}]) -AC_MSG_RESULT([stats : ${enable_stats}]) -AC_MSG_RESULT([prof : ${enable_prof}]) -AC_MSG_RESULT([prof-libunwind : ${enable_prof_libunwind}]) -AC_MSG_RESULT([prof-libgcc : ${enable_prof_libgcc}]) -AC_MSG_RESULT([prof-gcc : ${enable_prof_gcc}]) -AC_MSG_RESULT([tcache : ${enable_tcache}]) -AC_MSG_RESULT([fill : ${enable_fill}]) -AC_MSG_RESULT([utrace : ${enable_utrace}]) -AC_MSG_RESULT([valgrind : ${enable_valgrind}]) -AC_MSG_RESULT([xmalloc : ${enable_xmalloc}]) -AC_MSG_RESULT([mremap : ${enable_mremap}]) -AC_MSG_RESULT([munmap : ${enable_munmap}]) -AC_MSG_RESULT([dss : ${enable_dss}]) -AC_MSG_RESULT([lazy_lock : ${enable_lazy_lock}]) -AC_MSG_RESULT([tls : ${enable_tls}]) -AC_MSG_RESULT([===============================================================================]) diff --git a/lib/jemalloc-3.6.0/coverage.sh b/lib/jemalloc-3.6.0/coverage.sh deleted file mode 100755 index 6d1362a..0000000 --- a/lib/jemalloc-3.6.0/coverage.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/sh - -set -e - -objdir=$1 -suffix=$2 -shift 2 -objs=$@ - -gcov -b -p -f -o "${objdir}" ${objs} - -# Move gcov outputs so that subsequent gcov invocations won't clobber results -# for the same sources with different compilation flags. -for f in `find . -maxdepth 1 -type f -name '*.gcov'` ; do - mv "${f}" "${f}.${suffix}" -done diff --git a/lib/jemalloc-3.6.0/doc/html.xsl.in b/lib/jemalloc-3.6.0/doc/html.xsl.in deleted file mode 100644 index a91d974..0000000 --- a/lib/jemalloc-3.6.0/doc/html.xsl.in +++ /dev/null @@ -1,4 +0,0 @@ - - - - diff --git a/lib/jemalloc-3.6.0/doc/jemalloc.xml.in b/lib/jemalloc-3.6.0/doc/jemalloc.xml.in deleted file mode 100644 index d8e2e71..0000000 --- a/lib/jemalloc-3.6.0/doc/jemalloc.xml.in +++ /dev/null @@ -1,2345 +0,0 @@ - - - - - - - User Manual - jemalloc - @jemalloc_version@ - - - Jason - Evans - Author - - - - - JEMALLOC - 3 - - - jemalloc - jemalloc - - general purpose memory allocation functions - - - LIBRARY - This manual describes jemalloc @jemalloc_version@. 
More information - can be found at the jemalloc website. - - - SYNOPSIS - - #include <stdlib.h> -#include <jemalloc/jemalloc.h> - - Standard API - - void *malloc - size_t size - - - void *calloc - size_t number - size_t size - - - int posix_memalign - void **ptr - size_t alignment - size_t size - - - void *aligned_alloc - size_t alignment - size_t size - - - void *realloc - void *ptr - size_t size - - - void free - void *ptr - - - - Non-standard API - - void *mallocx - size_t size - int flags - - - void *rallocx - void *ptr - size_t size - int flags - - - size_t xallocx - void *ptr - size_t size - size_t extra - int flags - - - size_t sallocx - void *ptr - int flags - - - void dallocx - void *ptr - int flags - - - size_t nallocx - size_t size - int flags - - - int mallctl - const char *name - void *oldp - size_t *oldlenp - void *newp - size_t newlen - - - int mallctlnametomib - const char *name - size_t *mibp - size_t *miblenp - - - int mallctlbymib - const size_t *mib - size_t miblen - void *oldp - size_t *oldlenp - void *newp - size_t newlen - - - void malloc_stats_print - void (*write_cb) - void *, const char * - - void *cbopaque - const char *opts - - - size_t malloc_usable_size - const void *ptr - - - void (*malloc_message) - void *cbopaque - const char *s - - const char *malloc_conf; - - - Experimental API - - int allocm - void **ptr - size_t *rsize - size_t size - int flags - - - int rallocm - void **ptr - size_t *rsize - size_t size - size_t extra - int flags - - - int sallocm - const void *ptr - size_t *rsize - int flags - - - int dallocm - void *ptr - int flags - - - int nallocm - size_t *rsize - size_t size - int flags - - - - - - DESCRIPTION - - Standard API - - The malloc function allocates - size bytes of uninitialized memory. The allocated - space is suitably aligned (after possible pointer coercion) for storage - of any type of object. - - The calloc function allocates - space for number objects, each - size bytes in length. The result is identical to - calling malloc with an argument of - number * size, with the - exception that the allocated memory is explicitly initialized to zero - bytes. - - The posix_memalign function - allocates size bytes of memory such that the - allocation's base address is an even multiple of - alignment, and returns the allocation in the value - pointed to by ptr. The requested - alignment must be a power of 2 at least as large - as sizeof(void *). - - The aligned_alloc function - allocates size bytes of memory such that the - allocation's base address is an even multiple of - alignment. The requested - alignment must be a power of 2. Behavior is - undefined if size is not an integral multiple of - alignment. - - The realloc function changes the - size of the previously allocated memory referenced by - ptr to size bytes. The - contents of the memory are unchanged up to the lesser of the new and old - sizes. If the new size is larger, the contents of the newly allocated - portion of the memory are undefined. Upon success, the memory referenced - by ptr is freed and a pointer to the newly - allocated memory is returned. Note that - realloc may move the memory allocation, - resulting in a different return value than ptr. - If ptr is NULL, the - realloc function behaves identically to - malloc for the specified size. - - The free function causes the - allocated memory referenced by ptr to be made - available for future allocations. If ptr is - NULL, no action occurs. 
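As a brief illustrative sketch of the standard API semantics described above (hypothetical usage, with error handling kept minimal): posix_memalign requires a power-of-two alignment of at least sizeof(void *), and realloc may return a pointer different from the one passed in.

    #include <stdlib.h>
    #include <string.h>

    int standard_api_demo(void) {
        void *p = NULL;
        /* Alignment must be a power of 2 and at least sizeof(void *). */
        if (posix_memalign(&p, 64, 1000) != 0)
            return -1;
        memset(p, 0, 1000);

        /* realloc may move the allocation; keep the old pointer valid on failure. */
        void *q = realloc(p, 4000);
        if (q == NULL) {
            free(p);
            return -1;
        }
        free(q);
        return 0;
    }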
- - - Non-standard API - The mallocx, - rallocx, - xallocx, - sallocx, - dallocx, and - nallocx functions all have a - flags argument that can be used to specify - options. The functions only check the options that are contextually - relevant. Use bitwise or (|) operations to - specify one or more of the following: - - - MALLOCX_LG_ALIGN(la) - - - Align the memory allocation to start at an address - that is a multiple of (1 << - la). This macro does not validate - that la is within the valid - range. - - - MALLOCX_ALIGN(a) - - - Align the memory allocation to start at an address - that is a multiple of a, where - a is a power of two. This macro does not - validate that a is a power of 2. - - - - MALLOCX_ZERO - - Initialize newly allocated memory to contain zero - bytes. In the growing reallocation case, the real size prior to - reallocation defines the boundary between untouched bytes and those - that are initialized to contain zero bytes. If this macro is - absent, newly allocated memory is uninitialized. - - - MALLOCX_ARENA(a) - - - Use the arena specified by the index - a (and by necessity bypass the thread - cache). This macro has no effect for huge regions, nor for regions - that were allocated via an arena other than the one specified. - This macro does not validate that a - specifies an arena index in the valid range. - - - - - The mallocx function allocates at - least size bytes of memory, and returns a pointer - to the base address of the allocation. Behavior is undefined if - size is 0, or if request size - overflows due to size class and/or alignment constraints. - - The rallocx function resizes the - allocation at ptr to be at least - size bytes, and returns a pointer to the base - address of the resulting allocation, which may or may not have moved from - its original location. Behavior is undefined if - size is 0, or if request size - overflows due to size class and/or alignment constraints. - - The xallocx function resizes the - allocation at ptr in place to be at least - size bytes, and returns the real size of the - allocation. If extra is non-zero, an attempt is - made to resize the allocation to be at least (size + - extra) bytes, though inability to allocate - the extra byte(s) will not by itself result in failure to resize. - Behavior is undefined if size is - 0, or if (size + extra - > SIZE_T_MAX). - - The sallocx function returns the - real size of the allocation at ptr. - - The dallocx function causes the - memory referenced by ptr to be made available for - future allocations. - - The nallocx function allocates no - memory, but it performs the same size computation as the - mallocx function, and returns the real - size of the allocation that would result from the equivalent - mallocx function call. Behavior is - undefined if size is 0, or if - request size overflows due to size class and/or alignment - constraints. - - The mallctl function provides a - general interface for introspecting the memory allocator, as well as - setting modifiable parameters and triggering actions. The - period-separated name argument specifies a - location in a tree-structured namespace; see the section for - documentation on the tree contents. To read a value, pass a pointer via - oldp to adequate space to contain the value, and a - pointer to its length via oldlenp; otherwise pass - NULL and NULL. Similarly, to - write a value, pass a pointer to the value via - newp, and its length via - newlen; otherwise pass NULL - and 0. 
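A rough sketch of the non-standard API just described (hypothetical example; error handling is simplified): mallocx with MALLOCX_ALIGN and MALLOCX_ZERO requests zeroed, 64-byte-aligned memory, sallocx reports the real size, and a read-only mallctl query passes oldp/oldlenp while leaving newp/newlen as NULL/0.

    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int nonstandard_api_demo(void) {
        /* Zero-initialized allocation, aligned to a 64-byte boundary. */
        void *p = mallocx(200, MALLOCX_ALIGN(64) | MALLOCX_ZERO);
        if (p == NULL)
            return -1;
        /* Real (possibly rounded-up) size of the allocation. */
        printf("real size: %zu\n", sallocx(p, 0));
        dallocx(p, 0);

        /* Read-only query: oldp/oldlenp set, newp=NULL, newlen=0. */
        const char *version;
        size_t sz = sizeof(version);
        if (mallctl("version", &version, &sz, NULL, 0) == 0)
            printf("jemalloc version: %s\n", version);
        return 0;
    }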
- - The mallctlnametomib function - provides a way to avoid repeated name lookups for applications that - repeatedly query the same portion of the namespace, by translating a name - to a “Management Information Base” (MIB) that can be passed - repeatedly to mallctlbymib. Upon - successful return from mallctlnametomib, - mibp contains an array of - *miblenp integers, where - *miblenp is the lesser of the number of components - in name and the input value of - *miblenp. Thus it is possible to pass a - *miblenp that is smaller than the number of - period-separated name components, which results in a partial MIB that can - be used as the basis for constructing a complete MIB. For name - components that are integers (e.g. the 2 in - arenas.bin.2.size), - the corresponding MIB component will always be that integer. Therefore, - it is legitimate to construct code like the following: - - The malloc_stats_print function - writes human-readable summary statistics via the - write_cb callback function pointer and - cbopaque data passed to - write_cb, or - malloc_message if - write_cb is NULL. This - function can be called repeatedly. General information that never - changes during execution can be omitted by specifying "g" as a character - within the opts string. Note that - malloc_message uses the - mallctl* functions internally, so - inconsistent statistics can be reported if multiple threads use these - functions simultaneously. If is - specified during configuration, “m” and “a” can - be specified to omit merged arena and per arena statistics, respectively; - “b” and “l” can be specified to omit per size - class statistics for bins and large objects, respectively. Unrecognized - characters are silently ignored. Note that thread caching may prevent - some statistics from being completely up to date, since extra locking - would be required to merge counters that track thread cache operations. - - - The malloc_usable_size function - returns the usable size of the allocation pointed to by - ptr. The return value may be larger than the size - that was requested during allocation. The - malloc_usable_size function is not a - mechanism for in-place realloc; rather - it is provided solely as a tool for introspection purposes. Any - discrepancy between the requested allocation size and the size reported - by malloc_usable_size should not be - depended on, since such behavior is entirely implementation-dependent. - - - - Experimental API - The experimental API is subject to change or removal without regard - for backward compatibility. If - is specified during configuration, the experimental API is - omitted. - - The allocm, - rallocm, - sallocm, - dallocm, and - nallocm functions all have a - flags argument that can be used to specify - options. The functions only check the options that are contextually - relevant. Use bitwise or (|) operations to - specify one or more of the following: - - - ALLOCM_LG_ALIGN(la) - - - Align the memory allocation to start at an address - that is a multiple of (1 << - la). This macro does not validate - that la is within the valid - range. - - - ALLOCM_ALIGN(a) - - - Align the memory allocation to start at an address - that is a multiple of a, where - a is a power of two. This macro does not - validate that a is a power of 2. - - - - ALLOCM_ZERO - - Initialize newly allocated memory to contain zero - bytes. 
In the growing reallocation case, the real size prior to - reallocation defines the boundary between untouched bytes and those - that are initialized to contain zero bytes. If this macro is - absent, newly allocated memory is uninitialized. - - - ALLOCM_NO_MOVE - - For reallocation, fail rather than moving the - object. This constraint can apply to both growth and - shrinkage. - - - ALLOCM_ARENA(a) - - - Use the arena specified by the index - a (and by necessity bypass the thread - cache). This macro has no effect for huge regions, nor for regions - that were allocated via an arena other than the one specified. - This macro does not validate that a - specifies an arena index in the valid range. - - - - - The allocm function allocates at - least size bytes of memory, sets - *ptr to the base address of the allocation, and - sets *rsize to the real size of the allocation if - rsize is not NULL. Behavior - is undefined if size is 0, or - if request size overflows due to size class and/or alignment - constraints. - - The rallocm function resizes the - allocation at *ptr to be at least - size bytes, sets *ptr to - the base address of the allocation if it moved, and sets - *rsize to the real size of the allocation if - rsize is not NULL. If - extra is non-zero, an attempt is made to resize - the allocation to be at least (size + - extra) bytes, though inability to allocate - the extra byte(s) will not by itself result in failure. Behavior is - undefined if size is 0, if - request size overflows due to size class and/or alignment constraints, or - if (size + - extra > - SIZE_T_MAX). - - The sallocm function sets - *rsize to the real size of the allocation. - - The dallocm function causes the - memory referenced by ptr to be made available for - future allocations. - - The nallocm function allocates no - memory, but it performs the same size computation as the - allocm function, and if - rsize is not NULL it sets - *rsize to the real size of the allocation that - would result from the equivalent allocm - function call. Behavior is undefined if size is - 0, or if request size overflows due to size class - and/or alignment constraints. - - - - TUNING - Once, when the first call is made to one of the memory allocation - routines, the allocator initializes its internals based in part on various - options that can be specified at compile- or run-time. - - The string pointed to by the global variable - malloc_conf, the “name” of the file - referenced by the symbolic link named /etc/malloc.conf, and the value of the - environment variable MALLOC_CONF, will be interpreted, in - that order, from left to right as options. Note that - malloc_conf may be read before - main is entered, so the declaration of - malloc_conf should specify an initializer that contains - the final value to be read by jemalloc. malloc_conf is - a compile-time setting, whereas /etc/malloc.conf and MALLOC_CONF - can be safely set any time prior to program invocation. - - An options string is a comma-separated list of option:value pairs. - There is one key corresponding to each opt.* mallctl (see the section for options - documentation). For example, abort:true,narenas:1 sets - the opt.abort and opt.narenas options. Some - options have boolean values (true/false), others have integer values (base - 8, 10, or 16, depending on prefix), and yet others have raw string - values. 
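A small sketch of the tuning mechanism described above (hypothetical application code): the malloc_conf global carries compile-time defaults, here the same abort:true,narenas:1 string used as the example, while the MALLOC_CONF environment variable can add or override options at program invocation.

    #include <stdlib.h>
    #include <jemalloc/jemalloc.h>

    /* Compile-time defaults; read before main() is entered, so the
     * initializer must already contain the final value. */
    const char *malloc_conf = "abort:true,narenas:1";

    int main(void) {
        /* At run time, e.g.:
         *   MALLOC_CONF="stats_print:true" ./a.out
         * would additionally enable statistics printing at exit. */
        void *p = malloc(64);
        free(p);
        return 0;
    }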
- - - IMPLEMENTATION NOTES - Traditionally, allocators have used - sbrk - 2 to obtain memory, which is - suboptimal for several reasons, including race conditions, increased - fragmentation, and artificial limitations on maximum usable memory. If - is specified during configuration, this - allocator uses both mmap - 2 and - sbrk - 2, in that order of preference; - otherwise only mmap - 2 is used. - - This allocator uses multiple arenas in order to reduce lock - contention for threaded programs on multi-processor systems. This works - well with regard to threading scalability, but incurs some costs. There is - a small fixed per-arena overhead, and additionally, arenas manage memory - completely independently of each other, which means a small fixed increase - in overall memory fragmentation. These overheads are not generally an - issue, given the number of arenas normally used. Note that using - substantially more arenas than the default is not likely to improve - performance, mainly due to reduced cache performance. However, it may make - sense to reduce the number of arenas if an application does not make much - use of the allocation functions. - - In addition to multiple arenas, unless - is specified during configuration, this - allocator supports thread-specific caching for small and large objects, in - order to make it possible to completely avoid synchronization for most - allocation requests. Such caching allows very fast allocation in the - common case, but it increases memory usage and fragmentation, since a - bounded number of objects can remain allocated in each thread cache. - - Memory is conceptually broken into equal-sized chunks, where the - chunk size is a power of two that is greater than the page size. Chunks - are always aligned to multiples of the chunk size. This alignment makes it - possible to find metadata for user objects very quickly. - - User objects are broken into three categories according to size: - small, large, and huge. Small objects are smaller than one page. Large - objects are smaller than the chunk size. Huge objects are a multiple of - the chunk size. Small and large objects are managed by arenas; huge - objects are managed separately in a single data structure that is shared by - all threads. Huge objects are used by applications infrequently enough - that this single data structure is not a scalability issue. - - Each chunk that is managed by an arena tracks its contents as runs of - contiguous pages (unused, backing a set of small objects, or backing one - large object). The combination of chunk alignment and chunk page maps - makes it possible to determine all metadata regarding small and large - allocations in constant time. - - Small objects are managed in groups by page runs. Each run maintains - a frontier and free list to track which regions are in use. Allocation - requests that are no more than half the quantum (8 or 16, depending on - architecture) are rounded up to the nearest power of two that is at least - sizeof(double). All other small - object size classes are multiples of the quantum, spaced such that internal - fragmentation is limited to approximately 25% for all but the smallest size - classes. Allocation requests that are larger than the maximum small size - class, but small enough to fit in an arena-managed chunk (see the opt.lg_chunk option), are - rounded up to the nearest run size. Allocation requests that are too large - to fit in an arena-managed chunk are rounded up to the nearest multiple of - the chunk size. 
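The rounding behavior described above can be observed without allocating anything, using nallocx (an illustrative sketch; the exact size classes depend on the build's quantum, page size, and chunk size).

    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int main(void) {
        size_t requests[] = {1, 100, 4000, 5000000};
        for (size_t i = 0; i < sizeof(requests) / sizeof(requests[0]); i++) {
            /* nallocx performs the same size computation as mallocx and
             * returns the real size the request would be rounded to. */
            printf("%zu -> %zu\n", requests[i], nallocx(requests[i], 0));
        }
        return 0;
    }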
- - Allocations are packed tightly together, which can be an issue for - multi-threaded applications. If you need to assure that allocations do not - suffer from cacheline sharing, round your allocation requests up to the - nearest multiple of the cacheline size, or specify cacheline alignment when - allocating. - - Assuming 4 MiB chunks, 4 KiB pages, and a 16-byte quantum on a 64-bit - system, the size classes in each category are as shown in . - - - Size classes - - - - - - - Category - Spacing - Size - - - - - Small - lg - [8] - - - 16 - [16, 32, 48, ..., 128] - - - 32 - [160, 192, 224, 256] - - - 64 - [320, 384, 448, 512] - - - 128 - [640, 768, 896, 1024] - - - 256 - [1280, 1536, 1792, 2048] - - - 512 - [2560, 3072, 3584] - - - Large - 4 KiB - [4 KiB, 8 KiB, 12 KiB, ..., 4072 KiB] - - - Huge - 4 MiB - [4 MiB, 8 MiB, 12 MiB, ...] - - - -
-
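Following the cacheline-sharing advice given just before the size-class table (an illustrative sketch that assumes a 64-byte cacheline; adjust for the target CPU), per-thread data can be rounded up to the cacheline size and cacheline-aligned so that neighbouring allocations never share a line.

    #include <jemalloc/jemalloc.h>

    #define ASSUMED_CACHELINE 64  /* assumption, not a jemalloc constant */

    /* One counter per thread, each occupying its own cacheline; the
     * request is both sized and aligned to the cacheline. May return
     * NULL on allocation failure. */
    static unsigned long *new_counter(void) {
        return mallocx(ASSUMED_CACHELINE,
            MALLOCX_ALIGN(ASSUMED_CACHELINE) | MALLOCX_ZERO);
    }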
- - MALLCTL NAMESPACE - The following names are defined in the namespace accessible via the - mallctl* functions. Value types are - specified in parentheses, their readable/writable statuses are encoded as - rw, r-, -w, or - --, and required build configuration flags follow, if - any. A name element encoded as <i> or - <j> indicates an integer component, where the - integer varies from 0 to some upper value that must be determined via - introspection. In the case of stats.arenas.<i>.*, - <i> equal to arenas.narenas can be - used to access the summation of statistics from all arenas. Take special - note of the epoch mallctl, - which controls refreshing of cached dynamic statistics. - - - - - version - (const char *) - r- - - Return the jemalloc version string. - - - - - epoch - (uint64_t) - rw - - If a value is passed in, refresh the data from which - the mallctl* functions report values, - and increment the epoch. Return the current epoch. This is useful for - detecting whether another thread caused a refresh. - - - - - config.debug - (bool) - r- - - was specified during - build configuration. - - - - - config.dss - (bool) - r- - - was specified during - build configuration. - - - - - config.fill - (bool) - r- - - was specified during - build configuration. - - - - - config.lazy_lock - (bool) - r- - - was specified - during build configuration. - - - - - config.mremap - (bool) - r- - - was specified during - build configuration. - - - - - config.munmap - (bool) - r- - - was specified during - build configuration. - - - - - config.prof - (bool) - r- - - was specified during - build configuration. - - - - - config.prof_libgcc - (bool) - r- - - was not - specified during build configuration. - - - - - config.prof_libunwind - (bool) - r- - - was specified - during build configuration. - - - - - config.stats - (bool) - r- - - was specified during - build configuration. - - - - - config.tcache - (bool) - r- - - was not specified - during build configuration. - - - - - config.tls - (bool) - r- - - was not specified during - build configuration. - - - - - config.utrace - (bool) - r- - - was specified during - build configuration. - - - - - config.valgrind - (bool) - r- - - was specified during - build configuration. - - - - - config.xmalloc - (bool) - r- - - was specified during - build configuration. - - - - - opt.abort - (bool) - r- - - Abort-on-warning enabled/disabled. If true, most - warnings are fatal. The process will call - abort - 3 in these cases. This option is - disabled by default unless is - specified during configuration, in which case it is enabled by default. - - - - - - opt.dss - (const char *) - r- - - dss (sbrk - 2) allocation precedence as - related to mmap - 2 allocation. The following - settings are supported: “disabled”, “primary”, - and “secondary”. The default is “secondary” if - config.dss is - true, “disabled” otherwise. - - - - - - opt.lg_chunk - (size_t) - r- - - Virtual memory chunk size (log base 2). If a chunk - size outside the supported size range is specified, the size is - silently clipped to the minimum/maximum supported size. The default - chunk size is 4 MiB (2^22). - - - - - - opt.narenas - (size_t) - r- - - Maximum number of arenas to use for automatic - multiplexing of threads and arenas. The default is four times the - number of CPUs, or one if there is a single CPU. - - - - - opt.lg_dirty_mult - (ssize_t) - r- - - Per-arena minimum ratio (log base 2) of active to dirty - pages. 
Some dirty unused pages may be allowed to accumulate, within - the limit set by the ratio (or one chunk worth of dirty pages, - whichever is greater), before informing the kernel about some of those - pages via madvise - 2 or a similar system call. This - provides the kernel with sufficient information to recycle dirty pages - if physical memory becomes scarce and the pages remain unused. The - default minimum ratio is 8:1 (2^3:1); an option value of -1 will - disable dirty page purging. - - - - - opt.stats_print - (bool) - r- - - Enable/disable statistics printing at exit. If - enabled, the malloc_stats_print - function is called at program exit via an - atexit - 3 function. If - is specified during configuration, this - has the potential to cause deadlock for a multi-threaded process that - exits while one or more threads are executing in the memory allocation - functions. Therefore, this option should only be used with care; it is - primarily intended as a performance tuning aid during application - development. This option is disabled by default. - - - - - opt.junk - (bool) - r- - [] - - Junk filling enabled/disabled. If enabled, each byte - of uninitialized allocated memory will be initialized to - 0xa5. All deallocated memory will be initialized to - 0x5a. This is intended for debugging and will - impact performance negatively. This option is disabled by default - unless is specified during - configuration, in which case it is enabled by default unless running - inside Valgrind. - - - - - opt.quarantine - (size_t) - r- - [] - - Per thread quarantine size in bytes. If non-zero, each - thread maintains a FIFO object quarantine that stores up to the - specified number of bytes of memory. The quarantined memory is not - freed until it is released from quarantine, though it is immediately - junk-filled if the opt.junk option is - enabled. This feature is of particular use in combination with Valgrind, which can detect attempts - to access quarantined objects. This is intended for debugging and will - impact performance negatively. The default quarantine size is 0 unless - running inside Valgrind, in which case the default is 16 - MiB. - - - - - opt.redzone - (bool) - r- - [] - - Redzones enabled/disabled. If enabled, small - allocations have redzones before and after them. Furthermore, if the - opt.junk option is - enabled, the redzones are checked for corruption during deallocation. - However, the primary intended purpose of this feature is to be used in - combination with Valgrind, - which needs redzones in order to do effective buffer overflow/underflow - detection. This option is intended for debugging and will impact - performance negatively. This option is disabled by - default unless running inside Valgrind. - - - - - opt.zero - (bool) - r- - [] - - Zero filling enabled/disabled. If enabled, each byte - of uninitialized allocated memory will be initialized to 0. Note that - this initialization only happens once for each byte, so - realloc, - rallocx and - rallocm calls do not zero memory that - was previously allocated. This is intended for debugging and will - impact performance negatively. This option is disabled by default. - - - - - - opt.utrace - (bool) - r- - [] - - Allocation tracing based on - utrace - 2 enabled/disabled. This option - is disabled by default. - - - - - opt.valgrind - (bool) - r- - [] - - Valgrind - support enabled/disabled. This option is vestigal because jemalloc - auto-detects whether it is running inside Valgrind. 
This option is - disabled by default, unless running inside Valgrind. - - - - - opt.xmalloc - (bool) - r- - [] - - Abort-on-out-of-memory enabled/disabled. If enabled, - rather than returning failure for any allocation function, display a - diagnostic message on STDERR_FILENO and cause the - program to drop core (using - abort - 3). If an application is - designed to depend on this behavior, set the option at compile time by - including the following in the source code: - - This option is disabled by default. - - - - - opt.tcache - (bool) - r- - [] - - Thread-specific caching enabled/disabled. When there - are multiple threads, each thread uses a thread-specific cache for - objects up to a certain size. Thread-specific caching allows many - allocations to be satisfied without performing any thread - synchronization, at the cost of increased memory use. See the - opt.lg_tcache_max - option for related tuning information. This option is enabled by - default unless running inside Valgrind. - - - - - opt.lg_tcache_max - (size_t) - r- - [] - - Maximum size class (log base 2) to cache in the - thread-specific cache. At a minimum, all small size classes are - cached, and at a maximum all large size classes are cached. The - default maximum is 32 KiB (2^15). - - - - - opt.prof - (bool) - r- - [] - - Memory profiling enabled/disabled. If enabled, profile - memory allocation activity. See the opt.prof_active - option for on-the-fly activation/deactivation. See the opt.lg_prof_sample - option for probabilistic sampling control. See the opt.prof_accum - option for control of cumulative sample reporting. See the opt.lg_prof_interval - option for information on interval-triggered profile dumping, the opt.prof_gdump - option for information on high-water-triggered profile dumping, and the - opt.prof_final - option for final profile dumping. Profile output is compatible with - the included pprof Perl script, which originates - from the gperftools - package. - - - - - opt.prof_prefix - (const char *) - r- - [] - - Filename prefix for profile dumps. If the prefix is - set to the empty string, no automatic dumps will occur; this is - primarily useful for disabling the automatic final heap dump (which - also disables leak reporting, if enabled). The default prefix is - jeprof. - - - - - opt.prof_active - (bool) - rw - [] - - Profiling activated/deactivated. This is a secondary - control mechanism that makes it possible to start the application with - profiling enabled (see the opt.prof option) but - inactive, then toggle profiling at any time during program execution - with the prof.active mallctl. - This option is enabled by default. - - - - - opt.lg_prof_sample - (ssize_t) - r- - [] - - Average interval (log base 2) between allocation - samples, as measured in bytes of allocation activity. Increasing the - sampling interval decreases profile fidelity, but also decreases the - computational overhead. The default sample interval is 512 KiB (2^19 - B). - - - - - opt.prof_accum - (bool) - r- - [] - - Reporting of cumulative object/byte counts in profile - dumps enabled/disabled. If this option is enabled, every unique - backtrace must be stored for the duration of execution. Depending on - the application, this can impose a large memory overhead, and the - cumulative counts are not always of interest. This option is disabled - by default. - - - - - opt.lg_prof_interval - (ssize_t) - r- - [] - - Average interval (log base 2) between memory profile - dumps, as measured in bytes of allocation activity. 
The actual - interval between dumps may be sporadic because decentralized allocation - counters are used to avoid synchronization bottlenecks. Profiles are - dumped to files named according to the pattern - <prefix>.<pid>.<seq>.i<iseq>.heap, - where <prefix> is controlled by the - opt.prof_prefix - option. By default, interval-triggered profile dumping is disabled - (encoded as -1). - - - - - - opt.prof_gdump - (bool) - r- - [] - - Trigger a memory profile dump every time the total - virtual memory exceeds the previous maximum. Profiles are dumped to - files named according to the pattern - <prefix>.<pid>.<seq>.u<useq>.heap, - where <prefix> is controlled by the opt.prof_prefix - option. This option is disabled by default. - - - - - opt.prof_final - (bool) - r- - [] - - Use an - atexit - 3 function to dump final memory - usage to a file named according to the pattern - <prefix>.<pid>.<seq>.f.heap, - where <prefix> is controlled by the opt.prof_prefix - option. This option is enabled by default. - - - - - opt.prof_leak - (bool) - r- - [] - - Leak reporting enabled/disabled. If enabled, use an - atexit - 3 function to report memory leaks - detected by allocation sampling. See the - opt.prof option for - information on analyzing heap profile output. This option is disabled - by default. - - - - - thread.arena - (unsigned) - rw - - Get or set the arena associated with the calling - thread. If the specified arena was not initialized beforehand (see the - arenas.initialized - mallctl), it will be automatically initialized as a side effect of - calling this interface. - - - - - thread.allocated - (uint64_t) - r- - [] - - Get the total number of bytes ever allocated by the - calling thread. This counter has the potential to wrap around; it is - up to the application to appropriately interpret the counter in such - cases. - - - - - thread.allocatedp - (uint64_t *) - r- - [] - - Get a pointer to the the value that is returned by the - thread.allocated - mallctl. This is useful for avoiding the overhead of repeated - mallctl* calls. - - - - - thread.deallocated - (uint64_t) - r- - [] - - Get the total number of bytes ever deallocated by the - calling thread. This counter has the potential to wrap around; it is - up to the application to appropriately interpret the counter in such - cases. - - - - - thread.deallocatedp - (uint64_t *) - r- - [] - - Get a pointer to the the value that is returned by the - thread.deallocated - mallctl. This is useful for avoiding the overhead of repeated - mallctl* calls. - - - - - thread.tcache.enabled - (bool) - rw - [] - - Enable/disable calling thread's tcache. The tcache is - implicitly flushed as a side effect of becoming - disabled (see thread.tcache.flush). - - - - - - thread.tcache.flush - (void) - -- - [] - - Flush calling thread's tcache. This interface releases - all cached objects and internal data structures associated with the - calling thread's thread-specific cache. Ordinarily, this interface - need not be called, since automatic periodic incremental garbage - collection occurs, and the thread cache is automatically discarded when - a thread exits. However, garbage collection is triggered by allocation - activity, so it is possible for a thread that stops - allocating/deallocating to retain its cache indefinitely, in which case - the developer may find manual flushing useful. - - - - - arena.<i>.purge - (unsigned) - -- - - Purge unused dirty pages for arena <i>, or for - all arenas if <i> equals arenas.narenas. 
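For illustration, the thread.* and arena.<i>.purge controls above might be exercised as follows. This is a minimal sketch, not part of the deleted sources: it assumes jemalloc 3.6's usual <jemalloc/jemalloc.h> header and mallctl() signature, and a build with statistics and tcache support compiled in.

    #include <stdint.h>
    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int main(void)
    {
        uint64_t allocated;            /* thread.allocated is a uint64_t counter */
        size_t len = sizeof(allocated);

        /* Read how many bytes this thread has ever allocated (returns 0 on success). */
        if (mallctl("thread.allocated", &allocated, &len, NULL, 0) == 0)
            printf("thread.allocated = %llu\n", (unsigned long long)allocated);

        /* Release all objects cached by the calling thread's tcache. */
        mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);

        /* Purge unused dirty pages of arena 0; using the value of
         * arenas.narenas as the index would purge all arenas. */
        mallctl("arena.0.purge", NULL, NULL, NULL, 0);

        return 0;
    }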
- - - - - - arena.<i>.dss - (const char *) - rw - - Set the precedence of dss allocation as related to mmap - allocation for arena <i>, or for all arenas if <i> equals - arenas.narenas. Note - that even during huge allocation this setting is read from the arena - that would be chosen for small or large allocation so that applications - can depend on consistent dss versus mmap allocation regardless of - allocation size. See opt.dss for supported - settings. - - - - - - arenas.narenas - (unsigned) - r- - - Current limit on number of arenas. - - - - - arenas.initialized - (bool *) - r- - - An array of arenas.narenas - booleans. Each boolean indicates whether the corresponding arena is - initialized. - - - - - arenas.quantum - (size_t) - r- - - Quantum size. - - - - - arenas.page - (size_t) - r- - - Page size. - - - - - arenas.tcache_max - (size_t) - r- - [] - - Maximum thread-cached size class. - - - - - arenas.nbins - (unsigned) - r- - - Number of bin size classes. - - - - - arenas.nhbins - (unsigned) - r- - [] - - Total number of thread cache bin size - classes. - - - - - arenas.bin.<i>.size - (size_t) - r- - - Maximum size supported by size class. - - - - - arenas.bin.<i>.nregs - (uint32_t) - r- - - Number of regions per page run. - - - - - arenas.bin.<i>.run_size - (size_t) - r- - - Number of bytes per page run. - - - - - arenas.nlruns - (size_t) - r- - - Total number of large size classes. - - - - - arenas.lrun.<i>.size - (size_t) - r- - - Maximum size supported by this large size - class. - - - - - arenas.purge - (unsigned) - -w - - Purge unused dirty pages for the specified arena, or - for all arenas if none is specified. - - - - - arenas.extend - (unsigned) - r- - - Extend the array of arenas by appending a new arena, - and returning the new arena index. - - - - - prof.active - (bool) - rw - [] - - Control whether sampling is currently active. See the - opt.prof_active - option for additional information. - - - - - - prof.dump - (const char *) - -w - [] - - Dump a memory profile to the specified file, or if NULL - is specified, to a file according to the pattern - <prefix>.<pid>.<seq>.m<mseq>.heap, - where <prefix> is controlled by the - opt.prof_prefix - option. - - - - - prof.interval - (uint64_t) - r- - [] - - Average number of bytes allocated between - inverval-based profile dumps. See the - opt.lg_prof_interval - option for additional information. - - - - - stats.cactive - (size_t *) - r- - [] - - Pointer to a counter that contains an approximate count - of the current number of bytes in active pages. The estimate may be - high, but never low, because each arena rounds up to the nearest - multiple of the chunk size when computing its contribution to the - counter. Note that the epoch mallctl has no bearing - on this counter. Furthermore, counter consistency is maintained via - atomic operations, so it is necessary to use an atomic operation in - order to guarantee a consistent read when dereferencing the pointer. - - - - - - stats.allocated - (size_t) - r- - [] - - Total number of bytes allocated by the - application. - - - - - stats.active - (size_t) - r- - [] - - Total number of bytes in active pages allocated by the - application. This is a multiple of the page size, and greater than or - equal to stats.allocated. - This does not include - stats.arenas.<i>.pdirty and pages - entirely devoted to allocator metadata. - - - - - stats.mapped - (size_t) - r- - [] - - Total number of bytes in chunks mapped on behalf of the - application. 
This is a multiple of the chunk size, and is at least as - large as stats.active. This - does not include inactive chunks. - - - - - stats.chunks.current - (size_t) - r- - [] - - Total number of chunks actively mapped on behalf of the - application. This does not include inactive chunks. - - - - - - stats.chunks.total - (uint64_t) - r- - [] - - Cumulative number of chunks allocated. - - - - - stats.chunks.high - (size_t) - r- - [] - - Maximum number of active chunks at any time thus far. - - - - - - stats.huge.allocated - (size_t) - r- - [] - - Number of bytes currently allocated by huge objects. - - - - - - stats.huge.nmalloc - (uint64_t) - r- - [] - - Cumulative number of huge allocation requests. - - - - - - stats.huge.ndalloc - (uint64_t) - r- - [] - - Cumulative number of huge deallocation requests. - - - - - - stats.arenas.<i>.dss - (const char *) - r- - - dss (sbrk - 2) allocation precedence as - related to mmap - 2 allocation. See opt.dss for details. - - - - - - stats.arenas.<i>.nthreads - (unsigned) - r- - - Number of threads currently assigned to - arena. - - - - - stats.arenas.<i>.pactive - (size_t) - r- - - Number of pages in active runs. - - - - - stats.arenas.<i>.pdirty - (size_t) - r- - - Number of pages within unused runs that are potentially - dirty, and for which madvise... - MADV_DONTNEED or - similar has not been called. - - - - - stats.arenas.<i>.mapped - (size_t) - r- - [] - - Number of mapped bytes. - - - - - stats.arenas.<i>.npurge - (uint64_t) - r- - [] - - Number of dirty page purge sweeps performed. - - - - - - stats.arenas.<i>.nmadvise - (uint64_t) - r- - [] - - Number of madvise... - MADV_DONTNEED or - similar calls made to purge dirty pages. - - - - - stats.arenas.<i>.purged - (uint64_t) - r- - [] - - Number of pages purged. - - - - - stats.arenas.<i>.small.allocated - (size_t) - r- - [] - - Number of bytes currently allocated by small objects. - - - - - - stats.arenas.<i>.small.nmalloc - (uint64_t) - r- - [] - - Cumulative number of allocation requests served by - small bins. - - - - - stats.arenas.<i>.small.ndalloc - (uint64_t) - r- - [] - - Cumulative number of small objects returned to bins. - - - - - - stats.arenas.<i>.small.nrequests - (uint64_t) - r- - [] - - Cumulative number of small allocation requests. - - - - - - stats.arenas.<i>.large.allocated - (size_t) - r- - [] - - Number of bytes currently allocated by large objects. - - - - - - stats.arenas.<i>.large.nmalloc - (uint64_t) - r- - [] - - Cumulative number of large allocation requests served - directly by the arena. - - - - - stats.arenas.<i>.large.ndalloc - (uint64_t) - r- - [] - - Cumulative number of large deallocation requests served - directly by the arena. - - - - - stats.arenas.<i>.large.nrequests - (uint64_t) - r- - [] - - Cumulative number of large allocation requests. - - - - - - stats.arenas.<i>.bins.<j>.allocated - (size_t) - r- - [] - - Current number of bytes allocated by - bin. - - - - - stats.arenas.<i>.bins.<j>.nmalloc - (uint64_t) - r- - [] - - Cumulative number of allocations served by bin. - - - - - - stats.arenas.<i>.bins.<j>.ndalloc - (uint64_t) - r- - [] - - Cumulative number of allocations returned to bin. - - - - - - stats.arenas.<i>.bins.<j>.nrequests - (uint64_t) - r- - [] - - Cumulative number of allocation - requests. - - - - - stats.arenas.<i>.bins.<j>.nfills - (uint64_t) - r- - [ ] - - Cumulative number of tcache fills. - - - - - stats.arenas.<i>.bins.<j>.nflushes - (uint64_t) - r- - [ ] - - Cumulative number of tcache flushes. 
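The stats.arenas.<i>.* entries are typically read by translating a name to a MIB once and then substituting the arena index for the <i> component, refreshing the epoch first so the cached counters are current. A minimal sketch, assuming the usual jemalloc 3.6 mallctl*() signatures and a build with statistics enabled; arena_pactive() is a hypothetical helper, not part of the deleted sources:

    #include <stdint.h>
    #include <jemalloc/jemalloc.h>

    /* Hypothetical helper: number of pages in active runs for arena i. */
    static size_t arena_pactive(unsigned i)
    {
        uint64_t epoch = 1;
        size_t sz = sizeof(epoch);
        size_t mib[4];
        size_t miblen = sizeof(mib) / sizeof(mib[0]);
        size_t pactive = 0;

        /* Refresh the data from which the stats.* mallctls report values. */
        mallctl("epoch", &epoch, &sz, &epoch, sz);

        /* "stats.arenas.0.pactive" -> MIB; component 2 is the arena index. */
        if (mallctlnametomib("stats.arenas.0.pactive", mib, &miblen) == 0) {
            mib[2] = i;
            sz = sizeof(pactive);
            mallctlbymib(mib, miblen, &pactive, &sz, NULL, 0);
        }
        return pactive;
    }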
- - - - - stats.arenas.<i>.bins.<j>.nruns - (uint64_t) - r- - [] - - Cumulative number of runs created. - - - - - stats.arenas.<i>.bins.<j>.nreruns - (uint64_t) - r- - [] - - Cumulative number of times the current run from which - to allocate changed. - - - - - stats.arenas.<i>.bins.<j>.curruns - (size_t) - r- - [] - - Current number of runs. - - - - - stats.arenas.<i>.lruns.<j>.nmalloc - (uint64_t) - r- - [] - - Cumulative number of allocation requests for this size - class served directly by the arena. - - - - - stats.arenas.<i>.lruns.<j>.ndalloc - (uint64_t) - r- - [] - - Cumulative number of deallocation requests for this - size class served directly by the arena. - - - - - stats.arenas.<i>.lruns.<j>.nrequests - (uint64_t) - r- - [] - - Cumulative number of allocation requests for this size - class. - - - - - stats.arenas.<i>.lruns.<j>.curruns - (size_t) - r- - [] - - Current number of runs for this size class. - - - - - - DEBUGGING MALLOC PROBLEMS - When debugging, it is a good idea to configure/build jemalloc with - the and - options, and recompile the program with suitable options and symbols for - debugger support. When so configured, jemalloc incorporates a wide variety - of run-time assertions that catch application errors such as double-free, - write-after-free, etc. - - Programs often accidentally depend on “uninitialized” - memory actually being filled with zero bytes. Junk filling - (see the opt.junk - option) tends to expose such bugs in the form of obviously incorrect - results and/or coredumps. Conversely, zero - filling (see the opt.zero option) eliminates - the symptoms of such bugs. Between these two options, it is usually - possible to quickly detect, diagnose, and eliminate such bugs. - - This implementation does not provide much detail about the problems - it detects, because the performance impact for storing such information - would be prohibitive. However, jemalloc does integrate with the most - excellent Valgrind tool if the - configuration option is enabled. - - - DIAGNOSTIC MESSAGES - If any of the memory allocation/deallocation functions detect an - error or warning condition, a message will be printed to file descriptor - STDERR_FILENO. Errors will result in the process - dumping core. If the opt.abort option is set, most - warnings are treated as errors. - - The malloc_message variable allows the programmer - to override the function which emits the text strings forming the errors - and warnings if for some reason the STDERR_FILENO file - descriptor is not suitable for this. - malloc_message takes the - cbopaque pointer argument that is - NULL unless overridden by the arguments in a call to - malloc_stats_print, followed by a string - pointer. Please note that doing anything which tries to allocate memory in - this function is likely to result in a crash or deadlock. - - All messages are prefixed by - “<jemalloc>: ”. - - - RETURN VALUES - - Standard API - The malloc and - calloc functions return a pointer to the - allocated memory if successful; otherwise a NULL - pointer is returned and errno is set to - ENOMEM. - - The posix_memalign function - returns the value 0 if successful; otherwise it returns an error value. - The posix_memalign function will fail - if: - - - EINVAL - - The alignment parameter is - not a power of 2 at least as large as - sizeof(void *). - - - - ENOMEM - - Memory allocation error. - - - - - The aligned_alloc function returns - a pointer to the allocated memory if successful; otherwise a - NULL pointer is returned and - errno is set. 
The - aligned_alloc function will fail if: - - - EINVAL - - The alignment parameter is - not a power of 2. - - - - ENOMEM - - Memory allocation error. - - - - - The realloc function returns a - pointer, possibly identical to ptr, to the - allocated memory if successful; otherwise a NULL - pointer is returned, and errno is set to - ENOMEM if the error was the result of an - allocation failure. The realloc - function always leaves the original buffer intact when an error occurs. - - - The free function returns no - value. - - - Non-standard API - The mallocx and - rallocx functions return a pointer to - the allocated memory if successful; otherwise a NULL - pointer is returned to indicate insufficient contiguous memory was - available to service the allocation request. - - The xallocx function returns the - real size of the resulting resized allocation pointed to by - ptr, which is a value less than - size if the allocation could not be adequately - grown in place. - - The sallocx function returns the - real size of the allocation pointed to by ptr. - - - The nallocx returns the real size - that would result from a successful equivalent - mallocx function call, or zero if - insufficient memory is available to perform the size computation. - - The mallctl, - mallctlnametomib, and - mallctlbymib functions return 0 on - success; otherwise they return an error value. The functions will fail - if: - - - EINVAL - - newp is not - NULL, and newlen is too - large or too small. Alternatively, *oldlenp - is too large or too small; in this case as much data as possible - are read despite the error. - - - ENOENT - - name or - mib specifies an unknown/invalid - value. - - - EPERM - - Attempt to read or write void value, or attempt to - write read-only value. - - - EAGAIN - - A memory allocation failure - occurred. - - - EFAULT - - An interface with side effects failed in some way - not directly related to mallctl* - read/write processing. - - - - - The malloc_usable_size function - returns the usable size of the allocation pointed to by - ptr. - - - Experimental API - The allocm, - rallocm, - sallocm, - dallocm, and - nallocm functions return - ALLOCM_SUCCESS on success; otherwise they return an - error value. The allocm, - rallocm, and - nallocm functions will fail if: - - - ALLOCM_ERR_OOM - - Out of memory. Insufficient contiguous memory was - available to service the allocation request. The - allocm function additionally sets - *ptr to NULL, whereas - the rallocm function leaves - *ptr unmodified. - - - The rallocm function will also - fail if: - - - ALLOCM_ERR_NOT_MOVED - - ALLOCM_NO_MOVE was specified, - but the reallocation request could not be serviced without moving - the object. - - - - - - - ENVIRONMENT - The following environment variable affects the execution of the - allocation functions: - - - MALLOC_CONF - - If the environment variable - MALLOC_CONF is set, the characters it contains - will be interpreted as options. - - - - - - EXAMPLES - To dump core whenever a problem occurs: - ln -s 'abort:true' /etc/malloc.conf - - To specify in the source a chunk size that is 16 MiB: - - - - SEE ALSO - madvise - 2, - mmap - 2, - sbrk - 2, - utrace - 2, - alloca - 3, - atexit - 3, - getpagesize - 3 - - - STANDARDS - The malloc, - calloc, - realloc, and - free functions conform to ISO/IEC - 9899:1990 (“ISO C90”). - - The posix_memalign function conforms - to IEEE Std 1003.1-2001 (“POSIX.1”). 
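As a worked example of the opt.* settings above: opt.lg_chunk expresses the chunk size in log base 2, so a 16 MiB chunk (2^24 bytes) corresponds to lg_chunk:24, and the option keys are the opt.* names without the "opt." prefix (as in the 'abort:true' example). The sketch below is illustrative only, assuming an unprefixed jemalloc 3.6 build that honors the malloc_conf global:

    /* Read by jemalloc at startup. This aborts on warnings, prints
     * statistics at exit, and requests 16 MiB (2^24 byte) chunks. */
    const char *malloc_conf = "abort:true,stats_print:true,lg_chunk:24";

The same settings could instead be supplied at run time via the environment, e.g. MALLOC_CONF="stats_print:true" ./app.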
- - diff --git a/lib/jemalloc-3.6.0/doc/manpages.xsl.in b/lib/jemalloc-3.6.0/doc/manpages.xsl.in deleted file mode 100644 index 88b2626..0000000 --- a/lib/jemalloc-3.6.0/doc/manpages.xsl.in +++ /dev/null @@ -1,4 +0,0 @@ - - - - diff --git a/lib/jemalloc-3.6.0/doc/stylesheet.xsl b/lib/jemalloc-3.6.0/doc/stylesheet.xsl deleted file mode 100644 index 4e334a8..0000000 --- a/lib/jemalloc-3.6.0/doc/stylesheet.xsl +++ /dev/null @@ -1,7 +0,0 @@ - - ansi - - - "" - - diff --git a/lib/jemalloc-3.6.0/include/jemalloc/internal/arena.h b/lib/jemalloc-3.6.0/include/jemalloc/internal/arena.h deleted file mode 100644 index 9d000c0..0000000 --- a/lib/jemalloc-3.6.0/include/jemalloc/internal/arena.h +++ /dev/null @@ -1,1063 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -/* - * RUN_MAX_OVRHD indicates maximum desired run header overhead. Runs are sized - * as small as possible such that this setting is still honored, without - * violating other constraints. The goal is to make runs as small as possible - * without exceeding a per run external fragmentation threshold. - * - * We use binary fixed point math for overhead computations, where the binary - * point is implicitly RUN_BFP bits to the left. - * - * Note that it is possible to set RUN_MAX_OVRHD low enough that it cannot be - * honored for some/all object sizes, since when heap profiling is enabled - * there is one pointer of header overhead per object (plus a constant). This - * constraint is relaxed (ignored) for runs that are so small that the - * per-region overhead is greater than: - * - * (RUN_MAX_OVRHD / (reg_interval << (3+RUN_BFP)) - */ -#define RUN_BFP 12 -/* \/ Implicit binary fixed point. */ -#define RUN_MAX_OVRHD 0x0000003dU -#define RUN_MAX_OVRHD_RELAX 0x00001800U - -/* Maximum number of regions in one run. */ -#define LG_RUN_MAXREGS 11 -#define RUN_MAXREGS (1U << LG_RUN_MAXREGS) - -/* - * Minimum redzone size. Redzones may be larger than this if necessary to - * preserve region alignment. - */ -#define REDZONE_MINSIZE 16 - -/* - * The minimum ratio of active:dirty pages per arena is computed as: - * - * (nactive >> opt_lg_dirty_mult) >= ndirty - * - * So, supposing that opt_lg_dirty_mult is 3, there can be no less than 8 times - * as many active pages as dirty pages. - */ -#define LG_DIRTY_MULT_DEFAULT 3 - -typedef struct arena_chunk_map_s arena_chunk_map_t; -typedef struct arena_chunk_s arena_chunk_t; -typedef struct arena_run_s arena_run_t; -typedef struct arena_bin_info_s arena_bin_info_t; -typedef struct arena_bin_s arena_bin_t; -typedef struct arena_s arena_t; - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -/* Each element of the chunk map corresponds to one page within the chunk. */ -struct arena_chunk_map_s { -#ifndef JEMALLOC_PROF - /* - * Overlay prof_ctx in order to allow it to be referenced by dead code. - * Such antics aren't warranted for per arena data structures, but - * chunk map overhead accounts for a percentage of memory, rather than - * being just a fixed cost. - */ - union { -#endif - union { - /* - * Linkage for run trees. There are two disjoint uses: - * - * 1) arena_t's runs_avail tree. - * 2) arena_run_t conceptually uses this linkage for in-use - * non-full runs, rather than directly embedding linkage. - */ - rb_node(arena_chunk_map_t) rb_link; - /* - * List of runs currently in purgatory. 
arena_chunk_purge() - * temporarily allocates runs that contain dirty pages while - * purging, so that other threads cannot use the runs while the - * purging thread is operating without the arena lock held. - */ - ql_elm(arena_chunk_map_t) ql_link; - } u; - - /* Profile counters, used for large object runs. */ - prof_ctx_t *prof_ctx; -#ifndef JEMALLOC_PROF - }; /* union { ... }; */ -#endif - - /* - * Run address (or size) and various flags are stored together. The bit - * layout looks like (assuming 32-bit system): - * - * ???????? ???????? ????nnnn nnnndula - * - * ? : Unallocated: Run address for first/last pages, unset for internal - * pages. - * Small: Run page offset. - * Large: Run size for first page, unset for trailing pages. - * n : binind for small size class, BININD_INVALID for large size class. - * d : dirty? - * u : unzeroed? - * l : large? - * a : allocated? - * - * Following are example bit patterns for the three types of runs. - * - * p : run page offset - * s : run size - * n : binind for size class; large objects set these to BININD_INVALID - * except for promoted allocations (see prof_promote) - * x : don't care - * - : 0 - * + : 1 - * [DULA] : bit set - * [dula] : bit unset - * - * Unallocated (clean): - * ssssssss ssssssss ssss++++ ++++du-a - * xxxxxxxx xxxxxxxx xxxxxxxx xxxx-Uxx - * ssssssss ssssssss ssss++++ ++++dU-a - * - * Unallocated (dirty): - * ssssssss ssssssss ssss++++ ++++D--a - * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx - * ssssssss ssssssss ssss++++ ++++D--a - * - * Small: - * pppppppp pppppppp ppppnnnn nnnnd--A - * pppppppp pppppppp ppppnnnn nnnn---A - * pppppppp pppppppp ppppnnnn nnnnd--A - * - * Large: - * ssssssss ssssssss ssss++++ ++++D-LA - * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx - * -------- -------- ----++++ ++++D-LA - * - * Large (sampled, size <= PAGE): - * ssssssss ssssssss ssssnnnn nnnnD-LA - * - * Large (not sampled, size == PAGE): - * ssssssss ssssssss ssss++++ ++++D-LA - */ - size_t bits; -#define CHUNK_MAP_BININD_SHIFT 4 -#define BININD_INVALID ((size_t)0xffU) -/* CHUNK_MAP_BININD_MASK == (BININD_INVALID << CHUNK_MAP_BININD_SHIFT) */ -#define CHUNK_MAP_BININD_MASK ((size_t)0xff0U) -#define CHUNK_MAP_BININD_INVALID CHUNK_MAP_BININD_MASK -#define CHUNK_MAP_FLAGS_MASK ((size_t)0xcU) -#define CHUNK_MAP_DIRTY ((size_t)0x8U) -#define CHUNK_MAP_UNZEROED ((size_t)0x4U) -#define CHUNK_MAP_LARGE ((size_t)0x2U) -#define CHUNK_MAP_ALLOCATED ((size_t)0x1U) -#define CHUNK_MAP_KEY CHUNK_MAP_ALLOCATED -}; -typedef rb_tree(arena_chunk_map_t) arena_avail_tree_t; -typedef rb_tree(arena_chunk_map_t) arena_run_tree_t; -typedef ql_head(arena_chunk_map_t) arena_chunk_mapelms_t; - -/* Arena chunk header. */ -struct arena_chunk_s { - /* Arena that owns the chunk. */ - arena_t *arena; - - /* Linkage for tree of arena chunks that contain dirty runs. */ - rb_node(arena_chunk_t) dirty_link; - - /* Number of dirty pages. */ - size_t ndirty; - - /* Number of available runs. */ - size_t nruns_avail; - - /* - * Number of available run adjacencies that purging could coalesce. - * Clean and dirty available runs are not coalesced, which causes - * virtual memory fragmentation. The ratio of - * (nruns_avail-nruns_adjac):nruns_adjac is used for tracking this - * fragmentation. - */ - size_t nruns_adjac; - - /* - * Map of pages within chunk that keeps track of free/large/small. The - * first map_bias entries are omitted, since the chunk header does not - * need to be tracked in the map. This omission saves a header page - * for common chunk sizes (e.g. 4 MiB). 
- */ - arena_chunk_map_t map[1]; /* Dynamically sized. */ -}; -typedef rb_tree(arena_chunk_t) arena_chunk_tree_t; - -struct arena_run_s { - /* Bin this run is associated with. */ - arena_bin_t *bin; - - /* Index of next region that has never been allocated, or nregs. */ - uint32_t nextind; - - /* Number of free regions in run. */ - unsigned nfree; -}; - -/* - * Read-only information associated with each element of arena_t's bins array - * is stored separately, partly to reduce memory usage (only one copy, rather - * than one per arena), but mainly to avoid false cacheline sharing. - * - * Each run has the following layout: - * - * /--------------------\ - * | arena_run_t header | - * | ... | - * bitmap_offset | bitmap | - * | ... | - * ctx0_offset | ctx map | - * | ... | - * |--------------------| - * | redzone | - * reg0_offset | region 0 | - * | redzone | - * |--------------------| \ - * | redzone | | - * | region 1 | > reg_interval - * | redzone | / - * |--------------------| - * | ... | - * | ... | - * | ... | - * |--------------------| - * | redzone | - * | region nregs-1 | - * | redzone | - * |--------------------| - * | alignment pad? | - * \--------------------/ - * - * reg_interval has at least the same minimum alignment as reg_size; this - * preserves the alignment constraint that sa2u() depends on. Alignment pad is - * either 0 or redzone_size; it is present only if needed to align reg0_offset. - */ -struct arena_bin_info_s { - /* Size of regions in a run for this bin's size class. */ - size_t reg_size; - - /* Redzone size. */ - size_t redzone_size; - - /* Interval between regions (reg_size + (redzone_size << 1)). */ - size_t reg_interval; - - /* Total size of a run for this bin's size class. */ - size_t run_size; - - /* Total number of regions in a run for this bin's size class. */ - uint32_t nregs; - - /* - * Offset of first bitmap_t element in a run header for this bin's size - * class. - */ - uint32_t bitmap_offset; - - /* - * Metadata used to manipulate bitmaps for runs associated with this - * bin. - */ - bitmap_info_t bitmap_info; - - /* - * Offset of first (prof_ctx_t *) in a run header for this bin's size - * class, or 0 if (config_prof == false || opt_prof == false). - */ - uint32_t ctx0_offset; - - /* Offset of first region in a run for this bin's size class. */ - uint32_t reg0_offset; -}; - -struct arena_bin_s { - /* - * All operations on runcur, runs, and stats require that lock be - * locked. Run allocation/deallocation are protected by the arena lock, - * which may be acquired while holding one or more bin locks, but not - * vise versa. - */ - malloc_mutex_t lock; - - /* - * Current run being used to service allocations of this bin's size - * class. - */ - arena_run_t *runcur; - - /* - * Tree of non-full runs. This tree is used when looking for an - * existing run when runcur is no longer usable. We choose the - * non-full run that is lowest in memory; this policy tends to keep - * objects packed well, and it can also help reduce the number of - * almost-empty chunks. - */ - arena_run_tree_t runs; - - /* Bin statistics. */ - malloc_bin_stats_t stats; -}; - -struct arena_s { - /* This arena's index within the arenas array. */ - unsigned ind; - - /* - * Number of threads currently assigned to this arena. This field is - * protected by arenas_lock. - */ - unsigned nthreads; - - /* - * There are three classes of arena operations from a locking - * perspective: - * 1) Thread asssignment (modifies nthreads) is protected by - * arenas_lock. 
- * 2) Bin-related operations are protected by bin locks. - * 3) Chunk- and run-related operations are protected by this mutex. - */ - malloc_mutex_t lock; - - arena_stats_t stats; - /* - * List of tcaches for extant threads associated with this arena. - * Stats from these are merged incrementally, and at exit. - */ - ql_head(tcache_t) tcache_ql; - - uint64_t prof_accumbytes; - - dss_prec_t dss_prec; - - /* Tree of dirty-page-containing chunks this arena manages. */ - arena_chunk_tree_t chunks_dirty; - - /* - * In order to avoid rapid chunk allocation/deallocation when an arena - * oscillates right on the cusp of needing a new chunk, cache the most - * recently freed chunk. The spare is left in the arena's chunk trees - * until it is deleted. - * - * There is one spare chunk per arena, rather than one spare total, in - * order to avoid interactions between multiple threads that could make - * a single spare inadequate. - */ - arena_chunk_t *spare; - - /* Number of pages in active runs. */ - size_t nactive; - - /* - * Current count of pages within unused runs that are potentially - * dirty, and for which madvise(... MADV_DONTNEED) has not been called. - * By tracking this, we can institute a limit on how much dirty unused - * memory is mapped for each arena. - */ - size_t ndirty; - - /* - * Approximate number of pages being purged. It is possible for - * multiple threads to purge dirty pages concurrently, and they use - * npurgatory to indicate the total number of pages all threads are - * attempting to purge. - */ - size_t npurgatory; - - /* - * Size/address-ordered trees of this arena's available runs. The trees - * are used for first-best-fit run allocation. - */ - arena_avail_tree_t runs_avail; - - /* bins is used to store trees of free regions. */ - arena_bin_t bins[NBINS]; -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -extern ssize_t opt_lg_dirty_mult; -/* - * small_size2bin is a compact lookup table that rounds request sizes up to - * size classes. In order to reduce cache footprint, the table is compressed, - * and all accesses are via the SMALL_SIZE2BIN macro. - */ -extern uint8_t const small_size2bin[]; -#define SMALL_SIZE2BIN(s) (small_size2bin[(s-1) >> LG_TINY_MIN]) - -extern arena_bin_info_t arena_bin_info[NBINS]; - -/* Number of large size classes. 
*/ -#define nlclasses (chunk_npages - map_bias) - -void arena_purge_all(arena_t *arena); -void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, - size_t binind, uint64_t prof_accumbytes); -void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, - bool zero); -#ifdef JEMALLOC_JET -typedef void (arena_redzone_corruption_t)(void *, size_t, bool, size_t, - uint8_t); -extern arena_redzone_corruption_t *arena_redzone_corruption; -typedef void (arena_dalloc_junk_small_t)(void *, arena_bin_info_t *); -extern arena_dalloc_junk_small_t *arena_dalloc_junk_small; -#else -void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info); -#endif -void arena_quarantine_junk_small(void *ptr, size_t usize); -void *arena_malloc_small(arena_t *arena, size_t size, bool zero); -void *arena_malloc_large(arena_t *arena, size_t size, bool zero); -void *arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero); -void arena_prof_promoted(const void *ptr, size_t size); -void arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr, - arena_chunk_map_t *mapelm); -void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr, - size_t pageind, arena_chunk_map_t *mapelm); -void arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr, - size_t pageind); -#ifdef JEMALLOC_JET -typedef void (arena_dalloc_junk_large_t)(void *, size_t); -extern arena_dalloc_junk_large_t *arena_dalloc_junk_large; -#endif -void arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, - void *ptr); -void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr); -#ifdef JEMALLOC_JET -typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t); -extern arena_ralloc_junk_large_t *arena_ralloc_junk_large; -#endif -bool arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, - size_t extra, bool zero); -void *arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size, - size_t extra, size_t alignment, bool zero, bool try_tcache_alloc, - bool try_tcache_dalloc); -dss_prec_t arena_dss_prec_get(arena_t *arena); -void arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec); -void arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive, - size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats, - malloc_large_stats_t *lstats); -bool arena_new(arena_t *arena, unsigned ind); -void arena_boot(void); -void arena_prefork(arena_t *arena); -void arena_postfork_parent(arena_t *arena); -void arena_postfork_child(arena_t *arena); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -arena_chunk_map_t *arena_mapp_get(arena_chunk_t *chunk, size_t pageind); -size_t *arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind); -size_t arena_mapbitsp_read(size_t *mapbitsp); -size_t arena_mapbits_get(arena_chunk_t *chunk, size_t pageind); -size_t arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, - size_t pageind); -size_t arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind); -size_t arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind); -size_t arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind); -size_t arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind); -size_t arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind); -size_t arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind); -size_t arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t 
pageind); -void arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits); -void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, - size_t size, size_t flags); -void arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind, - size_t size); -void arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, - size_t size, size_t flags); -void arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind, - size_t binind); -void arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, - size_t runind, size_t binind, size_t flags); -void arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind, - size_t unzeroed); -bool arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes); -bool arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes); -bool arena_prof_accum(arena_t *arena, uint64_t accumbytes); -size_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits); -size_t arena_bin_index(arena_t *arena, arena_bin_t *bin); -unsigned arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, - const void *ptr); -prof_ctx_t *arena_prof_ctx_get(const void *ptr); -void arena_prof_ctx_set(const void *ptr, size_t usize, prof_ctx_t *ctx); -void *arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache); -size_t arena_salloc(const void *ptr, bool demote); -void arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr, - bool try_tcache); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_)) -# ifdef JEMALLOC_ARENA_INLINE_A -JEMALLOC_ALWAYS_INLINE arena_chunk_map_t * -arena_mapp_get(arena_chunk_t *chunk, size_t pageind) -{ - - assert(pageind >= map_bias); - assert(pageind < chunk_npages); - - return (&chunk->map[pageind-map_bias]); -} - -JEMALLOC_ALWAYS_INLINE size_t * -arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind) -{ - - return (&arena_mapp_get(chunk, pageind)->bits); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbitsp_read(size_t *mapbitsp) -{ - - return (*mapbitsp); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_get(arena_chunk_t *chunk, size_t pageind) -{ - - return (arena_mapbitsp_read(arena_mapbitsp_get(chunk, pageind))); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind) -{ - size_t mapbits; - - mapbits = arena_mapbits_get(chunk, pageind); - assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0); - return (mapbits & ~PAGE_MASK); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind) -{ - size_t mapbits; - - mapbits = arena_mapbits_get(chunk, pageind); - assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == - (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)); - return (mapbits & ~PAGE_MASK); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind) -{ - size_t mapbits; - - mapbits = arena_mapbits_get(chunk, pageind); - assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == - CHUNK_MAP_ALLOCATED); - return (mapbits >> LG_PAGE); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind) -{ - size_t mapbits; - size_t binind; - - mapbits = arena_mapbits_get(chunk, pageind); - binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT; - assert(binind < NBINS || binind == BININD_INVALID); - return (binind); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind) -{ - size_t mapbits; - - mapbits = arena_mapbits_get(chunk, pageind); - 
return (mapbits & CHUNK_MAP_DIRTY); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind) -{ - size_t mapbits; - - mapbits = arena_mapbits_get(chunk, pageind); - return (mapbits & CHUNK_MAP_UNZEROED); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind) -{ - size_t mapbits; - - mapbits = arena_mapbits_get(chunk, pageind); - return (mapbits & CHUNK_MAP_LARGE); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind) -{ - size_t mapbits; - - mapbits = arena_mapbits_get(chunk, pageind); - return (mapbits & CHUNK_MAP_ALLOCATED); -} - -JEMALLOC_ALWAYS_INLINE void -arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits) -{ - - *mapbitsp = mapbits; -} - -JEMALLOC_ALWAYS_INLINE void -arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size, - size_t flags) -{ - size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); - - assert((size & PAGE_MASK) == 0); - assert((flags & ~CHUNK_MAP_FLAGS_MASK) == 0); - assert((flags & (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == flags); - arena_mapbitsp_write(mapbitsp, size | CHUNK_MAP_BININD_INVALID | flags); -} - -JEMALLOC_ALWAYS_INLINE void -arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind, - size_t size) -{ - size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); - size_t mapbits = arena_mapbitsp_read(mapbitsp); - - assert((size & PAGE_MASK) == 0); - assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0); - arena_mapbitsp_write(mapbitsp, size | (mapbits & PAGE_MASK)); -} - -JEMALLOC_ALWAYS_INLINE void -arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size, - size_t flags) -{ - size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); - size_t mapbits = arena_mapbitsp_read(mapbitsp); - size_t unzeroed; - - assert((size & PAGE_MASK) == 0); - assert((flags & CHUNK_MAP_DIRTY) == flags); - unzeroed = mapbits & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */ - arena_mapbitsp_write(mapbitsp, size | CHUNK_MAP_BININD_INVALID | flags - | unzeroed | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED); -} - -JEMALLOC_ALWAYS_INLINE void -arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind, - size_t binind) -{ - size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); - size_t mapbits = arena_mapbitsp_read(mapbitsp); - - assert(binind <= BININD_INVALID); - assert(arena_mapbits_large_size_get(chunk, pageind) == PAGE); - arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_BININD_MASK) | - (binind << CHUNK_MAP_BININD_SHIFT)); -} - -JEMALLOC_ALWAYS_INLINE void -arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind, - size_t binind, size_t flags) -{ - size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); - size_t mapbits = arena_mapbitsp_read(mapbitsp); - size_t unzeroed; - - assert(binind < BININD_INVALID); - assert(pageind - runind >= map_bias); - assert((flags & CHUNK_MAP_DIRTY) == flags); - unzeroed = mapbits & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. 
*/ - arena_mapbitsp_write(mapbitsp, (runind << LG_PAGE) | (binind << - CHUNK_MAP_BININD_SHIFT) | flags | unzeroed | CHUNK_MAP_ALLOCATED); -} - -JEMALLOC_ALWAYS_INLINE void -arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind, - size_t unzeroed) -{ - size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); - size_t mapbits = arena_mapbitsp_read(mapbitsp); - - arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_UNZEROED) | - unzeroed); -} - -JEMALLOC_INLINE bool -arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes) -{ - - cassert(config_prof); - assert(prof_interval != 0); - - arena->prof_accumbytes += accumbytes; - if (arena->prof_accumbytes >= prof_interval) { - arena->prof_accumbytes -= prof_interval; - return (true); - } - return (false); -} - -JEMALLOC_INLINE bool -arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes) -{ - - cassert(config_prof); - - if (prof_interval == 0) - return (false); - return (arena_prof_accum_impl(arena, accumbytes)); -} - -JEMALLOC_INLINE bool -arena_prof_accum(arena_t *arena, uint64_t accumbytes) -{ - - cassert(config_prof); - - if (prof_interval == 0) - return (false); - - { - bool ret; - - malloc_mutex_lock(&arena->lock); - ret = arena_prof_accum_impl(arena, accumbytes); - malloc_mutex_unlock(&arena->lock); - return (ret); - } -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_ptr_small_binind_get(const void *ptr, size_t mapbits) -{ - size_t binind; - - binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT; - - if (config_debug) { - arena_chunk_t *chunk; - arena_t *arena; - size_t pageind; - size_t actual_mapbits; - arena_run_t *run; - arena_bin_t *bin; - size_t actual_binind; - arena_bin_info_t *bin_info; - - assert(binind != BININD_INVALID); - assert(binind < NBINS); - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - arena = chunk->arena; - pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - actual_mapbits = arena_mapbits_get(chunk, pageind); - assert(mapbits == actual_mapbits); - assert(arena_mapbits_large_get(chunk, pageind) == 0); - assert(arena_mapbits_allocated_get(chunk, pageind) != 0); - run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind - - (actual_mapbits >> LG_PAGE)) << LG_PAGE)); - bin = run->bin; - actual_binind = bin - arena->bins; - assert(binind == actual_binind); - bin_info = &arena_bin_info[actual_binind]; - assert(((uintptr_t)ptr - ((uintptr_t)run + - (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_interval - == 0); - } - - return (binind); -} -# endif /* JEMALLOC_ARENA_INLINE_A */ - -# ifdef JEMALLOC_ARENA_INLINE_B -JEMALLOC_INLINE size_t -arena_bin_index(arena_t *arena, arena_bin_t *bin) -{ - size_t binind = bin - arena->bins; - assert(binind < NBINS); - return (binind); -} - -JEMALLOC_INLINE unsigned -arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr) -{ - unsigned shift, diff, regind; - size_t interval; - - /* - * Freeing a pointer lower than region zero can cause assertion - * failure. - */ - assert((uintptr_t)ptr >= (uintptr_t)run + - (uintptr_t)bin_info->reg0_offset); - - /* - * Avoid doing division with a variable divisor if possible. Using - * actual division here can reduce allocator throughput by over 20%! - */ - diff = (unsigned)((uintptr_t)ptr - (uintptr_t)run - - bin_info->reg0_offset); - - /* Rescale (factor powers of 2 out of the numerator and denominator). */ - interval = bin_info->reg_interval; - shift = ffs(interval) - 1; - diff >>= shift; - interval >>= shift; - - if (interval == 1) { - /* The divisor was a power of 2. 
*/ - regind = diff; - } else { - /* - * To divide by a number D that is not a power of two we - * multiply by (2^21 / D) and then right shift by 21 positions. - * - * X / D - * - * becomes - * - * (X * interval_invs[D - 3]) >> SIZE_INV_SHIFT - * - * We can omit the first three elements, because we never - * divide by 0, and 1 and 2 are both powers of two, which are - * handled above. - */ -#define SIZE_INV_SHIFT ((sizeof(unsigned) << 3) - LG_RUN_MAXREGS) -#define SIZE_INV(s) (((1U << SIZE_INV_SHIFT) / (s)) + 1) - static const unsigned interval_invs[] = { - SIZE_INV(3), - SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7), - SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11), - SIZE_INV(12), SIZE_INV(13), SIZE_INV(14), SIZE_INV(15), - SIZE_INV(16), SIZE_INV(17), SIZE_INV(18), SIZE_INV(19), - SIZE_INV(20), SIZE_INV(21), SIZE_INV(22), SIZE_INV(23), - SIZE_INV(24), SIZE_INV(25), SIZE_INV(26), SIZE_INV(27), - SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31) - }; - - if (interval <= ((sizeof(interval_invs) / sizeof(unsigned)) + - 2)) { - regind = (diff * interval_invs[interval - 3]) >> - SIZE_INV_SHIFT; - } else - regind = diff / interval; -#undef SIZE_INV -#undef SIZE_INV_SHIFT - } - assert(diff == regind * interval); - assert(regind < bin_info->nregs); - - return (regind); -} - -JEMALLOC_INLINE prof_ctx_t * -arena_prof_ctx_get(const void *ptr) -{ - prof_ctx_t *ret; - arena_chunk_t *chunk; - size_t pageind, mapbits; - - cassert(config_prof); - assert(ptr != NULL); - assert(CHUNK_ADDR2BASE(ptr) != ptr); - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - mapbits = arena_mapbits_get(chunk, pageind); - assert((mapbits & CHUNK_MAP_ALLOCATED) != 0); - if ((mapbits & CHUNK_MAP_LARGE) == 0) { - if (prof_promote) - ret = (prof_ctx_t *)(uintptr_t)1U; - else { - arena_run_t *run = (arena_run_t *)((uintptr_t)chunk + - (uintptr_t)((pageind - (mapbits >> LG_PAGE)) << - LG_PAGE)); - size_t binind = arena_ptr_small_binind_get(ptr, - mapbits); - arena_bin_info_t *bin_info = &arena_bin_info[binind]; - unsigned regind; - - regind = arena_run_regind(run, bin_info, ptr); - ret = *(prof_ctx_t **)((uintptr_t)run + - bin_info->ctx0_offset + (regind * - sizeof(prof_ctx_t *))); - } - } else - ret = arena_mapp_get(chunk, pageind)->prof_ctx; - - return (ret); -} - -JEMALLOC_INLINE void -arena_prof_ctx_set(const void *ptr, size_t usize, prof_ctx_t *ctx) -{ - arena_chunk_t *chunk; - size_t pageind; - - cassert(config_prof); - assert(ptr != NULL); - assert(CHUNK_ADDR2BASE(ptr) != ptr); - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - assert(arena_mapbits_allocated_get(chunk, pageind) != 0); - - if (usize > SMALL_MAXCLASS || (prof_promote && - ((uintptr_t)ctx != (uintptr_t)1U || arena_mapbits_large_get(chunk, - pageind) != 0))) { - assert(arena_mapbits_large_get(chunk, pageind) != 0); - arena_mapp_get(chunk, pageind)->prof_ctx = ctx; - } else { - assert(arena_mapbits_large_get(chunk, pageind) == 0); - if (prof_promote == false) { - size_t mapbits = arena_mapbits_get(chunk, pageind); - arena_run_t *run = (arena_run_t *)((uintptr_t)chunk + - (uintptr_t)((pageind - (mapbits >> LG_PAGE)) << - LG_PAGE)); - size_t binind; - arena_bin_info_t *bin_info; - unsigned regind; - - binind = arena_ptr_small_binind_get(ptr, mapbits); - bin_info = &arena_bin_info[binind]; - regind = arena_run_regind(run, bin_info, ptr); - - *((prof_ctx_t **)((uintptr_t)run + - bin_info->ctx0_offset + (regind * sizeof(prof_ctx_t 
- *)))) = ctx; - } - } -} - -JEMALLOC_ALWAYS_INLINE void * -arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache) -{ - tcache_t *tcache; - - assert(size != 0); - assert(size <= arena_maxclass); - - if (size <= SMALL_MAXCLASS) { - if (try_tcache && (tcache = tcache_get(true)) != NULL) - return (tcache_alloc_small(tcache, size, zero)); - else { - return (arena_malloc_small(choose_arena(arena), size, - zero)); - } - } else { - /* - * Initialize tcache after checking size in order to avoid - * infinite recursion during tcache initialization. - */ - if (try_tcache && size <= tcache_maxclass && (tcache = - tcache_get(true)) != NULL) - return (tcache_alloc_large(tcache, size, zero)); - else { - return (arena_malloc_large(choose_arena(arena), size, - zero)); - } - } -} - -/* Return the size of the allocation pointed to by ptr. */ -JEMALLOC_ALWAYS_INLINE size_t -arena_salloc(const void *ptr, bool demote) -{ - size_t ret; - arena_chunk_t *chunk; - size_t pageind, binind; - - assert(ptr != NULL); - assert(CHUNK_ADDR2BASE(ptr) != ptr); - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - assert(arena_mapbits_allocated_get(chunk, pageind) != 0); - binind = arena_mapbits_binind_get(chunk, pageind); - if (binind == BININD_INVALID || (config_prof && demote == false && - prof_promote && arena_mapbits_large_get(chunk, pageind) != 0)) { - /* - * Large allocation. In the common case (demote == true), and - * as this is an inline function, most callers will only end up - * looking at binind to determine that ptr is a small - * allocation. - */ - assert(((uintptr_t)ptr & PAGE_MASK) == 0); - ret = arena_mapbits_large_size_get(chunk, pageind); - assert(ret != 0); - assert(pageind + (ret>>LG_PAGE) <= chunk_npages); - assert(ret == PAGE || arena_mapbits_large_size_get(chunk, - pageind+(ret>>LG_PAGE)-1) == 0); - assert(binind == arena_mapbits_binind_get(chunk, - pageind+(ret>>LG_PAGE)-1)); - assert(arena_mapbits_dirty_get(chunk, pageind) == - arena_mapbits_dirty_get(chunk, pageind+(ret>>LG_PAGE)-1)); - } else { - /* - * Small allocation (possibly promoted to a large object due to - * prof_promote). - */ - assert(arena_mapbits_large_get(chunk, pageind) != 0 || - arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk, - pageind)) == binind); - ret = arena_bin_info[binind].reg_size; - } - - return (ret); -} - -JEMALLOC_ALWAYS_INLINE void -arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr, bool try_tcache) -{ - size_t pageind, mapbits; - tcache_t *tcache; - - assert(arena != NULL); - assert(chunk->arena == arena); - assert(ptr != NULL); - assert(CHUNK_ADDR2BASE(ptr) != ptr); - - pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - mapbits = arena_mapbits_get(chunk, pageind); - assert(arena_mapbits_allocated_get(chunk, pageind) != 0); - if ((mapbits & CHUNK_MAP_LARGE) == 0) { - /* Small allocation. 
*/ - if (try_tcache && (tcache = tcache_get(false)) != NULL) { - size_t binind; - - binind = arena_ptr_small_binind_get(ptr, mapbits); - tcache_dalloc_small(tcache, ptr, binind); - } else - arena_dalloc_small(arena, chunk, ptr, pageind); - } else { - size_t size = arena_mapbits_large_size_get(chunk, pageind); - - assert(((uintptr_t)ptr & PAGE_MASK) == 0); - - if (try_tcache && size <= tcache_maxclass && (tcache = - tcache_get(false)) != NULL) { - tcache_dalloc_large(tcache, ptr, size); - } else - arena_dalloc_large(arena, chunk, ptr); - } -} -# endif /* JEMALLOC_ARENA_INLINE_B */ -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/lib/jemalloc-3.6.0/include/jemalloc/internal/atomic.h b/lib/jemalloc-3.6.0/include/jemalloc/internal/atomic.h deleted file mode 100644 index 11a7b47..0000000 --- a/lib/jemalloc-3.6.0/include/jemalloc/internal/atomic.h +++ /dev/null @@ -1,304 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -#define atomic_read_uint64(p) atomic_add_uint64(p, 0) -#define atomic_read_uint32(p) atomic_add_uint32(p, 0) -#define atomic_read_z(p) atomic_add_z(p, 0) -#define atomic_read_u(p) atomic_add_u(p, 0) - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -uint64_t atomic_add_uint64(uint64_t *p, uint64_t x); -uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x); -uint32_t atomic_add_uint32(uint32_t *p, uint32_t x); -uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x); -size_t atomic_add_z(size_t *p, size_t x); -size_t atomic_sub_z(size_t *p, size_t x); -unsigned atomic_add_u(unsigned *p, unsigned x); -unsigned atomic_sub_u(unsigned *p, unsigned x); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ATOMIC_C_)) -/******************************************************************************/ -/* 64-bit operations. */ -#if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3) -# ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 -JEMALLOC_INLINE uint64_t -atomic_add_uint64(uint64_t *p, uint64_t x) -{ - - return (__sync_add_and_fetch(p, x)); -} - -JEMALLOC_INLINE uint64_t -atomic_sub_uint64(uint64_t *p, uint64_t x) -{ - - return (__sync_sub_and_fetch(p, x)); -} -#elif (defined(_MSC_VER)) -JEMALLOC_INLINE uint64_t -atomic_add_uint64(uint64_t *p, uint64_t x) -{ - - return (InterlockedExchangeAdd64(p, x)); -} - -JEMALLOC_INLINE uint64_t -atomic_sub_uint64(uint64_t *p, uint64_t x) -{ - - return (InterlockedExchangeAdd64(p, -((int64_t)x))); -} -#elif (defined(JEMALLOC_OSATOMIC)) -JEMALLOC_INLINE uint64_t -atomic_add_uint64(uint64_t *p, uint64_t x) -{ - - return (OSAtomicAdd64((int64_t)x, (int64_t *)p)); -} - -JEMALLOC_INLINE uint64_t -atomic_sub_uint64(uint64_t *p, uint64_t x) -{ - - return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p)); -} -# elif (defined(__amd64__) || defined(__x86_64__)) -JEMALLOC_INLINE uint64_t -atomic_add_uint64(uint64_t *p, uint64_t x) -{ - - asm volatile ( - "lock; xaddq %0, %1;" - : "+r" (x), "=m" (*p) /* Outputs. */ - : "m" (*p) /* Inputs. 
*/ - ); - - return (x); -} - -JEMALLOC_INLINE uint64_t -atomic_sub_uint64(uint64_t *p, uint64_t x) -{ - - x = (uint64_t)(-(int64_t)x); - asm volatile ( - "lock; xaddq %0, %1;" - : "+r" (x), "=m" (*p) /* Outputs. */ - : "m" (*p) /* Inputs. */ - ); - - return (x); -} -# elif (defined(JEMALLOC_ATOMIC9)) -JEMALLOC_INLINE uint64_t -atomic_add_uint64(uint64_t *p, uint64_t x) -{ - - /* - * atomic_fetchadd_64() doesn't exist, but we only ever use this - * function on LP64 systems, so atomic_fetchadd_long() will do. - */ - assert(sizeof(uint64_t) == sizeof(unsigned long)); - - return (atomic_fetchadd_long(p, (unsigned long)x) + x); -} - -JEMALLOC_INLINE uint64_t -atomic_sub_uint64(uint64_t *p, uint64_t x) -{ - - assert(sizeof(uint64_t) == sizeof(unsigned long)); - - return (atomic_fetchadd_long(p, (unsigned long)(-(long)x)) - x); -} -# elif (defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8)) -JEMALLOC_INLINE uint64_t -atomic_add_uint64(uint64_t *p, uint64_t x) -{ - - return (__sync_add_and_fetch(p, x)); -} - -JEMALLOC_INLINE uint64_t -atomic_sub_uint64(uint64_t *p, uint64_t x) -{ - - return (__sync_sub_and_fetch(p, x)); -} -# else -# error "Missing implementation for 64-bit atomic operations" -# endif -#endif - -/******************************************************************************/ -/* 32-bit operations. */ -#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 -JEMALLOC_INLINE uint32_t -atomic_add_uint32(uint32_t *p, uint32_t x) -{ - - return (__sync_add_and_fetch(p, x)); -} - -JEMALLOC_INLINE uint32_t -atomic_sub_uint32(uint32_t *p, uint32_t x) -{ - - return (__sync_sub_and_fetch(p, x)); -} -#elif (defined(_MSC_VER)) -JEMALLOC_INLINE uint32_t -atomic_add_uint32(uint32_t *p, uint32_t x) -{ - - return (InterlockedExchangeAdd(p, x)); -} - -JEMALLOC_INLINE uint32_t -atomic_sub_uint32(uint32_t *p, uint32_t x) -{ - - return (InterlockedExchangeAdd(p, -((int32_t)x))); -} -#elif (defined(JEMALLOC_OSATOMIC)) -JEMALLOC_INLINE uint32_t -atomic_add_uint32(uint32_t *p, uint32_t x) -{ - - return (OSAtomicAdd32((int32_t)x, (int32_t *)p)); -} - -JEMALLOC_INLINE uint32_t -atomic_sub_uint32(uint32_t *p, uint32_t x) -{ - - return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p)); -} -#elif (defined(__i386__) || defined(__amd64__) || defined(__x86_64__)) -JEMALLOC_INLINE uint32_t -atomic_add_uint32(uint32_t *p, uint32_t x) -{ - - asm volatile ( - "lock; xaddl %0, %1;" - : "+r" (x), "=m" (*p) /* Outputs. */ - : "m" (*p) /* Inputs. */ - ); - - return (x); -} - -JEMALLOC_INLINE uint32_t -atomic_sub_uint32(uint32_t *p, uint32_t x) -{ - - x = (uint32_t)(-(int32_t)x); - asm volatile ( - "lock; xaddl %0, %1;" - : "+r" (x), "=m" (*p) /* Outputs. */ - : "m" (*p) /* Inputs. */ - ); - - return (x); -} -#elif (defined(JEMALLOC_ATOMIC9)) -JEMALLOC_INLINE uint32_t -atomic_add_uint32(uint32_t *p, uint32_t x) -{ - - return (atomic_fetchadd_32(p, x) + x); -} - -JEMALLOC_INLINE uint32_t -atomic_sub_uint32(uint32_t *p, uint32_t x) -{ - - return (atomic_fetchadd_32(p, (uint32_t)(-(int32_t)x)) - x); -} -#elif (defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4)) -JEMALLOC_INLINE uint32_t -atomic_add_uint32(uint32_t *p, uint32_t x) -{ - - return (__sync_add_and_fetch(p, x)); -} - -JEMALLOC_INLINE uint32_t -atomic_sub_uint32(uint32_t *p, uint32_t x) -{ - - return (__sync_sub_and_fetch(p, x)); -} -#else -# error "Missing implementation for 32-bit atomic operations" -#endif - -/******************************************************************************/ -/* size_t operations. 
*/ -JEMALLOC_INLINE size_t -atomic_add_z(size_t *p, size_t x) -{ - -#if (LG_SIZEOF_PTR == 3) - return ((size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)x)); -#elif (LG_SIZEOF_PTR == 2) - return ((size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)x)); -#endif -} - -JEMALLOC_INLINE size_t -atomic_sub_z(size_t *p, size_t x) -{ - -#if (LG_SIZEOF_PTR == 3) - return ((size_t)atomic_add_uint64((uint64_t *)p, - (uint64_t)-((int64_t)x))); -#elif (LG_SIZEOF_PTR == 2) - return ((size_t)atomic_add_uint32((uint32_t *)p, - (uint32_t)-((int32_t)x))); -#endif -} - -/******************************************************************************/ -/* unsigned operations. */ -JEMALLOC_INLINE unsigned -atomic_add_u(unsigned *p, unsigned x) -{ - -#if (LG_SIZEOF_INT == 3) - return ((unsigned)atomic_add_uint64((uint64_t *)p, (uint64_t)x)); -#elif (LG_SIZEOF_INT == 2) - return ((unsigned)atomic_add_uint32((uint32_t *)p, (uint32_t)x)); -#endif -} - -JEMALLOC_INLINE unsigned -atomic_sub_u(unsigned *p, unsigned x) -{ - -#if (LG_SIZEOF_INT == 3) - return ((unsigned)atomic_add_uint64((uint64_t *)p, - (uint64_t)-((int64_t)x))); -#elif (LG_SIZEOF_INT == 2) - return ((unsigned)atomic_add_uint32((uint32_t *)p, - (uint32_t)-((int32_t)x))); -#endif -} -/******************************************************************************/ -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/lib/jemalloc-3.6.0/include/jemalloc/internal/base.h b/lib/jemalloc-3.6.0/include/jemalloc/internal/base.h deleted file mode 100644 index 9cf75ff..0000000 --- a/lib/jemalloc-3.6.0/include/jemalloc/internal/base.h +++ /dev/null @@ -1,26 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -void *base_alloc(size_t size); -void *base_calloc(size_t number, size_t size); -extent_node_t *base_node_alloc(void); -void base_node_dealloc(extent_node_t *node); -bool base_boot(void); -void base_prefork(void); -void base_postfork_parent(void); -void base_postfork_child(void); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/lib/jemalloc-3.6.0/include/jemalloc/internal/bitmap.h b/lib/jemalloc-3.6.0/include/jemalloc/internal/bitmap.h deleted file mode 100644 index 605ebac..0000000 --- a/lib/jemalloc-3.6.0/include/jemalloc/internal/bitmap.h +++ /dev/null @@ -1,184 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */ -#define LG_BITMAP_MAXBITS LG_RUN_MAXREGS - -typedef struct bitmap_level_s bitmap_level_t; -typedef struct bitmap_info_s bitmap_info_t; -typedef unsigned long bitmap_t; -#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG - -/* Number of bits per group. */ -#define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3) -#define BITMAP_GROUP_NBITS (ZU(1) << LG_BITMAP_GROUP_NBITS) -#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1) - -/* Maximum number of levels possible. 
*/ -#define BITMAP_MAX_LEVELS \ - (LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \ - + !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP) - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -struct bitmap_level_s { - /* Offset of this level's groups within the array of groups. */ - size_t group_offset; -}; - -struct bitmap_info_s { - /* Logical number of bits in bitmap (stored at bottom level). */ - size_t nbits; - - /* Number of levels necessary for nbits. */ - unsigned nlevels; - - /* - * Only the first (nlevels+1) elements are used, and levels are ordered - * bottom to top (e.g. the bottom level is stored in levels[0]). - */ - bitmap_level_t levels[BITMAP_MAX_LEVELS+1]; -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -void bitmap_info_init(bitmap_info_t *binfo, size_t nbits); -size_t bitmap_info_ngroups(const bitmap_info_t *binfo); -size_t bitmap_size(size_t nbits); -void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -bool bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo); -bool bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit); -void bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit); -size_t bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo); -void bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_BITMAP_C_)) -JEMALLOC_INLINE bool -bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) -{ - unsigned rgoff = binfo->levels[binfo->nlevels].group_offset - 1; - bitmap_t rg = bitmap[rgoff]; - /* The bitmap is full iff the root group is 0. */ - return (rg == 0); -} - -JEMALLOC_INLINE bool -bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) -{ - size_t goff; - bitmap_t g; - - assert(bit < binfo->nbits); - goff = bit >> LG_BITMAP_GROUP_NBITS; - g = bitmap[goff]; - return (!(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)))); -} - -JEMALLOC_INLINE void -bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) -{ - size_t goff; - bitmap_t *gp; - bitmap_t g; - - assert(bit < binfo->nbits); - assert(bitmap_get(bitmap, binfo, bit) == false); - goff = bit >> LG_BITMAP_GROUP_NBITS; - gp = &bitmap[goff]; - g = *gp; - assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))); - g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK); - *gp = g; - assert(bitmap_get(bitmap, binfo, bit)); - /* Propagate group state transitions up the tree. */ - if (g == 0) { - unsigned i; - for (i = 1; i < binfo->nlevels; i++) { - bit = goff; - goff = bit >> LG_BITMAP_GROUP_NBITS; - gp = &bitmap[binfo->levels[i].group_offset + goff]; - g = *gp; - assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))); - g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK); - *gp = g; - if (g != 0) - break; - } - } -} - -/* sfu: set first unset. 
*/ -JEMALLOC_INLINE size_t -bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) -{ - size_t bit; - bitmap_t g; - unsigned i; - - assert(bitmap_full(bitmap, binfo) == false); - - i = binfo->nlevels - 1; - g = bitmap[binfo->levels[i].group_offset]; - bit = ffsl(g) - 1; - while (i > 0) { - i--; - g = bitmap[binfo->levels[i].group_offset + bit]; - bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffsl(g) - 1); - } - - bitmap_set(bitmap, binfo, bit); - return (bit); -} - -JEMALLOC_INLINE void -bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) -{ - size_t goff; - bitmap_t *gp; - bitmap_t g; - bool propagate; - - assert(bit < binfo->nbits); - assert(bitmap_get(bitmap, binfo, bit)); - goff = bit >> LG_BITMAP_GROUP_NBITS; - gp = &bitmap[goff]; - g = *gp; - propagate = (g == 0); - assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))) == 0); - g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK); - *gp = g; - assert(bitmap_get(bitmap, binfo, bit) == false); - /* Propagate group state transitions up the tree. */ - if (propagate) { - unsigned i; - for (i = 1; i < binfo->nlevels; i++) { - bit = goff; - goff = bit >> LG_BITMAP_GROUP_NBITS; - gp = &bitmap[binfo->levels[i].group_offset + goff]; - g = *gp; - propagate = (g == 0); - assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))) - == 0); - g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK); - *gp = g; - if (propagate == false) - break; - } - } -} - -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/lib/jemalloc-3.6.0/include/jemalloc/internal/chunk.h b/lib/jemalloc-3.6.0/include/jemalloc/internal/chunk.h deleted file mode 100644 index 87d8700..0000000 --- a/lib/jemalloc-3.6.0/include/jemalloc/internal/chunk.h +++ /dev/null @@ -1,63 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -/* - * Size and alignment of memory chunks that are allocated by the OS's virtual - * memory system. - */ -#define LG_CHUNK_DEFAULT 22 - -/* Return the chunk address for allocation address a. */ -#define CHUNK_ADDR2BASE(a) \ - ((void *)((uintptr_t)(a) & ~chunksize_mask)) - -/* Return the chunk offset of address a. */ -#define CHUNK_ADDR2OFFSET(a) \ - ((size_t)((uintptr_t)(a) & chunksize_mask)) - -/* Return the smallest chunk multiple that is >= s. */ -#define CHUNK_CEILING(s) \ - (((s) + chunksize_mask) & ~chunksize_mask) - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -extern size_t opt_lg_chunk; -extern const char *opt_dss; - -/* Protects stats_chunks; currently not used for any other purpose. */ -extern malloc_mutex_t chunks_mtx; -/* Chunk statistics. */ -extern chunk_stats_t stats_chunks; - -extern rtree_t *chunks_rtree; - -extern size_t chunksize; -extern size_t chunksize_mask; /* (chunksize - 1). */ -extern size_t chunk_npages; -extern size_t map_bias; /* Number of arena chunk header pages. */ -extern size_t arena_maxclass; /* Max size class for arenas. 
*/ - -void *chunk_alloc(size_t size, size_t alignment, bool base, bool *zero, - dss_prec_t dss_prec); -void chunk_unmap(void *chunk, size_t size); -void chunk_dealloc(void *chunk, size_t size, bool unmap); -bool chunk_boot(void); -void chunk_prefork(void); -void chunk_postfork_parent(void); -void chunk_postfork_child(void); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ - -#include "jemalloc/internal/chunk_dss.h" -#include "jemalloc/internal/chunk_mmap.h" diff --git a/lib/jemalloc-3.6.0/include/jemalloc/internal/chunk_dss.h b/lib/jemalloc-3.6.0/include/jemalloc/internal/chunk_dss.h deleted file mode 100644 index 4535ce0..0000000 --- a/lib/jemalloc-3.6.0/include/jemalloc/internal/chunk_dss.h +++ /dev/null @@ -1,38 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef enum { - dss_prec_disabled = 0, - dss_prec_primary = 1, - dss_prec_secondary = 2, - - dss_prec_limit = 3 -} dss_prec_t; -#define DSS_PREC_DEFAULT dss_prec_secondary -#define DSS_DEFAULT "secondary" - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -extern const char *dss_prec_names[]; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -dss_prec_t chunk_dss_prec_get(void); -bool chunk_dss_prec_set(dss_prec_t dss_prec); -void *chunk_alloc_dss(size_t size, size_t alignment, bool *zero); -bool chunk_in_dss(void *chunk); -bool chunk_dss_boot(void); -void chunk_dss_prefork(void); -void chunk_dss_postfork_parent(void); -void chunk_dss_postfork_child(void); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/lib/jemalloc-3.6.0/include/jemalloc/internal/chunk_mmap.h b/lib/jemalloc-3.6.0/include/jemalloc/internal/chunk_mmap.h deleted file mode 100644 index f24abac..0000000 --- a/lib/jemalloc-3.6.0/include/jemalloc/internal/chunk_mmap.h +++ /dev/null @@ -1,22 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -bool pages_purge(void *addr, size_t length); - -void *chunk_alloc_mmap(size_t size, size_t alignment, bool *zero); -bool chunk_dealloc_mmap(void *chunk, size_t size); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/lib/jemalloc-3.6.0/include/jemalloc/internal/ckh.h b/lib/jemalloc-3.6.0/include/jemalloc/internal/ckh.h deleted file mode 100644 index 58712a6..0000000 --- a/lib/jemalloc-3.6.0/include/jemalloc/internal/ckh.h +++ /dev/null @@ -1,88 +0,0 @@ 
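The chunk.h header removed above keys most pointer-to-metadata lookups off mask arithmetic: chunks are naturally aligned to chunksize, so masking a pointer yields its owning chunk base and the low bits give the offset within it. A minimal standalone sketch of that arithmetic, assuming the default 4 MiB chunk size (LG_CHUNK_DEFAULT == 22) in place of the runtime chunksize/chunksize_mask variables; the macro names mirror the deleted header, while CHUNKSIZE, CHUNKSIZE_MASK, the sample pointer value, and main() are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Fixed 4 MiB chunks for illustration; jemalloc derives this at boot. */
#define LG_CHUNK        22
#define CHUNKSIZE       ((size_t)1 << LG_CHUNK)
#define CHUNKSIZE_MASK  (CHUNKSIZE - 1)

/* Same masking as the deleted CHUNK_ADDR2BASE()/CHUNK_ADDR2OFFSET(). */
#define CHUNK_ADDR2BASE(a)   ((void *)((uintptr_t)(a) & ~(uintptr_t)CHUNKSIZE_MASK))
#define CHUNK_ADDR2OFFSET(a) ((size_t)((uintptr_t)(a) & CHUNKSIZE_MASK))

int
main(void)
{
	uintptr_t p = (uintptr_t)0x7f1234567890u;

	/* Prints base 0x7f1234400000 and offset 0x167890. */
	printf("base = %p, offset = 0x%zx\n",
	    CHUNK_ADDR2BASE(p), CHUNK_ADDR2OFFSET(p));
	return (0);
}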
-/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct ckh_s ckh_t; -typedef struct ckhc_s ckhc_t; - -/* Typedefs to allow easy function pointer passing. */ -typedef void ckh_hash_t (const void *, size_t[2]); -typedef bool ckh_keycomp_t (const void *, const void *); - -/* Maintain counters used to get an idea of performance. */ -/* #define CKH_COUNT */ -/* Print counter values in ckh_delete() (requires CKH_COUNT). */ -/* #define CKH_VERBOSE */ - -/* - * There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket. Try to fit - * one bucket per L1 cache line. - */ -#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1) - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -/* Hash table cell. */ -struct ckhc_s { - const void *key; - const void *data; -}; - -struct ckh_s { -#ifdef CKH_COUNT - /* Counters used to get an idea of performance. */ - uint64_t ngrows; - uint64_t nshrinks; - uint64_t nshrinkfails; - uint64_t ninserts; - uint64_t nrelocs; -#endif - - /* Used for pseudo-random number generation. */ -#define CKH_A 1103515241 -#define CKH_C 12347 - uint32_t prng_state; - - /* Total number of items. */ - size_t count; - - /* - * Minimum and current number of hash table buckets. There are - * 2^LG_CKH_BUCKET_CELLS cells per bucket. - */ - unsigned lg_minbuckets; - unsigned lg_curbuckets; - - /* Hash and comparison functions. */ - ckh_hash_t *hash; - ckh_keycomp_t *keycomp; - - /* Hash table with 2^lg_curbuckets buckets. */ - ckhc_t *tab; -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -bool ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, - ckh_keycomp_t *keycomp); -void ckh_delete(ckh_t *ckh); -size_t ckh_count(ckh_t *ckh); -bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data); -bool ckh_insert(ckh_t *ckh, const void *key, const void *data); -bool ckh_remove(ckh_t *ckh, const void *searchkey, void **key, - void **data); -bool ckh_search(ckh_t *ckh, const void *seachkey, void **key, void **data); -void ckh_string_hash(const void *key, size_t r_hash[2]); -bool ckh_string_keycomp(const void *k1, const void *k2); -void ckh_pointer_hash(const void *key, size_t r_hash[2]); -bool ckh_pointer_keycomp(const void *k1, const void *k2); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/lib/jemalloc-3.6.0/include/jemalloc/internal/ctl.h b/lib/jemalloc-3.6.0/include/jemalloc/internal/ctl.h deleted file mode 100644 index 0ffecc5..0000000 --- a/lib/jemalloc-3.6.0/include/jemalloc/internal/ctl.h +++ /dev/null @@ -1,117 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct ctl_node_s ctl_node_t; -typedef struct ctl_named_node_s ctl_named_node_t; -typedef struct ctl_indexed_node_s ctl_indexed_node_t; -typedef struct ctl_arena_stats_s ctl_arena_stats_t; -typedef struct ctl_stats_s ctl_stats_t; - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -struct ctl_node_s { - bool named; -}; - -struct ctl_named_node_s { - struct ctl_node_s 
node; - const char *name; - /* If (nchildren == 0), this is a terminal node. */ - unsigned nchildren; - const ctl_node_t *children; - int (*ctl)(const size_t *, size_t, void *, size_t *, - void *, size_t); -}; - -struct ctl_indexed_node_s { - struct ctl_node_s node; - const ctl_named_node_t *(*index)(const size_t *, size_t, size_t); -}; - -struct ctl_arena_stats_s { - bool initialized; - unsigned nthreads; - const char *dss; - size_t pactive; - size_t pdirty; - arena_stats_t astats; - - /* Aggregate stats for small size classes, based on bin stats. */ - size_t allocated_small; - uint64_t nmalloc_small; - uint64_t ndalloc_small; - uint64_t nrequests_small; - - malloc_bin_stats_t bstats[NBINS]; - malloc_large_stats_t *lstats; /* nlclasses elements. */ -}; - -struct ctl_stats_s { - size_t allocated; - size_t active; - size_t mapped; - struct { - size_t current; /* stats_chunks.curchunks */ - uint64_t total; /* stats_chunks.nchunks */ - size_t high; /* stats_chunks.highchunks */ - } chunks; - struct { - size_t allocated; /* huge_allocated */ - uint64_t nmalloc; /* huge_nmalloc */ - uint64_t ndalloc; /* huge_ndalloc */ - } huge; - unsigned narenas; - ctl_arena_stats_t *arenas; /* (narenas + 1) elements. */ -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -int ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp, - size_t newlen); -int ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp); - -int ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen); -bool ctl_boot(void); -void ctl_prefork(void); -void ctl_postfork_parent(void); -void ctl_postfork_child(void); - -#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \ - if (je_mallctl(name, oldp, oldlenp, newp, newlen) \ - != 0) { \ - malloc_printf( \ - ": Failure in xmallctl(\"%s\", ...)\n", \ - name); \ - abort(); \ - } \ -} while (0) - -#define xmallctlnametomib(name, mibp, miblenp) do { \ - if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \ - malloc_printf(": Failure in " \ - "xmallctlnametomib(\"%s\", ...)\n", name); \ - abort(); \ - } \ -} while (0) - -#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \ - if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, \ - newlen) != 0) { \ - malloc_write( \ - ": Failure in xmallctlbymib()\n"); \ - abort(); \ - } \ -} while (0) - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ - diff --git a/lib/jemalloc-3.6.0/include/jemalloc/internal/extent.h b/lib/jemalloc-3.6.0/include/jemalloc/internal/extent.h deleted file mode 100644 index ba95ca8..0000000 --- a/lib/jemalloc-3.6.0/include/jemalloc/internal/extent.h +++ /dev/null @@ -1,46 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct extent_node_s extent_node_t; - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -/* Tree of extents. */ -struct extent_node_s { - /* Linkage for the size/address-ordered tree. */ - rb_node(extent_node_t) link_szad; - - /* Linkage for the address-ordered tree. */ - rb_node(extent_node_t) link_ad; - - /* Profile counters, used for huge objects. 
*/ - prof_ctx_t *prof_ctx; - - /* Pointer to the extent that this tree node is responsible for. */ - void *addr; - - /* Total region size. */ - size_t size; - - /* True if zero-filled; used by chunk recycling code. */ - bool zeroed; -}; -typedef rb_tree(extent_node_t) extent_tree_t; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -rb_proto(, extent_tree_szad_, extent_tree_t, extent_node_t) - -rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t) - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ - diff --git a/lib/jemalloc-3.6.0/include/jemalloc/internal/hash.h b/lib/jemalloc-3.6.0/include/jemalloc/internal/hash.h deleted file mode 100644 index c7183ed..0000000 --- a/lib/jemalloc-3.6.0/include/jemalloc/internal/hash.h +++ /dev/null @@ -1,335 +0,0 @@ -/* - * The following hash function is based on MurmurHash3, placed into the public - * domain by Austin Appleby. See http://code.google.com/p/smhasher/ for - * details. - */ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -uint32_t hash_x86_32(const void *key, int len, uint32_t seed); -void hash_x86_128(const void *key, const int len, uint32_t seed, - uint64_t r_out[2]); -void hash_x64_128(const void *key, const int len, const uint32_t seed, - uint64_t r_out[2]); -void hash(const void *key, size_t len, const uint32_t seed, - size_t r_hash[2]); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_HASH_C_)) -/******************************************************************************/ -/* Internal implementation. 
*/ -JEMALLOC_INLINE uint32_t -hash_rotl_32(uint32_t x, int8_t r) -{ - - return (x << r) | (x >> (32 - r)); -} - -JEMALLOC_INLINE uint64_t -hash_rotl_64(uint64_t x, int8_t r) -{ - return (x << r) | (x >> (64 - r)); -} - -JEMALLOC_INLINE uint32_t -hash_get_block_32(const uint32_t *p, int i) -{ - - return (p[i]); -} - -JEMALLOC_INLINE uint64_t -hash_get_block_64(const uint64_t *p, int i) -{ - - return (p[i]); -} - -JEMALLOC_INLINE uint32_t -hash_fmix_32(uint32_t h) -{ - - h ^= h >> 16; - h *= 0x85ebca6b; - h ^= h >> 13; - h *= 0xc2b2ae35; - h ^= h >> 16; - - return (h); -} - -JEMALLOC_INLINE uint64_t -hash_fmix_64(uint64_t k) -{ - - k ^= k >> 33; - k *= QU(0xff51afd7ed558ccdLLU); - k ^= k >> 33; - k *= QU(0xc4ceb9fe1a85ec53LLU); - k ^= k >> 33; - - return (k); -} - -JEMALLOC_INLINE uint32_t -hash_x86_32(const void *key, int len, uint32_t seed) -{ - const uint8_t *data = (const uint8_t *) key; - const int nblocks = len / 4; - - uint32_t h1 = seed; - - const uint32_t c1 = 0xcc9e2d51; - const uint32_t c2 = 0x1b873593; - - /* body */ - { - const uint32_t *blocks = (const uint32_t *) (data + nblocks*4); - int i; - - for (i = -nblocks; i; i++) { - uint32_t k1 = hash_get_block_32(blocks, i); - - k1 *= c1; - k1 = hash_rotl_32(k1, 15); - k1 *= c2; - - h1 ^= k1; - h1 = hash_rotl_32(h1, 13); - h1 = h1*5 + 0xe6546b64; - } - } - - /* tail */ - { - const uint8_t *tail = (const uint8_t *) (data + nblocks*4); - - uint32_t k1 = 0; - - switch (len & 3) { - case 3: k1 ^= tail[2] << 16; - case 2: k1 ^= tail[1] << 8; - case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15); - k1 *= c2; h1 ^= k1; - } - } - - /* finalization */ - h1 ^= len; - - h1 = hash_fmix_32(h1); - - return (h1); -} - -UNUSED JEMALLOC_INLINE void -hash_x86_128(const void *key, const int len, uint32_t seed, - uint64_t r_out[2]) -{ - const uint8_t * data = (const uint8_t *) key; - const int nblocks = len / 16; - - uint32_t h1 = seed; - uint32_t h2 = seed; - uint32_t h3 = seed; - uint32_t h4 = seed; - - const uint32_t c1 = 0x239b961b; - const uint32_t c2 = 0xab0e9789; - const uint32_t c3 = 0x38b34ae5; - const uint32_t c4 = 0xa1e38b93; - - /* body */ - { - const uint32_t *blocks = (const uint32_t *) (data + nblocks*16); - int i; - - for (i = -nblocks; i; i++) { - uint32_t k1 = hash_get_block_32(blocks, i*4 + 0); - uint32_t k2 = hash_get_block_32(blocks, i*4 + 1); - uint32_t k3 = hash_get_block_32(blocks, i*4 + 2); - uint32_t k4 = hash_get_block_32(blocks, i*4 + 3); - - k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; - - h1 = hash_rotl_32(h1, 19); h1 += h2; - h1 = h1*5 + 0x561ccd1b; - - k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2; - - h2 = hash_rotl_32(h2, 17); h2 += h3; - h2 = h2*5 + 0x0bcaa747; - - k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3; - - h3 = hash_rotl_32(h3, 15); h3 += h4; - h3 = h3*5 + 0x96cd1c35; - - k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4; - - h4 = hash_rotl_32(h4, 13); h4 += h1; - h4 = h4*5 + 0x32ac3b17; - } - } - - /* tail */ - { - const uint8_t *tail = (const uint8_t *) (data + nblocks*16); - uint32_t k1 = 0; - uint32_t k2 = 0; - uint32_t k3 = 0; - uint32_t k4 = 0; - - switch (len & 15) { - case 15: k4 ^= tail[14] << 16; - case 14: k4 ^= tail[13] << 8; - case 13: k4 ^= tail[12] << 0; - k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4; - - case 12: k3 ^= tail[11] << 24; - case 11: k3 ^= tail[10] << 16; - case 10: k3 ^= tail[ 9] << 8; - case 9: k3 ^= tail[ 8] << 0; - k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3; - - case 8: k2 ^= tail[ 7] << 24; - case 7: k2 ^= 
tail[ 6] << 16; - case 6: k2 ^= tail[ 5] << 8; - case 5: k2 ^= tail[ 4] << 0; - k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2; - - case 4: k1 ^= tail[ 3] << 24; - case 3: k1 ^= tail[ 2] << 16; - case 2: k1 ^= tail[ 1] << 8; - case 1: k1 ^= tail[ 0] << 0; - k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; - } - } - - /* finalization */ - h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len; - - h1 += h2; h1 += h3; h1 += h4; - h2 += h1; h3 += h1; h4 += h1; - - h1 = hash_fmix_32(h1); - h2 = hash_fmix_32(h2); - h3 = hash_fmix_32(h3); - h4 = hash_fmix_32(h4); - - h1 += h2; h1 += h3; h1 += h4; - h2 += h1; h3 += h1; h4 += h1; - - r_out[0] = (((uint64_t) h2) << 32) | h1; - r_out[1] = (((uint64_t) h4) << 32) | h3; -} - -UNUSED JEMALLOC_INLINE void -hash_x64_128(const void *key, const int len, const uint32_t seed, - uint64_t r_out[2]) -{ - const uint8_t *data = (const uint8_t *) key; - const int nblocks = len / 16; - - uint64_t h1 = seed; - uint64_t h2 = seed; - - const uint64_t c1 = QU(0x87c37b91114253d5LLU); - const uint64_t c2 = QU(0x4cf5ad432745937fLLU); - - /* body */ - { - const uint64_t *blocks = (const uint64_t *) (data); - int i; - - for (i = 0; i < nblocks; i++) { - uint64_t k1 = hash_get_block_64(blocks, i*2 + 0); - uint64_t k2 = hash_get_block_64(blocks, i*2 + 1); - - k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1; - - h1 = hash_rotl_64(h1, 27); h1 += h2; - h1 = h1*5 + 0x52dce729; - - k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2; - - h2 = hash_rotl_64(h2, 31); h2 += h1; - h2 = h2*5 + 0x38495ab5; - } - } - - /* tail */ - { - const uint8_t *tail = (const uint8_t*)(data + nblocks*16); - uint64_t k1 = 0; - uint64_t k2 = 0; - - switch (len & 15) { - case 15: k2 ^= ((uint64_t)(tail[14])) << 48; - case 14: k2 ^= ((uint64_t)(tail[13])) << 40; - case 13: k2 ^= ((uint64_t)(tail[12])) << 32; - case 12: k2 ^= ((uint64_t)(tail[11])) << 24; - case 11: k2 ^= ((uint64_t)(tail[10])) << 16; - case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8; - case 9: k2 ^= ((uint64_t)(tail[ 8])) << 0; - k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2; - - case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56; - case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48; - case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40; - case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32; - case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24; - case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16; - case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8; - case 1: k1 ^= ((uint64_t)(tail[ 0])) << 0; - k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1; - } - } - - /* finalization */ - h1 ^= len; h2 ^= len; - - h1 += h2; - h2 += h1; - - h1 = hash_fmix_64(h1); - h2 = hash_fmix_64(h2); - - h1 += h2; - h2 += h1; - - r_out[0] = h1; - r_out[1] = h2; -} - -/******************************************************************************/ -/* API. 
*/ -JEMALLOC_INLINE void -hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2]) -{ -#if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN)) - hash_x64_128(key, len, seed, (uint64_t *)r_hash); -#else - uint64_t hashes[2]; - hash_x86_128(key, len, seed, hashes); - r_hash[0] = (size_t)hashes[0]; - r_hash[1] = (size_t)hashes[1]; -#endif -} -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/lib/jemalloc-3.6.0/include/jemalloc/internal/huge.h b/lib/jemalloc-3.6.0/include/jemalloc/internal/huge.h deleted file mode 100644 index a2b9c77..0000000 --- a/lib/jemalloc-3.6.0/include/jemalloc/internal/huge.h +++ /dev/null @@ -1,46 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -/* Huge allocation statistics. */ -extern uint64_t huge_nmalloc; -extern uint64_t huge_ndalloc; -extern size_t huge_allocated; - -/* Protects chunk-related data structures. */ -extern malloc_mutex_t huge_mtx; - -void *huge_malloc(size_t size, bool zero, dss_prec_t dss_prec); -void *huge_palloc(size_t size, size_t alignment, bool zero, - dss_prec_t dss_prec); -bool huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, - size_t extra); -void *huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra, - size_t alignment, bool zero, bool try_tcache_dalloc, dss_prec_t dss_prec); -#ifdef JEMALLOC_JET -typedef void (huge_dalloc_junk_t)(void *, size_t); -extern huge_dalloc_junk_t *huge_dalloc_junk; -#endif -void huge_dalloc(void *ptr, bool unmap); -size_t huge_salloc(const void *ptr); -dss_prec_t huge_dss_prec_get(arena_t *arena); -prof_ctx_t *huge_prof_ctx_get(const void *ptr); -void huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx); -bool huge_boot(void); -void huge_prefork(void); -void huge_postfork_parent(void); -void huge_postfork_child(void); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/lib/jemalloc-3.6.0/include/jemalloc/internal/jemalloc_internal.h.in b/lib/jemalloc-3.6.0/include/jemalloc/internal/jemalloc_internal.h.in deleted file mode 100644 index 574bbb1..0000000 --- a/lib/jemalloc-3.6.0/include/jemalloc/internal/jemalloc_internal.h.in +++ /dev/null @@ -1,1028 +0,0 @@ -#ifndef JEMALLOC_INTERNAL_H -#define JEMALLOC_INTERNAL_H -#include -#ifdef _WIN32 -# include -# define ENOENT ERROR_PATH_NOT_FOUND -# define EINVAL ERROR_BAD_ARGUMENTS -# define EAGAIN ERROR_OUTOFMEMORY -# define EPERM ERROR_WRITE_FAULT -# define EFAULT ERROR_INVALID_ADDRESS -# define ENOMEM ERROR_NOT_ENOUGH_MEMORY -# undef ERANGE -# define ERANGE ERROR_INVALID_DATA -#else -# include -# include -# include -# if !defined(SYS_write) && defined(__NR_write) -# define SYS_write __NR_write -# endif -# include -# include -# include -#endif -#include - -#include -#ifndef SIZE_T_MAX -# define SIZE_T_MAX SIZE_MAX -#endif -#include -#include -#include -#include -#include -#include -#ifndef offsetof -# define offsetof(type, member) ((size_t)&(((type *)NULL)->member)) -#endif 
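The hash.h header deleted above embeds MurmurHash3 and always produces a 128-bit digest; on narrow targets the hash() wrapper folds each 64-bit half down to size_t. A self-contained sketch of just the 64-bit finalizer, using the same xor-shift/multiply constants as the deleted hash_fmix_64(); the fmix64 name and main() are stand-ins for illustration, not part of the original header.

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

/* MurmurHash3 64-bit finalizer, as in the deleted hash_fmix_64(). */
static uint64_t
fmix64(uint64_t k)
{
	k ^= k >> 33;
	k *= 0xff51afd7ed558ccdULL;
	k ^= k >> 33;
	k *= 0xc4ceb9fe1a85ec53ULL;
	k ^= k >> 33;
	return (k);
}

int
main(void)
{
	/* The finalizer is what gives nearby keys unrelated-looking hashes. */
	printf("%" PRIx64 "\n", fmix64(1));
	printf("%" PRIx64 "\n", fmix64(2));
	return (0);
}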
-#include -#include -#include -#include -#ifdef _MSC_VER -# include -typedef intptr_t ssize_t; -# define PATH_MAX 1024 -# define STDERR_FILENO 2 -# define __func__ __FUNCTION__ -/* Disable warnings about deprecated system functions */ -# pragma warning(disable: 4996) -#else -# include -#endif -#include - -#include "jemalloc_internal_defs.h" - -#ifdef JEMALLOC_UTRACE -#include -#endif - -#ifdef JEMALLOC_VALGRIND -#include -#include -#endif - -#define JEMALLOC_NO_DEMANGLE -#ifdef JEMALLOC_JET -# define JEMALLOC_N(n) jet_##n -# include "jemalloc/internal/public_namespace.h" -# define JEMALLOC_NO_RENAME -# include "../jemalloc@install_suffix@.h" -# undef JEMALLOC_NO_RENAME -#else -# define JEMALLOC_N(n) @private_namespace@##n -# include "../jemalloc@install_suffix@.h" -#endif -#include "jemalloc/internal/private_namespace.h" - -static const bool config_debug = -#ifdef JEMALLOC_DEBUG - true -#else - false -#endif - ; -static const bool config_dss = -#ifdef JEMALLOC_DSS - true -#else - false -#endif - ; -static const bool config_fill = -#ifdef JEMALLOC_FILL - true -#else - false -#endif - ; -static const bool config_lazy_lock = -#ifdef JEMALLOC_LAZY_LOCK - true -#else - false -#endif - ; -static const bool config_prof = -#ifdef JEMALLOC_PROF - true -#else - false -#endif - ; -static const bool config_prof_libgcc = -#ifdef JEMALLOC_PROF_LIBGCC - true -#else - false -#endif - ; -static const bool config_prof_libunwind = -#ifdef JEMALLOC_PROF_LIBUNWIND - true -#else - false -#endif - ; -static const bool config_mremap = -#ifdef JEMALLOC_MREMAP - true -#else - false -#endif - ; -static const bool config_munmap = -#ifdef JEMALLOC_MUNMAP - true -#else - false -#endif - ; -static const bool config_stats = -#ifdef JEMALLOC_STATS - true -#else - false -#endif - ; -static const bool config_tcache = -#ifdef JEMALLOC_TCACHE - true -#else - false -#endif - ; -static const bool config_tls = -#ifdef JEMALLOC_TLS - true -#else - false -#endif - ; -static const bool config_utrace = -#ifdef JEMALLOC_UTRACE - true -#else - false -#endif - ; -static const bool config_valgrind = -#ifdef JEMALLOC_VALGRIND - true -#else - false -#endif - ; -static const bool config_xmalloc = -#ifdef JEMALLOC_XMALLOC - true -#else - false -#endif - ; -static const bool config_ivsalloc = -#ifdef JEMALLOC_IVSALLOC - true -#else - false -#endif - ; - -#ifdef JEMALLOC_ATOMIC9 -#include -#endif - -#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN)) -#include -#endif - -#ifdef JEMALLOC_ZONE -#include -#include -#include -#include -#endif - -#define RB_COMPACT -#include "jemalloc/internal/rb.h" -#include "jemalloc/internal/qr.h" -#include "jemalloc/internal/ql.h" - -/* - * jemalloc can conceptually be broken into components (arena, tcache, etc.), - * but there are circular dependencies that cannot be broken without - * substantial performance degradation. In order to reduce the effect on - * visual code flow, read the header files in multiple passes, with one of the - * following cpp variables defined during each pass: - * - * JEMALLOC_H_TYPES : Preprocessor-defined constants and psuedo-opaque data - * types. - * JEMALLOC_H_STRUCTS : Data structures. - * JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes. - * JEMALLOC_H_INLINES : Inline functions. 
- */ -/******************************************************************************/ -#define JEMALLOC_H_TYPES - -#include "jemalloc/internal/jemalloc_internal_macros.h" - -#define MALLOCX_LG_ALIGN_MASK ((int)0x3f) -#define ALLOCM_LG_ALIGN_MASK ((int)0x3f) - -/* Smallest size class to support. */ -#define LG_TINY_MIN 3 -#define TINY_MIN (1U << LG_TINY_MIN) - -/* - * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size - * classes). - */ -#ifndef LG_QUANTUM -# if (defined(__i386__) || defined(_M_IX86)) -# define LG_QUANTUM 4 -# endif -# ifdef __ia64__ -# define LG_QUANTUM 4 -# endif -# ifdef __alpha__ -# define LG_QUANTUM 4 -# endif -# ifdef __sparc64__ -# define LG_QUANTUM 4 -# endif -# if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64)) -# define LG_QUANTUM 4 -# endif -# ifdef __arm__ -# define LG_QUANTUM 3 -# endif -# ifdef __aarch64__ -# define LG_QUANTUM 4 -# endif -# ifdef __hppa__ -# define LG_QUANTUM 4 -# endif -# ifdef __mips__ -# define LG_QUANTUM 3 -# endif -# ifdef __powerpc__ -# define LG_QUANTUM 4 -# endif -# ifdef __s390__ -# define LG_QUANTUM 4 -# endif -# ifdef __SH4__ -# define LG_QUANTUM 4 -# endif -# ifdef __tile__ -# define LG_QUANTUM 4 -# endif -# ifndef LG_QUANTUM -# error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS" -# endif -#endif - -#define QUANTUM ((size_t)(1U << LG_QUANTUM)) -#define QUANTUM_MASK (QUANTUM - 1) - -/* Return the smallest quantum multiple that is >= a. */ -#define QUANTUM_CEILING(a) \ - (((a) + QUANTUM_MASK) & ~QUANTUM_MASK) - -#define LONG ((size_t)(1U << LG_SIZEOF_LONG)) -#define LONG_MASK (LONG - 1) - -/* Return the smallest long multiple that is >= a. */ -#define LONG_CEILING(a) \ - (((a) + LONG_MASK) & ~LONG_MASK) - -#define SIZEOF_PTR (1U << LG_SIZEOF_PTR) -#define PTR_MASK (SIZEOF_PTR - 1) - -/* Return the smallest (void *) multiple that is >= a. */ -#define PTR_CEILING(a) \ - (((a) + PTR_MASK) & ~PTR_MASK) - -/* - * Maximum size of L1 cache line. This is used to avoid cache line aliasing. - * In addition, this controls the spacing of cacheline-spaced size classes. - * - * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can - * only handle raw constants. - */ -#define LG_CACHELINE 6 -#define CACHELINE 64 -#define CACHELINE_MASK (CACHELINE - 1) - -/* Return the smallest cacheline multiple that is >= s. */ -#define CACHELINE_CEILING(s) \ - (((s) + CACHELINE_MASK) & ~CACHELINE_MASK) - -/* Page size. STATIC_PAGE_SHIFT is determined by the configure script. */ -#ifdef PAGE_MASK -# undef PAGE_MASK -#endif -#define LG_PAGE STATIC_PAGE_SHIFT -#define PAGE ((size_t)(1U << STATIC_PAGE_SHIFT)) -#define PAGE_MASK ((size_t)(PAGE - 1)) - -/* Return the smallest pagesize multiple that is >= s. */ -#define PAGE_CEILING(s) \ - (((s) + PAGE_MASK) & ~PAGE_MASK) - -/* Return the nearest aligned address at or below a. */ -#define ALIGNMENT_ADDR2BASE(a, alignment) \ - ((void *)((uintptr_t)(a) & (-(alignment)))) - -/* Return the offset between a and the nearest aligned address at or below a. */ -#define ALIGNMENT_ADDR2OFFSET(a, alignment) \ - ((size_t)((uintptr_t)(a) & (alignment - 1))) - -/* Return the smallest alignment multiple that is >= s. 
*/ -#define ALIGNMENT_CEILING(s, alignment) \ - (((s) + (alignment - 1)) & (-(alignment))) - -/* Declare a variable length array */ -#if __STDC_VERSION__ < 199901L -# ifdef _MSC_VER -# include -# define alloca _alloca -# else -# ifdef JEMALLOC_HAS_ALLOCA_H -# include -# else -# include -# endif -# endif -# define VARIABLE_ARRAY(type, name, count) \ - type *name = alloca(sizeof(type) * count) -#else -# define VARIABLE_ARRAY(type, name, count) type name[count] -#endif - -#ifdef JEMALLOC_VALGRIND -/* - * The JEMALLOC_VALGRIND_*() macros must be macros rather than functions - * so that when Valgrind reports errors, there are no extra stack frames - * in the backtraces. - * - * The size that is reported to valgrind must be consistent through a chain of - * malloc..realloc..realloc calls. Request size isn't recorded anywhere in - * jemalloc, so it is critical that all callers of these macros provide usize - * rather than request size. As a result, buffer overflow detection is - * technically weakened for the standard API, though it is generally accepted - * practice to consider any extra bytes reported by malloc_usable_size() as - * usable space. - */ -#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do { \ - if (config_valgrind && opt_valgrind && cond) \ - VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero); \ -} while (0) -#define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize, \ - old_rzsize, zero) do { \ - if (config_valgrind && opt_valgrind) { \ - size_t rzsize = p2rz(ptr); \ - \ - if (ptr == old_ptr) { \ - VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \ - usize, rzsize); \ - if (zero && old_usize < usize) { \ - VALGRIND_MAKE_MEM_DEFINED( \ - (void *)((uintptr_t)ptr + \ - old_usize), usize - old_usize); \ - } \ - } else { \ - if (old_ptr != NULL) { \ - VALGRIND_FREELIKE_BLOCK(old_ptr, \ - old_rzsize); \ - } \ - if (ptr != NULL) { \ - size_t copy_size = (old_usize < usize) \ - ? 
old_usize : usize; \ - size_t tail_size = usize - copy_size; \ - VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, \ - rzsize, false); \ - if (copy_size > 0) { \ - VALGRIND_MAKE_MEM_DEFINED(ptr, \ - copy_size); \ - } \ - if (zero && tail_size > 0) { \ - VALGRIND_MAKE_MEM_DEFINED( \ - (void *)((uintptr_t)ptr + \ - copy_size), tail_size); \ - } \ - } \ - } \ - } \ -} while (0) -#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do { \ - if (config_valgrind && opt_valgrind) \ - VALGRIND_FREELIKE_BLOCK(ptr, rzsize); \ -} while (0) -#else -#define RUNNING_ON_VALGRIND ((unsigned)0) -#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \ - do {} while (0) -#define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB) \ - do {} while (0) -#define VALGRIND_FREELIKE_BLOCK(addr, rzB) do {} while (0) -#define VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr, _qzz_len) do {} while (0) -#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len) do {} while (0) -#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len) do {} while (0) -#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {} while (0) -#define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize, \ - old_rzsize, zero) do {} while (0) -#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0) -#endif - -#include "jemalloc/internal/util.h" -#include "jemalloc/internal/atomic.h" -#include "jemalloc/internal/prng.h" -#include "jemalloc/internal/ckh.h" -#include "jemalloc/internal/size_classes.h" -#include "jemalloc/internal/stats.h" -#include "jemalloc/internal/ctl.h" -#include "jemalloc/internal/mutex.h" -#include "jemalloc/internal/tsd.h" -#include "jemalloc/internal/mb.h" -#include "jemalloc/internal/extent.h" -#include "jemalloc/internal/arena.h" -#include "jemalloc/internal/bitmap.h" -#include "jemalloc/internal/base.h" -#include "jemalloc/internal/chunk.h" -#include "jemalloc/internal/huge.h" -#include "jemalloc/internal/rtree.h" -#include "jemalloc/internal/tcache.h" -#include "jemalloc/internal/hash.h" -#include "jemalloc/internal/quarantine.h" -#include "jemalloc/internal/prof.h" - -#undef JEMALLOC_H_TYPES -/******************************************************************************/ -#define JEMALLOC_H_STRUCTS - -#include "jemalloc/internal/util.h" -#include "jemalloc/internal/atomic.h" -#include "jemalloc/internal/prng.h" -#include "jemalloc/internal/ckh.h" -#include "jemalloc/internal/size_classes.h" -#include "jemalloc/internal/stats.h" -#include "jemalloc/internal/ctl.h" -#include "jemalloc/internal/mutex.h" -#include "jemalloc/internal/tsd.h" -#include "jemalloc/internal/mb.h" -#include "jemalloc/internal/bitmap.h" -#include "jemalloc/internal/extent.h" -#include "jemalloc/internal/arena.h" -#include "jemalloc/internal/base.h" -#include "jemalloc/internal/chunk.h" -#include "jemalloc/internal/huge.h" -#include "jemalloc/internal/rtree.h" -#include "jemalloc/internal/tcache.h" -#include "jemalloc/internal/hash.h" -#include "jemalloc/internal/quarantine.h" -#include "jemalloc/internal/prof.h" - -typedef struct { - uint64_t allocated; - uint64_t deallocated; -} thread_allocated_t; -/* - * The JEMALLOC_ARG_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro - * argument. 
- */ -#define THREAD_ALLOCATED_INITIALIZER JEMALLOC_ARG_CONCAT({0, 0}) - -#undef JEMALLOC_H_STRUCTS -/******************************************************************************/ -#define JEMALLOC_H_EXTERNS - -extern bool opt_abort; -extern bool opt_junk; -extern size_t opt_quarantine; -extern bool opt_redzone; -extern bool opt_utrace; -extern bool opt_valgrind; -extern bool opt_xmalloc; -extern bool opt_zero; -extern size_t opt_narenas; - -/* Number of CPUs. */ -extern unsigned ncpus; - -/* Protects arenas initialization (arenas, arenas_total). */ -extern malloc_mutex_t arenas_lock; -/* - * Arenas that are used to service external requests. Not all elements of the - * arenas array are necessarily used; arenas are created lazily as needed. - * - * arenas[0..narenas_auto) are used for automatic multiplexing of threads and - * arenas. arenas[narenas_auto..narenas_total) are only used if the application - * takes some action to create them and allocate from them. - */ -extern arena_t **arenas; -extern unsigned narenas_total; -extern unsigned narenas_auto; /* Read-only after initialization. */ - -arena_t *arenas_extend(unsigned ind); -void arenas_cleanup(void *arg); -arena_t *choose_arena_hard(void); -void jemalloc_prefork(void); -void jemalloc_postfork_parent(void); -void jemalloc_postfork_child(void); - -#include "jemalloc/internal/util.h" -#include "jemalloc/internal/atomic.h" -#include "jemalloc/internal/prng.h" -#include "jemalloc/internal/ckh.h" -#include "jemalloc/internal/size_classes.h" -#include "jemalloc/internal/stats.h" -#include "jemalloc/internal/ctl.h" -#include "jemalloc/internal/mutex.h" -#include "jemalloc/internal/tsd.h" -#include "jemalloc/internal/mb.h" -#include "jemalloc/internal/bitmap.h" -#include "jemalloc/internal/extent.h" -#include "jemalloc/internal/arena.h" -#include "jemalloc/internal/base.h" -#include "jemalloc/internal/chunk.h" -#include "jemalloc/internal/huge.h" -#include "jemalloc/internal/rtree.h" -#include "jemalloc/internal/tcache.h" -#include "jemalloc/internal/hash.h" -#include "jemalloc/internal/quarantine.h" -#include "jemalloc/internal/prof.h" - -#undef JEMALLOC_H_EXTERNS -/******************************************************************************/ -#define JEMALLOC_H_INLINES - -#include "jemalloc/internal/util.h" -#include "jemalloc/internal/atomic.h" -#include "jemalloc/internal/prng.h" -#include "jemalloc/internal/ckh.h" -#include "jemalloc/internal/size_classes.h" -#include "jemalloc/internal/stats.h" -#include "jemalloc/internal/ctl.h" -#include "jemalloc/internal/mutex.h" -#include "jemalloc/internal/tsd.h" -#include "jemalloc/internal/mb.h" -#include "jemalloc/internal/extent.h" -#include "jemalloc/internal/base.h" -#include "jemalloc/internal/chunk.h" -#include "jemalloc/internal/huge.h" - -#ifndef JEMALLOC_ENABLE_INLINE -malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, arena_t *) - -size_t s2u(size_t size); -size_t sa2u(size_t size, size_t alignment); -unsigned narenas_total_get(void); -arena_t *choose_arena(arena_t *arena); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_)) -/* - * Map of pthread_self() --> arenas[???], used for selecting an arena to use - * for allocations. - */ -malloc_tsd_externs(arenas, arena_t *) -malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, arenas, arena_t *, NULL, - arenas_cleanup) - -/* - * Compute usable size that would result from allocating an object with the - * specified size. 
- */ -JEMALLOC_ALWAYS_INLINE size_t -s2u(size_t size) -{ - - if (size <= SMALL_MAXCLASS) - return (arena_bin_info[SMALL_SIZE2BIN(size)].reg_size); - if (size <= arena_maxclass) - return (PAGE_CEILING(size)); - return (CHUNK_CEILING(size)); -} - -/* - * Compute usable size that would result from allocating an object with the - * specified size and alignment. - */ -JEMALLOC_ALWAYS_INLINE size_t -sa2u(size_t size, size_t alignment) -{ - size_t usize; - - assert(alignment != 0 && ((alignment - 1) & alignment) == 0); - - /* - * Round size up to the nearest multiple of alignment. - * - * This done, we can take advantage of the fact that for each small - * size class, every object is aligned at the smallest power of two - * that is non-zero in the base two representation of the size. For - * example: - * - * Size | Base 2 | Minimum alignment - * -----+----------+------------------ - * 96 | 1100000 | 32 - * 144 | 10100000 | 32 - * 192 | 11000000 | 64 - */ - usize = ALIGNMENT_CEILING(size, alignment); - /* - * (usize < size) protects against the combination of maximal - * alignment and size greater than maximal alignment. - */ - if (usize < size) { - /* size_t overflow. */ - return (0); - } - - if (usize <= arena_maxclass && alignment <= PAGE) { - if (usize <= SMALL_MAXCLASS) - return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size); - return (PAGE_CEILING(usize)); - } else { - size_t run_size; - - /* - * We can't achieve subpage alignment, so round up alignment - * permanently; it makes later calculations simpler. - */ - alignment = PAGE_CEILING(alignment); - usize = PAGE_CEILING(size); - /* - * (usize < size) protects against very large sizes within - * PAGE of SIZE_T_MAX. - * - * (usize + alignment < usize) protects against the - * combination of maximal alignment and usize large enough - * to cause overflow. This is similar to the first overflow - * check above, but it needs to be repeated due to the new - * usize value, which may now be *equal* to maximal - * alignment, whereas before we only detected overflow if the - * original size was *greater* than maximal alignment. - */ - if (usize < size || usize + alignment < usize) { - /* size_t overflow. */ - return (0); - } - - /* - * Calculate the size of the over-size run that arena_palloc() - * would need to allocate in order to guarantee the alignment. - * If the run wouldn't fit within a chunk, round up to a huge - * allocation size. - */ - run_size = usize + alignment - PAGE; - if (run_size <= arena_maxclass) - return (PAGE_CEILING(usize)); - return (CHUNK_CEILING(usize)); - } -} - -JEMALLOC_INLINE unsigned -narenas_total_get(void) -{ - unsigned narenas; - - malloc_mutex_lock(&arenas_lock); - narenas = narenas_total; - malloc_mutex_unlock(&arenas_lock); - - return (narenas); -} - -/* Choose an arena based on a per-thread value. */ -JEMALLOC_INLINE arena_t * -choose_arena(arena_t *arena) -{ - arena_t *ret; - - if (arena != NULL) - return (arena); - - if ((ret = *arenas_tsd_get()) == NULL) { - ret = choose_arena_hard(); - assert(ret != NULL); - } - - return (ret); -} -#endif - -#include "jemalloc/internal/bitmap.h" -#include "jemalloc/internal/rtree.h" -/* - * Include arena.h twice in order to resolve circular dependencies with - * tcache.h. 
- */ -#define JEMALLOC_ARENA_INLINE_A -#include "jemalloc/internal/arena.h" -#undef JEMALLOC_ARENA_INLINE_A -#include "jemalloc/internal/tcache.h" -#define JEMALLOC_ARENA_INLINE_B -#include "jemalloc/internal/arena.h" -#undef JEMALLOC_ARENA_INLINE_B -#include "jemalloc/internal/hash.h" -#include "jemalloc/internal/quarantine.h" - -#ifndef JEMALLOC_ENABLE_INLINE -void *imalloct(size_t size, bool try_tcache, arena_t *arena); -void *imalloc(size_t size); -void *icalloct(size_t size, bool try_tcache, arena_t *arena); -void *icalloc(size_t size); -void *ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache, - arena_t *arena); -void *ipalloc(size_t usize, size_t alignment, bool zero); -size_t isalloc(const void *ptr, bool demote); -size_t ivsalloc(const void *ptr, bool demote); -size_t u2rz(size_t usize); -size_t p2rz(const void *ptr); -void idalloct(void *ptr, bool try_tcache); -void idalloc(void *ptr); -void iqalloct(void *ptr, bool try_tcache); -void iqalloc(void *ptr); -void *iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra, - size_t alignment, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, - arena_t *arena); -void *iralloct(void *ptr, size_t size, size_t extra, size_t alignment, - bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena); -void *iralloc(void *ptr, size_t size, size_t extra, size_t alignment, - bool zero); -bool ixalloc(void *ptr, size_t size, size_t extra, size_t alignment, - bool zero); -malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t) -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_)) -JEMALLOC_ALWAYS_INLINE void * -imalloct(size_t size, bool try_tcache, arena_t *arena) -{ - - assert(size != 0); - - if (size <= arena_maxclass) - return (arena_malloc(arena, size, false, try_tcache)); - else - return (huge_malloc(size, false, huge_dss_prec_get(arena))); -} - -JEMALLOC_ALWAYS_INLINE void * -imalloc(size_t size) -{ - - return (imalloct(size, true, NULL)); -} - -JEMALLOC_ALWAYS_INLINE void * -icalloct(size_t size, bool try_tcache, arena_t *arena) -{ - - if (size <= arena_maxclass) - return (arena_malloc(arena, size, true, try_tcache)); - else - return (huge_malloc(size, true, huge_dss_prec_get(arena))); -} - -JEMALLOC_ALWAYS_INLINE void * -icalloc(size_t size) -{ - - return (icalloct(size, true, NULL)); -} - -JEMALLOC_ALWAYS_INLINE void * -ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache, - arena_t *arena) -{ - void *ret; - - assert(usize != 0); - assert(usize == sa2u(usize, alignment)); - - if (usize <= arena_maxclass && alignment <= PAGE) - ret = arena_malloc(arena, usize, zero, try_tcache); - else { - if (usize <= arena_maxclass) { - ret = arena_palloc(choose_arena(arena), usize, - alignment, zero); - } else if (alignment <= chunksize) - ret = huge_malloc(usize, zero, huge_dss_prec_get(arena)); - else - ret = huge_palloc(usize, alignment, zero, huge_dss_prec_get(arena)); - } - - assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret); - return (ret); -} - -JEMALLOC_ALWAYS_INLINE void * -ipalloc(size_t usize, size_t alignment, bool zero) -{ - - return (ipalloct(usize, alignment, zero, true, NULL)); -} - -/* - * Typical usage: - * void *ptr = [...] - * size_t sz = isalloc(ptr, config_prof); - */ -JEMALLOC_ALWAYS_INLINE size_t -isalloc(const void *ptr, bool demote) -{ - size_t ret; - arena_chunk_t *chunk; - - assert(ptr != NULL); - /* Demotion only makes sense if config_prof is true. 
*/ - assert(config_prof || demote == false); - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - if (chunk != ptr) - ret = arena_salloc(ptr, demote); - else - ret = huge_salloc(ptr); - - return (ret); -} - -JEMALLOC_ALWAYS_INLINE size_t -ivsalloc(const void *ptr, bool demote) -{ - - /* Return 0 if ptr is not within a chunk managed by jemalloc. */ - if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == 0) - return (0); - - return (isalloc(ptr, demote)); -} - -JEMALLOC_INLINE size_t -u2rz(size_t usize) -{ - size_t ret; - - if (usize <= SMALL_MAXCLASS) { - size_t binind = SMALL_SIZE2BIN(usize); - ret = arena_bin_info[binind].redzone_size; - } else - ret = 0; - - return (ret); -} - -JEMALLOC_INLINE size_t -p2rz(const void *ptr) -{ - size_t usize = isalloc(ptr, false); - - return (u2rz(usize)); -} - -JEMALLOC_ALWAYS_INLINE void -idalloct(void *ptr, bool try_tcache) -{ - arena_chunk_t *chunk; - - assert(ptr != NULL); - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - if (chunk != ptr) - arena_dalloc(chunk->arena, chunk, ptr, try_tcache); - else - huge_dalloc(ptr, true); -} - -JEMALLOC_ALWAYS_INLINE void -idalloc(void *ptr) -{ - - idalloct(ptr, true); -} - -JEMALLOC_ALWAYS_INLINE void -iqalloct(void *ptr, bool try_tcache) -{ - - if (config_fill && opt_quarantine) - quarantine(ptr); - else - idalloct(ptr, try_tcache); -} - -JEMALLOC_ALWAYS_INLINE void -iqalloc(void *ptr) -{ - - iqalloct(ptr, true); -} - -JEMALLOC_ALWAYS_INLINE void * -iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra, - size_t alignment, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, - arena_t *arena) -{ - void *p; - size_t usize, copysize; - - usize = sa2u(size + extra, alignment); - if (usize == 0) - return (NULL); - p = ipalloct(usize, alignment, zero, try_tcache_alloc, arena); - if (p == NULL) { - if (extra == 0) - return (NULL); - /* Try again, without extra this time. */ - usize = sa2u(size, alignment); - if (usize == 0) - return (NULL); - p = ipalloct(usize, alignment, zero, try_tcache_alloc, arena); - if (p == NULL) - return (NULL); - } - /* - * Copy at most size bytes (not size+extra), since the caller has no - * expectation that the extra bytes will be reliably preserved. - */ - copysize = (size < oldsize) ? size : oldsize; - memcpy(p, ptr, copysize); - iqalloct(ptr, try_tcache_dalloc); - return (p); -} - -JEMALLOC_ALWAYS_INLINE void * -iralloct(void *ptr, size_t size, size_t extra, size_t alignment, bool zero, - bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena) -{ - size_t oldsize; - - assert(ptr != NULL); - assert(size != 0); - - oldsize = isalloc(ptr, config_prof); - - if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1)) - != 0) { - /* - * Existing object alignment is inadequate; allocate new space - * and copy. 
- */ - return (iralloct_realign(ptr, oldsize, size, extra, alignment, - zero, try_tcache_alloc, try_tcache_dalloc, arena)); - } - - if (size + extra <= arena_maxclass) { - return (arena_ralloc(arena, ptr, oldsize, size, extra, - alignment, zero, try_tcache_alloc, - try_tcache_dalloc)); - } else { - return (huge_ralloc(ptr, oldsize, size, extra, - alignment, zero, try_tcache_dalloc, huge_dss_prec_get(arena))); - } -} - -JEMALLOC_ALWAYS_INLINE void * -iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero) -{ - - return (iralloct(ptr, size, extra, alignment, zero, true, true, NULL)); -} - -JEMALLOC_ALWAYS_INLINE bool -ixalloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero) -{ - size_t oldsize; - - assert(ptr != NULL); - assert(size != 0); - - oldsize = isalloc(ptr, config_prof); - if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1)) - != 0) { - /* Existing object alignment is inadequate. */ - return (true); - } - - if (size <= arena_maxclass) - return (arena_ralloc_no_move(ptr, oldsize, size, extra, zero)); - else - return (huge_ralloc_no_move(ptr, oldsize, size, extra)); -} - -malloc_tsd_externs(thread_allocated, thread_allocated_t) -malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, thread_allocated, thread_allocated_t, - THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup) -#endif - -#include "jemalloc/internal/prof.h" - -#undef JEMALLOC_H_INLINES -/******************************************************************************/ -#endif /* JEMALLOC_INTERNAL_H */ diff --git a/lib/jemalloc-3.6.0/include/jemalloc/internal/jemalloc_internal_defs.h.in b/lib/jemalloc-3.6.0/include/jemalloc/internal/jemalloc_internal_defs.h.in deleted file mode 100644 index c166fbd..0000000 --- a/lib/jemalloc-3.6.0/include/jemalloc/internal/jemalloc_internal_defs.h.in +++ /dev/null @@ -1,205 +0,0 @@ -#ifndef JEMALLOC_INTERNAL_DEFS_H_ -#define JEMALLOC_INTERNAL_DEFS_H_ -/* - * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all - * public APIs to be prefixed. This makes it possible, with some care, to use - * multiple allocators simultaneously. - */ -#undef JEMALLOC_PREFIX -#undef JEMALLOC_CPREFIX - -/* - * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs. - * For shared libraries, symbol visibility mechanisms prevent these symbols - * from being exported, but for static libraries, naming collisions are a real - * possibility. - */ -#undef JEMALLOC_PRIVATE_NAMESPACE - -/* - * Hyper-threaded CPUs may need a special instruction inside spin loops in - * order to yield to another virtual CPU. - */ -#undef CPU_SPINWAIT - -/* Defined if the equivalent of FreeBSD's atomic(9) functions are available. */ -#undef JEMALLOC_ATOMIC9 - -/* - * Defined if OSAtomic*() functions are available, as provided by Darwin, and - * documented in the atomic(3) manual page. 
- */ -#undef JEMALLOC_OSATOMIC - -/* - * Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and - * __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite - * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the - * functions are defined in libgcc instead of being inlines) - */ -#undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4 - -/* - * Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and - * __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite - * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the - * functions are defined in libgcc instead of being inlines) - */ -#undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8 - -/* - * Defined if OSSpin*() functions are available, as provided by Darwin, and - * documented in the spinlock(3) manual page. - */ -#undef JEMALLOC_OSSPIN - -/* - * Defined if _malloc_thread_cleanup() exists. At least in the case of - * FreeBSD, pthread_key_create() allocates, which if used during malloc - * bootstrapping will cause recursion into the pthreads library. Therefore, if - * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in - * malloc_tsd. - */ -#undef JEMALLOC_MALLOC_THREAD_CLEANUP - -/* - * Defined if threaded initialization is known to be safe on this platform. - * Among other things, it must be possible to initialize a mutex without - * triggering allocation in order for threaded allocation to be safe. - */ -#undef JEMALLOC_THREADED_INIT - -/* - * Defined if the pthreads implementation defines - * _pthread_mutex_init_calloc_cb(), in which case the function is used in order - * to avoid recursive allocation during mutex initialization. - */ -#undef JEMALLOC_MUTEX_INIT_CB - -/* Defined if sbrk() is supported. */ -#undef JEMALLOC_HAVE_SBRK - -/* Non-empty if the tls_model attribute is supported. */ -#undef JEMALLOC_TLS_MODEL - -/* JEMALLOC_CC_SILENCE enables code that silences unuseful compiler warnings. */ -#undef JEMALLOC_CC_SILENCE - -/* JEMALLOC_CODE_COVERAGE enables test code coverage analysis. */ -#undef JEMALLOC_CODE_COVERAGE - -/* - * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables - * inline functions. - */ -#undef JEMALLOC_DEBUG - -/* JEMALLOC_STATS enables statistics calculation. */ -#undef JEMALLOC_STATS - -/* JEMALLOC_PROF enables allocation profiling. */ -#undef JEMALLOC_PROF - -/* Use libunwind for profile backtracing if defined. */ -#undef JEMALLOC_PROF_LIBUNWIND - -/* Use libgcc for profile backtracing if defined. */ -#undef JEMALLOC_PROF_LIBGCC - -/* Use gcc intrinsics for profile backtracing if defined. */ -#undef JEMALLOC_PROF_GCC - -/* - * JEMALLOC_TCACHE enables a thread-specific caching layer for small objects. - * This makes it possible to allocate/deallocate objects without any locking - * when the cache is in the steady state. - */ -#undef JEMALLOC_TCACHE - -/* - * JEMALLOC_DSS enables use of sbrk(2) to allocate chunks from the data storage - * segment (DSS). - */ -#undef JEMALLOC_DSS - -/* Support memory filling (junk/zero/quarantine/redzone). */ -#undef JEMALLOC_FILL - -/* Support utrace(2)-based tracing. */ -#undef JEMALLOC_UTRACE - -/* Support Valgrind. */ -#undef JEMALLOC_VALGRIND - -/* Support optional abort() on OOM. */ -#undef JEMALLOC_XMALLOC - -/* Support lazy locking (avoid locking unless a second thread is launched). */ -#undef JEMALLOC_LAZY_LOCK - -/* One page is 2^STATIC_PAGE_SHIFT bytes. */ -#undef STATIC_PAGE_SHIFT - -/* - * If defined, use munmap() to unmap freed chunks, rather than storing them for - * later reuse. 
This is disabled by default on Linux because common sequences - * of mmap()/munmap() calls will cause virtual memory map holes. - */ -#undef JEMALLOC_MUNMAP - -/* - * If defined, use mremap(...MREMAP_FIXED...) for huge realloc(). This is - * disabled by default because it is Linux-specific and it will cause virtual - * memory map holes, much like munmap(2) does. - */ -#undef JEMALLOC_MREMAP - -/* TLS is used to map arenas and magazine caches to threads. */ -#undef JEMALLOC_TLS - -/* - * JEMALLOC_IVSALLOC enables ivsalloc(), which verifies that pointers reside - * within jemalloc-owned chunks before dereferencing them. - */ -#undef JEMALLOC_IVSALLOC - -/* - * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings. - */ -#undef JEMALLOC_ZONE -#undef JEMALLOC_ZONE_VERSION - -/* - * Methods for purging unused pages differ between operating systems. - * - * madvise(..., MADV_DONTNEED) : On Linux, this immediately discards pages, - * such that new pages will be demand-zeroed if - * the address region is later touched. - * madvise(..., MADV_FREE) : On FreeBSD and Darwin, this marks pages as being - * unused, such that they will be discarded rather - * than swapped out. - */ -#undef JEMALLOC_PURGE_MADVISE_DONTNEED -#undef JEMALLOC_PURGE_MADVISE_FREE - -/* - * Define if operating system has alloca.h header. - */ -#undef JEMALLOC_HAS_ALLOCA_H - -/* C99 restrict keyword supported. */ -#undef JEMALLOC_HAS_RESTRICT - -/* For use by hash code. */ -#undef JEMALLOC_BIG_ENDIAN - -/* sizeof(int) == 2^LG_SIZEOF_INT. */ -#undef LG_SIZEOF_INT - -/* sizeof(long) == 2^LG_SIZEOF_LONG. */ -#undef LG_SIZEOF_LONG - -/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */ -#undef LG_SIZEOF_INTMAX_T - -#endif /* JEMALLOC_INTERNAL_DEFS_H_ */ diff --git a/lib/jemalloc-3.6.0/include/jemalloc/internal/jemalloc_internal_macros.h b/lib/jemalloc-3.6.0/include/jemalloc/internal/jemalloc_internal_macros.h deleted file mode 100644 index 4e23923..0000000 --- a/lib/jemalloc-3.6.0/include/jemalloc/internal/jemalloc_internal_macros.h +++ /dev/null @@ -1,51 +0,0 @@ -/* - * JEMALLOC_ALWAYS_INLINE and JEMALLOC_INLINE are used within header files for - * functions that are static inline functions if inlining is enabled, and - * single-definition library-private functions if inlining is disabled. - * - * JEMALLOC_ALWAYS_INLINE_C and JEMALLOC_INLINE_C are for use in .c files, in - * which case the denoted functions are always static, regardless of whether - * inlining is enabled. - */ -#if defined(JEMALLOC_DEBUG) || defined(JEMALLOC_CODE_COVERAGE) - /* Disable inlining to make debugging/profiling easier. 
*/ -# define JEMALLOC_ALWAYS_INLINE -# define JEMALLOC_ALWAYS_INLINE_C static -# define JEMALLOC_INLINE -# define JEMALLOC_INLINE_C static -# define inline -#else -# define JEMALLOC_ENABLE_INLINE -# ifdef JEMALLOC_HAVE_ATTR -# define JEMALLOC_ALWAYS_INLINE \ - static inline JEMALLOC_ATTR(unused) JEMALLOC_ATTR(always_inline) -# define JEMALLOC_ALWAYS_INLINE_C \ - static inline JEMALLOC_ATTR(always_inline) -# else -# define JEMALLOC_ALWAYS_INLINE static inline -# define JEMALLOC_ALWAYS_INLINE_C static inline -# endif -# define JEMALLOC_INLINE static inline -# define JEMALLOC_INLINE_C static inline -# ifdef _MSC_VER -# define inline _inline -# endif -#endif - -#ifdef JEMALLOC_CC_SILENCE -# define UNUSED JEMALLOC_ATTR(unused) -#else -# define UNUSED -#endif - -#define ZU(z) ((size_t)z) -#define QU(q) ((uint64_t)q) -#define QI(q) ((int64_t)q) - -#ifndef __DECONST -# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var)) -#endif - -#ifndef JEMALLOC_HAS_RESTRICT -# define restrict -#endif diff --git a/lib/jemalloc-3.6.0/include/jemalloc/internal/mb.h b/lib/jemalloc-3.6.0/include/jemalloc/internal/mb.h deleted file mode 100644 index 3cfa787..0000000 --- a/lib/jemalloc-3.6.0/include/jemalloc/internal/mb.h +++ /dev/null @@ -1,115 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -void mb_write(void); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MB_C_)) -#ifdef __i386__ -/* - * According to the Intel Architecture Software Developer's Manual, current - * processors execute instructions in order from the perspective of other - * processors in a multiprocessor system, but 1) Intel reserves the right to - * change that, and 2) the compiler's optimizer could re-order instructions if - * there weren't some form of barrier. Therefore, even if running on an - * architecture that does not need memory barriers (everything through at least - * i686), an "optimizer barrier" is necessary. - */ -JEMALLOC_INLINE void -mb_write(void) -{ - -# if 0 - /* This is a true memory barrier. */ - asm volatile ("pusha;" - "xor %%eax,%%eax;" - "cpuid;" - "popa;" - : /* Outputs. */ - : /* Inputs. */ - : "memory" /* Clobbers. */ - ); -#else - /* - * This is hopefully enough to keep the compiler from reordering - * instructions around this one. - */ - asm volatile ("nop;" - : /* Outputs. */ - : /* Inputs. */ - : "memory" /* Clobbers. */ - ); -#endif -} -#elif (defined(__amd64__) || defined(__x86_64__)) -JEMALLOC_INLINE void -mb_write(void) -{ - - asm volatile ("sfence" - : /* Outputs. */ - : /* Inputs. */ - : "memory" /* Clobbers. */ - ); -} -#elif defined(__powerpc__) -JEMALLOC_INLINE void -mb_write(void) -{ - - asm volatile ("eieio" - : /* Outputs. */ - : /* Inputs. */ - : "memory" /* Clobbers. */ - ); -} -#elif defined(__sparc64__) -JEMALLOC_INLINE void -mb_write(void) -{ - - asm volatile ("membar #StoreStore" - : /* Outputs. */ - : /* Inputs. */ - : "memory" /* Clobbers. 
*/ - ); -} -#elif defined(__tile__) -JEMALLOC_INLINE void -mb_write(void) -{ - - __sync_synchronize(); -} -#else -/* - * This is much slower than a simple memory barrier, but the semantics of mutex - * unlock make this work. - */ -JEMALLOC_INLINE void -mb_write(void) -{ - malloc_mutex_t mtx; - - malloc_mutex_init(&mtx); - malloc_mutex_lock(&mtx); - malloc_mutex_unlock(&mtx); -} -#endif -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/lib/jemalloc-3.6.0/include/jemalloc/internal/mutex.h b/lib/jemalloc-3.6.0/include/jemalloc/internal/mutex.h deleted file mode 100644 index de44e14..0000000 --- a/lib/jemalloc-3.6.0/include/jemalloc/internal/mutex.h +++ /dev/null @@ -1,99 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct malloc_mutex_s malloc_mutex_t; - -#ifdef _WIN32 -# define MALLOC_MUTEX_INITIALIZER -#elif (defined(JEMALLOC_OSSPIN)) -# define MALLOC_MUTEX_INITIALIZER {0} -#elif (defined(JEMALLOC_MUTEX_INIT_CB)) -# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER, NULL} -#else -# if (defined(PTHREAD_MUTEX_ADAPTIVE_NP) && \ - defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP)) -# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP -# define MALLOC_MUTEX_INITIALIZER {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP} -# else -# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT -# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER} -# endif -#endif - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -struct malloc_mutex_s { -#ifdef _WIN32 - CRITICAL_SECTION lock; -#elif (defined(JEMALLOC_OSSPIN)) - OSSpinLock lock; -#elif (defined(JEMALLOC_MUTEX_INIT_CB)) - pthread_mutex_t lock; - malloc_mutex_t *postponed_next; -#else - pthread_mutex_t lock; -#endif -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -#ifdef JEMALLOC_LAZY_LOCK -extern bool isthreaded; -#else -# undef isthreaded /* Undo private_namespace.h definition. 
*/ -# define isthreaded true -#endif - -bool malloc_mutex_init(malloc_mutex_t *mutex); -void malloc_mutex_prefork(malloc_mutex_t *mutex); -void malloc_mutex_postfork_parent(malloc_mutex_t *mutex); -void malloc_mutex_postfork_child(malloc_mutex_t *mutex); -bool mutex_boot(void); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -void malloc_mutex_lock(malloc_mutex_t *mutex); -void malloc_mutex_unlock(malloc_mutex_t *mutex); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_)) -JEMALLOC_INLINE void -malloc_mutex_lock(malloc_mutex_t *mutex) -{ - - if (isthreaded) { -#ifdef _WIN32 - EnterCriticalSection(&mutex->lock); -#elif (defined(JEMALLOC_OSSPIN)) - OSSpinLockLock(&mutex->lock); -#else - pthread_mutex_lock(&mutex->lock); -#endif - } -} - -JEMALLOC_INLINE void -malloc_mutex_unlock(malloc_mutex_t *mutex) -{ - - if (isthreaded) { -#ifdef _WIN32 - LeaveCriticalSection(&mutex->lock); -#elif (defined(JEMALLOC_OSSPIN)) - OSSpinLockUnlock(&mutex->lock); -#else - pthread_mutex_unlock(&mutex->lock); -#endif - } -} -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/lib/jemalloc-3.6.0/include/jemalloc/internal/private_namespace.sh b/lib/jemalloc-3.6.0/include/jemalloc/internal/private_namespace.sh deleted file mode 100755 index cd25eb3..0000000 --- a/lib/jemalloc-3.6.0/include/jemalloc/internal/private_namespace.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/sh - -for symbol in `cat $1` ; do - echo "#define ${symbol} JEMALLOC_N(${symbol})" -done diff --git a/lib/jemalloc-3.6.0/include/jemalloc/internal/private_symbols.txt b/lib/jemalloc-3.6.0/include/jemalloc/internal/private_symbols.txt deleted file mode 100644 index 93516d2..0000000 --- a/lib/jemalloc-3.6.0/include/jemalloc/internal/private_symbols.txt +++ /dev/null @@ -1,413 +0,0 @@ -a0calloc -a0free -a0malloc -arena_alloc_junk_small -arena_bin_index -arena_bin_info -arena_boot -arena_dalloc -arena_dalloc_bin -arena_dalloc_bin_locked -arena_dalloc_junk_large -arena_dalloc_junk_small -arena_dalloc_large -arena_dalloc_large_locked -arena_dalloc_small -arena_dss_prec_get -arena_dss_prec_set -arena_malloc -arena_malloc_large -arena_malloc_small -arena_mapbits_allocated_get -arena_mapbits_binind_get -arena_mapbits_dirty_get -arena_mapbits_get -arena_mapbits_large_binind_set -arena_mapbits_large_get -arena_mapbits_large_set -arena_mapbits_large_size_get -arena_mapbits_small_runind_get -arena_mapbits_small_set -arena_mapbits_unallocated_set -arena_mapbits_unallocated_size_get -arena_mapbits_unallocated_size_set -arena_mapbits_unzeroed_get -arena_mapbits_unzeroed_set -arena_mapbitsp_get -arena_mapbitsp_read -arena_mapbitsp_write -arena_mapp_get -arena_maxclass -arena_new -arena_palloc -arena_postfork_child -arena_postfork_parent -arena_prefork -arena_prof_accum -arena_prof_accum_impl -arena_prof_accum_locked -arena_prof_ctx_get -arena_prof_ctx_set -arena_prof_promoted -arena_ptr_small_binind_get -arena_purge_all -arena_quarantine_junk_small -arena_ralloc -arena_ralloc_junk_large -arena_ralloc_no_move -arena_redzone_corruption -arena_run_regind -arena_salloc -arena_stats_merge -arena_tcache_fill_small -arenas -arenas_booted -arenas_cleanup -arenas_extend -arenas_initialized -arenas_lock -arenas_tls -arenas_tsd -arenas_tsd_boot -arenas_tsd_cleanup_wrapper -arenas_tsd_get -arenas_tsd_get_wrapper -arenas_tsd_init_head 
-arenas_tsd_set -atomic_add_u -atomic_add_uint32 -atomic_add_uint64 -atomic_add_z -atomic_sub_u -atomic_sub_uint32 -atomic_sub_uint64 -atomic_sub_z -base_alloc -base_boot -base_calloc -base_node_alloc -base_node_dealloc -base_postfork_child -base_postfork_parent -base_prefork -bitmap_full -bitmap_get -bitmap_info_init -bitmap_info_ngroups -bitmap_init -bitmap_set -bitmap_sfu -bitmap_size -bitmap_unset -bt_init -buferror -choose_arena -choose_arena_hard -chunk_alloc -chunk_alloc_dss -chunk_alloc_mmap -chunk_boot -chunk_dealloc -chunk_dealloc_mmap -chunk_dss_boot -chunk_dss_postfork_child -chunk_dss_postfork_parent -chunk_dss_prec_get -chunk_dss_prec_set -chunk_dss_prefork -chunk_in_dss -chunk_npages -chunk_postfork_child -chunk_postfork_parent -chunk_prefork -chunk_unmap -chunks_mtx -chunks_rtree -chunksize -chunksize_mask -ckh_bucket_search -ckh_count -ckh_delete -ckh_evict_reloc_insert -ckh_insert -ckh_isearch -ckh_iter -ckh_new -ckh_pointer_hash -ckh_pointer_keycomp -ckh_rebuild -ckh_remove -ckh_search -ckh_string_hash -ckh_string_keycomp -ckh_try_bucket_insert -ckh_try_insert -ctl_boot -ctl_bymib -ctl_byname -ctl_nametomib -ctl_postfork_child -ctl_postfork_parent -ctl_prefork -dss_prec_names -extent_tree_ad_first -extent_tree_ad_insert -extent_tree_ad_iter -extent_tree_ad_iter_recurse -extent_tree_ad_iter_start -extent_tree_ad_last -extent_tree_ad_new -extent_tree_ad_next -extent_tree_ad_nsearch -extent_tree_ad_prev -extent_tree_ad_psearch -extent_tree_ad_remove -extent_tree_ad_reverse_iter -extent_tree_ad_reverse_iter_recurse -extent_tree_ad_reverse_iter_start -extent_tree_ad_search -extent_tree_szad_first -extent_tree_szad_insert -extent_tree_szad_iter -extent_tree_szad_iter_recurse -extent_tree_szad_iter_start -extent_tree_szad_last -extent_tree_szad_new -extent_tree_szad_next -extent_tree_szad_nsearch -extent_tree_szad_prev -extent_tree_szad_psearch -extent_tree_szad_remove -extent_tree_szad_reverse_iter -extent_tree_szad_reverse_iter_recurse -extent_tree_szad_reverse_iter_start -extent_tree_szad_search -get_errno -hash -hash_fmix_32 -hash_fmix_64 -hash_get_block_32 -hash_get_block_64 -hash_rotl_32 -hash_rotl_64 -hash_x64_128 -hash_x86_128 -hash_x86_32 -huge_allocated -huge_boot -huge_dalloc -huge_dalloc_junk -huge_dss_prec_get -huge_malloc -huge_mtx -huge_ndalloc -huge_nmalloc -huge_palloc -huge_postfork_child -huge_postfork_parent -huge_prefork -huge_prof_ctx_get -huge_prof_ctx_set -huge_ralloc -huge_ralloc_no_move -huge_salloc -iallocm -icalloc -icalloct -idalloc -idalloct -imalloc -imalloct -ipalloc -ipalloct -iqalloc -iqalloct -iralloc -iralloct -iralloct_realign -isalloc -isthreaded -ivsalloc -ixalloc -jemalloc_postfork_child -jemalloc_postfork_parent -jemalloc_prefork -malloc_cprintf -malloc_mutex_init -malloc_mutex_lock -malloc_mutex_postfork_child -malloc_mutex_postfork_parent -malloc_mutex_prefork -malloc_mutex_unlock -malloc_printf -malloc_snprintf -malloc_strtoumax -malloc_tsd_boot -malloc_tsd_cleanup_register -malloc_tsd_dalloc -malloc_tsd_malloc -malloc_tsd_no_cleanup -malloc_vcprintf -malloc_vsnprintf -malloc_write -map_bias -mb_write -mutex_boot -narenas_auto -narenas_total -narenas_total_get -ncpus -nhbins -opt_abort -opt_dss -opt_junk -opt_lg_chunk -opt_lg_dirty_mult -opt_lg_prof_interval -opt_lg_prof_sample -opt_lg_tcache_max -opt_narenas -opt_prof -opt_prof_accum -opt_prof_active -opt_prof_final -opt_prof_gdump -opt_prof_leak -opt_prof_prefix -opt_quarantine -opt_redzone -opt_stats_print -opt_tcache -opt_utrace -opt_valgrind -opt_xmalloc -opt_zero -p2rz 
-pages_purge -pow2_ceil -prof_backtrace -prof_boot0 -prof_boot1 -prof_boot2 -prof_bt_count -prof_ctx_get -prof_ctx_set -prof_dump_open -prof_free -prof_gdump -prof_idump -prof_interval -prof_lookup -prof_malloc -prof_mdump -prof_postfork_child -prof_postfork_parent -prof_prefork -prof_promote -prof_realloc -prof_sample_accum_update -prof_sample_threshold_update -prof_tdata_booted -prof_tdata_cleanup -prof_tdata_get -prof_tdata_init -prof_tdata_initialized -prof_tdata_tls -prof_tdata_tsd -prof_tdata_tsd_boot -prof_tdata_tsd_cleanup_wrapper -prof_tdata_tsd_get -prof_tdata_tsd_get_wrapper -prof_tdata_tsd_init_head -prof_tdata_tsd_set -quarantine -quarantine_alloc_hook -quarantine_boot -quarantine_booted -quarantine_cleanup -quarantine_init -quarantine_tls -quarantine_tsd -quarantine_tsd_boot -quarantine_tsd_cleanup_wrapper -quarantine_tsd_get -quarantine_tsd_get_wrapper -quarantine_tsd_init_head -quarantine_tsd_set -register_zone -rtree_delete -rtree_get -rtree_get_locked -rtree_new -rtree_postfork_child -rtree_postfork_parent -rtree_prefork -rtree_set -s2u -sa2u -set_errno -small_size2bin -stats_cactive -stats_cactive_add -stats_cactive_get -stats_cactive_sub -stats_chunks -stats_print -tcache_alloc_easy -tcache_alloc_large -tcache_alloc_small -tcache_alloc_small_hard -tcache_arena_associate -tcache_arena_dissociate -tcache_bin_flush_large -tcache_bin_flush_small -tcache_bin_info -tcache_boot0 -tcache_boot1 -tcache_booted -tcache_create -tcache_dalloc_large -tcache_dalloc_small -tcache_destroy -tcache_enabled_booted -tcache_enabled_get -tcache_enabled_initialized -tcache_enabled_set -tcache_enabled_tls -tcache_enabled_tsd -tcache_enabled_tsd_boot -tcache_enabled_tsd_cleanup_wrapper -tcache_enabled_tsd_get -tcache_enabled_tsd_get_wrapper -tcache_enabled_tsd_init_head -tcache_enabled_tsd_set -tcache_event -tcache_event_hard -tcache_flush -tcache_get -tcache_initialized -tcache_maxclass -tcache_salloc -tcache_stats_merge -tcache_thread_cleanup -tcache_tls -tcache_tsd -tcache_tsd_boot -tcache_tsd_cleanup_wrapper -tcache_tsd_get -tcache_tsd_get_wrapper -tcache_tsd_init_head -tcache_tsd_set -thread_allocated_booted -thread_allocated_initialized -thread_allocated_tls -thread_allocated_tsd -thread_allocated_tsd_boot -thread_allocated_tsd_cleanup_wrapper -thread_allocated_tsd_get -thread_allocated_tsd_get_wrapper -thread_allocated_tsd_init_head -thread_allocated_tsd_set -tsd_init_check_recursion -tsd_init_finish -u2rz diff --git a/lib/jemalloc-3.6.0/include/jemalloc/internal/private_unnamespace.sh b/lib/jemalloc-3.6.0/include/jemalloc/internal/private_unnamespace.sh deleted file mode 100755 index 23fed8e..0000000 --- a/lib/jemalloc-3.6.0/include/jemalloc/internal/private_unnamespace.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/sh - -for symbol in `cat $1` ; do - echo "#undef ${symbol}" -done diff --git a/lib/jemalloc-3.6.0/include/jemalloc/internal/prng.h b/lib/jemalloc-3.6.0/include/jemalloc/internal/prng.h deleted file mode 100644 index 7b2b065..0000000 --- a/lib/jemalloc-3.6.0/include/jemalloc/internal/prng.h +++ /dev/null @@ -1,60 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -/* - * Simple linear congruential pseudo-random number generator: - * - * prng(y) = (a*x + c) % m - * - * where the following constants ensure maximal period: - * - * a == Odd number (relatively prime to 2^n), and (a-1) is a multiple of 4. - * c == Odd number (relatively prime to 2^n). - * m == 2^32 - * - * See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 
17 for details on these constraints. - * - * This choice of m has the disadvantage that the quality of the bits is - * proportional to bit position. For example. the lowest bit has a cycle of 2, - * the next has a cycle of 4, etc. For this reason, we prefer to use the upper - * bits. - * - * Macro parameters: - * uint32_t r : Result. - * unsigned lg_range : (0..32], number of least significant bits to return. - * uint32_t state : Seed value. - * const uint32_t a, c : See above discussion. - */ -#define prng32(r, lg_range, state, a, c) do { \ - assert(lg_range > 0); \ - assert(lg_range <= 32); \ - \ - r = (state * (a)) + (c); \ - state = r; \ - r >>= (32 - lg_range); \ -} while (false) - -/* Same as prng32(), but 64 bits of pseudo-randomness, using uint64_t. */ -#define prng64(r, lg_range, state, a, c) do { \ - assert(lg_range > 0); \ - assert(lg_range <= 64); \ - \ - r = (state * (a)) + (c); \ - state = r; \ - r >>= (64 - lg_range); \ -} while (false) - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/lib/jemalloc-3.6.0/include/jemalloc/internal/prof.h b/lib/jemalloc-3.6.0/include/jemalloc/internal/prof.h deleted file mode 100644 index 6f162d2..0000000 --- a/lib/jemalloc-3.6.0/include/jemalloc/internal/prof.h +++ /dev/null @@ -1,613 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct prof_bt_s prof_bt_t; -typedef struct prof_cnt_s prof_cnt_t; -typedef struct prof_thr_cnt_s prof_thr_cnt_t; -typedef struct prof_ctx_s prof_ctx_t; -typedef struct prof_tdata_s prof_tdata_t; - -/* Option defaults. */ -#ifdef JEMALLOC_PROF -# define PROF_PREFIX_DEFAULT "jeprof" -#else -# define PROF_PREFIX_DEFAULT "" -#endif -#define LG_PROF_SAMPLE_DEFAULT 19 -#define LG_PROF_INTERVAL_DEFAULT -1 - -/* - * Hard limit on stack backtrace depth. The version of prof_backtrace() that - * is based on __builtin_return_address() necessarily has a hard-coded number - * of backtrace frame handlers, and should be kept in sync with this setting. - */ -#define PROF_BT_MAX 128 - -/* Maximum number of backtraces to store in each per thread LRU cache. */ -#define PROF_TCMAX 1024 - -/* Initial hash table size. */ -#define PROF_CKH_MINITEMS 64 - -/* Size of memory buffer to use when writing dump files. */ -#define PROF_DUMP_BUFSIZE 65536 - -/* Size of stack-allocated buffer used by prof_printf(). */ -#define PROF_PRINTF_BUFSIZE 128 - -/* - * Number of mutexes shared among all ctx's. No space is allocated for these - * unless profiling is enabled, so it's okay to over-provision. - */ -#define PROF_NCTX_LOCKS 1024 - -/* - * prof_tdata pointers close to NULL are used to encode state information that - * is used for cleaning up during thread shutdown. 
- */ -#define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1) -#define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2) -#define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -struct prof_bt_s { - /* Backtrace, stored as len program counters. */ - void **vec; - unsigned len; -}; - -#ifdef JEMALLOC_PROF_LIBGCC -/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */ -typedef struct { - prof_bt_t *bt; - unsigned nignore; - unsigned max; -} prof_unwind_data_t; -#endif - -struct prof_cnt_s { - /* - * Profiling counters. An allocation/deallocation pair can operate on - * different prof_thr_cnt_t objects that are linked into the same - * prof_ctx_t cnts_ql, so it is possible for the cur* counters to go - * negative. In principle it is possible for the *bytes counters to - * overflow/underflow, but a general solution would require something - * like 128-bit counters; this implementation doesn't bother to solve - * that problem. - */ - int64_t curobjs; - int64_t curbytes; - uint64_t accumobjs; - uint64_t accumbytes; -}; - -struct prof_thr_cnt_s { - /* Linkage into prof_ctx_t's cnts_ql. */ - ql_elm(prof_thr_cnt_t) cnts_link; - - /* Linkage into thread's LRU. */ - ql_elm(prof_thr_cnt_t) lru_link; - - /* - * Associated context. If a thread frees an object that it did not - * allocate, it is possible that the context is not cached in the - * thread's hash table, in which case it must be able to look up the - * context, insert a new prof_thr_cnt_t into the thread's hash table, - * and link it into the prof_ctx_t's cnts_ql. - */ - prof_ctx_t *ctx; - - /* - * Threads use memory barriers to update the counters. Since there is - * only ever one writer, the only challenge is for the reader to get a - * consistent read of the counters. - * - * The writer uses this series of operations: - * - * 1) Increment epoch to an odd number. - * 2) Update counters. - * 3) Increment epoch to an even number. - * - * The reader must assure 1) that the epoch is even while it reads the - * counters, and 2) that the epoch doesn't change between the time it - * starts and finishes reading the counters. - */ - unsigned epoch; - - /* Profiling counters. */ - prof_cnt_t cnts; -}; - -struct prof_ctx_s { - /* Associated backtrace. */ - prof_bt_t *bt; - - /* Protects nlimbo, cnt_merged, and cnts_ql. */ - malloc_mutex_t *lock; - - /* - * Number of threads that currently cause this ctx to be in a state of - * limbo due to one of: - * - Initializing per thread counters associated with this ctx. - * - Preparing to destroy this ctx. - * - Dumping a heap profile that includes this ctx. - * nlimbo must be 1 (single destroyer) in order to safely destroy the - * ctx. - */ - unsigned nlimbo; - - /* Temporary storage for summation during dump. */ - prof_cnt_t cnt_summed; - - /* When threads exit, they merge their stats into cnt_merged. */ - prof_cnt_t cnt_merged; - - /* - * List of profile counters, one for each thread that has allocated in - * this context. - */ - ql_head(prof_thr_cnt_t) cnts_ql; - - /* Linkage for list of contexts to be dumped. */ - ql_elm(prof_ctx_t) dump_link; -}; -typedef ql_head(prof_ctx_t) prof_ctx_list_t; - -struct prof_tdata_s { - /* - * Hash of (prof_bt_t *)-->(prof_thr_cnt_t *). Each thread keeps a - * cache of backtraces, with associated thread-specific prof_thr_cnt_t - * objects. 
Other threads may read the prof_thr_cnt_t contents, but no - * others will ever write them. - * - * Upon thread exit, the thread must merge all the prof_thr_cnt_t - * counter data into the associated prof_ctx_t objects, and unlink/free - * the prof_thr_cnt_t objects. - */ - ckh_t bt2cnt; - - /* LRU for contents of bt2cnt. */ - ql_head(prof_thr_cnt_t) lru_ql; - - /* Backtrace vector, used for calls to prof_backtrace(). */ - void **vec; - - /* Sampling state. */ - uint64_t prng_state; - uint64_t threshold; - uint64_t accum; - - /* State used to avoid dumping while operating on prof internals. */ - bool enq; - bool enq_idump; - bool enq_gdump; -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -extern bool opt_prof; -/* - * Even if opt_prof is true, sampling can be temporarily disabled by setting - * opt_prof_active to false. No locking is used when updating opt_prof_active, - * so there are no guarantees regarding how long it will take for all threads - * to notice state changes. - */ -extern bool opt_prof_active; -extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */ -extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */ -extern bool opt_prof_gdump; /* High-water memory dumping. */ -extern bool opt_prof_final; /* Final profile dumping. */ -extern bool opt_prof_leak; /* Dump leak summary at exit. */ -extern bool opt_prof_accum; /* Report cumulative bytes. */ -extern char opt_prof_prefix[ - /* Minimize memory bloat for non-prof builds. */ -#ifdef JEMALLOC_PROF - PATH_MAX + -#endif - 1]; - -/* - * Profile dump interval, measured in bytes allocated. Each arena triggers a - * profile dump when it reaches this threshold. The effect is that the - * interval between profile dumps averages prof_interval, though the actual - * interval between dumps will tend to be sporadic, and the interval will be a - * maximum of approximately (prof_interval * narenas). - */ -extern uint64_t prof_interval; - -/* - * If true, promote small sampled objects to large objects, since small run - * headers do not have embedded profile context pointers. - */ -extern bool prof_promote; - -void bt_init(prof_bt_t *bt, void **vec); -void prof_backtrace(prof_bt_t *bt, unsigned nignore); -prof_thr_cnt_t *prof_lookup(prof_bt_t *bt); -#ifdef JEMALLOC_JET -size_t prof_bt_count(void); -typedef int (prof_dump_open_t)(bool, const char *); -extern prof_dump_open_t *prof_dump_open; -#endif -void prof_idump(void); -bool prof_mdump(const char *filename); -void prof_gdump(void); -prof_tdata_t *prof_tdata_init(void); -void prof_tdata_cleanup(void *arg); -void prof_boot0(void); -void prof_boot1(void); -bool prof_boot2(void); -void prof_prefork(void); -void prof_postfork_parent(void); -void prof_postfork_child(void); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#define PROF_ALLOC_PREP(nignore, size, ret) do { \ - prof_tdata_t *prof_tdata; \ - prof_bt_t bt; \ - \ - assert(size == s2u(size)); \ - \ - prof_tdata = prof_tdata_get(true); \ - if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) { \ - if (prof_tdata != NULL) \ - ret = (prof_thr_cnt_t *)(uintptr_t)1U; \ - else \ - ret = NULL; \ - break; \ - } \ - \ - if (opt_prof_active == false) { \ - /* Sampling is currently inactive, so avoid sampling. 
*/\ - ret = (prof_thr_cnt_t *)(uintptr_t)1U; \ - } else if (opt_lg_prof_sample == 0) { \ - /* Don't bother with sampling logic, since sampling */\ - /* interval is 1. */\ - bt_init(&bt, prof_tdata->vec); \ - prof_backtrace(&bt, nignore); \ - ret = prof_lookup(&bt); \ - } else { \ - if (prof_tdata->threshold == 0) { \ - /* Initialize. Seed the prng differently for */\ - /* each thread. */\ - prof_tdata->prng_state = \ - (uint64_t)(uintptr_t)&size; \ - prof_sample_threshold_update(prof_tdata); \ - } \ - \ - /* Determine whether to capture a backtrace based on */\ - /* whether size is enough for prof_accum to reach */\ - /* prof_tdata->threshold. However, delay updating */\ - /* these variables until prof_{m,re}alloc(), because */\ - /* we don't know for sure that the allocation will */\ - /* succeed. */\ - /* */\ - /* Use subtraction rather than addition to avoid */\ - /* potential integer overflow. */\ - if (size >= prof_tdata->threshold - \ - prof_tdata->accum) { \ - bt_init(&bt, prof_tdata->vec); \ - prof_backtrace(&bt, nignore); \ - ret = prof_lookup(&bt); \ - } else \ - ret = (prof_thr_cnt_t *)(uintptr_t)1U; \ - } \ -} while (0) - -#ifndef JEMALLOC_ENABLE_INLINE -malloc_tsd_protos(JEMALLOC_ATTR(unused), prof_tdata, prof_tdata_t *) - -prof_tdata_t *prof_tdata_get(bool create); -void prof_sample_threshold_update(prof_tdata_t *prof_tdata); -prof_ctx_t *prof_ctx_get(const void *ptr); -void prof_ctx_set(const void *ptr, size_t usize, prof_ctx_t *ctx); -bool prof_sample_accum_update(size_t size); -void prof_malloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt); -void prof_realloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt, - size_t old_usize, prof_ctx_t *old_ctx); -void prof_free(const void *ptr, size_t size); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PROF_C_)) -/* Thread-specific backtrace cache, used to reduce bt2ctx contention. */ -malloc_tsd_externs(prof_tdata, prof_tdata_t *) -malloc_tsd_funcs(JEMALLOC_INLINE, prof_tdata, prof_tdata_t *, NULL, - prof_tdata_cleanup) - -JEMALLOC_INLINE prof_tdata_t * -prof_tdata_get(bool create) -{ - prof_tdata_t *prof_tdata; - - cassert(config_prof); - - prof_tdata = *prof_tdata_tsd_get(); - if (create && prof_tdata == NULL) - prof_tdata = prof_tdata_init(); - - return (prof_tdata); -} - -JEMALLOC_INLINE void -prof_sample_threshold_update(prof_tdata_t *prof_tdata) -{ - /* - * The body of this function is compiled out unless heap profiling is - * enabled, so that it is possible to compile jemalloc with floating - * point support completely disabled. Avoiding floating point code is - * important on memory-constrained systems, but it also enables a - * workaround for versions of glibc that don't properly save/restore - * floating point registers during dynamic lazy symbol loading (which - * internally calls into whatever malloc implementation happens to be - * integrated into the application). Note that some compilers (e.g. - * gcc 4.8) may use floating point registers for fast memory moves, so - * jemalloc must be compiled with such optimizations disabled (e.g. - * -mno-sse) in order for the workaround to be complete. - */ -#ifdef JEMALLOC_PROF - uint64_t r; - double u; - - cassert(config_prof); - - /* - * Compute sample threshold as a geometrically distributed random - * variable with mean (2^opt_lg_prof_sample). 
- * - * __ __ - * | log(u) | 1 - * prof_tdata->threshold = | -------- |, where p = ------------------- - * | log(1-p) | opt_lg_prof_sample - * 2 - * - * For more information on the math, see: - * - * Non-Uniform Random Variate Generation - * Luc Devroye - * Springer-Verlag, New York, 1986 - * pp 500 - * (http://luc.devroye.org/rnbookindex.html) - */ - prng64(r, 53, prof_tdata->prng_state, - UINT64_C(6364136223846793005), UINT64_C(1442695040888963407)); - u = (double)r * (1.0/9007199254740992.0L); - prof_tdata->threshold = (uint64_t)(log(u) / - log(1.0 - (1.0 / (double)((uint64_t)1U << opt_lg_prof_sample)))) - + (uint64_t)1U; -#endif -} - -JEMALLOC_INLINE prof_ctx_t * -prof_ctx_get(const void *ptr) -{ - prof_ctx_t *ret; - arena_chunk_t *chunk; - - cassert(config_prof); - assert(ptr != NULL); - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - if (chunk != ptr) { - /* Region. */ - ret = arena_prof_ctx_get(ptr); - } else - ret = huge_prof_ctx_get(ptr); - - return (ret); -} - -JEMALLOC_INLINE void -prof_ctx_set(const void *ptr, size_t usize, prof_ctx_t *ctx) -{ - arena_chunk_t *chunk; - - cassert(config_prof); - assert(ptr != NULL); - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - if (chunk != ptr) { - /* Region. */ - arena_prof_ctx_set(ptr, usize, ctx); - } else - huge_prof_ctx_set(ptr, ctx); -} - -JEMALLOC_INLINE bool -prof_sample_accum_update(size_t size) -{ - prof_tdata_t *prof_tdata; - - cassert(config_prof); - /* Sampling logic is unnecessary if the interval is 1. */ - assert(opt_lg_prof_sample != 0); - - prof_tdata = prof_tdata_get(false); - if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) - return (true); - - /* Take care to avoid integer overflow. */ - if (size >= prof_tdata->threshold - prof_tdata->accum) { - prof_tdata->accum -= (prof_tdata->threshold - size); - /* Compute new sample threshold. */ - prof_sample_threshold_update(prof_tdata); - while (prof_tdata->accum >= prof_tdata->threshold) { - prof_tdata->accum -= prof_tdata->threshold; - prof_sample_threshold_update(prof_tdata); - } - return (false); - } else { - prof_tdata->accum += size; - return (true); - } -} - -JEMALLOC_INLINE void -prof_malloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt) -{ - - cassert(config_prof); - assert(ptr != NULL); - assert(usize == isalloc(ptr, true)); - - if (opt_lg_prof_sample != 0) { - if (prof_sample_accum_update(usize)) { - /* - * Don't sample. For malloc()-like allocation, it is - * always possible to tell in advance how large an - * object's usable size will be, so there should never - * be a difference between the usize passed to - * PROF_ALLOC_PREP() and prof_malloc(). - */ - assert((uintptr_t)cnt == (uintptr_t)1U); - } - } - - if ((uintptr_t)cnt > (uintptr_t)1U) { - prof_ctx_set(ptr, usize, cnt->ctx); - - cnt->epoch++; - /*********/ - mb_write(); - /*********/ - cnt->cnts.curobjs++; - cnt->cnts.curbytes += usize; - if (opt_prof_accum) { - cnt->cnts.accumobjs++; - cnt->cnts.accumbytes += usize; - } - /*********/ - mb_write(); - /*********/ - cnt->epoch++; - /*********/ - mb_write(); - /*********/ - } else - prof_ctx_set(ptr, usize, (prof_ctx_t *)(uintptr_t)1U); -} - -JEMALLOC_INLINE void -prof_realloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt, - size_t old_usize, prof_ctx_t *old_ctx) -{ - prof_thr_cnt_t *told_cnt; - - cassert(config_prof); - assert(ptr != NULL || (uintptr_t)cnt <= (uintptr_t)1U); - - if (ptr != NULL) { - assert(usize == isalloc(ptr, true)); - if (opt_lg_prof_sample != 0) { - if (prof_sample_accum_update(usize)) { - /* - * Don't sample. 
The usize passed to - * PROF_ALLOC_PREP() was larger than what - * actually got allocated, so a backtrace was - * captured for this allocation, even though - * its actual usize was insufficient to cross - * the sample threshold. - */ - cnt = (prof_thr_cnt_t *)(uintptr_t)1U; - } - } - } - - if ((uintptr_t)old_ctx > (uintptr_t)1U) { - told_cnt = prof_lookup(old_ctx->bt); - if (told_cnt == NULL) { - /* - * It's too late to propagate OOM for this realloc(), - * so operate directly on old_cnt->ctx->cnt_merged. - */ - malloc_mutex_lock(old_ctx->lock); - old_ctx->cnt_merged.curobjs--; - old_ctx->cnt_merged.curbytes -= old_usize; - malloc_mutex_unlock(old_ctx->lock); - told_cnt = (prof_thr_cnt_t *)(uintptr_t)1U; - } - } else - told_cnt = (prof_thr_cnt_t *)(uintptr_t)1U; - - if ((uintptr_t)told_cnt > (uintptr_t)1U) - told_cnt->epoch++; - if ((uintptr_t)cnt > (uintptr_t)1U) { - prof_ctx_set(ptr, usize, cnt->ctx); - cnt->epoch++; - } else if (ptr != NULL) - prof_ctx_set(ptr, usize, (prof_ctx_t *)(uintptr_t)1U); - /*********/ - mb_write(); - /*********/ - if ((uintptr_t)told_cnt > (uintptr_t)1U) { - told_cnt->cnts.curobjs--; - told_cnt->cnts.curbytes -= old_usize; - } - if ((uintptr_t)cnt > (uintptr_t)1U) { - cnt->cnts.curobjs++; - cnt->cnts.curbytes += usize; - if (opt_prof_accum) { - cnt->cnts.accumobjs++; - cnt->cnts.accumbytes += usize; - } - } - /*********/ - mb_write(); - /*********/ - if ((uintptr_t)told_cnt > (uintptr_t)1U) - told_cnt->epoch++; - if ((uintptr_t)cnt > (uintptr_t)1U) - cnt->epoch++; - /*********/ - mb_write(); /* Not strictly necessary. */ -} - -JEMALLOC_INLINE void -prof_free(const void *ptr, size_t size) -{ - prof_ctx_t *ctx = prof_ctx_get(ptr); - - cassert(config_prof); - - if ((uintptr_t)ctx > (uintptr_t)1) { - prof_thr_cnt_t *tcnt; - assert(size == isalloc(ptr, true)); - tcnt = prof_lookup(ctx->bt); - - if (tcnt != NULL) { - tcnt->epoch++; - /*********/ - mb_write(); - /*********/ - tcnt->cnts.curobjs--; - tcnt->cnts.curbytes -= size; - /*********/ - mb_write(); - /*********/ - tcnt->epoch++; - /*********/ - mb_write(); - /*********/ - } else { - /* - * OOM during free() cannot be propagated, so operate - * directly on cnt->ctx->cnt_merged. 
- */ - malloc_mutex_lock(ctx->lock); - ctx->cnt_merged.curobjs--; - ctx->cnt_merged.curbytes -= size; - malloc_mutex_unlock(ctx->lock); - } - } -} -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/lib/jemalloc-3.6.0/include/jemalloc/internal/public_namespace.sh b/lib/jemalloc-3.6.0/include/jemalloc/internal/public_namespace.sh deleted file mode 100755 index 362109f..0000000 --- a/lib/jemalloc-3.6.0/include/jemalloc/internal/public_namespace.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/sh - -for nm in `cat $1` ; do - n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'` - echo "#define je_${n} JEMALLOC_N(${n})" -done diff --git a/lib/jemalloc-3.6.0/include/jemalloc/internal/public_unnamespace.sh b/lib/jemalloc-3.6.0/include/jemalloc/internal/public_unnamespace.sh deleted file mode 100755 index 4239d17..0000000 --- a/lib/jemalloc-3.6.0/include/jemalloc/internal/public_unnamespace.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/sh - -for nm in `cat $1` ; do - n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'` - echo "#undef je_${n}" -done diff --git a/lib/jemalloc-3.6.0/include/jemalloc/internal/ql.h b/lib/jemalloc-3.6.0/include/jemalloc/internal/ql.h deleted file mode 100644 index f70c5f6..0000000 --- a/lib/jemalloc-3.6.0/include/jemalloc/internal/ql.h +++ /dev/null @@ -1,83 +0,0 @@ -/* - * List definitions. - */ -#define ql_head(a_type) \ -struct { \ - a_type *qlh_first; \ -} - -#define ql_head_initializer(a_head) {NULL} - -#define ql_elm(a_type) qr(a_type) - -/* List functions. */ -#define ql_new(a_head) do { \ - (a_head)->qlh_first = NULL; \ -} while (0) - -#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field) - -#define ql_first(a_head) ((a_head)->qlh_first) - -#define ql_last(a_head, a_field) \ - ((ql_first(a_head) != NULL) \ - ? qr_prev(ql_first(a_head), a_field) : NULL) - -#define ql_next(a_head, a_elm, a_field) \ - ((ql_last(a_head, a_field) != (a_elm)) \ - ? qr_next((a_elm), a_field) : NULL) - -#define ql_prev(a_head, a_elm, a_field) \ - ((ql_first(a_head) != (a_elm)) ? 
qr_prev((a_elm), a_field) \ - : NULL) - -#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \ - qr_before_insert((a_qlelm), (a_elm), a_field); \ - if (ql_first(a_head) == (a_qlelm)) { \ - ql_first(a_head) = (a_elm); \ - } \ -} while (0) - -#define ql_after_insert(a_qlelm, a_elm, a_field) \ - qr_after_insert((a_qlelm), (a_elm), a_field) - -#define ql_head_insert(a_head, a_elm, a_field) do { \ - if (ql_first(a_head) != NULL) { \ - qr_before_insert(ql_first(a_head), (a_elm), a_field); \ - } \ - ql_first(a_head) = (a_elm); \ -} while (0) - -#define ql_tail_insert(a_head, a_elm, a_field) do { \ - if (ql_first(a_head) != NULL) { \ - qr_before_insert(ql_first(a_head), (a_elm), a_field); \ - } \ - ql_first(a_head) = qr_next((a_elm), a_field); \ -} while (0) - -#define ql_remove(a_head, a_elm, a_field) do { \ - if (ql_first(a_head) == (a_elm)) { \ - ql_first(a_head) = qr_next(ql_first(a_head), a_field); \ - } \ - if (ql_first(a_head) != (a_elm)) { \ - qr_remove((a_elm), a_field); \ - } else { \ - ql_first(a_head) = NULL; \ - } \ -} while (0) - -#define ql_head_remove(a_head, a_type, a_field) do { \ - a_type *t = ql_first(a_head); \ - ql_remove((a_head), t, a_field); \ -} while (0) - -#define ql_tail_remove(a_head, a_type, a_field) do { \ - a_type *t = ql_last(a_head, a_field); \ - ql_remove((a_head), t, a_field); \ -} while (0) - -#define ql_foreach(a_var, a_head, a_field) \ - qr_foreach((a_var), ql_first(a_head), a_field) - -#define ql_reverse_foreach(a_var, a_head, a_field) \ - qr_reverse_foreach((a_var), ql_first(a_head), a_field) diff --git a/lib/jemalloc-3.6.0/include/jemalloc/internal/qr.h b/lib/jemalloc-3.6.0/include/jemalloc/internal/qr.h deleted file mode 100644 index 602944b..0000000 --- a/lib/jemalloc-3.6.0/include/jemalloc/internal/qr.h +++ /dev/null @@ -1,67 +0,0 @@ -/* Ring definitions. */ -#define qr(a_type) \ -struct { \ - a_type *qre_next; \ - a_type *qre_prev; \ -} - -/* Ring functions. */ -#define qr_new(a_qr, a_field) do { \ - (a_qr)->a_field.qre_next = (a_qr); \ - (a_qr)->a_field.qre_prev = (a_qr); \ -} while (0) - -#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next) - -#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev) - -#define qr_before_insert(a_qrelm, a_qr, a_field) do { \ - (a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \ - (a_qr)->a_field.qre_next = (a_qrelm); \ - (a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \ - (a_qrelm)->a_field.qre_prev = (a_qr); \ -} while (0) - -#define qr_after_insert(a_qrelm, a_qr, a_field) \ - do \ - { \ - (a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \ - (a_qr)->a_field.qre_prev = (a_qrelm); \ - (a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \ - (a_qrelm)->a_field.qre_next = (a_qr); \ - } while (0) - -#define qr_meld(a_qr_a, a_qr_b, a_field) do { \ - void *t; \ - (a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \ - (a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \ - t = (a_qr_a)->a_field.qre_prev; \ - (a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \ - (a_qr_b)->a_field.qre_prev = t; \ -} while (0) - -/* qr_meld() and qr_split() are functionally equivalent, so there's no need to - * have two copies of the code. 
*/ -#define qr_split(a_qr_a, a_qr_b, a_field) \ - qr_meld((a_qr_a), (a_qr_b), a_field) - -#define qr_remove(a_qr, a_field) do { \ - (a_qr)->a_field.qre_prev->a_field.qre_next \ - = (a_qr)->a_field.qre_next; \ - (a_qr)->a_field.qre_next->a_field.qre_prev \ - = (a_qr)->a_field.qre_prev; \ - (a_qr)->a_field.qre_next = (a_qr); \ - (a_qr)->a_field.qre_prev = (a_qr); \ -} while (0) - -#define qr_foreach(var, a_qr, a_field) \ - for ((var) = (a_qr); \ - (var) != NULL; \ - (var) = (((var)->a_field.qre_next != (a_qr)) \ - ? (var)->a_field.qre_next : NULL)) - -#define qr_reverse_foreach(var, a_qr, a_field) \ - for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \ - (var) != NULL; \ - (var) = (((var) != (a_qr)) \ - ? (var)->a_field.qre_prev : NULL)) diff --git a/lib/jemalloc-3.6.0/include/jemalloc/internal/quarantine.h b/lib/jemalloc-3.6.0/include/jemalloc/internal/quarantine.h deleted file mode 100644 index 16f677f..0000000 --- a/lib/jemalloc-3.6.0/include/jemalloc/internal/quarantine.h +++ /dev/null @@ -1,67 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct quarantine_obj_s quarantine_obj_t; -typedef struct quarantine_s quarantine_t; - -/* Default per thread quarantine size if valgrind is enabled. */ -#define JEMALLOC_VALGRIND_QUARANTINE_DEFAULT (ZU(1) << 24) - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -struct quarantine_obj_s { - void *ptr; - size_t usize; -}; - -struct quarantine_s { - size_t curbytes; - size_t curobjs; - size_t first; -#define LG_MAXOBJS_INIT 10 - size_t lg_maxobjs; - quarantine_obj_t objs[1]; /* Dynamically sized ring buffer. */ -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -quarantine_t *quarantine_init(size_t lg_maxobjs); -void quarantine(void *ptr); -void quarantine_cleanup(void *arg); -bool quarantine_boot(void); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -malloc_tsd_protos(JEMALLOC_ATTR(unused), quarantine, quarantine_t *) - -void quarantine_alloc_hook(void); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_QUARANTINE_C_)) -malloc_tsd_externs(quarantine, quarantine_t *) -malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, quarantine, quarantine_t *, NULL, - quarantine_cleanup) - -JEMALLOC_ALWAYS_INLINE void -quarantine_alloc_hook(void) -{ - quarantine_t *quarantine; - - assert(config_fill && opt_quarantine); - - quarantine = *quarantine_tsd_get(); - if (quarantine == NULL) - quarantine_init(LG_MAXOBJS_INIT); -} -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ - diff --git a/lib/jemalloc-3.6.0/include/jemalloc/internal/rb.h b/lib/jemalloc-3.6.0/include/jemalloc/internal/rb.h deleted file mode 100644 index 423802e..0000000 --- a/lib/jemalloc-3.6.0/include/jemalloc/internal/rb.h +++ /dev/null @@ -1,969 +0,0 @@ -/*- - ******************************************************************************* - * - * cpp macro implementation of left-leaning 2-3 red-black trees. 
Parent - * pointers are not used, and color bits are stored in the least significant - * bit of right-child pointers (if RB_COMPACT is defined), thus making node - * linkage as compact as is possible for red-black trees. - * - * Usage: - * - * #include - * #include - * #define NDEBUG // (Optional, see assert(3).) - * #include - * #define RB_COMPACT // (Optional, embed color bits in right-child pointers.) - * #include - * ... - * - ******************************************************************************* - */ - -#ifndef RB_H_ -#define RB_H_ - -#ifdef RB_COMPACT -/* Node structure. */ -#define rb_node(a_type) \ -struct { \ - a_type *rbn_left; \ - a_type *rbn_right_red; \ -} -#else -#define rb_node(a_type) \ -struct { \ - a_type *rbn_left; \ - a_type *rbn_right; \ - bool rbn_red; \ -} -#endif - -/* Root structure. */ -#define rb_tree(a_type) \ -struct { \ - a_type *rbt_root; \ - a_type rbt_nil; \ -} - -/* Left accessors. */ -#define rbtn_left_get(a_type, a_field, a_node) \ - ((a_node)->a_field.rbn_left) -#define rbtn_left_set(a_type, a_field, a_node, a_left) do { \ - (a_node)->a_field.rbn_left = a_left; \ -} while (0) - -#ifdef RB_COMPACT -/* Right accessors. */ -#define rbtn_right_get(a_type, a_field, a_node) \ - ((a_type *) (((intptr_t) (a_node)->a_field.rbn_right_red) \ - & ((ssize_t)-2))) -#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \ - (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) a_right) \ - | (((uintptr_t) (a_node)->a_field.rbn_right_red) & ((size_t)1))); \ -} while (0) - -/* Color accessors. */ -#define rbtn_red_get(a_type, a_field, a_node) \ - ((bool) (((uintptr_t) (a_node)->a_field.rbn_right_red) \ - & ((size_t)1))) -#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \ - (a_node)->a_field.rbn_right_red = (a_type *) ((((intptr_t) \ - (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)) \ - | ((ssize_t)a_red)); \ -} while (0) -#define rbtn_red_set(a_type, a_field, a_node) do { \ - (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) \ - (a_node)->a_field.rbn_right_red) | ((size_t)1)); \ -} while (0) -#define rbtn_black_set(a_type, a_field, a_node) do { \ - (a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t) \ - (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)); \ -} while (0) -#else -/* Right accessors. */ -#define rbtn_right_get(a_type, a_field, a_node) \ - ((a_node)->a_field.rbn_right) -#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \ - (a_node)->a_field.rbn_right = a_right; \ -} while (0) - -/* Color accessors. */ -#define rbtn_red_get(a_type, a_field, a_node) \ - ((a_node)->a_field.rbn_red) -#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \ - (a_node)->a_field.rbn_red = (a_red); \ -} while (0) -#define rbtn_red_set(a_type, a_field, a_node) do { \ - (a_node)->a_field.rbn_red = true; \ -} while (0) -#define rbtn_black_set(a_type, a_field, a_node) do { \ - (a_node)->a_field.rbn_red = false; \ -} while (0) -#endif - -/* Node initializer. */ -#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \ - rbtn_left_set(a_type, a_field, (a_node), &(a_rbt)->rbt_nil); \ - rbtn_right_set(a_type, a_field, (a_node), &(a_rbt)->rbt_nil); \ - rbtn_red_set(a_type, a_field, (a_node)); \ -} while (0) - -/* Tree initializer. */ -#define rb_new(a_type, a_field, a_rbt) do { \ - (a_rbt)->rbt_root = &(a_rbt)->rbt_nil; \ - rbt_node_new(a_type, a_field, a_rbt, &(a_rbt)->rbt_nil); \ - rbtn_black_set(a_type, a_field, &(a_rbt)->rbt_nil); \ -} while (0) - -/* Internal utility macros. 
*/ -#define rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do { \ - (r_node) = (a_root); \ - if ((r_node) != &(a_rbt)->rbt_nil) { \ - for (; \ - rbtn_left_get(a_type, a_field, (r_node)) != &(a_rbt)->rbt_nil;\ - (r_node) = rbtn_left_get(a_type, a_field, (r_node))) { \ - } \ - } \ -} while (0) - -#define rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do { \ - (r_node) = (a_root); \ - if ((r_node) != &(a_rbt)->rbt_nil) { \ - for (; rbtn_right_get(a_type, a_field, (r_node)) != \ - &(a_rbt)->rbt_nil; (r_node) = rbtn_right_get(a_type, a_field, \ - (r_node))) { \ - } \ - } \ -} while (0) - -#define rbtn_rotate_left(a_type, a_field, a_node, r_node) do { \ - (r_node) = rbtn_right_get(a_type, a_field, (a_node)); \ - rbtn_right_set(a_type, a_field, (a_node), \ - rbtn_left_get(a_type, a_field, (r_node))); \ - rbtn_left_set(a_type, a_field, (r_node), (a_node)); \ -} while (0) - -#define rbtn_rotate_right(a_type, a_field, a_node, r_node) do { \ - (r_node) = rbtn_left_get(a_type, a_field, (a_node)); \ - rbtn_left_set(a_type, a_field, (a_node), \ - rbtn_right_get(a_type, a_field, (r_node))); \ - rbtn_right_set(a_type, a_field, (r_node), (a_node)); \ -} while (0) - -/* - * The rb_proto() macro generates function prototypes that correspond to the - * functions generated by an equivalently parameterized call to rb_gen(). - */ - -#define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \ -a_attr void \ -a_prefix##new(a_rbt_type *rbtree); \ -a_attr a_type * \ -a_prefix##first(a_rbt_type *rbtree); \ -a_attr a_type * \ -a_prefix##last(a_rbt_type *rbtree); \ -a_attr a_type * \ -a_prefix##next(a_rbt_type *rbtree, a_type *node); \ -a_attr a_type * \ -a_prefix##prev(a_rbt_type *rbtree, a_type *node); \ -a_attr a_type * \ -a_prefix##search(a_rbt_type *rbtree, a_type *key); \ -a_attr a_type * \ -a_prefix##nsearch(a_rbt_type *rbtree, a_type *key); \ -a_attr a_type * \ -a_prefix##psearch(a_rbt_type *rbtree, a_type *key); \ -a_attr void \ -a_prefix##insert(a_rbt_type *rbtree, a_type *node); \ -a_attr void \ -a_prefix##remove(a_rbt_type *rbtree, a_type *node); \ -a_attr a_type * \ -a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \ - a_rbt_type *, a_type *, void *), void *arg); \ -a_attr a_type * \ -a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ - a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg); - -/* - * The rb_gen() macro generates a type-specific red-black tree implementation, - * based on the above cpp macros. - * - * Arguments: - * - * a_attr : Function attribute for generated functions (ex: static). - * a_prefix : Prefix for generated functions (ex: ex_). - * a_rb_type : Type for red-black tree data structure (ex: ex_t). - * a_type : Type for red-black tree node data structure (ex: ex_node_t). - * a_field : Name of red-black tree node linkage (ex: ex_link). - * a_cmp : Node comparison function name, with the following prototype: - * int (a_cmp *)(a_type *a_node, a_type *a_other); - * ^^^^^^ - * or a_key - * Interpretation of comparision function return values: - * -1 : a_node < a_other - * 0 : a_node == a_other - * 1 : a_node > a_other - * In all cases, the a_node or a_key macro argument is the first - * argument to the comparison function, which makes it possible - * to write comparison functions that treat the first argument - * specially. 
- * - * Assuming the following setup: - * - * typedef struct ex_node_s ex_node_t; - * struct ex_node_s { - * rb_node(ex_node_t) ex_link; - * }; - * typedef rb_tree(ex_node_t) ex_t; - * rb_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp) - * - * The following API is generated: - * - * static void - * ex_new(ex_t *tree); - * Description: Initialize a red-black tree structure. - * Args: - * tree: Pointer to an uninitialized red-black tree object. - * - * static ex_node_t * - * ex_first(ex_t *tree); - * static ex_node_t * - * ex_last(ex_t *tree); - * Description: Get the first/last node in tree. - * Args: - * tree: Pointer to an initialized red-black tree object. - * Ret: First/last node in tree, or NULL if tree is empty. - * - * static ex_node_t * - * ex_next(ex_t *tree, ex_node_t *node); - * static ex_node_t * - * ex_prev(ex_t *tree, ex_node_t *node); - * Description: Get node's successor/predecessor. - * Args: - * tree: Pointer to an initialized red-black tree object. - * node: A node in tree. - * Ret: node's successor/predecessor in tree, or NULL if node is - * last/first. - * - * static ex_node_t * - * ex_search(ex_t *tree, ex_node_t *key); - * Description: Search for node that matches key. - * Args: - * tree: Pointer to an initialized red-black tree object. - * key : Search key. - * Ret: Node in tree that matches key, or NULL if no match. - * - * static ex_node_t * - * ex_nsearch(ex_t *tree, ex_node_t *key); - * static ex_node_t * - * ex_psearch(ex_t *tree, ex_node_t *key); - * Description: Search for node that matches key. If no match is found, - * return what would be key's successor/predecessor, were - * key in tree. - * Args: - * tree: Pointer to an initialized red-black tree object. - * key : Search key. - * Ret: Node in tree that matches key, or if no match, hypothetical node's - * successor/predecessor (NULL if no successor/predecessor). - * - * static void - * ex_insert(ex_t *tree, ex_node_t *node); - * Description: Insert node into tree. - * Args: - * tree: Pointer to an initialized red-black tree object. - * node: Node to be inserted into tree. - * - * static void - * ex_remove(ex_t *tree, ex_node_t *node); - * Description: Remove node from tree. - * Args: - * tree: Pointer to an initialized red-black tree object. - * node: Node in tree to be removed. - * - * static ex_node_t * - * ex_iter(ex_t *tree, ex_node_t *start, ex_node_t *(*cb)(ex_t *, - * ex_node_t *, void *), void *arg); - * static ex_node_t * - * ex_reverse_iter(ex_t *tree, ex_node_t *start, ex_node *(*cb)(ex_t *, - * ex_node_t *, void *), void *arg); - * Description: Iterate forward/backward over tree, starting at node. If - * tree is modified, iteration must be immediately - * terminated by the callback function that causes the - * modification. - * Args: - * tree : Pointer to an initialized red-black tree object. - * start: Node at which to start iteration, or NULL to start at - * first/last node. - * cb : Callback function, which is called for each node during - * iteration. Under normal circumstances the callback function - * should return NULL, which causes iteration to continue. If a - * callback function returns non-NULL, iteration is immediately - * terminated and the non-NULL return value is returned by the - * iterator. This is useful for re-starting iteration after - * modifying tree. - * arg : Opaque pointer passed to cb(). - * Ret: NULL if iteration completed, or the non-NULL callback return value - * that caused termination of the iteration. 
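As an aside, the following is a minimal sketch of how the generated ex_* API documented above might be driven from calling code. It assumes an int key payload in ex_node_t and a matching ex_cmp() ordering, neither of which is spelled out above, and it relies on the rb_node()/rb_tree()/rb_gen() macros from this header being visible.

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>
    /* rb.h macros (rb_node, rb_tree, rb_gen) assumed visible here. */

    typedef struct ex_node_s ex_node_t;
    struct ex_node_s {
        rb_node(ex_node_t) ex_link;
        int key;                      /* assumed payload compared by ex_cmp() */
    };
    typedef rb_tree(ex_node_t) ex_t;

    static int
    ex_cmp(ex_node_t *a, ex_node_t *b)
    {
        /* Return -1, 0, or 1, as required by the comment above. */
        return ((a->key > b->key) - (a->key < b->key));
    }
    rb_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp)

    static void
    ex_demo(void)
    {
        ex_t tree;
        ex_node_t a, b, *n;

        a.key = 1;
        b.key = 2;
        ex_new(&tree);                /* initialize an empty tree */
        ex_insert(&tree, &a);         /* caller owns node storage */
        ex_insert(&tree, &b);
        n = ex_first(&tree);          /* smallest key, here &a */
        n = ex_search(&tree, &b);     /* exact-match lookup, here &b */
        ex_remove(&tree, &a);         /* unlink only; nothing is freed */
        (void)n;
    }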
- */ -#define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \ -a_attr void \ -a_prefix##new(a_rbt_type *rbtree) { \ - rb_new(a_type, a_field, rbtree); \ -} \ -a_attr a_type * \ -a_prefix##first(a_rbt_type *rbtree) { \ - a_type *ret; \ - rbtn_first(a_type, a_field, rbtree, rbtree->rbt_root, ret); \ - if (ret == &rbtree->rbt_nil) { \ - ret = NULL; \ - } \ - return (ret); \ -} \ -a_attr a_type * \ -a_prefix##last(a_rbt_type *rbtree) { \ - a_type *ret; \ - rbtn_last(a_type, a_field, rbtree, rbtree->rbt_root, ret); \ - if (ret == &rbtree->rbt_nil) { \ - ret = NULL; \ - } \ - return (ret); \ -} \ -a_attr a_type * \ -a_prefix##next(a_rbt_type *rbtree, a_type *node) { \ - a_type *ret; \ - if (rbtn_right_get(a_type, a_field, node) != &rbtree->rbt_nil) { \ - rbtn_first(a_type, a_field, rbtree, rbtn_right_get(a_type, \ - a_field, node), ret); \ - } else { \ - a_type *tnode = rbtree->rbt_root; \ - assert(tnode != &rbtree->rbt_nil); \ - ret = &rbtree->rbt_nil; \ - while (true) { \ - int cmp = (a_cmp)(node, tnode); \ - if (cmp < 0) { \ - ret = tnode; \ - tnode = rbtn_left_get(a_type, a_field, tnode); \ - } else if (cmp > 0) { \ - tnode = rbtn_right_get(a_type, a_field, tnode); \ - } else { \ - break; \ - } \ - assert(tnode != &rbtree->rbt_nil); \ - } \ - } \ - if (ret == &rbtree->rbt_nil) { \ - ret = (NULL); \ - } \ - return (ret); \ -} \ -a_attr a_type * \ -a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \ - a_type *ret; \ - if (rbtn_left_get(a_type, a_field, node) != &rbtree->rbt_nil) { \ - rbtn_last(a_type, a_field, rbtree, rbtn_left_get(a_type, \ - a_field, node), ret); \ - } else { \ - a_type *tnode = rbtree->rbt_root; \ - assert(tnode != &rbtree->rbt_nil); \ - ret = &rbtree->rbt_nil; \ - while (true) { \ - int cmp = (a_cmp)(node, tnode); \ - if (cmp < 0) { \ - tnode = rbtn_left_get(a_type, a_field, tnode); \ - } else if (cmp > 0) { \ - ret = tnode; \ - tnode = rbtn_right_get(a_type, a_field, tnode); \ - } else { \ - break; \ - } \ - assert(tnode != &rbtree->rbt_nil); \ - } \ - } \ - if (ret == &rbtree->rbt_nil) { \ - ret = (NULL); \ - } \ - return (ret); \ -} \ -a_attr a_type * \ -a_prefix##search(a_rbt_type *rbtree, a_type *key) { \ - a_type *ret; \ - int cmp; \ - ret = rbtree->rbt_root; \ - while (ret != &rbtree->rbt_nil \ - && (cmp = (a_cmp)(key, ret)) != 0) { \ - if (cmp < 0) { \ - ret = rbtn_left_get(a_type, a_field, ret); \ - } else { \ - ret = rbtn_right_get(a_type, a_field, ret); \ - } \ - } \ - if (ret == &rbtree->rbt_nil) { \ - ret = (NULL); \ - } \ - return (ret); \ -} \ -a_attr a_type * \ -a_prefix##nsearch(a_rbt_type *rbtree, a_type *key) { \ - a_type *ret; \ - a_type *tnode = rbtree->rbt_root; \ - ret = &rbtree->rbt_nil; \ - while (tnode != &rbtree->rbt_nil) { \ - int cmp = (a_cmp)(key, tnode); \ - if (cmp < 0) { \ - ret = tnode; \ - tnode = rbtn_left_get(a_type, a_field, tnode); \ - } else if (cmp > 0) { \ - tnode = rbtn_right_get(a_type, a_field, tnode); \ - } else { \ - ret = tnode; \ - break; \ - } \ - } \ - if (ret == &rbtree->rbt_nil) { \ - ret = (NULL); \ - } \ - return (ret); \ -} \ -a_attr a_type * \ -a_prefix##psearch(a_rbt_type *rbtree, a_type *key) { \ - a_type *ret; \ - a_type *tnode = rbtree->rbt_root; \ - ret = &rbtree->rbt_nil; \ - while (tnode != &rbtree->rbt_nil) { \ - int cmp = (a_cmp)(key, tnode); \ - if (cmp < 0) { \ - tnode = rbtn_left_get(a_type, a_field, tnode); \ - } else if (cmp > 0) { \ - ret = tnode; \ - tnode = rbtn_right_get(a_type, a_field, tnode); \ - } else { \ - ret = tnode; \ - break; \ - } \ - } \ - if (ret == &rbtree->rbt_nil) { 
\ - ret = (NULL); \ - } \ - return (ret); \ -} \ -a_attr void \ -a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \ - struct { \ - a_type *node; \ - int cmp; \ - } path[sizeof(void *) << 4], *pathp; \ - rbt_node_new(a_type, a_field, rbtree, node); \ - /* Wind. */ \ - path->node = rbtree->rbt_root; \ - for (pathp = path; pathp->node != &rbtree->rbt_nil; pathp++) { \ - int cmp = pathp->cmp = a_cmp(node, pathp->node); \ - assert(cmp != 0); \ - if (cmp < 0) { \ - pathp[1].node = rbtn_left_get(a_type, a_field, \ - pathp->node); \ - } else { \ - pathp[1].node = rbtn_right_get(a_type, a_field, \ - pathp->node); \ - } \ - } \ - pathp->node = node; \ - /* Unwind. */ \ - for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \ - a_type *cnode = pathp->node; \ - if (pathp->cmp < 0) { \ - a_type *left = pathp[1].node; \ - rbtn_left_set(a_type, a_field, cnode, left); \ - if (rbtn_red_get(a_type, a_field, left)) { \ - a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ - if (rbtn_red_get(a_type, a_field, leftleft)) { \ - /* Fix up 4-node. */ \ - a_type *tnode; \ - rbtn_black_set(a_type, a_field, leftleft); \ - rbtn_rotate_right(a_type, a_field, cnode, tnode); \ - cnode = tnode; \ - } \ - } else { \ - return; \ - } \ - } else { \ - a_type *right = pathp[1].node; \ - rbtn_right_set(a_type, a_field, cnode, right); \ - if (rbtn_red_get(a_type, a_field, right)) { \ - a_type *left = rbtn_left_get(a_type, a_field, cnode); \ - if (rbtn_red_get(a_type, a_field, left)) { \ - /* Split 4-node. */ \ - rbtn_black_set(a_type, a_field, left); \ - rbtn_black_set(a_type, a_field, right); \ - rbtn_red_set(a_type, a_field, cnode); \ - } else { \ - /* Lean left. */ \ - a_type *tnode; \ - bool tred = rbtn_red_get(a_type, a_field, cnode); \ - rbtn_rotate_left(a_type, a_field, cnode, tnode); \ - rbtn_color_set(a_type, a_field, tnode, tred); \ - rbtn_red_set(a_type, a_field, cnode); \ - cnode = tnode; \ - } \ - } else { \ - return; \ - } \ - } \ - pathp->node = cnode; \ - } \ - /* Set root, and make it black. */ \ - rbtree->rbt_root = path->node; \ - rbtn_black_set(a_type, a_field, rbtree->rbt_root); \ -} \ -a_attr void \ -a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ - struct { \ - a_type *node; \ - int cmp; \ - } *pathp, *nodep, path[sizeof(void *) << 4]; \ - /* Wind. */ \ - nodep = NULL; /* Silence compiler warning. */ \ - path->node = rbtree->rbt_root; \ - for (pathp = path; pathp->node != &rbtree->rbt_nil; pathp++) { \ - int cmp = pathp->cmp = a_cmp(node, pathp->node); \ - if (cmp < 0) { \ - pathp[1].node = rbtn_left_get(a_type, a_field, \ - pathp->node); \ - } else { \ - pathp[1].node = rbtn_right_get(a_type, a_field, \ - pathp->node); \ - if (cmp == 0) { \ - /* Find node's successor, in preparation for swap. */ \ - pathp->cmp = 1; \ - nodep = pathp; \ - for (pathp++; pathp->node != &rbtree->rbt_nil; \ - pathp++) { \ - pathp->cmp = -1; \ - pathp[1].node = rbtn_left_get(a_type, a_field, \ - pathp->node); \ - } \ - break; \ - } \ - } \ - } \ - assert(nodep->node == node); \ - pathp--; \ - if (pathp->node != node) { \ - /* Swap node with its successor. */ \ - bool tred = rbtn_red_get(a_type, a_field, pathp->node); \ - rbtn_color_set(a_type, a_field, pathp->node, \ - rbtn_red_get(a_type, a_field, node)); \ - rbtn_left_set(a_type, a_field, pathp->node, \ - rbtn_left_get(a_type, a_field, node)); \ - /* If node's successor is its right child, the following code */\ - /* will do the wrong thing for the right child pointer. 
*/\ - /* However, it doesn't matter, because the pointer will be */\ - /* properly set when the successor is pruned. */\ - rbtn_right_set(a_type, a_field, pathp->node, \ - rbtn_right_get(a_type, a_field, node)); \ - rbtn_color_set(a_type, a_field, node, tred); \ - /* The pruned leaf node's child pointers are never accessed */\ - /* again, so don't bother setting them to nil. */\ - nodep->node = pathp->node; \ - pathp->node = node; \ - if (nodep == path) { \ - rbtree->rbt_root = nodep->node; \ - } else { \ - if (nodep[-1].cmp < 0) { \ - rbtn_left_set(a_type, a_field, nodep[-1].node, \ - nodep->node); \ - } else { \ - rbtn_right_set(a_type, a_field, nodep[-1].node, \ - nodep->node); \ - } \ - } \ - } else { \ - a_type *left = rbtn_left_get(a_type, a_field, node); \ - if (left != &rbtree->rbt_nil) { \ - /* node has no successor, but it has a left child. */\ - /* Splice node out, without losing the left child. */\ - assert(rbtn_red_get(a_type, a_field, node) == false); \ - assert(rbtn_red_get(a_type, a_field, left)); \ - rbtn_black_set(a_type, a_field, left); \ - if (pathp == path) { \ - rbtree->rbt_root = left; \ - } else { \ - if (pathp[-1].cmp < 0) { \ - rbtn_left_set(a_type, a_field, pathp[-1].node, \ - left); \ - } else { \ - rbtn_right_set(a_type, a_field, pathp[-1].node, \ - left); \ - } \ - } \ - return; \ - } else if (pathp == path) { \ - /* The tree only contained one node. */ \ - rbtree->rbt_root = &rbtree->rbt_nil; \ - return; \ - } \ - } \ - if (rbtn_red_get(a_type, a_field, pathp->node)) { \ - /* Prune red node, which requires no fixup. */ \ - assert(pathp[-1].cmp < 0); \ - rbtn_left_set(a_type, a_field, pathp[-1].node, \ - &rbtree->rbt_nil); \ - return; \ - } \ - /* The node to be pruned is black, so unwind until balance is */\ - /* restored. */\ - pathp->node = &rbtree->rbt_nil; \ - for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \ - assert(pathp->cmp != 0); \ - if (pathp->cmp < 0) { \ - rbtn_left_set(a_type, a_field, pathp->node, \ - pathp[1].node); \ - assert(rbtn_red_get(a_type, a_field, pathp[1].node) \ - == false); \ - if (rbtn_red_get(a_type, a_field, pathp->node)) { \ - a_type *right = rbtn_right_get(a_type, a_field, \ - pathp->node); \ - a_type *rightleft = rbtn_left_get(a_type, a_field, \ - right); \ - a_type *tnode; \ - if (rbtn_red_get(a_type, a_field, rightleft)) { \ - /* In the following diagrams, ||, //, and \\ */\ - /* indicate the path to the removed node. */\ - /* */\ - /* || */\ - /* pathp(r) */\ - /* // \ */\ - /* (b) (b) */\ - /* / */\ - /* (r) */\ - /* */\ - rbtn_black_set(a_type, a_field, pathp->node); \ - rbtn_rotate_right(a_type, a_field, right, tnode); \ - rbtn_right_set(a_type, a_field, pathp->node, tnode);\ - rbtn_rotate_left(a_type, a_field, pathp->node, \ - tnode); \ - } else { \ - /* || */\ - /* pathp(r) */\ - /* // \ */\ - /* (b) (b) */\ - /* / */\ - /* (b) */\ - /* */\ - rbtn_rotate_left(a_type, a_field, pathp->node, \ - tnode); \ - } \ - /* Balance restored, but rotation modified subtree */\ - /* root. 
*/\ - assert((uintptr_t)pathp > (uintptr_t)path); \ - if (pathp[-1].cmp < 0) { \ - rbtn_left_set(a_type, a_field, pathp[-1].node, \ - tnode); \ - } else { \ - rbtn_right_set(a_type, a_field, pathp[-1].node, \ - tnode); \ - } \ - return; \ - } else { \ - a_type *right = rbtn_right_get(a_type, a_field, \ - pathp->node); \ - a_type *rightleft = rbtn_left_get(a_type, a_field, \ - right); \ - if (rbtn_red_get(a_type, a_field, rightleft)) { \ - /* || */\ - /* pathp(b) */\ - /* // \ */\ - /* (b) (b) */\ - /* / */\ - /* (r) */\ - a_type *tnode; \ - rbtn_black_set(a_type, a_field, rightleft); \ - rbtn_rotate_right(a_type, a_field, right, tnode); \ - rbtn_right_set(a_type, a_field, pathp->node, tnode);\ - rbtn_rotate_left(a_type, a_field, pathp->node, \ - tnode); \ - /* Balance restored, but rotation modified */\ - /* subree root, which may actually be the tree */\ - /* root. */\ - if (pathp == path) { \ - /* Set root. */ \ - rbtree->rbt_root = tnode; \ - } else { \ - if (pathp[-1].cmp < 0) { \ - rbtn_left_set(a_type, a_field, \ - pathp[-1].node, tnode); \ - } else { \ - rbtn_right_set(a_type, a_field, \ - pathp[-1].node, tnode); \ - } \ - } \ - return; \ - } else { \ - /* || */\ - /* pathp(b) */\ - /* // \ */\ - /* (b) (b) */\ - /* / */\ - /* (b) */\ - a_type *tnode; \ - rbtn_red_set(a_type, a_field, pathp->node); \ - rbtn_rotate_left(a_type, a_field, pathp->node, \ - tnode); \ - pathp->node = tnode; \ - } \ - } \ - } else { \ - a_type *left; \ - rbtn_right_set(a_type, a_field, pathp->node, \ - pathp[1].node); \ - left = rbtn_left_get(a_type, a_field, pathp->node); \ - if (rbtn_red_get(a_type, a_field, left)) { \ - a_type *tnode; \ - a_type *leftright = rbtn_right_get(a_type, a_field, \ - left); \ - a_type *leftrightleft = rbtn_left_get(a_type, a_field, \ - leftright); \ - if (rbtn_red_get(a_type, a_field, leftrightleft)) { \ - /* || */\ - /* pathp(b) */\ - /* / \\ */\ - /* (r) (b) */\ - /* \ */\ - /* (b) */\ - /* / */\ - /* (r) */\ - a_type *unode; \ - rbtn_black_set(a_type, a_field, leftrightleft); \ - rbtn_rotate_right(a_type, a_field, pathp->node, \ - unode); \ - rbtn_rotate_right(a_type, a_field, pathp->node, \ - tnode); \ - rbtn_right_set(a_type, a_field, unode, tnode); \ - rbtn_rotate_left(a_type, a_field, unode, tnode); \ - } else { \ - /* || */\ - /* pathp(b) */\ - /* / \\ */\ - /* (r) (b) */\ - /* \ */\ - /* (b) */\ - /* / */\ - /* (b) */\ - assert(leftright != &rbtree->rbt_nil); \ - rbtn_red_set(a_type, a_field, leftright); \ - rbtn_rotate_right(a_type, a_field, pathp->node, \ - tnode); \ - rbtn_black_set(a_type, a_field, tnode); \ - } \ - /* Balance restored, but rotation modified subtree */\ - /* root, which may actually be the tree root. */\ - if (pathp == path) { \ - /* Set root. */ \ - rbtree->rbt_root = tnode; \ - } else { \ - if (pathp[-1].cmp < 0) { \ - rbtn_left_set(a_type, a_field, pathp[-1].node, \ - tnode); \ - } else { \ - rbtn_right_set(a_type, a_field, pathp[-1].node, \ - tnode); \ - } \ - } \ - return; \ - } else if (rbtn_red_get(a_type, a_field, pathp->node)) { \ - a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ - if (rbtn_red_get(a_type, a_field, leftleft)) { \ - /* || */\ - /* pathp(r) */\ - /* / \\ */\ - /* (b) (b) */\ - /* / */\ - /* (r) */\ - a_type *tnode; \ - rbtn_black_set(a_type, a_field, pathp->node); \ - rbtn_red_set(a_type, a_field, left); \ - rbtn_black_set(a_type, a_field, leftleft); \ - rbtn_rotate_right(a_type, a_field, pathp->node, \ - tnode); \ - /* Balance restored, but rotation modified */\ - /* subtree root. 
*/\ - assert((uintptr_t)pathp > (uintptr_t)path); \ - if (pathp[-1].cmp < 0) { \ - rbtn_left_set(a_type, a_field, pathp[-1].node, \ - tnode); \ - } else { \ - rbtn_right_set(a_type, a_field, pathp[-1].node, \ - tnode); \ - } \ - return; \ - } else { \ - /* || */\ - /* pathp(r) */\ - /* / \\ */\ - /* (b) (b) */\ - /* / */\ - /* (b) */\ - rbtn_red_set(a_type, a_field, left); \ - rbtn_black_set(a_type, a_field, pathp->node); \ - /* Balance restored. */ \ - return; \ - } \ - } else { \ - a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ - if (rbtn_red_get(a_type, a_field, leftleft)) { \ - /* || */\ - /* pathp(b) */\ - /* / \\ */\ - /* (b) (b) */\ - /* / */\ - /* (r) */\ - a_type *tnode; \ - rbtn_black_set(a_type, a_field, leftleft); \ - rbtn_rotate_right(a_type, a_field, pathp->node, \ - tnode); \ - /* Balance restored, but rotation modified */\ - /* subtree root, which may actually be the tree */\ - /* root. */\ - if (pathp == path) { \ - /* Set root. */ \ - rbtree->rbt_root = tnode; \ - } else { \ - if (pathp[-1].cmp < 0) { \ - rbtn_left_set(a_type, a_field, \ - pathp[-1].node, tnode); \ - } else { \ - rbtn_right_set(a_type, a_field, \ - pathp[-1].node, tnode); \ - } \ - } \ - return; \ - } else { \ - /* || */\ - /* pathp(b) */\ - /* / \\ */\ - /* (b) (b) */\ - /* / */\ - /* (b) */\ - rbtn_red_set(a_type, a_field, left); \ - } \ - } \ - } \ - } \ - /* Set root. */ \ - rbtree->rbt_root = path->node; \ - assert(rbtn_red_get(a_type, a_field, rbtree->rbt_root) == false); \ -} \ -a_attr a_type * \ -a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node, \ - a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ - if (node == &rbtree->rbt_nil) { \ - return (&rbtree->rbt_nil); \ - } else { \ - a_type *ret; \ - if ((ret = a_prefix##iter_recurse(rbtree, rbtn_left_get(a_type, \ - a_field, node), cb, arg)) != &rbtree->rbt_nil \ - || (ret = cb(rbtree, node, arg)) != NULL) { \ - return (ret); \ - } \ - return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ - a_field, node), cb, arg)); \ - } \ -} \ -a_attr a_type * \ -a_prefix##iter_start(a_rbt_type *rbtree, a_type *start, a_type *node, \ - a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ - int cmp = a_cmp(start, node); \ - if (cmp < 0) { \ - a_type *ret; \ - if ((ret = a_prefix##iter_start(rbtree, start, \ - rbtn_left_get(a_type, a_field, node), cb, arg)) != \ - &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \ - return (ret); \ - } \ - return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ - a_field, node), cb, arg)); \ - } else if (cmp > 0) { \ - return (a_prefix##iter_start(rbtree, start, \ - rbtn_right_get(a_type, a_field, node), cb, arg)); \ - } else { \ - a_type *ret; \ - if ((ret = cb(rbtree, node, arg)) != NULL) { \ - return (ret); \ - } \ - return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ - a_field, node), cb, arg)); \ - } \ -} \ -a_attr a_type * \ -a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \ - a_rbt_type *, a_type *, void *), void *arg) { \ - a_type *ret; \ - if (start != NULL) { \ - ret = a_prefix##iter_start(rbtree, start, rbtree->rbt_root, \ - cb, arg); \ - } else { \ - ret = a_prefix##iter_recurse(rbtree, rbtree->rbt_root, cb, arg);\ - } \ - if (ret == &rbtree->rbt_nil) { \ - ret = NULL; \ - } \ - return (ret); \ -} \ -a_attr a_type * \ -a_prefix##reverse_iter_recurse(a_rbt_type *rbtree, a_type *node, \ - a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ - if (node == &rbtree->rbt_nil) { \ - return (&rbtree->rbt_nil); \ - } else { \ 
- a_type *ret; \ - if ((ret = a_prefix##reverse_iter_recurse(rbtree, \ - rbtn_right_get(a_type, a_field, node), cb, arg)) != \ - &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \ - return (ret); \ - } \ - return (a_prefix##reverse_iter_recurse(rbtree, \ - rbtn_left_get(a_type, a_field, node), cb, arg)); \ - } \ -} \ -a_attr a_type * \ -a_prefix##reverse_iter_start(a_rbt_type *rbtree, a_type *start, \ - a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *), \ - void *arg) { \ - int cmp = a_cmp(start, node); \ - if (cmp > 0) { \ - a_type *ret; \ - if ((ret = a_prefix##reverse_iter_start(rbtree, start, \ - rbtn_right_get(a_type, a_field, node), cb, arg)) != \ - &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \ - return (ret); \ - } \ - return (a_prefix##reverse_iter_recurse(rbtree, \ - rbtn_left_get(a_type, a_field, node), cb, arg)); \ - } else if (cmp < 0) { \ - return (a_prefix##reverse_iter_start(rbtree, start, \ - rbtn_left_get(a_type, a_field, node), cb, arg)); \ - } else { \ - a_type *ret; \ - if ((ret = cb(rbtree, node, arg)) != NULL) { \ - return (ret); \ - } \ - return (a_prefix##reverse_iter_recurse(rbtree, \ - rbtn_left_get(a_type, a_field, node), cb, arg)); \ - } \ -} \ -a_attr a_type * \ -a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ - a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ - a_type *ret; \ - if (start != NULL) { \ - ret = a_prefix##reverse_iter_start(rbtree, start, \ - rbtree->rbt_root, cb, arg); \ - } else { \ - ret = a_prefix##reverse_iter_recurse(rbtree, rbtree->rbt_root, \ - cb, arg); \ - } \ - if (ret == &rbtree->rbt_nil) { \ - ret = NULL; \ - } \ - return (ret); \ -} - -#endif /* RB_H_ */ diff --git a/lib/jemalloc-3.6.0/include/jemalloc/internal/rtree.h b/lib/jemalloc-3.6.0/include/jemalloc/internal/rtree.h deleted file mode 100644 index bc74769..0000000 --- a/lib/jemalloc-3.6.0/include/jemalloc/internal/rtree.h +++ /dev/null @@ -1,172 +0,0 @@ -/* - * This radix tree implementation is tailored to the singular purpose of - * tracking which chunks are currently owned by jemalloc. This functionality - * is mandatory for OS X, where jemalloc must be able to respond to object - * ownership queries. - * - ******************************************************************************* - */ -#ifdef JEMALLOC_H_TYPES - -typedef struct rtree_s rtree_t; - -/* - * Size of each radix tree node (must be a power of 2). This impacts tree - * depth. - */ -#define RTREE_NODESIZE (1U << 16) - -typedef void *(rtree_alloc_t)(size_t); -typedef void (rtree_dalloc_t)(void *); - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -struct rtree_s { - rtree_alloc_t *alloc; - rtree_dalloc_t *dalloc; - malloc_mutex_t mutex; - void **root; - unsigned height; - unsigned level2bits[1]; /* Dynamically sized. 
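As an aside, the subkey arithmetic that the rtree_get()/rtree_set() inlines below rely on can be seen in isolation in the following sketch: each level i consumes level2bits[i] bits of the key, starting from the most significant end, and the low-order bits of the key are never consumed. The 64-bit key width and the {18, 18, 16} split are assumptions made only for this example.

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        uintptr_t key = (uintptr_t)0x00007f12345a0000ULL; /* e.g. a chunk address */
        unsigned level2bits[] = {18, 18, 16};
        unsigned lshift = 0;
        unsigned i;

        for (i = 0; i < 3; i++) {
            unsigned bits = level2bits[i];
            /* Same pattern as in the header: keep the next 'bits'
             * most-significant bits of (key << lshift). */
            uintptr_t subkey = (key << lshift) >> (64 - bits);
            printf("level %u: subkey %#lx\n", i, (unsigned long)subkey);
            lshift += bits;
        }
        return (0);
    }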
*/ -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -rtree_t *rtree_new(unsigned bits, rtree_alloc_t *alloc, rtree_dalloc_t *dalloc); -void rtree_delete(rtree_t *rtree); -void rtree_prefork(rtree_t *rtree); -void rtree_postfork_parent(rtree_t *rtree); -void rtree_postfork_child(rtree_t *rtree); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -#ifdef JEMALLOC_DEBUG -uint8_t rtree_get_locked(rtree_t *rtree, uintptr_t key); -#endif -uint8_t rtree_get(rtree_t *rtree, uintptr_t key); -bool rtree_set(rtree_t *rtree, uintptr_t key, uint8_t val); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_)) -#define RTREE_GET_GENERATE(f) \ -/* The least significant bits of the key are ignored. */ \ -JEMALLOC_INLINE uint8_t \ -f(rtree_t *rtree, uintptr_t key) \ -{ \ - uint8_t ret; \ - uintptr_t subkey; \ - unsigned i, lshift, height, bits; \ - void **node, **child; \ - \ - RTREE_LOCK(&rtree->mutex); \ - for (i = lshift = 0, height = rtree->height, node = rtree->root;\ - i < height - 1; \ - i++, lshift += bits, node = child) { \ - bits = rtree->level2bits[i]; \ - subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR + \ - 3)) - bits); \ - child = (void**)node[subkey]; \ - if (child == NULL) { \ - RTREE_UNLOCK(&rtree->mutex); \ - return (0); \ - } \ - } \ - \ - /* \ - * node is a leaf, so it contains values rather than node \ - * pointers. \ - */ \ - bits = rtree->level2bits[i]; \ - subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - \ - bits); \ - { \ - uint8_t *leaf = (uint8_t *)node; \ - ret = leaf[subkey]; \ - } \ - RTREE_UNLOCK(&rtree->mutex); \ - \ - RTREE_GET_VALIDATE \ - return (ret); \ -} - -#ifdef JEMALLOC_DEBUG -# define RTREE_LOCK(l) malloc_mutex_lock(l) -# define RTREE_UNLOCK(l) malloc_mutex_unlock(l) -# define RTREE_GET_VALIDATE -RTREE_GET_GENERATE(rtree_get_locked) -# undef RTREE_LOCK -# undef RTREE_UNLOCK -# undef RTREE_GET_VALIDATE -#endif - -#define RTREE_LOCK(l) -#define RTREE_UNLOCK(l) -#ifdef JEMALLOC_DEBUG - /* - * Suppose that it were possible for a jemalloc-allocated chunk to be - * munmap()ped, followed by a different allocator in another thread re-using - * overlapping virtual memory, all without invalidating the cached rtree - * value. The result would be a false positive (the rtree would claim that - * jemalloc owns memory that it had actually discarded). This scenario - * seems impossible, but the following assertion is a prudent sanity check. - */ -# define RTREE_GET_VALIDATE \ - assert(rtree_get_locked(rtree, key) == ret); -#else -# define RTREE_GET_VALIDATE -#endif -RTREE_GET_GENERATE(rtree_get) -#undef RTREE_LOCK -#undef RTREE_UNLOCK -#undef RTREE_GET_VALIDATE - -JEMALLOC_INLINE bool -rtree_set(rtree_t *rtree, uintptr_t key, uint8_t val) -{ - uintptr_t subkey; - unsigned i, lshift, height, bits; - void **node, **child; - - malloc_mutex_lock(&rtree->mutex); - for (i = lshift = 0, height = rtree->height, node = rtree->root; - i < height - 1; - i++, lshift += bits, node = child) { - bits = rtree->level2bits[i]; - subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - - bits); - child = (void**)node[subkey]; - if (child == NULL) { - size_t size = ((i + 1 < height - 1) ? 
sizeof(void *) - : (sizeof(uint8_t))) << rtree->level2bits[i+1]; - child = (void**)rtree->alloc(size); - if (child == NULL) { - malloc_mutex_unlock(&rtree->mutex); - return (true); - } - memset(child, 0, size); - node[subkey] = child; - } - } - - /* node is a leaf, so it contains values rather than node pointers. */ - bits = rtree->level2bits[i]; - subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - bits); - { - uint8_t *leaf = (uint8_t *)node; - leaf[subkey] = val; - } - malloc_mutex_unlock(&rtree->mutex); - - return (false); -} -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/lib/jemalloc-3.6.0/include/jemalloc/internal/size_classes.sh b/lib/jemalloc-3.6.0/include/jemalloc/internal/size_classes.sh deleted file mode 100755 index 29c80c1..0000000 --- a/lib/jemalloc-3.6.0/include/jemalloc/internal/size_classes.sh +++ /dev/null @@ -1,122 +0,0 @@ -#!/bin/sh - -# The following limits are chosen such that they cover all supported platforms. - -# Range of quanta. -lg_qmin=3 -lg_qmax=4 - -# The range of tiny size classes is [2^lg_tmin..2^(lg_q-1)]. -lg_tmin=3 - -# Range of page sizes. -lg_pmin=12 -lg_pmax=16 - -pow2() { - e=$1 - pow2_result=1 - while [ ${e} -gt 0 ] ; do - pow2_result=$((${pow2_result} + ${pow2_result})) - e=$((${e} - 1)) - done -} - -cat < 255) -# error "Too many small size classes" -#endif - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ -EOF diff --git a/lib/jemalloc-3.6.0/include/jemalloc/internal/stats.h b/lib/jemalloc-3.6.0/include/jemalloc/internal/stats.h deleted file mode 100644 index 27f68e3..0000000 --- a/lib/jemalloc-3.6.0/include/jemalloc/internal/stats.h +++ /dev/null @@ -1,173 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct tcache_bin_stats_s tcache_bin_stats_t; -typedef struct malloc_bin_stats_s malloc_bin_stats_t; -typedef struct malloc_large_stats_s malloc_large_stats_t; -typedef struct arena_stats_s arena_stats_t; -typedef struct chunk_stats_s chunk_stats_t; - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -struct tcache_bin_stats_s { - /* - * Number of allocation requests that corresponded to the size of this - * bin. - */ - uint64_t nrequests; -}; - -struct malloc_bin_stats_s { - /* - * Current number of bytes allocated, including objects currently - * cached by tcache. - */ - size_t allocated; - - /* - * Total number of allocation/deallocation requests served directly by - * the bin. Note that tcache may allocate an object, then recycle it - * many times, resulting many increments to nrequests, but only one - * each to nmalloc and ndalloc. - */ - uint64_t nmalloc; - uint64_t ndalloc; - - /* - * Number of allocation requests that correspond to the size of this - * bin. This includes requests served by tcache, though tcache only - * periodically merges into this counter. 
- */ - uint64_t nrequests; - - /* Number of tcache fills from this bin. */ - uint64_t nfills; - - /* Number of tcache flushes to this bin. */ - uint64_t nflushes; - - /* Total number of runs created for this bin's size class. */ - uint64_t nruns; - - /* - * Total number of runs reused by extracting them from the runs tree for - * this bin's size class. - */ - uint64_t reruns; - - /* Current number of runs in this bin. */ - size_t curruns; -}; - -struct malloc_large_stats_s { - /* - * Total number of allocation/deallocation requests served directly by - * the arena. Note that tcache may allocate an object, then recycle it - * many times, resulting many increments to nrequests, but only one - * each to nmalloc and ndalloc. - */ - uint64_t nmalloc; - uint64_t ndalloc; - - /* - * Number of allocation requests that correspond to this size class. - * This includes requests served by tcache, though tcache only - * periodically merges into this counter. - */ - uint64_t nrequests; - - /* Current number of runs of this size class. */ - size_t curruns; -}; - -struct arena_stats_s { - /* Number of bytes currently mapped. */ - size_t mapped; - - /* - * Total number of purge sweeps, total number of madvise calls made, - * and total pages purged in order to keep dirty unused memory under - * control. - */ - uint64_t npurge; - uint64_t nmadvise; - uint64_t purged; - - /* Per-size-category statistics. */ - size_t allocated_large; - uint64_t nmalloc_large; - uint64_t ndalloc_large; - uint64_t nrequests_large; - - /* - * One element for each possible size class, including sizes that - * overlap with bin size classes. This is necessary because ipalloc() - * sometimes has to use such large objects in order to assure proper - * alignment. - */ - malloc_large_stats_t *lstats; -}; - -struct chunk_stats_s { - /* Number of chunks that were allocated. */ - uint64_t nchunks; - - /* High-water mark for number of chunks allocated. */ - size_t highchunks; - - /* - * Current number of chunks allocated. This value isn't maintained for - * any other purpose, so keep track of it in order to be able to set - * highchunks. 
- */ - size_t curchunks; -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -extern bool opt_stats_print; - -extern size_t stats_cactive; - -void stats_print(void (*write)(void *, const char *), void *cbopaque, - const char *opts); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -size_t stats_cactive_get(void); -void stats_cactive_add(size_t size); -void stats_cactive_sub(size_t size); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_STATS_C_)) -JEMALLOC_INLINE size_t -stats_cactive_get(void) -{ - - return (atomic_read_z(&stats_cactive)); -} - -JEMALLOC_INLINE void -stats_cactive_add(size_t size) -{ - - atomic_add_z(&stats_cactive, size); -} - -JEMALLOC_INLINE void -stats_cactive_sub(size_t size) -{ - - atomic_sub_z(&stats_cactive, size); -} -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/lib/jemalloc-3.6.0/include/jemalloc/internal/tcache.h b/lib/jemalloc-3.6.0/include/jemalloc/internal/tcache.h deleted file mode 100644 index c3d4b58..0000000 --- a/lib/jemalloc-3.6.0/include/jemalloc/internal/tcache.h +++ /dev/null @@ -1,443 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct tcache_bin_info_s tcache_bin_info_t; -typedef struct tcache_bin_s tcache_bin_t; -typedef struct tcache_s tcache_t; - -/* - * tcache pointers close to NULL are used to encode state information that is - * used for two purposes: preventing thread caching on a per thread basis and - * cleaning up during thread shutdown. - */ -#define TCACHE_STATE_DISABLED ((tcache_t *)(uintptr_t)1) -#define TCACHE_STATE_REINCARNATED ((tcache_t *)(uintptr_t)2) -#define TCACHE_STATE_PURGATORY ((tcache_t *)(uintptr_t)3) -#define TCACHE_STATE_MAX TCACHE_STATE_PURGATORY - -/* - * Absolute maximum number of cache slots for each small bin in the thread - * cache. This is an additional constraint beyond that imposed as: twice the - * number of regions per run for this size class. - * - * This constant must be an even number. - */ -#define TCACHE_NSLOTS_SMALL_MAX 200 - -/* Number of cache slots for large size classes. */ -#define TCACHE_NSLOTS_LARGE 20 - -/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */ -#define LG_TCACHE_MAXCLASS_DEFAULT 15 - -/* - * TCACHE_GC_SWEEP is the approximate number of allocation events between - * full GC sweeps. Integer rounding may cause the actual number to be - * slightly higher, since GC is performed incrementally. - */ -#define TCACHE_GC_SWEEP 8192 - -/* Number of tcache allocation/deallocation events between incremental GCs. */ -#define TCACHE_GC_INCR \ - ((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1)) - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -typedef enum { - tcache_enabled_false = 0, /* Enable cast to/from bool. */ - tcache_enabled_true = 1, - tcache_enabled_default = 2 -} tcache_enabled_t; - -/* - * Read-only information associated with each element of tcache_t's tbins array - * is stored separately, mainly to reduce memory usage. - */ -struct tcache_bin_info_s { - unsigned ncached_max; /* Upper limit on ncached. 
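As an aside, a worked example of the TCACHE_GC_INCR arithmetic above: NBINS is platform-dependent, but assuming NBINS = 36 purely for illustration, TCACHE_GC_SWEEP / NBINS = 8192 / 36 = 227 (integer division) and the rounding term adds 1, giving TCACHE_GC_INCR = 228. That is roughly one incremental GC step every 228 allocation/deallocation events per thread, and a full sweep of all bins about every 8192 events, slightly more in practice because of the rounding, as the comment above notes.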
*/ -}; - -struct tcache_bin_s { - tcache_bin_stats_t tstats; - int low_water; /* Min # cached since last GC. */ - unsigned lg_fill_div; /* Fill (ncached_max >> lg_fill_div). */ - unsigned ncached; /* # of cached objects. */ - void **avail; /* Stack of available objects. */ -}; - -struct tcache_s { - ql_elm(tcache_t) link; /* Used for aggregating stats. */ - uint64_t prof_accumbytes;/* Cleared after arena_prof_accum() */ - arena_t *arena; /* This thread's arena. */ - unsigned ev_cnt; /* Event count since incremental GC. */ - unsigned next_gc_bin; /* Next bin to GC. */ - tcache_bin_t tbins[1]; /* Dynamically sized. */ - /* - * The pointer stacks associated with tbins follow as a contiguous - * array. During tcache initialization, the avail pointer in each - * element of tbins is initialized to point to the proper offset within - * this array. - */ -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -extern bool opt_tcache; -extern ssize_t opt_lg_tcache_max; - -extern tcache_bin_info_t *tcache_bin_info; - -/* - * Number of tcache bins. There are NBINS small-object bins, plus 0 or more - * large-object bins. - */ -extern size_t nhbins; - -/* Maximum cached size class. */ -extern size_t tcache_maxclass; - -size_t tcache_salloc(const void *ptr); -void tcache_event_hard(tcache_t *tcache); -void *tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin, - size_t binind); -void tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem, - tcache_t *tcache); -void tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem, - tcache_t *tcache); -void tcache_arena_associate(tcache_t *tcache, arena_t *arena); -void tcache_arena_dissociate(tcache_t *tcache); -tcache_t *tcache_create(arena_t *arena); -void tcache_destroy(tcache_t *tcache); -void tcache_thread_cleanup(void *arg); -void tcache_stats_merge(tcache_t *tcache, arena_t *arena); -bool tcache_boot0(void); -bool tcache_boot1(void); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -malloc_tsd_protos(JEMALLOC_ATTR(unused), tcache, tcache_t *) -malloc_tsd_protos(JEMALLOC_ATTR(unused), tcache_enabled, tcache_enabled_t) - -void tcache_event(tcache_t *tcache); -void tcache_flush(void); -bool tcache_enabled_get(void); -tcache_t *tcache_get(bool create); -void tcache_enabled_set(bool enabled); -void *tcache_alloc_easy(tcache_bin_t *tbin); -void *tcache_alloc_small(tcache_t *tcache, size_t size, bool zero); -void *tcache_alloc_large(tcache_t *tcache, size_t size, bool zero); -void tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind); -void tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_)) -/* Map of thread-specific caches. */ -malloc_tsd_externs(tcache, tcache_t *) -malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, tcache, tcache_t *, NULL, - tcache_thread_cleanup) -/* Per thread flag that allows thread caches to be disabled. 
*/ -malloc_tsd_externs(tcache_enabled, tcache_enabled_t) -malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, tcache_enabled, tcache_enabled_t, - tcache_enabled_default, malloc_tsd_no_cleanup) - -JEMALLOC_INLINE void -tcache_flush(void) -{ - tcache_t *tcache; - - cassert(config_tcache); - - tcache = *tcache_tsd_get(); - if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX) - return; - tcache_destroy(tcache); - tcache = NULL; - tcache_tsd_set(&tcache); -} - -JEMALLOC_INLINE bool -tcache_enabled_get(void) -{ - tcache_enabled_t tcache_enabled; - - cassert(config_tcache); - - tcache_enabled = *tcache_enabled_tsd_get(); - if (tcache_enabled == tcache_enabled_default) { - tcache_enabled = (tcache_enabled_t)opt_tcache; - tcache_enabled_tsd_set(&tcache_enabled); - } - - return ((bool)tcache_enabled); -} - -JEMALLOC_INLINE void -tcache_enabled_set(bool enabled) -{ - tcache_enabled_t tcache_enabled; - tcache_t *tcache; - - cassert(config_tcache); - - tcache_enabled = (tcache_enabled_t)enabled; - tcache_enabled_tsd_set(&tcache_enabled); - tcache = *tcache_tsd_get(); - if (enabled) { - if (tcache == TCACHE_STATE_DISABLED) { - tcache = NULL; - tcache_tsd_set(&tcache); - } - } else /* disabled */ { - if (tcache > TCACHE_STATE_MAX) { - tcache_destroy(tcache); - tcache = NULL; - } - if (tcache == NULL) { - tcache = TCACHE_STATE_DISABLED; - tcache_tsd_set(&tcache); - } - } -} - -JEMALLOC_ALWAYS_INLINE tcache_t * -tcache_get(bool create) -{ - tcache_t *tcache; - - if (config_tcache == false) - return (NULL); - if (config_lazy_lock && isthreaded == false) - return (NULL); - - tcache = *tcache_tsd_get(); - if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX) { - if (tcache == TCACHE_STATE_DISABLED) - return (NULL); - if (tcache == NULL) { - if (create == false) { - /* - * Creating a tcache here would cause - * allocation as a side effect of free(). - * Ordinarily that would be okay since - * tcache_create() failure is a soft failure - * that doesn't propagate. However, if TLS - * data are freed via free() as in glibc, - * subtle corruption could result from setting - * a TLS variable after its backing memory is - * freed. - */ - return (NULL); - } - if (tcache_enabled_get() == false) { - tcache_enabled_set(false); /* Memoize. */ - return (NULL); - } - return (tcache_create(choose_arena(NULL))); - } - if (tcache == TCACHE_STATE_PURGATORY) { - /* - * Make a note that an allocator function was called - * after tcache_thread_cleanup() was called. 
- */ - tcache = TCACHE_STATE_REINCARNATED; - tcache_tsd_set(&tcache); - return (NULL); - } - if (tcache == TCACHE_STATE_REINCARNATED) - return (NULL); - not_reached(); - } - - return (tcache); -} - -JEMALLOC_ALWAYS_INLINE void -tcache_event(tcache_t *tcache) -{ - - if (TCACHE_GC_INCR == 0) - return; - - tcache->ev_cnt++; - assert(tcache->ev_cnt <= TCACHE_GC_INCR); - if (tcache->ev_cnt == TCACHE_GC_INCR) - tcache_event_hard(tcache); -} - -JEMALLOC_ALWAYS_INLINE void * -tcache_alloc_easy(tcache_bin_t *tbin) -{ - void *ret; - - if (tbin->ncached == 0) { - tbin->low_water = -1; - return (NULL); - } - tbin->ncached--; - if ((int)tbin->ncached < tbin->low_water) - tbin->low_water = tbin->ncached; - ret = tbin->avail[tbin->ncached]; - return (ret); -} - -JEMALLOC_ALWAYS_INLINE void * -tcache_alloc_small(tcache_t *tcache, size_t size, bool zero) -{ - void *ret; - size_t binind; - tcache_bin_t *tbin; - - binind = SMALL_SIZE2BIN(size); - assert(binind < NBINS); - tbin = &tcache->tbins[binind]; - size = arena_bin_info[binind].reg_size; - ret = tcache_alloc_easy(tbin); - if (ret == NULL) { - ret = tcache_alloc_small_hard(tcache, tbin, binind); - if (ret == NULL) - return (NULL); - } - assert(tcache_salloc(ret) == arena_bin_info[binind].reg_size); - - if (zero == false) { - if (config_fill) { - if (opt_junk) { - arena_alloc_junk_small(ret, - &arena_bin_info[binind], false); - } else if (opt_zero) - memset(ret, 0, size); - } - VALGRIND_MAKE_MEM_UNDEFINED(ret, size); - } else { - if (config_fill && opt_junk) { - arena_alloc_junk_small(ret, &arena_bin_info[binind], - true); - } - VALGRIND_MAKE_MEM_UNDEFINED(ret, size); - memset(ret, 0, size); - } - - if (config_stats) - tbin->tstats.nrequests++; - if (config_prof) - tcache->prof_accumbytes += arena_bin_info[binind].reg_size; - tcache_event(tcache); - return (ret); -} - -JEMALLOC_ALWAYS_INLINE void * -tcache_alloc_large(tcache_t *tcache, size_t size, bool zero) -{ - void *ret; - size_t binind; - tcache_bin_t *tbin; - - size = PAGE_CEILING(size); - assert(size <= tcache_maxclass); - binind = NBINS + (size >> LG_PAGE) - 1; - assert(binind < nhbins); - tbin = &tcache->tbins[binind]; - ret = tcache_alloc_easy(tbin); - if (ret == NULL) { - /* - * Only allocate one large object at a time, because it's quite - * expensive to create one and not use it. 
- */ - ret = arena_malloc_large(tcache->arena, size, zero); - if (ret == NULL) - return (NULL); - } else { - if (config_prof && prof_promote && size == PAGE) { - arena_chunk_t *chunk = - (arena_chunk_t *)CHUNK_ADDR2BASE(ret); - size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >> - LG_PAGE); - arena_mapbits_large_binind_set(chunk, pageind, - BININD_INVALID); - } - if (zero == false) { - if (config_fill) { - if (opt_junk) - memset(ret, 0xa5, size); - else if (opt_zero) - memset(ret, 0, size); - } - VALGRIND_MAKE_MEM_UNDEFINED(ret, size); - } else { - VALGRIND_MAKE_MEM_UNDEFINED(ret, size); - memset(ret, 0, size); - } - - if (config_stats) - tbin->tstats.nrequests++; - if (config_prof) - tcache->prof_accumbytes += size; - } - - tcache_event(tcache); - return (ret); -} - -JEMALLOC_ALWAYS_INLINE void -tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind) -{ - tcache_bin_t *tbin; - tcache_bin_info_t *tbin_info; - - assert(tcache_salloc(ptr) <= SMALL_MAXCLASS); - - if (config_fill && opt_junk) - arena_dalloc_junk_small(ptr, &arena_bin_info[binind]); - - tbin = &tcache->tbins[binind]; - tbin_info = &tcache_bin_info[binind]; - if (tbin->ncached == tbin_info->ncached_max) { - tcache_bin_flush_small(tbin, binind, (tbin_info->ncached_max >> - 1), tcache); - } - assert(tbin->ncached < tbin_info->ncached_max); - tbin->avail[tbin->ncached] = ptr; - tbin->ncached++; - - tcache_event(tcache); -} - -JEMALLOC_ALWAYS_INLINE void -tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size) -{ - size_t binind; - tcache_bin_t *tbin; - tcache_bin_info_t *tbin_info; - - assert((size & PAGE_MASK) == 0); - assert(tcache_salloc(ptr) > SMALL_MAXCLASS); - assert(tcache_salloc(ptr) <= tcache_maxclass); - - binind = NBINS + (size >> LG_PAGE) - 1; - - if (config_fill && opt_junk) - memset(ptr, 0x5a, size); - - tbin = &tcache->tbins[binind]; - tbin_info = &tcache_bin_info[binind]; - if (tbin->ncached == tbin_info->ncached_max) { - tcache_bin_flush_large(tbin, binind, (tbin_info->ncached_max >> - 1), tcache); - } - assert(tbin->ncached < tbin_info->ncached_max); - tbin->avail[tbin->ncached] = ptr; - tbin->ncached++; - - tcache_event(tcache); -} -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/lib/jemalloc-3.6.0/include/jemalloc/internal/tsd.h b/lib/jemalloc-3.6.0/include/jemalloc/internal/tsd.h deleted file mode 100644 index 9fb4a23..0000000 --- a/lib/jemalloc-3.6.0/include/jemalloc/internal/tsd.h +++ /dev/null @@ -1,434 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -/* Maximum number of malloc_tsd users with cleanup functions. */ -#define MALLOC_TSD_CLEANUPS_MAX 8 - -typedef bool (*malloc_tsd_cleanup_t)(void); - -#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ - !defined(_WIN32)) -typedef struct tsd_init_block_s tsd_init_block_t; -typedef struct tsd_init_head_s tsd_init_head_t; -#endif - -/* - * TLS/TSD-agnostic macro-based implementation of thread-specific data. There - * are four macros that support (at least) three use cases: file-private, - * library-private, and library-private inlined. 
Following is an example - * library-private tsd variable: - * - * In example.h: - * typedef struct { - * int x; - * int y; - * } example_t; - * #define EX_INITIALIZER JEMALLOC_CONCAT({0, 0}) - * malloc_tsd_protos(, example, example_t *) - * malloc_tsd_externs(example, example_t *) - * In example.c: - * malloc_tsd_data(, example, example_t *, EX_INITIALIZER) - * malloc_tsd_funcs(, example, example_t *, EX_INITIALIZER, - * example_tsd_cleanup) - * - * The result is a set of generated functions, e.g.: - * - * bool example_tsd_boot(void) {...} - * example_t **example_tsd_get() {...} - * void example_tsd_set(example_t **val) {...} - * - * Note that all of the functions deal in terms of (a_type *) rather than - * (a_type) so that it is possible to support non-pointer types (unlike - * pthreads TSD). example_tsd_cleanup() is passed an (a_type *) pointer that is - * cast to (void *). This means that the cleanup function needs to cast *and* - * dereference the function argument, e.g.: - * - * void - * example_tsd_cleanup(void *arg) - * { - * example_t *example = *(example_t **)arg; - * - * [...] - * if ([want the cleanup function to be called again]) { - * example_tsd_set(&example); - * } - * } - * - * If example_tsd_set() is called within example_tsd_cleanup(), it will be - * called again. This is similar to how pthreads TSD destruction works, except - * that pthreads only calls the cleanup function again if the value was set to - * non-NULL. - */ - -/* malloc_tsd_protos(). */ -#define malloc_tsd_protos(a_attr, a_name, a_type) \ -a_attr bool \ -a_name##_tsd_boot(void); \ -a_attr a_type * \ -a_name##_tsd_get(void); \ -a_attr void \ -a_name##_tsd_set(a_type *val); - -/* malloc_tsd_externs(). */ -#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP -#define malloc_tsd_externs(a_name, a_type) \ -extern __thread a_type a_name##_tls; \ -extern __thread bool a_name##_initialized; \ -extern bool a_name##_booted; -#elif (defined(JEMALLOC_TLS)) -#define malloc_tsd_externs(a_name, a_type) \ -extern __thread a_type a_name##_tls; \ -extern pthread_key_t a_name##_tsd; \ -extern bool a_name##_booted; -#elif (defined(_WIN32)) -#define malloc_tsd_externs(a_name, a_type) \ -extern DWORD a_name##_tsd; \ -extern bool a_name##_booted; -#else -#define malloc_tsd_externs(a_name, a_type) \ -extern pthread_key_t a_name##_tsd; \ -extern tsd_init_head_t a_name##_tsd_init_head; \ -extern bool a_name##_booted; -#endif - -/* malloc_tsd_data(). */ -#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP -#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ -a_attr __thread a_type JEMALLOC_TLS_MODEL \ - a_name##_tls = a_initializer; \ -a_attr __thread bool JEMALLOC_TLS_MODEL \ - a_name##_initialized = false; \ -a_attr bool a_name##_booted = false; -#elif (defined(JEMALLOC_TLS)) -#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ -a_attr __thread a_type JEMALLOC_TLS_MODEL \ - a_name##_tls = a_initializer; \ -a_attr pthread_key_t a_name##_tsd; \ -a_attr bool a_name##_booted = false; -#elif (defined(_WIN32)) -#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ -a_attr DWORD a_name##_tsd; \ -a_attr bool a_name##_booted = false; -#else -#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ -a_attr pthread_key_t a_name##_tsd; \ -a_attr tsd_init_head_t a_name##_tsd_init_head = { \ - ql_head_initializer(blocks), \ - MALLOC_MUTEX_INITIALIZER \ -}; \ -a_attr bool a_name##_booted = false; -#endif - -/* malloc_tsd_funcs(). 
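As an aside, the calling side of the example_* pattern documented above could look like the following minimal sketch; example_t, EX_INITIALIZER and the example_tsd_* functions are the ones defined in the comment block and in the hypothetical example.h/example.c above, and the abort() on boot failure is only a placeholder.

    #include <stdlib.h>
    /* #include "example.h"  -- the hypothetical header from the comment above */

    static example_t example_storage = {0, 0};

    static void
    example_use(void)
    {
        example_t *mine = &example_storage;
        example_t *cur;

        if (example_tsd_boot())       /* once, early; returns true on error */
            abort();
        example_tsd_set(&mine);       /* note: takes example_t ** (a_type *) */
        cur = *example_tsd_get();     /* read this thread's value back */
        (void)cur;
    }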
*/ -#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP -#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ - a_cleanup) \ -/* Initialization/cleanup. */ \ -a_attr bool \ -a_name##_tsd_cleanup_wrapper(void) \ -{ \ - \ - if (a_name##_initialized) { \ - a_name##_initialized = false; \ - a_cleanup(&a_name##_tls); \ - } \ - return (a_name##_initialized); \ -} \ -a_attr bool \ -a_name##_tsd_boot(void) \ -{ \ - \ - if (a_cleanup != malloc_tsd_no_cleanup) { \ - malloc_tsd_cleanup_register( \ - &a_name##_tsd_cleanup_wrapper); \ - } \ - a_name##_booted = true; \ - return (false); \ -} \ -/* Get/set. */ \ -a_attr a_type * \ -a_name##_tsd_get(void) \ -{ \ - \ - assert(a_name##_booted); \ - return (&a_name##_tls); \ -} \ -a_attr void \ -a_name##_tsd_set(a_type *val) \ -{ \ - \ - assert(a_name##_booted); \ - a_name##_tls = (*val); \ - if (a_cleanup != malloc_tsd_no_cleanup) \ - a_name##_initialized = true; \ -} -#elif (defined(JEMALLOC_TLS)) -#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ - a_cleanup) \ -/* Initialization/cleanup. */ \ -a_attr bool \ -a_name##_tsd_boot(void) \ -{ \ - \ - if (a_cleanup != malloc_tsd_no_cleanup) { \ - if (pthread_key_create(&a_name##_tsd, a_cleanup) != 0) \ - return (true); \ - } \ - a_name##_booted = true; \ - return (false); \ -} \ -/* Get/set. */ \ -a_attr a_type * \ -a_name##_tsd_get(void) \ -{ \ - \ - assert(a_name##_booted); \ - return (&a_name##_tls); \ -} \ -a_attr void \ -a_name##_tsd_set(a_type *val) \ -{ \ - \ - assert(a_name##_booted); \ - a_name##_tls = (*val); \ - if (a_cleanup != malloc_tsd_no_cleanup) { \ - if (pthread_setspecific(a_name##_tsd, \ - (void *)(&a_name##_tls))) { \ - malloc_write(": Error" \ - " setting TSD for "#a_name"\n"); \ - if (opt_abort) \ - abort(); \ - } \ - } \ -} -#elif (defined(_WIN32)) -#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ - a_cleanup) \ -/* Data structure. */ \ -typedef struct { \ - bool initialized; \ - a_type val; \ -} a_name##_tsd_wrapper_t; \ -/* Initialization/cleanup. */ \ -a_attr bool \ -a_name##_tsd_cleanup_wrapper(void) \ -{ \ - a_name##_tsd_wrapper_t *wrapper; \ - \ - wrapper = (a_name##_tsd_wrapper_t *) TlsGetValue(a_name##_tsd); \ - if (wrapper == NULL) \ - return (false); \ - if (a_cleanup != malloc_tsd_no_cleanup && \ - wrapper->initialized) { \ - a_type val = wrapper->val; \ - a_type tsd_static_data = a_initializer; \ - wrapper->initialized = false; \ - wrapper->val = tsd_static_data; \ - a_cleanup(&val); \ - if (wrapper->initialized) { \ - /* Trigger another cleanup round. */ \ - return (true); \ - } \ - } \ - malloc_tsd_dalloc(wrapper); \ - return (false); \ -} \ -a_attr bool \ -a_name##_tsd_boot(void) \ -{ \ - \ - a_name##_tsd = TlsAlloc(); \ - if (a_name##_tsd == TLS_OUT_OF_INDEXES) \ - return (true); \ - if (a_cleanup != malloc_tsd_no_cleanup) { \ - malloc_tsd_cleanup_register( \ - &a_name##_tsd_cleanup_wrapper); \ - } \ - a_name##_booted = true; \ - return (false); \ -} \ -/* Get/set. 
*/ \ -a_attr a_name##_tsd_wrapper_t * \ -a_name##_tsd_get_wrapper(void) \ -{ \ - a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *) \ - TlsGetValue(a_name##_tsd); \ - \ - if (wrapper == NULL) { \ - wrapper = (a_name##_tsd_wrapper_t *) \ - malloc_tsd_malloc(sizeof(a_name##_tsd_wrapper_t)); \ - if (wrapper == NULL) { \ - malloc_write(": Error allocating" \ - " TSD for "#a_name"\n"); \ - abort(); \ - } else { \ - static a_type tsd_static_data = a_initializer; \ - wrapper->initialized = false; \ - wrapper->val = tsd_static_data; \ - } \ - if (!TlsSetValue(a_name##_tsd, (void *)wrapper)) { \ - malloc_write(": Error setting" \ - " TSD for "#a_name"\n"); \ - abort(); \ - } \ - } \ - return (wrapper); \ -} \ -a_attr a_type * \ -a_name##_tsd_get(void) \ -{ \ - a_name##_tsd_wrapper_t *wrapper; \ - \ - assert(a_name##_booted); \ - wrapper = a_name##_tsd_get_wrapper(); \ - return (&wrapper->val); \ -} \ -a_attr void \ -a_name##_tsd_set(a_type *val) \ -{ \ - a_name##_tsd_wrapper_t *wrapper; \ - \ - assert(a_name##_booted); \ - wrapper = a_name##_tsd_get_wrapper(); \ - wrapper->val = *(val); \ - if (a_cleanup != malloc_tsd_no_cleanup) \ - wrapper->initialized = true; \ -} -#else -#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ - a_cleanup) \ -/* Data structure. */ \ -typedef struct { \ - bool initialized; \ - a_type val; \ -} a_name##_tsd_wrapper_t; \ -/* Initialization/cleanup. */ \ -a_attr void \ -a_name##_tsd_cleanup_wrapper(void *arg) \ -{ \ - a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *)arg;\ - \ - if (a_cleanup != malloc_tsd_no_cleanup && \ - wrapper->initialized) { \ - wrapper->initialized = false; \ - a_cleanup(&wrapper->val); \ - if (wrapper->initialized) { \ - /* Trigger another cleanup round. */ \ - if (pthread_setspecific(a_name##_tsd, \ - (void *)wrapper)) { \ - malloc_write(": Error" \ - " setting TSD for "#a_name"\n"); \ - if (opt_abort) \ - abort(); \ - } \ - return; \ - } \ - } \ - malloc_tsd_dalloc(wrapper); \ -} \ -a_attr bool \ -a_name##_tsd_boot(void) \ -{ \ - \ - if (pthread_key_create(&a_name##_tsd, \ - a_name##_tsd_cleanup_wrapper) != 0) \ - return (true); \ - a_name##_booted = true; \ - return (false); \ -} \ -/* Get/set. 
*/ \ -a_attr a_name##_tsd_wrapper_t * \ -a_name##_tsd_get_wrapper(void) \ -{ \ - a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *) \ - pthread_getspecific(a_name##_tsd); \ - \ - if (wrapper == NULL) { \ - tsd_init_block_t block; \ - wrapper = tsd_init_check_recursion( \ - &a_name##_tsd_init_head, &block); \ - if (wrapper) \ - return (wrapper); \ - wrapper = (a_name##_tsd_wrapper_t *) \ - malloc_tsd_malloc(sizeof(a_name##_tsd_wrapper_t)); \ - block.data = wrapper; \ - if (wrapper == NULL) { \ - malloc_write(": Error allocating" \ - " TSD for "#a_name"\n"); \ - abort(); \ - } else { \ - static a_type tsd_static_data = a_initializer; \ - wrapper->initialized = false; \ - wrapper->val = tsd_static_data; \ - } \ - if (pthread_setspecific(a_name##_tsd, \ - (void *)wrapper)) { \ - malloc_write(": Error setting" \ - " TSD for "#a_name"\n"); \ - abort(); \ - } \ - tsd_init_finish(&a_name##_tsd_init_head, &block); \ - } \ - return (wrapper); \ -} \ -a_attr a_type * \ -a_name##_tsd_get(void) \ -{ \ - a_name##_tsd_wrapper_t *wrapper; \ - \ - assert(a_name##_booted); \ - wrapper = a_name##_tsd_get_wrapper(); \ - return (&wrapper->val); \ -} \ -a_attr void \ -a_name##_tsd_set(a_type *val) \ -{ \ - a_name##_tsd_wrapper_t *wrapper; \ - \ - assert(a_name##_booted); \ - wrapper = a_name##_tsd_get_wrapper(); \ - wrapper->val = *(val); \ - if (a_cleanup != malloc_tsd_no_cleanup) \ - wrapper->initialized = true; \ -} -#endif - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ - !defined(_WIN32)) -struct tsd_init_block_s { - ql_elm(tsd_init_block_t) link; - pthread_t thread; - void *data; -}; -struct tsd_init_head_s { - ql_head(tsd_init_block_t) blocks; - malloc_mutex_t lock; -}; -#endif - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -void *malloc_tsd_malloc(size_t size); -void malloc_tsd_dalloc(void *wrapper); -void malloc_tsd_no_cleanup(void *); -void malloc_tsd_cleanup_register(bool (*f)(void)); -void malloc_tsd_boot(void); -#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ - !defined(_WIN32)) -void *tsd_init_check_recursion(tsd_init_head_t *head, - tsd_init_block_t *block); -void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block); -#endif - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/lib/jemalloc-3.6.0/include/jemalloc/internal/util.h b/lib/jemalloc-3.6.0/include/jemalloc/internal/util.h deleted file mode 100644 index 6b938f7..0000000 --- a/lib/jemalloc-3.6.0/include/jemalloc/internal/util.h +++ /dev/null @@ -1,162 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -/* Size of stack-allocated buffer passed to buferror(). */ -#define BUFERROR_BUF 64 - -/* - * Size of stack-allocated buffer used by malloc_{,v,vc}printf(). This must be - * large enough for all possible uses within jemalloc. - */ -#define MALLOC_PRINTF_BUFSIZE 4096 - -/* - * Wrap a cpp argument that contains commas such that it isn't broken up into - * multiple arguments. - */ -#define JEMALLOC_ARG_CONCAT(...) 
__VA_ARGS__ - -/* - * Silence compiler warnings due to uninitialized values. This is used - * wherever the compiler fails to recognize that the variable is never used - * uninitialized. - */ -#ifdef JEMALLOC_CC_SILENCE -# define JEMALLOC_CC_SILENCE_INIT(v) = v -#else -# define JEMALLOC_CC_SILENCE_INIT(v) -#endif - -/* - * Define a custom assert() in order to reduce the chances of deadlock during - * assertion failure. - */ -#ifndef assert -#define assert(e) do { \ - if (config_debug && !(e)) { \ - malloc_printf( \ - ": %s:%d: Failed assertion: \"%s\"\n", \ - __FILE__, __LINE__, #e); \ - abort(); \ - } \ -} while (0) -#endif - -#ifndef not_reached -#define not_reached() do { \ - if (config_debug) { \ - malloc_printf( \ - ": %s:%d: Unreachable code reached\n", \ - __FILE__, __LINE__); \ - abort(); \ - } \ -} while (0) -#endif - -#ifndef not_implemented -#define not_implemented() do { \ - if (config_debug) { \ - malloc_printf(": %s:%d: Not implemented\n", \ - __FILE__, __LINE__); \ - abort(); \ - } \ -} while (0) -#endif - -#ifndef assert_not_implemented -#define assert_not_implemented(e) do { \ - if (config_debug && !(e)) \ - not_implemented(); \ -} while (0) -#endif - -/* Use to assert a particular configuration, e.g., cassert(config_debug). */ -#define cassert(c) do { \ - if ((c) == false) \ - not_reached(); \ -} while (0) - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -int buferror(int err, char *buf, size_t buflen); -uintmax_t malloc_strtoumax(const char *restrict nptr, - char **restrict endptr, int base); -void malloc_write(const char *s); - -/* - * malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating - * point math. - */ -int malloc_vsnprintf(char *str, size_t size, const char *format, - va_list ap); -int malloc_snprintf(char *str, size_t size, const char *format, ...) - JEMALLOC_ATTR(format(printf, 3, 4)); -void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque, - const char *format, va_list ap); -void malloc_cprintf(void (*write)(void *, const char *), void *cbopaque, - const char *format, ...) JEMALLOC_ATTR(format(printf, 3, 4)); -void malloc_printf(const char *format, ...) - JEMALLOC_ATTR(format(printf, 1, 2)); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -size_t pow2_ceil(size_t x); -void set_errno(int errnum); -int get_errno(void); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_UTIL_C_)) -/* Compute the smallest power of 2 that is >= x. 
*/ -JEMALLOC_INLINE size_t -pow2_ceil(size_t x) -{ - - x--; - x |= x >> 1; - x |= x >> 2; - x |= x >> 4; - x |= x >> 8; - x |= x >> 16; -#if (LG_SIZEOF_PTR == 3) - x |= x >> 32; -#endif - x++; - return (x); -} - -/* Sets error code */ -JEMALLOC_INLINE void -set_errno(int errnum) -{ - -#ifdef _WIN32 - SetLastError(errnum); -#else - errno = errnum; -#endif -} - -/* Get last error code */ -JEMALLOC_INLINE int -get_errno(void) -{ - -#ifdef _WIN32 - return (GetLastError()); -#else - return (errno); -#endif -} -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/lib/jemalloc-3.6.0/include/jemalloc/jemalloc.sh b/lib/jemalloc-3.6.0/include/jemalloc/jemalloc.sh deleted file mode 100755 index e4738eb..0000000 --- a/lib/jemalloc-3.6.0/include/jemalloc/jemalloc.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/sh - -objroot=$1 - -cat < -#include - -#define JEMALLOC_VERSION "@jemalloc_version@" -#define JEMALLOC_VERSION_MAJOR @jemalloc_version_major@ -#define JEMALLOC_VERSION_MINOR @jemalloc_version_minor@ -#define JEMALLOC_VERSION_BUGFIX @jemalloc_version_bugfix@ -#define JEMALLOC_VERSION_NREV @jemalloc_version_nrev@ -#define JEMALLOC_VERSION_GID "@jemalloc_version_gid@" - -# define MALLOCX_LG_ALIGN(la) (la) -# if LG_SIZEOF_PTR == 2 -# define MALLOCX_ALIGN(a) (ffs(a)-1) -# else -# define MALLOCX_ALIGN(a) \ - ((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31) -# endif -# define MALLOCX_ZERO ((int)0x40) -/* Bias arena index bits so that 0 encodes "MALLOCX_ARENA() unspecified". */ -# define MALLOCX_ARENA(a) ((int)(((a)+1) << 8)) - -#ifdef JEMALLOC_EXPERIMENTAL -# define ALLOCM_LG_ALIGN(la) (la) -# if LG_SIZEOF_PTR == 2 -# define ALLOCM_ALIGN(a) (ffs(a)-1) -# else -# define ALLOCM_ALIGN(a) \ - ((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31) -# endif -# define ALLOCM_ZERO ((int)0x40) -# define ALLOCM_NO_MOVE ((int)0x80) -/* Bias arena index bits so that 0 encodes "ALLOCM_ARENA() unspecified". 
*/ -# define ALLOCM_ARENA(a) ((int)(((a)+1) << 8)) -# define ALLOCM_SUCCESS 0 -# define ALLOCM_ERR_OOM 1 -# define ALLOCM_ERR_NOT_MOVED 2 -#endif - -#ifdef JEMALLOC_HAVE_ATTR -# define JEMALLOC_ATTR(s) __attribute__((s)) -# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default")) -# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s)) -# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s)) -# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline) -#elif _MSC_VER -# define JEMALLOC_ATTR(s) -# ifdef DLLEXPORT -# define JEMALLOC_EXPORT __declspec(dllexport) -# else -# define JEMALLOC_EXPORT __declspec(dllimport) -# endif -# define JEMALLOC_ALIGNED(s) __declspec(align(s)) -# define JEMALLOC_SECTION(s) __declspec(allocate(s)) -# define JEMALLOC_NOINLINE __declspec(noinline) -#else -# define JEMALLOC_ATTR(s) -# define JEMALLOC_EXPORT -# define JEMALLOC_ALIGNED(s) -# define JEMALLOC_SECTION(s) -# define JEMALLOC_NOINLINE -#endif diff --git a/lib/jemalloc-3.6.0/include/jemalloc/jemalloc_mangle.sh b/lib/jemalloc-3.6.0/include/jemalloc/jemalloc_mangle.sh deleted file mode 100755 index df328b7..0000000 --- a/lib/jemalloc-3.6.0/include/jemalloc/jemalloc_mangle.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/bin/sh - -public_symbols_txt=$1 -symbol_prefix=$2 - -cat < 1000 -#pragma once -#endif - -#include "stdint.h" - -// 7.8 Format conversion of integer types - -typedef struct { - intmax_t quot; - intmax_t rem; -} imaxdiv_t; - -// 7.8.1 Macros for format specifiers - -#if !defined(__cplusplus) || defined(__STDC_FORMAT_MACROS) // [ See footnote 185 at page 198 - -#ifdef _WIN64 -# define __PRI64_PREFIX "l" -# define __PRIPTR_PREFIX "l" -#else -# define __PRI64_PREFIX "ll" -# define __PRIPTR_PREFIX -#endif - -// The fprintf macros for signed integers are: -#define PRId8 "d" -#define PRIi8 "i" -#define PRIdLEAST8 "d" -#define PRIiLEAST8 "i" -#define PRIdFAST8 "d" -#define PRIiFAST8 "i" - -#define PRId16 "hd" -#define PRIi16 "hi" -#define PRIdLEAST16 "hd" -#define PRIiLEAST16 "hi" -#define PRIdFAST16 "hd" -#define PRIiFAST16 "hi" - -#define PRId32 "d" -#define PRIi32 "i" -#define PRIdLEAST32 "d" -#define PRIiLEAST32 "i" -#define PRIdFAST32 "d" -#define PRIiFAST32 "i" - -#define PRId64 __PRI64_PREFIX "d" -#define PRIi64 __PRI64_PREFIX "i" -#define PRIdLEAST64 __PRI64_PREFIX "d" -#define PRIiLEAST64 __PRI64_PREFIX "i" -#define PRIdFAST64 __PRI64_PREFIX "d" -#define PRIiFAST64 __PRI64_PREFIX "i" - -#define PRIdMAX __PRI64_PREFIX "d" -#define PRIiMAX __PRI64_PREFIX "i" - -#define PRIdPTR __PRIPTR_PREFIX "d" -#define PRIiPTR __PRIPTR_PREFIX "i" - -// The fprintf macros for unsigned integers are: -#define PRIo8 "o" -#define PRIu8 "u" -#define PRIx8 "x" -#define PRIX8 "X" -#define PRIoLEAST8 "o" -#define PRIuLEAST8 "u" -#define PRIxLEAST8 "x" -#define PRIXLEAST8 "X" -#define PRIoFAST8 "o" -#define PRIuFAST8 "u" -#define PRIxFAST8 "x" -#define PRIXFAST8 "X" - -#define PRIo16 "ho" -#define PRIu16 "hu" -#define PRIx16 "hx" -#define PRIX16 "hX" -#define PRIoLEAST16 "ho" -#define PRIuLEAST16 "hu" -#define PRIxLEAST16 "hx" -#define PRIXLEAST16 "hX" -#define PRIoFAST16 "ho" -#define PRIuFAST16 "hu" -#define PRIxFAST16 "hx" -#define PRIXFAST16 "hX" - -#define PRIo32 "o" -#define PRIu32 "u" -#define PRIx32 "x" -#define PRIX32 "X" -#define PRIoLEAST32 "o" -#define PRIuLEAST32 "u" -#define PRIxLEAST32 "x" -#define PRIXLEAST32 "X" -#define PRIoFAST32 "o" -#define PRIuFAST32 "u" -#define PRIxFAST32 "x" -#define PRIXFAST32 "X" - -#define PRIo64 __PRI64_PREFIX "o" -#define PRIu64 __PRI64_PREFIX "u" -#define PRIx64 __PRI64_PREFIX "x" -#define 
PRIX64 __PRI64_PREFIX "X" -#define PRIoLEAST64 __PRI64_PREFIX "o" -#define PRIuLEAST64 __PRI64_PREFIX "u" -#define PRIxLEAST64 __PRI64_PREFIX "x" -#define PRIXLEAST64 __PRI64_PREFIX "X" -#define PRIoFAST64 __PRI64_PREFIX "o" -#define PRIuFAST64 __PRI64_PREFIX "u" -#define PRIxFAST64 __PRI64_PREFIX "x" -#define PRIXFAST64 __PRI64_PREFIX "X" - -#define PRIoMAX __PRI64_PREFIX "o" -#define PRIuMAX __PRI64_PREFIX "u" -#define PRIxMAX __PRI64_PREFIX "x" -#define PRIXMAX __PRI64_PREFIX "X" - -#define PRIoPTR __PRIPTR_PREFIX "o" -#define PRIuPTR __PRIPTR_PREFIX "u" -#define PRIxPTR __PRIPTR_PREFIX "x" -#define PRIXPTR __PRIPTR_PREFIX "X" - -// The fscanf macros for signed integers are: -#define SCNd8 "d" -#define SCNi8 "i" -#define SCNdLEAST8 "d" -#define SCNiLEAST8 "i" -#define SCNdFAST8 "d" -#define SCNiFAST8 "i" - -#define SCNd16 "hd" -#define SCNi16 "hi" -#define SCNdLEAST16 "hd" -#define SCNiLEAST16 "hi" -#define SCNdFAST16 "hd" -#define SCNiFAST16 "hi" - -#define SCNd32 "ld" -#define SCNi32 "li" -#define SCNdLEAST32 "ld" -#define SCNiLEAST32 "li" -#define SCNdFAST32 "ld" -#define SCNiFAST32 "li" - -#define SCNd64 "I64d" -#define SCNi64 "I64i" -#define SCNdLEAST64 "I64d" -#define SCNiLEAST64 "I64i" -#define SCNdFAST64 "I64d" -#define SCNiFAST64 "I64i" - -#define SCNdMAX "I64d" -#define SCNiMAX "I64i" - -#ifdef _WIN64 // [ -# define SCNdPTR "I64d" -# define SCNiPTR "I64i" -#else // _WIN64 ][ -# define SCNdPTR "ld" -# define SCNiPTR "li" -#endif // _WIN64 ] - -// The fscanf macros for unsigned integers are: -#define SCNo8 "o" -#define SCNu8 "u" -#define SCNx8 "x" -#define SCNX8 "X" -#define SCNoLEAST8 "o" -#define SCNuLEAST8 "u" -#define SCNxLEAST8 "x" -#define SCNXLEAST8 "X" -#define SCNoFAST8 "o" -#define SCNuFAST8 "u" -#define SCNxFAST8 "x" -#define SCNXFAST8 "X" - -#define SCNo16 "ho" -#define SCNu16 "hu" -#define SCNx16 "hx" -#define SCNX16 "hX" -#define SCNoLEAST16 "ho" -#define SCNuLEAST16 "hu" -#define SCNxLEAST16 "hx" -#define SCNXLEAST16 "hX" -#define SCNoFAST16 "ho" -#define SCNuFAST16 "hu" -#define SCNxFAST16 "hx" -#define SCNXFAST16 "hX" - -#define SCNo32 "lo" -#define SCNu32 "lu" -#define SCNx32 "lx" -#define SCNX32 "lX" -#define SCNoLEAST32 "lo" -#define SCNuLEAST32 "lu" -#define SCNxLEAST32 "lx" -#define SCNXLEAST32 "lX" -#define SCNoFAST32 "lo" -#define SCNuFAST32 "lu" -#define SCNxFAST32 "lx" -#define SCNXFAST32 "lX" - -#define SCNo64 "I64o" -#define SCNu64 "I64u" -#define SCNx64 "I64x" -#define SCNX64 "I64X" -#define SCNoLEAST64 "I64o" -#define SCNuLEAST64 "I64u" -#define SCNxLEAST64 "I64x" -#define SCNXLEAST64 "I64X" -#define SCNoFAST64 "I64o" -#define SCNuFAST64 "I64u" -#define SCNxFAST64 "I64x" -#define SCNXFAST64 "I64X" - -#define SCNoMAX "I64o" -#define SCNuMAX "I64u" -#define SCNxMAX "I64x" -#define SCNXMAX "I64X" - -#ifdef _WIN64 // [ -# define SCNoPTR "I64o" -# define SCNuPTR "I64u" -# define SCNxPTR "I64x" -# define SCNXPTR "I64X" -#else // _WIN64 ][ -# define SCNoPTR "lo" -# define SCNuPTR "lu" -# define SCNxPTR "lx" -# define SCNXPTR "lX" -#endif // _WIN64 ] - -#endif // __STDC_FORMAT_MACROS ] - -// 7.8.2 Functions for greatest-width integer types - -// 7.8.2.1 The imaxabs function -#define imaxabs _abs64 - -// 7.8.2.2 The imaxdiv function - -// This is modified version of div() function from Microsoft's div.c found -// in %MSVC.NET%\crt\src\div.c -#ifdef STATIC_IMAXDIV // [ -static -#else // STATIC_IMAXDIV ][ -_inline -#endif // STATIC_IMAXDIV ] -imaxdiv_t __cdecl imaxdiv(intmax_t numer, intmax_t denom) -{ - imaxdiv_t result; - - result.quot = numer / denom; - 
result.rem = numer % denom; - - if (numer < 0 && result.rem > 0) { - // did division wrong; must fix up - ++result.quot; - result.rem -= denom; - } - - return result; -} - -// 7.8.2.3 The strtoimax and strtoumax functions -#define strtoimax _strtoi64 -#define strtoumax _strtoui64 - -// 7.8.2.4 The wcstoimax and wcstoumax functions -#define wcstoimax _wcstoi64 -#define wcstoumax _wcstoui64 - - -#endif // _MSC_INTTYPES_H_ ] diff --git a/lib/jemalloc-3.6.0/include/msvc_compat/stdbool.h b/lib/jemalloc-3.6.0/include/msvc_compat/stdbool.h deleted file mode 100644 index da9ee8b..0000000 --- a/lib/jemalloc-3.6.0/include/msvc_compat/stdbool.h +++ /dev/null @@ -1,16 +0,0 @@ -#ifndef stdbool_h -#define stdbool_h - -#include - -/* MSVC doesn't define _Bool or bool in C, but does have BOOL */ -/* Note this doesn't pass autoconf's test because (bool) 0.5 != true */ -typedef BOOL _Bool; - -#define bool _Bool -#define true 1 -#define false 0 - -#define __bool_true_false_are_defined 1 - -#endif /* stdbool_h */ diff --git a/lib/jemalloc-3.6.0/include/msvc_compat/stdint.h b/lib/jemalloc-3.6.0/include/msvc_compat/stdint.h deleted file mode 100644 index d02608a..0000000 --- a/lib/jemalloc-3.6.0/include/msvc_compat/stdint.h +++ /dev/null @@ -1,247 +0,0 @@ -// ISO C9x compliant stdint.h for Microsoft Visual Studio -// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 -// -// Copyright (c) 2006-2008 Alexander Chemeris -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// 2. Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// -// 3. The name of the author may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED -// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO -// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; -// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR -// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF -// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// -/////////////////////////////////////////////////////////////////////////////// - -#ifndef _MSC_VER // [ -#error "Use this header only with Microsoft Visual C++ compilers!" 
-#endif // _MSC_VER ] - -#ifndef _MSC_STDINT_H_ // [ -#define _MSC_STDINT_H_ - -#if _MSC_VER > 1000 -#pragma once -#endif - -#include - -// For Visual Studio 6 in C++ mode and for many Visual Studio versions when -// compiling for ARM we should wrap include with 'extern "C++" {}' -// or compiler give many errors like this: -// error C2733: second C linkage of overloaded function 'wmemchr' not allowed -#ifdef __cplusplus -extern "C" { -#endif -# include -#ifdef __cplusplus -} -#endif - -// Define _W64 macros to mark types changing their size, like intptr_t. -#ifndef _W64 -# if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300 -# define _W64 __w64 -# else -# define _W64 -# endif -#endif - - -// 7.18.1 Integer types - -// 7.18.1.1 Exact-width integer types - -// Visual Studio 6 and Embedded Visual C++ 4 doesn't -// realize that, e.g. char has the same size as __int8 -// so we give up on __intX for them. -#if (_MSC_VER < 1300) - typedef signed char int8_t; - typedef signed short int16_t; - typedef signed int int32_t; - typedef unsigned char uint8_t; - typedef unsigned short uint16_t; - typedef unsigned int uint32_t; -#else - typedef signed __int8 int8_t; - typedef signed __int16 int16_t; - typedef signed __int32 int32_t; - typedef unsigned __int8 uint8_t; - typedef unsigned __int16 uint16_t; - typedef unsigned __int32 uint32_t; -#endif -typedef signed __int64 int64_t; -typedef unsigned __int64 uint64_t; - - -// 7.18.1.2 Minimum-width integer types -typedef int8_t int_least8_t; -typedef int16_t int_least16_t; -typedef int32_t int_least32_t; -typedef int64_t int_least64_t; -typedef uint8_t uint_least8_t; -typedef uint16_t uint_least16_t; -typedef uint32_t uint_least32_t; -typedef uint64_t uint_least64_t; - -// 7.18.1.3 Fastest minimum-width integer types -typedef int8_t int_fast8_t; -typedef int16_t int_fast16_t; -typedef int32_t int_fast32_t; -typedef int64_t int_fast64_t; -typedef uint8_t uint_fast8_t; -typedef uint16_t uint_fast16_t; -typedef uint32_t uint_fast32_t; -typedef uint64_t uint_fast64_t; - -// 7.18.1.4 Integer types capable of holding object pointers -#ifdef _WIN64 // [ - typedef signed __int64 intptr_t; - typedef unsigned __int64 uintptr_t; -#else // _WIN64 ][ - typedef _W64 signed int intptr_t; - typedef _W64 unsigned int uintptr_t; -#endif // _WIN64 ] - -// 7.18.1.5 Greatest-width integer types -typedef int64_t intmax_t; -typedef uint64_t uintmax_t; - - -// 7.18.2 Limits of specified-width integer types - -#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259 - -// 7.18.2.1 Limits of exact-width integer types -#define INT8_MIN ((int8_t)_I8_MIN) -#define INT8_MAX _I8_MAX -#define INT16_MIN ((int16_t)_I16_MIN) -#define INT16_MAX _I16_MAX -#define INT32_MIN ((int32_t)_I32_MIN) -#define INT32_MAX _I32_MAX -#define INT64_MIN ((int64_t)_I64_MIN) -#define INT64_MAX _I64_MAX -#define UINT8_MAX _UI8_MAX -#define UINT16_MAX _UI16_MAX -#define UINT32_MAX _UI32_MAX -#define UINT64_MAX _UI64_MAX - -// 7.18.2.2 Limits of minimum-width integer types -#define INT_LEAST8_MIN INT8_MIN -#define INT_LEAST8_MAX INT8_MAX -#define INT_LEAST16_MIN INT16_MIN -#define INT_LEAST16_MAX INT16_MAX -#define INT_LEAST32_MIN INT32_MIN -#define INT_LEAST32_MAX INT32_MAX -#define INT_LEAST64_MIN INT64_MIN -#define INT_LEAST64_MAX INT64_MAX -#define UINT_LEAST8_MAX UINT8_MAX -#define UINT_LEAST16_MAX UINT16_MAX -#define UINT_LEAST32_MAX UINT32_MAX -#define UINT_LEAST64_MAX UINT64_MAX - -// 7.18.2.3 Limits of fastest 
minimum-width integer types -#define INT_FAST8_MIN INT8_MIN -#define INT_FAST8_MAX INT8_MAX -#define INT_FAST16_MIN INT16_MIN -#define INT_FAST16_MAX INT16_MAX -#define INT_FAST32_MIN INT32_MIN -#define INT_FAST32_MAX INT32_MAX -#define INT_FAST64_MIN INT64_MIN -#define INT_FAST64_MAX INT64_MAX -#define UINT_FAST8_MAX UINT8_MAX -#define UINT_FAST16_MAX UINT16_MAX -#define UINT_FAST32_MAX UINT32_MAX -#define UINT_FAST64_MAX UINT64_MAX - -// 7.18.2.4 Limits of integer types capable of holding object pointers -#ifdef _WIN64 // [ -# define INTPTR_MIN INT64_MIN -# define INTPTR_MAX INT64_MAX -# define UINTPTR_MAX UINT64_MAX -#else // _WIN64 ][ -# define INTPTR_MIN INT32_MIN -# define INTPTR_MAX INT32_MAX -# define UINTPTR_MAX UINT32_MAX -#endif // _WIN64 ] - -// 7.18.2.5 Limits of greatest-width integer types -#define INTMAX_MIN INT64_MIN -#define INTMAX_MAX INT64_MAX -#define UINTMAX_MAX UINT64_MAX - -// 7.18.3 Limits of other integer types - -#ifdef _WIN64 // [ -# define PTRDIFF_MIN _I64_MIN -# define PTRDIFF_MAX _I64_MAX -#else // _WIN64 ][ -# define PTRDIFF_MIN _I32_MIN -# define PTRDIFF_MAX _I32_MAX -#endif // _WIN64 ] - -#define SIG_ATOMIC_MIN INT_MIN -#define SIG_ATOMIC_MAX INT_MAX - -#ifndef SIZE_MAX // [ -# ifdef _WIN64 // [ -# define SIZE_MAX _UI64_MAX -# else // _WIN64 ][ -# define SIZE_MAX _UI32_MAX -# endif // _WIN64 ] -#endif // SIZE_MAX ] - -// WCHAR_MIN and WCHAR_MAX are also defined in -#ifndef WCHAR_MIN // [ -# define WCHAR_MIN 0 -#endif // WCHAR_MIN ] -#ifndef WCHAR_MAX // [ -# define WCHAR_MAX _UI16_MAX -#endif // WCHAR_MAX ] - -#define WINT_MIN 0 -#define WINT_MAX _UI16_MAX - -#endif // __STDC_LIMIT_MACROS ] - - -// 7.18.4 Limits of other integer types - -#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260 - -// 7.18.4.1 Macros for minimum-width integer constants - -#define INT8_C(val) val##i8 -#define INT16_C(val) val##i16 -#define INT32_C(val) val##i32 -#define INT64_C(val) val##i64 - -#define UINT8_C(val) val##ui8 -#define UINT16_C(val) val##ui16 -#define UINT32_C(val) val##ui32 -#define UINT64_C(val) val##ui64 - -// 7.18.4.2 Macros for greatest-width integer constants -#define INTMAX_C INT64_C -#define UINTMAX_C UINT64_C - -#endif // __STDC_CONSTANT_MACROS ] - - -#endif // _MSC_STDINT_H_ ] diff --git a/lib/jemalloc-3.6.0/include/msvc_compat/strings.h b/lib/jemalloc-3.6.0/include/msvc_compat/strings.h deleted file mode 100644 index c84975b..0000000 --- a/lib/jemalloc-3.6.0/include/msvc_compat/strings.h +++ /dev/null @@ -1,23 +0,0 @@ -#ifndef strings_h -#define strings_h - -/* MSVC doesn't define ffs/ffsl. This dummy strings.h header is provided - * for both */ -#include -#pragma intrinsic(_BitScanForward) -static __forceinline int ffsl(long x) -{ - unsigned long i; - - if (_BitScanForward(&i, x)) - return (i + 1); - return (0); -} - -static __forceinline int ffs(int x) -{ - - return (ffsl(x)); -} - -#endif diff --git a/lib/jemalloc-3.6.0/install-sh b/lib/jemalloc-3.6.0/install-sh deleted file mode 100755 index ebc6691..0000000 --- a/lib/jemalloc-3.6.0/install-sh +++ /dev/null @@ -1,250 +0,0 @@ -#! /bin/sh -# -# install - install a program, script, or datafile -# This comes from X11R5 (mit/util/scripts/install.sh). 
-# -# Copyright 1991 by the Massachusetts Institute of Technology -# -# Permission to use, copy, modify, distribute, and sell this software and its -# documentation for any purpose is hereby granted without fee, provided that -# the above copyright notice appear in all copies and that both that -# copyright notice and this permission notice appear in supporting -# documentation, and that the name of M.I.T. not be used in advertising or -# publicity pertaining to distribution of the software without specific, -# written prior permission. M.I.T. makes no representations about the -# suitability of this software for any purpose. It is provided "as is" -# without express or implied warranty. -# -# Calling this script install-sh is preferred over install.sh, to prevent -# `make' implicit rules from creating a file called install from it -# when there is no Makefile. -# -# This script is compatible with the BSD install script, but was written -# from scratch. It can only install one file at a time, a restriction -# shared with many OS's install programs. - - -# set DOITPROG to echo to test this script - -# Don't use :- since 4.3BSD and earlier shells don't like it. -doit="${DOITPROG-}" - - -# put in absolute paths if you don't have them in your path; or use env. vars. - -mvprog="${MVPROG-mv}" -cpprog="${CPPROG-cp}" -chmodprog="${CHMODPROG-chmod}" -chownprog="${CHOWNPROG-chown}" -chgrpprog="${CHGRPPROG-chgrp}" -stripprog="${STRIPPROG-strip}" -rmprog="${RMPROG-rm}" -mkdirprog="${MKDIRPROG-mkdir}" - -transformbasename="" -transform_arg="" -instcmd="$mvprog" -chmodcmd="$chmodprog 0755" -chowncmd="" -chgrpcmd="" -stripcmd="" -rmcmd="$rmprog -f" -mvcmd="$mvprog" -src="" -dst="" -dir_arg="" - -while [ x"$1" != x ]; do - case $1 in - -c) instcmd="$cpprog" - shift - continue;; - - -d) dir_arg=true - shift - continue;; - - -m) chmodcmd="$chmodprog $2" - shift - shift - continue;; - - -o) chowncmd="$chownprog $2" - shift - shift - continue;; - - -g) chgrpcmd="$chgrpprog $2" - shift - shift - continue;; - - -s) stripcmd="$stripprog" - shift - continue;; - - -t=*) transformarg=`echo $1 | sed 's/-t=//'` - shift - continue;; - - -b=*) transformbasename=`echo $1 | sed 's/-b=//'` - shift - continue;; - - *) if [ x"$src" = x ] - then - src=$1 - else - # this colon is to work around a 386BSD /bin/sh bug - : - dst=$1 - fi - shift - continue;; - esac -done - -if [ x"$src" = x ] -then - echo "install: no input file specified" - exit 1 -else - true -fi - -if [ x"$dir_arg" != x ]; then - dst=$src - src="" - - if [ -d $dst ]; then - instcmd=: - else - instcmd=mkdir - fi -else - -# Waiting for this to be detected by the "$instcmd $src $dsttmp" command -# might cause directories to be created, which would be especially bad -# if $src (and thus $dsttmp) contains '*'. - - if [ -f $src -o -d $src ] - then - true - else - echo "install: $src does not exist" - exit 1 - fi - - if [ x"$dst" = x ] - then - echo "install: no destination specified" - exit 1 - else - true - fi - -# If destination is a directory, append the input filename; if your system -# does not like double slashes in filenames, you may need to add some logic - - if [ -d $dst ] - then - dst="$dst"/`basename $src` - else - true - fi -fi - -## this sed command emulates the dirname command -dstdir=`echo $dst | sed -e 's,[^/]*$,,;s,/$,,;s,^$,.,'` - -# Make sure that the destination directory exists. -# this part is taken from Noah Friedman's mkinstalldirs script - -# Skip lots of stat calls in the usual case. -if [ ! 
-d "$dstdir" ]; then -defaultIFS=' -' -IFS="${IFS-${defaultIFS}}" - -oIFS="${IFS}" -# Some sh's can't handle IFS=/ for some reason. -IFS='%' -set - `echo ${dstdir} | sed -e 's@/@%@g' -e 's@^%@/@'` -IFS="${oIFS}" - -pathcomp='' - -while [ $# -ne 0 ] ; do - pathcomp="${pathcomp}${1}" - shift - - if [ ! -d "${pathcomp}" ] ; - then - $mkdirprog "${pathcomp}" - else - true - fi - - pathcomp="${pathcomp}/" -done -fi - -if [ x"$dir_arg" != x ] -then - $doit $instcmd $dst && - - if [ x"$chowncmd" != x ]; then $doit $chowncmd $dst; else true ; fi && - if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dst; else true ; fi && - if [ x"$stripcmd" != x ]; then $doit $stripcmd $dst; else true ; fi && - if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dst; else true ; fi -else - -# If we're going to rename the final executable, determine the name now. - - if [ x"$transformarg" = x ] - then - dstfile=`basename $dst` - else - dstfile=`basename $dst $transformbasename | - sed $transformarg`$transformbasename - fi - -# don't allow the sed command to completely eliminate the filename - - if [ x"$dstfile" = x ] - then - dstfile=`basename $dst` - else - true - fi - -# Make a temp file name in the proper directory. - - dsttmp=$dstdir/#inst.$$# - -# Move or copy the file name to the temp name - - $doit $instcmd $src $dsttmp && - - trap "rm -f ${dsttmp}" 0 && - -# and set any options; do chmod last to preserve setuid bits - -# If any of these fail, we abort the whole thing. If we want to -# ignore errors from any of these, just make sure not to ignore -# errors from the above "$doit $instcmd $src $dsttmp" command. - - if [ x"$chowncmd" != x ]; then $doit $chowncmd $dsttmp; else true;fi && - if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dsttmp; else true;fi && - if [ x"$stripcmd" != x ]; then $doit $stripcmd $dsttmp; else true;fi && - if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dsttmp; else true;fi && - -# Now rename the file to the real destination. - - $doit $rmcmd -f $dstdir/$dstfile && - $doit $mvcmd $dsttmp $dstdir/$dstfile - -fi && - - -exit 0 diff --git a/lib/jemalloc-3.6.0/src/arena.c b/lib/jemalloc-3.6.0/src/arena.c deleted file mode 100644 index dad707b..0000000 --- a/lib/jemalloc-3.6.0/src/arena.c +++ /dev/null @@ -1,2577 +0,0 @@ -#define JEMALLOC_ARENA_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ -/* Data. */ - -ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT; -arena_bin_info_t arena_bin_info[NBINS]; - -JEMALLOC_ALIGNED(CACHELINE) -const uint8_t small_size2bin[] = { -#define S2B_8(i) i, -#define S2B_16(i) S2B_8(i) S2B_8(i) -#define S2B_32(i) S2B_16(i) S2B_16(i) -#define S2B_64(i) S2B_32(i) S2B_32(i) -#define S2B_128(i) S2B_64(i) S2B_64(i) -#define S2B_256(i) S2B_128(i) S2B_128(i) -#define S2B_512(i) S2B_256(i) S2B_256(i) -#define S2B_1024(i) S2B_512(i) S2B_512(i) -#define S2B_2048(i) S2B_1024(i) S2B_1024(i) -#define S2B_4096(i) S2B_2048(i) S2B_2048(i) -#define S2B_8192(i) S2B_4096(i) S2B_4096(i) -#define SIZE_CLASS(bin, delta, size) \ - S2B_##delta(bin) - SIZE_CLASSES -#undef S2B_8 -#undef S2B_16 -#undef S2B_32 -#undef S2B_64 -#undef S2B_128 -#undef S2B_256 -#undef S2B_512 -#undef S2B_1024 -#undef S2B_2048 -#undef S2B_4096 -#undef S2B_8192 -#undef SIZE_CLASS -}; - -/******************************************************************************/ -/* - * Function prototypes for static functions that are referenced prior to - * definition. 
- */ - -static void arena_purge(arena_t *arena, bool all); -static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, - bool cleaned); -static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, - arena_run_t *run, arena_bin_t *bin); -static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, - arena_run_t *run, arena_bin_t *bin); - -/******************************************************************************/ - -static inline int -arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b) -{ - uintptr_t a_mapelm = (uintptr_t)a; - uintptr_t b_mapelm = (uintptr_t)b; - - assert(a != NULL); - assert(b != NULL); - - return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm)); -} - -/* Generate red-black tree functions. */ -rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_t, - u.rb_link, arena_run_comp) - -static inline int -arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b) -{ - int ret; - size_t a_size = a->bits & ~PAGE_MASK; - size_t b_size = b->bits & ~PAGE_MASK; - - ret = (a_size > b_size) - (a_size < b_size); - if (ret == 0) { - uintptr_t a_mapelm, b_mapelm; - - if ((a->bits & CHUNK_MAP_KEY) != CHUNK_MAP_KEY) - a_mapelm = (uintptr_t)a; - else { - /* - * Treat keys as though they are lower than anything - * else. - */ - a_mapelm = 0; - } - b_mapelm = (uintptr_t)b; - - ret = (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm); - } - - return (ret); -} - -/* Generate red-black tree functions. */ -rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, arena_chunk_map_t, - u.rb_link, arena_avail_comp) - -static inline int -arena_chunk_dirty_comp(arena_chunk_t *a, arena_chunk_t *b) -{ - - assert(a != NULL); - assert(b != NULL); - - /* - * Short-circuit for self comparison. The following comparison code - * would come to the same result, but at the cost of executing the slow - * path. - */ - if (a == b) - return (0); - - /* - * Order such that chunks with higher fragmentation are "less than" - * those with lower fragmentation -- purging order is from "least" to - * "greatest". Fragmentation is measured as: - * - * mean current avail run size - * -------------------------------- - * mean defragmented avail run size - * - * navail - * ----------- - * nruns_avail nruns_avail-nruns_adjac - * = ========================= = ----------------------- - * navail nruns_avail - * ----------------------- - * nruns_avail-nruns_adjac - * - * The following code multiplies away the denominator prior to - * comparison, in order to avoid division. - * - */ - { - size_t a_val = (a->nruns_avail - a->nruns_adjac) * - b->nruns_avail; - size_t b_val = (b->nruns_avail - b->nruns_adjac) * - a->nruns_avail; - - if (a_val < b_val) - return (1); - if (a_val > b_val) - return (-1); - } - /* - * Break ties by chunk address. For fragmented chunks, report lower - * addresses as "lower", so that fragmentation reduction happens first - * at lower addresses. However, use the opposite ordering for - * unfragmented chunks, in order to increase the chances of - * re-allocating dirty runs. - */ - { - uintptr_t a_chunk = (uintptr_t)a; - uintptr_t b_chunk = (uintptr_t)b; - int ret = ((a_chunk > b_chunk) - (a_chunk < b_chunk)); - if (a->nruns_adjac == 0) { - assert(b->nruns_adjac == 0); - ret = -ret; - } - return (ret); - } -} - -/* Generate red-black tree functions. 
*/ -rb_gen(static UNUSED, arena_chunk_dirty_, arena_chunk_tree_t, arena_chunk_t, - dirty_link, arena_chunk_dirty_comp) - -static inline bool -arena_avail_adjac_pred(arena_chunk_t *chunk, size_t pageind) -{ - bool ret; - - if (pageind-1 < map_bias) - ret = false; - else { - ret = (arena_mapbits_allocated_get(chunk, pageind-1) == 0); - assert(ret == false || arena_mapbits_dirty_get(chunk, - pageind-1) != arena_mapbits_dirty_get(chunk, pageind)); - } - return (ret); -} - -static inline bool -arena_avail_adjac_succ(arena_chunk_t *chunk, size_t pageind, size_t npages) -{ - bool ret; - - if (pageind+npages == chunk_npages) - ret = false; - else { - assert(pageind+npages < chunk_npages); - ret = (arena_mapbits_allocated_get(chunk, pageind+npages) == 0); - assert(ret == false || arena_mapbits_dirty_get(chunk, pageind) - != arena_mapbits_dirty_get(chunk, pageind+npages)); - } - return (ret); -} - -static inline bool -arena_avail_adjac(arena_chunk_t *chunk, size_t pageind, size_t npages) -{ - - return (arena_avail_adjac_pred(chunk, pageind) || - arena_avail_adjac_succ(chunk, pageind, npages)); -} - -static void -arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind, - size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ) -{ - - assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> - LG_PAGE)); - - /* - * chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be - * removed and reinserted even if the run to be inserted is clean. - */ - if (chunk->ndirty != 0) - arena_chunk_dirty_remove(&arena->chunks_dirty, chunk); - - if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind)) - chunk->nruns_adjac++; - if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages)) - chunk->nruns_adjac++; - chunk->nruns_avail++; - assert(chunk->nruns_avail > chunk->nruns_adjac); - - if (arena_mapbits_dirty_get(chunk, pageind) != 0) { - arena->ndirty += npages; - chunk->ndirty += npages; - } - if (chunk->ndirty != 0) - arena_chunk_dirty_insert(&arena->chunks_dirty, chunk); - - arena_avail_tree_insert(&arena->runs_avail, arena_mapp_get(chunk, - pageind)); -} - -static void -arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind, - size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ) -{ - - assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> - LG_PAGE)); - - /* - * chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be - * removed and reinserted even if the run to be removed is clean. 
- */ - if (chunk->ndirty != 0) - arena_chunk_dirty_remove(&arena->chunks_dirty, chunk); - - if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind)) - chunk->nruns_adjac--; - if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages)) - chunk->nruns_adjac--; - chunk->nruns_avail--; - assert(chunk->nruns_avail > chunk->nruns_adjac || (chunk->nruns_avail - == 0 && chunk->nruns_adjac == 0)); - - if (arena_mapbits_dirty_get(chunk, pageind) != 0) { - arena->ndirty -= npages; - chunk->ndirty -= npages; - } - if (chunk->ndirty != 0) - arena_chunk_dirty_insert(&arena->chunks_dirty, chunk); - - arena_avail_tree_remove(&arena->runs_avail, arena_mapp_get(chunk, - pageind)); -} - -static inline void * -arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info) -{ - void *ret; - unsigned regind; - bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run + - (uintptr_t)bin_info->bitmap_offset); - - assert(run->nfree > 0); - assert(bitmap_full(bitmap, &bin_info->bitmap_info) == false); - - regind = bitmap_sfu(bitmap, &bin_info->bitmap_info); - ret = (void *)((uintptr_t)run + (uintptr_t)bin_info->reg0_offset + - (uintptr_t)(bin_info->reg_interval * regind)); - run->nfree--; - if (regind == run->nextind) - run->nextind++; - assert(regind < run->nextind); - return (ret); -} - -static inline void -arena_run_reg_dalloc(arena_run_t *run, void *ptr) -{ - arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - size_t mapbits = arena_mapbits_get(chunk, pageind); - size_t binind = arena_ptr_small_binind_get(ptr, mapbits); - arena_bin_info_t *bin_info = &arena_bin_info[binind]; - unsigned regind = arena_run_regind(run, bin_info, ptr); - bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run + - (uintptr_t)bin_info->bitmap_offset); - - assert(run->nfree < bin_info->nregs); - /* Freeing an interior pointer can cause assertion failure. */ - assert(((uintptr_t)ptr - ((uintptr_t)run + - (uintptr_t)bin_info->reg0_offset)) % - (uintptr_t)bin_info->reg_interval == 0); - assert((uintptr_t)ptr >= (uintptr_t)run + - (uintptr_t)bin_info->reg0_offset); - /* Freeing an unallocated pointer can cause assertion failure. 
*/ - assert(bitmap_get(bitmap, &bin_info->bitmap_info, regind)); - - bitmap_unset(bitmap, &bin_info->bitmap_info, regind); - run->nfree++; -} - -static inline void -arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages) -{ - - VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind << - LG_PAGE)), (npages << LG_PAGE)); - memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0, - (npages << LG_PAGE)); -} - -static inline void -arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind) -{ - - VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind << - LG_PAGE)), PAGE); -} - -static inline void -arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind) -{ - size_t i; - UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE)); - - arena_run_page_mark_zeroed(chunk, run_ind); - for (i = 0; i < PAGE / sizeof(size_t); i++) - assert(p[i] == 0); -} - -static void -arena_cactive_update(arena_t *arena, size_t add_pages, size_t sub_pages) -{ - - if (config_stats) { - ssize_t cactive_diff = CHUNK_CEILING((arena->nactive + - add_pages) << LG_PAGE) - CHUNK_CEILING((arena->nactive - - sub_pages) << LG_PAGE); - if (cactive_diff != 0) - stats_cactive_add(cactive_diff); - } -} - -static void -arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind, - size_t flag_dirty, size_t need_pages) -{ - size_t total_pages, rem_pages; - - total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >> - LG_PAGE; - assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) == - flag_dirty); - assert(need_pages <= total_pages); - rem_pages = total_pages - need_pages; - - arena_avail_remove(arena, chunk, run_ind, total_pages, true, true); - arena_cactive_update(arena, need_pages, 0); - arena->nactive += need_pages; - - /* Keep track of trailing unused pages for later use. */ - if (rem_pages > 0) { - if (flag_dirty != 0) { - arena_mapbits_unallocated_set(chunk, - run_ind+need_pages, (rem_pages << LG_PAGE), - flag_dirty); - arena_mapbits_unallocated_set(chunk, - run_ind+total_pages-1, (rem_pages << LG_PAGE), - flag_dirty); - } else { - arena_mapbits_unallocated_set(chunk, run_ind+need_pages, - (rem_pages << LG_PAGE), - arena_mapbits_unzeroed_get(chunk, - run_ind+need_pages)); - arena_mapbits_unallocated_set(chunk, - run_ind+total_pages-1, (rem_pages << LG_PAGE), - arena_mapbits_unzeroed_get(chunk, - run_ind+total_pages-1)); - } - arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages, - false, true); - } -} - -static void -arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size, - bool remove, bool zero) -{ - arena_chunk_t *chunk; - size_t flag_dirty, run_ind, need_pages, i; - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE); - flag_dirty = arena_mapbits_dirty_get(chunk, run_ind); - need_pages = (size >> LG_PAGE); - assert(need_pages > 0); - - if (remove) { - arena_run_split_remove(arena, chunk, run_ind, flag_dirty, - need_pages); - } - - if (zero) { - if (flag_dirty == 0) { - /* - * The run is clean, so some pages may be zeroed (i.e. - * never before touched). - */ - for (i = 0; i < need_pages; i++) { - if (arena_mapbits_unzeroed_get(chunk, run_ind+i) - != 0) - arena_run_zero(chunk, run_ind+i, 1); - else if (config_debug) { - arena_run_page_validate_zeroed(chunk, - run_ind+i); - } else { - arena_run_page_mark_zeroed(chunk, - run_ind+i); - } - } - } else { - /* The run is dirty, so all pages must be zeroed. 
*/ - arena_run_zero(chunk, run_ind, need_pages); - } - } else { - VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + - (run_ind << LG_PAGE)), (need_pages << LG_PAGE)); - } - - /* - * Set the last element first, in case the run only contains one page - * (i.e. both statements set the same element). - */ - arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty); - arena_mapbits_large_set(chunk, run_ind, size, flag_dirty); -} - -static void -arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero) -{ - - arena_run_split_large_helper(arena, run, size, true, zero); -} - -static void -arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero) -{ - - arena_run_split_large_helper(arena, run, size, false, zero); -} - -static void -arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size, - size_t binind) -{ - arena_chunk_t *chunk; - size_t flag_dirty, run_ind, need_pages, i; - - assert(binind != BININD_INVALID); - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE); - flag_dirty = arena_mapbits_dirty_get(chunk, run_ind); - need_pages = (size >> LG_PAGE); - assert(need_pages > 0); - - arena_run_split_remove(arena, chunk, run_ind, flag_dirty, need_pages); - - /* - * Propagate the dirty and unzeroed flags to the allocated small run, - * so that arena_dalloc_bin_run() has the ability to conditionally trim - * clean pages. - */ - arena_mapbits_small_set(chunk, run_ind, 0, binind, flag_dirty); - /* - * The first page will always be dirtied during small run - * initialization, so a validation failure here would not actually - * cause an observable failure. - */ - if (config_debug && flag_dirty == 0 && arena_mapbits_unzeroed_get(chunk, - run_ind) == 0) - arena_run_page_validate_zeroed(chunk, run_ind); - for (i = 1; i < need_pages - 1; i++) { - arena_mapbits_small_set(chunk, run_ind+i, i, binind, 0); - if (config_debug && flag_dirty == 0 && - arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0) - arena_run_page_validate_zeroed(chunk, run_ind+i); - } - arena_mapbits_small_set(chunk, run_ind+need_pages-1, need_pages-1, - binind, flag_dirty); - if (config_debug && flag_dirty == 0 && arena_mapbits_unzeroed_get(chunk, - run_ind+need_pages-1) == 0) - arena_run_page_validate_zeroed(chunk, run_ind+need_pages-1); - VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + - (run_ind << LG_PAGE)), (need_pages << LG_PAGE)); -} - -static arena_chunk_t * -arena_chunk_init_spare(arena_t *arena) -{ - arena_chunk_t *chunk; - - assert(arena->spare != NULL); - - chunk = arena->spare; - arena->spare = NULL; - - assert(arena_mapbits_allocated_get(chunk, map_bias) == 0); - assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0); - assert(arena_mapbits_unallocated_size_get(chunk, map_bias) == - arena_maxclass); - assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) == - arena_maxclass); - assert(arena_mapbits_dirty_get(chunk, map_bias) == - arena_mapbits_dirty_get(chunk, chunk_npages-1)); - - return (chunk); -} - -static arena_chunk_t * -arena_chunk_init_hard(arena_t *arena) -{ - arena_chunk_t *chunk; - bool zero; - size_t unzeroed, i; - - assert(arena->spare == NULL); - - zero = false; - malloc_mutex_unlock(&arena->lock); - chunk = (arena_chunk_t *)chunk_alloc(chunksize, chunksize, false, - &zero, arena->dss_prec); - malloc_mutex_lock(&arena->lock); - if (chunk == NULL) - return (NULL); - if (config_stats) - arena->stats.mapped += chunksize; - - chunk->arena = arena; - - /* - 
* Claim that no pages are in use, since the header is merely overhead. - */ - chunk->ndirty = 0; - - chunk->nruns_avail = 0; - chunk->nruns_adjac = 0; - - /* - * Initialize the map to contain one maximal free untouched run. Mark - * the pages as zeroed iff chunk_alloc() returned a zeroed chunk. - */ - unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED; - arena_mapbits_unallocated_set(chunk, map_bias, arena_maxclass, - unzeroed); - /* - * There is no need to initialize the internal page map entries unless - * the chunk is not zeroed. - */ - if (zero == false) { - VALGRIND_MAKE_MEM_UNDEFINED((void *)arena_mapp_get(chunk, - map_bias+1), (size_t)((uintptr_t) arena_mapp_get(chunk, - chunk_npages-1) - (uintptr_t)arena_mapp_get(chunk, - map_bias+1))); - for (i = map_bias+1; i < chunk_npages-1; i++) - arena_mapbits_unzeroed_set(chunk, i, unzeroed); - } else { - VALGRIND_MAKE_MEM_DEFINED((void *)arena_mapp_get(chunk, - map_bias+1), (size_t)((uintptr_t) arena_mapp_get(chunk, - chunk_npages-1) - (uintptr_t)arena_mapp_get(chunk, - map_bias+1))); - if (config_debug) { - for (i = map_bias+1; i < chunk_npages-1; i++) { - assert(arena_mapbits_unzeroed_get(chunk, i) == - unzeroed); - } - } - } - arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxclass, - unzeroed); - - return (chunk); -} - -static arena_chunk_t * -arena_chunk_alloc(arena_t *arena) -{ - arena_chunk_t *chunk; - - if (arena->spare != NULL) - chunk = arena_chunk_init_spare(arena); - else { - chunk = arena_chunk_init_hard(arena); - if (chunk == NULL) - return (NULL); - } - - /* Insert the run into the runs_avail tree. */ - arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias, - false, false); - - return (chunk); -} - -static void -arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk) -{ - assert(arena_mapbits_allocated_get(chunk, map_bias) == 0); - assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0); - assert(arena_mapbits_unallocated_size_get(chunk, map_bias) == - arena_maxclass); - assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) == - arena_maxclass); - assert(arena_mapbits_dirty_get(chunk, map_bias) == - arena_mapbits_dirty_get(chunk, chunk_npages-1)); - - /* - * Remove run from the runs_avail tree, so that the arena does not use - * it. - */ - arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias, - false, false); - - if (arena->spare != NULL) { - arena_chunk_t *spare = arena->spare; - - arena->spare = chunk; - malloc_mutex_unlock(&arena->lock); - chunk_dealloc((void *)spare, chunksize, true); - malloc_mutex_lock(&arena->lock); - if (config_stats) - arena->stats.mapped -= chunksize; - } else - arena->spare = chunk; -} - -static arena_run_t * -arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero) -{ - arena_run_t *run; - arena_chunk_map_t *mapelm, key; - - key.bits = size | CHUNK_MAP_KEY; - mapelm = arena_avail_tree_nsearch(&arena->runs_avail, &key); - if (mapelm != NULL) { - arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm); - size_t pageind = (((uintptr_t)mapelm - - (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t)) - + map_bias; - - run = (arena_run_t *)((uintptr_t)run_chunk + (pageind << - LG_PAGE)); - arena_run_split_large(arena, run, size, zero); - return (run); - } - - return (NULL); -} - -static arena_run_t * -arena_run_alloc_large(arena_t *arena, size_t size, bool zero) -{ - arena_chunk_t *chunk; - arena_run_t *run; - - assert(size <= arena_maxclass); - assert((size & PAGE_MASK) == 0); - - /* Search the arena's chunks for the lowest best fit. 
*/ - run = arena_run_alloc_large_helper(arena, size, zero); - if (run != NULL) - return (run); - - /* - * No usable runs. Create a new chunk from which to allocate the run. - */ - chunk = arena_chunk_alloc(arena); - if (chunk != NULL) { - run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE)); - arena_run_split_large(arena, run, size, zero); - return (run); - } - - /* - * arena_chunk_alloc() failed, but another thread may have made - * sufficient memory available while this one dropped arena->lock in - * arena_chunk_alloc(), so search one more time. - */ - return (arena_run_alloc_large_helper(arena, size, zero)); -} - -static arena_run_t * -arena_run_alloc_small_helper(arena_t *arena, size_t size, size_t binind) -{ - arena_run_t *run; - arena_chunk_map_t *mapelm, key; - - key.bits = size | CHUNK_MAP_KEY; - mapelm = arena_avail_tree_nsearch(&arena->runs_avail, &key); - if (mapelm != NULL) { - arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm); - size_t pageind = (((uintptr_t)mapelm - - (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t)) - + map_bias; - - run = (arena_run_t *)((uintptr_t)run_chunk + (pageind << - LG_PAGE)); - arena_run_split_small(arena, run, size, binind); - return (run); - } - - return (NULL); -} - -static arena_run_t * -arena_run_alloc_small(arena_t *arena, size_t size, size_t binind) -{ - arena_chunk_t *chunk; - arena_run_t *run; - - assert(size <= arena_maxclass); - assert((size & PAGE_MASK) == 0); - assert(binind != BININD_INVALID); - - /* Search the arena's chunks for the lowest best fit. */ - run = arena_run_alloc_small_helper(arena, size, binind); - if (run != NULL) - return (run); - - /* - * No usable runs. Create a new chunk from which to allocate the run. - */ - chunk = arena_chunk_alloc(arena); - if (chunk != NULL) { - run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE)); - arena_run_split_small(arena, run, size, binind); - return (run); - } - - /* - * arena_chunk_alloc() failed, but another thread may have made - * sufficient memory available while this one dropped arena->lock in - * arena_chunk_alloc(), so search one more time. - */ - return (arena_run_alloc_small_helper(arena, size, binind)); -} - -static inline void -arena_maybe_purge(arena_t *arena) -{ - size_t npurgeable, threshold; - - /* Don't purge if the option is disabled. */ - if (opt_lg_dirty_mult < 0) - return; - /* Don't purge if all dirty pages are already being purged. */ - if (arena->ndirty <= arena->npurgatory) - return; - npurgeable = arena->ndirty - arena->npurgatory; - threshold = (arena->nactive >> opt_lg_dirty_mult); - /* - * Don't purge unless the number of purgeable pages exceeds the - * threshold. - */ - if (npurgeable <= threshold) - return; - - arena_purge(arena, false); -} - -static arena_chunk_t * -chunks_dirty_iter_cb(arena_chunk_tree_t *tree, arena_chunk_t *chunk, void *arg) -{ - size_t *ndirty = (size_t *)arg; - - assert(chunk->ndirty != 0); - *ndirty += chunk->ndirty; - return (NULL); -} - -static size_t -arena_compute_npurgatory(arena_t *arena, bool all) -{ - size_t npurgatory, npurgeable; - - /* - * Compute the minimum number of pages that this thread should try to - * purge. 
- */ - npurgeable = arena->ndirty - arena->npurgatory; - - if (all == false) { - size_t threshold = (arena->nactive >> opt_lg_dirty_mult); - - npurgatory = npurgeable - threshold; - } else - npurgatory = npurgeable; - - return (npurgatory); -} - -static void -arena_chunk_stash_dirty(arena_t *arena, arena_chunk_t *chunk, bool all, - arena_chunk_mapelms_t *mapelms) -{ - size_t pageind, npages; - - /* - * Temporarily allocate free dirty runs within chunk. If all is false, - * only operate on dirty runs that are fragments; otherwise operate on - * all dirty runs. - */ - for (pageind = map_bias; pageind < chunk_npages; pageind += npages) { - arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind); - if (arena_mapbits_allocated_get(chunk, pageind) == 0) { - size_t run_size = - arena_mapbits_unallocated_size_get(chunk, pageind); - - npages = run_size >> LG_PAGE; - assert(pageind + npages <= chunk_npages); - assert(arena_mapbits_dirty_get(chunk, pageind) == - arena_mapbits_dirty_get(chunk, pageind+npages-1)); - - if (arena_mapbits_dirty_get(chunk, pageind) != 0 && - (all || arena_avail_adjac(chunk, pageind, - npages))) { - arena_run_t *run = (arena_run_t *)((uintptr_t) - chunk + (uintptr_t)(pageind << LG_PAGE)); - - arena_run_split_large(arena, run, run_size, - false); - /* Append to list for later processing. */ - ql_elm_new(mapelm, u.ql_link); - ql_tail_insert(mapelms, mapelm, u.ql_link); - } - } else { - /* Skip run. */ - if (arena_mapbits_large_get(chunk, pageind) != 0) { - npages = arena_mapbits_large_size_get(chunk, - pageind) >> LG_PAGE; - } else { - size_t binind; - arena_bin_info_t *bin_info; - arena_run_t *run = (arena_run_t *)((uintptr_t) - chunk + (uintptr_t)(pageind << LG_PAGE)); - - assert(arena_mapbits_small_runind_get(chunk, - pageind) == 0); - binind = arena_bin_index(arena, run->bin); - bin_info = &arena_bin_info[binind]; - npages = bin_info->run_size >> LG_PAGE; - } - } - } - assert(pageind == chunk_npages); - assert(chunk->ndirty == 0 || all == false); - assert(chunk->nruns_adjac == 0); -} - -static size_t -arena_chunk_purge_stashed(arena_t *arena, arena_chunk_t *chunk, - arena_chunk_mapelms_t *mapelms) -{ - size_t npurged, pageind, npages, nmadvise; - arena_chunk_map_t *mapelm; - - malloc_mutex_unlock(&arena->lock); - if (config_stats) - nmadvise = 0; - npurged = 0; - ql_foreach(mapelm, mapelms, u.ql_link) { - bool unzeroed; - size_t flag_unzeroed, i; - - pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) / - sizeof(arena_chunk_map_t)) + map_bias; - npages = arena_mapbits_large_size_get(chunk, pageind) >> - LG_PAGE; - assert(pageind + npages <= chunk_npages); - unzeroed = pages_purge((void *)((uintptr_t)chunk + (pageind << - LG_PAGE)), (npages << LG_PAGE)); - flag_unzeroed = unzeroed ? CHUNK_MAP_UNZEROED : 0; - /* - * Set the unzeroed flag for all pages, now that pages_purge() - * has returned whether the pages were zeroed as a side effect - * of purging. This chunk map modification is safe even though - * the arena mutex isn't currently owned by this thread, - * because the run is marked as allocated, thus protecting it - * from being modified by any other thread. As long as these - * writes don't perturb the first and last elements' - * CHUNK_MAP_ALLOCATED bits, behavior is well defined. 
- */ - for (i = 0; i < npages; i++) { - arena_mapbits_unzeroed_set(chunk, pageind+i, - flag_unzeroed); - } - npurged += npages; - if (config_stats) - nmadvise++; - } - malloc_mutex_lock(&arena->lock); - if (config_stats) - arena->stats.nmadvise += nmadvise; - - return (npurged); -} - -static void -arena_chunk_unstash_purged(arena_t *arena, arena_chunk_t *chunk, - arena_chunk_mapelms_t *mapelms) -{ - arena_chunk_map_t *mapelm; - size_t pageind; - - /* Deallocate runs. */ - for (mapelm = ql_first(mapelms); mapelm != NULL; - mapelm = ql_first(mapelms)) { - arena_run_t *run; - - pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) / - sizeof(arena_chunk_map_t)) + map_bias; - run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)(pageind << - LG_PAGE)); - ql_remove(mapelms, mapelm, u.ql_link); - arena_run_dalloc(arena, run, false, true); - } -} - -static inline size_t -arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all) -{ - size_t npurged; - arena_chunk_mapelms_t mapelms; - - ql_new(&mapelms); - - /* - * If chunk is the spare, temporarily re-allocate it, 1) so that its - * run is reinserted into runs_avail, and 2) so that it cannot be - * completely discarded by another thread while arena->lock is dropped - * by this thread. Note that the arena_run_dalloc() call will - * implicitly deallocate the chunk, so no explicit action is required - * in this function to deallocate the chunk. - * - * Note that once a chunk contains dirty pages, it cannot again contain - * a single run unless 1) it is a dirty run, or 2) this function purges - * dirty pages and causes the transition to a single clean run. Thus - * (chunk == arena->spare) is possible, but it is not possible for - * this function to be called on the spare unless it contains a dirty - * run. - */ - if (chunk == arena->spare) { - assert(arena_mapbits_dirty_get(chunk, map_bias) != 0); - assert(arena_mapbits_dirty_get(chunk, chunk_npages-1) != 0); - - arena_chunk_alloc(arena); - } - - if (config_stats) - arena->stats.purged += chunk->ndirty; - - /* - * Operate on all dirty runs if there is no clean/dirty run - * fragmentation. - */ - if (chunk->nruns_adjac == 0) - all = true; - - arena_chunk_stash_dirty(arena, chunk, all, &mapelms); - npurged = arena_chunk_purge_stashed(arena, chunk, &mapelms); - arena_chunk_unstash_purged(arena, chunk, &mapelms); - - return (npurged); -} - -static void -arena_purge(arena_t *arena, bool all) -{ - arena_chunk_t *chunk; - size_t npurgatory; - if (config_debug) { - size_t ndirty = 0; - - arena_chunk_dirty_iter(&arena->chunks_dirty, NULL, - chunks_dirty_iter_cb, (void *)&ndirty); - assert(ndirty == arena->ndirty); - } - assert(arena->ndirty > arena->npurgatory || all); - assert((arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty - - arena->npurgatory) || all); - - if (config_stats) - arena->stats.npurge++; - - /* - * Add the minimum number of pages this thread should try to purge to - * arena->npurgatory. This will keep multiple threads from racing to - * reduce ndirty below the threshold. - */ - npurgatory = arena_compute_npurgatory(arena, all); - arena->npurgatory += npurgatory; - - while (npurgatory > 0) { - size_t npurgeable, npurged, nunpurged; - - /* Get next chunk with dirty pages. */ - chunk = arena_chunk_dirty_first(&arena->chunks_dirty); - if (chunk == NULL) { - /* - * This thread was unable to purge as many pages as - * originally intended, due to races with other threads - * that either did some of the purging work, or re-used - * dirty pages. 
- */ - arena->npurgatory -= npurgatory; - return; - } - npurgeable = chunk->ndirty; - assert(npurgeable != 0); - - if (npurgeable > npurgatory && chunk->nruns_adjac == 0) { - /* - * This thread will purge all the dirty pages in chunk, - * so set npurgatory to reflect this thread's intent to - * purge the pages. This tends to reduce the chances - * of the following scenario: - * - * 1) This thread sets arena->npurgatory such that - * (arena->ndirty - arena->npurgatory) is at the - * threshold. - * 2) This thread drops arena->lock. - * 3) Another thread causes one or more pages to be - * dirtied, and immediately determines that it must - * purge dirty pages. - * - * If this scenario *does* play out, that's okay, - * because all of the purging work being done really - * needs to happen. - */ - arena->npurgatory += npurgeable - npurgatory; - npurgatory = npurgeable; - } - - /* - * Keep track of how many pages are purgeable, versus how many - * actually get purged, and adjust counters accordingly. - */ - arena->npurgatory -= npurgeable; - npurgatory -= npurgeable; - npurged = arena_chunk_purge(arena, chunk, all); - nunpurged = npurgeable - npurged; - arena->npurgatory += nunpurged; - npurgatory += nunpurged; - } -} - -void -arena_purge_all(arena_t *arena) -{ - - malloc_mutex_lock(&arena->lock); - arena_purge(arena, true); - malloc_mutex_unlock(&arena->lock); -} - -static void -arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size, - size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty) -{ - size_t size = *p_size; - size_t run_ind = *p_run_ind; - size_t run_pages = *p_run_pages; - - /* Try to coalesce forward. */ - if (run_ind + run_pages < chunk_npages && - arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 && - arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty) { - size_t nrun_size = arena_mapbits_unallocated_size_get(chunk, - run_ind+run_pages); - size_t nrun_pages = nrun_size >> LG_PAGE; - - /* - * Remove successor from runs_avail; the coalesced run is - * inserted later. - */ - assert(arena_mapbits_unallocated_size_get(chunk, - run_ind+run_pages+nrun_pages-1) == nrun_size); - assert(arena_mapbits_dirty_get(chunk, - run_ind+run_pages+nrun_pages-1) == flag_dirty); - arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages, - false, true); - - size += nrun_size; - run_pages += nrun_pages; - - arena_mapbits_unallocated_size_set(chunk, run_ind, size); - arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1, - size); - } - - /* Try to coalesce backward. */ - if (run_ind > map_bias && arena_mapbits_allocated_get(chunk, - run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) == - flag_dirty) { - size_t prun_size = arena_mapbits_unallocated_size_get(chunk, - run_ind-1); - size_t prun_pages = prun_size >> LG_PAGE; - - run_ind -= prun_pages; - - /* - * Remove predecessor from runs_avail; the coalesced run is - * inserted later. 
- */ - assert(arena_mapbits_unallocated_size_get(chunk, run_ind) == - prun_size); - assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty); - arena_avail_remove(arena, chunk, run_ind, prun_pages, true, - false); - - size += prun_size; - run_pages += prun_pages; - - arena_mapbits_unallocated_size_set(chunk, run_ind, size); - arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1, - size); - } - - *p_size = size; - *p_run_ind = run_ind; - *p_run_pages = run_pages; -} - -static void -arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned) -{ - arena_chunk_t *chunk; - size_t size, run_ind, run_pages, flag_dirty; - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE); - assert(run_ind >= map_bias); - assert(run_ind < chunk_npages); - if (arena_mapbits_large_get(chunk, run_ind) != 0) { - size = arena_mapbits_large_size_get(chunk, run_ind); - assert(size == PAGE || - arena_mapbits_large_size_get(chunk, - run_ind+(size>>LG_PAGE)-1) == 0); - } else { - size_t binind = arena_bin_index(arena, run->bin); - arena_bin_info_t *bin_info = &arena_bin_info[binind]; - size = bin_info->run_size; - } - run_pages = (size >> LG_PAGE); - arena_cactive_update(arena, 0, run_pages); - arena->nactive -= run_pages; - - /* - * The run is dirty if the caller claims to have dirtied it, as well as - * if it was already dirty before being allocated and the caller - * doesn't claim to have cleaned it. - */ - assert(arena_mapbits_dirty_get(chunk, run_ind) == - arena_mapbits_dirty_get(chunk, run_ind+run_pages-1)); - if (cleaned == false && arena_mapbits_dirty_get(chunk, run_ind) != 0) - dirty = true; - flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0; - - /* Mark pages as unallocated in the chunk map. */ - if (dirty) { - arena_mapbits_unallocated_set(chunk, run_ind, size, - CHUNK_MAP_DIRTY); - arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size, - CHUNK_MAP_DIRTY); - } else { - arena_mapbits_unallocated_set(chunk, run_ind, size, - arena_mapbits_unzeroed_get(chunk, run_ind)); - arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size, - arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1)); - } - - arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages, - flag_dirty); - - /* Insert into runs_avail, now that coalescing is complete. */ - assert(arena_mapbits_unallocated_size_get(chunk, run_ind) == - arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1)); - assert(arena_mapbits_dirty_get(chunk, run_ind) == - arena_mapbits_dirty_get(chunk, run_ind+run_pages-1)); - arena_avail_insert(arena, chunk, run_ind, run_pages, true, true); - - /* Deallocate chunk if it is now completely unused. */ - if (size == arena_maxclass) { - assert(run_ind == map_bias); - assert(run_pages == (arena_maxclass >> LG_PAGE)); - arena_chunk_dealloc(arena, chunk); - } - - /* - * It is okay to do dirty page processing here even if the chunk was - * deallocated above, since in that case it is the spare. Waiting - * until after possible chunk deallocation to do dirty processing - * allows for an old spare to be fully deallocated, thus decreasing the - * chances of spuriously crossing the dirty page purging threshold. 
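As a rough illustration of what arena_run_coalesce() above does (merge a freed run with adjacent free runs whose dirty state matches), here is a deliberately simplified editorial sketch over a flat per-page map. It is not the removed code: the real version works on packed map bits, merges whole runs rather than single pages, and removes the neighbouring runs from runs_avail before re-inserting the merged run.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified per-page record; the real chunk map packs this into bit fields. */
struct page {
    bool allocated;
    bool dirty;
};

/*
 * Grow a freed run [run_ind, run_ind + run_pages) over adjacent free pages
 * whose dirty flag matches, first forward and then backward.
 */
static void coalesce(const struct page *map, size_t npages,
                     size_t *run_ind, size_t *run_pages, bool dirty) {
    while (*run_ind + *run_pages < npages &&              /* forward */
           !map[*run_ind + *run_pages].allocated &&
           map[*run_ind + *run_pages].dirty == dirty)
        (*run_pages)++;
    while (*run_ind > 0 &&                                /* backward */
           !map[*run_ind - 1].allocated &&
           map[*run_ind - 1].dirty == dirty) {
        (*run_ind)--;
        (*run_pages)++;
    }
}

int main(void) {
    /* Pages 2-3 free+clean, page 4 just freed (clean), pages 5-6 free+clean. */
    struct page map[8] = {
        {true, false}, {true, false}, {false, false}, {false, false},
        {false, false}, {false, false}, {false, false}, {true, true},
    };
    size_t run_ind = 4, run_pages = 1;
    coalesce(map, 8, &run_ind, &run_pages, false);
    printf("coalesced run: pages [%zu, %zu)\n", run_ind, run_ind + run_pages);
    return 0;
}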
- */ - if (dirty) - arena_maybe_purge(arena); -} - -static void -arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, - size_t oldsize, size_t newsize) -{ - size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE; - size_t head_npages = (oldsize - newsize) >> LG_PAGE; - size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind); - - assert(oldsize > newsize); - - /* - * Update the chunk map so that arena_run_dalloc() can treat the - * leading run as separately allocated. Set the last element of each - * run first, in case of single-page runs. - */ - assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize); - arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty); - arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty); - - if (config_debug) { - UNUSED size_t tail_npages = newsize >> LG_PAGE; - assert(arena_mapbits_large_size_get(chunk, - pageind+head_npages+tail_npages-1) == 0); - assert(arena_mapbits_dirty_get(chunk, - pageind+head_npages+tail_npages-1) == flag_dirty); - } - arena_mapbits_large_set(chunk, pageind+head_npages, newsize, - flag_dirty); - - arena_run_dalloc(arena, run, false, false); -} - -static void -arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, - size_t oldsize, size_t newsize, bool dirty) -{ - size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE; - size_t head_npages = newsize >> LG_PAGE; - size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind); - - assert(oldsize > newsize); - - /* - * Update the chunk map so that arena_run_dalloc() can treat the - * trailing run as separately allocated. Set the last element of each - * run first, in case of single-page runs. - */ - assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize); - arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty); - arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty); - - if (config_debug) { - UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE; - assert(arena_mapbits_large_size_get(chunk, - pageind+head_npages+tail_npages-1) == 0); - assert(arena_mapbits_dirty_get(chunk, - pageind+head_npages+tail_npages-1) == flag_dirty); - } - arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize, - flag_dirty); - - arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize), - dirty, false); -} - -static arena_run_t * -arena_bin_runs_first(arena_bin_t *bin) -{ - arena_chunk_map_t *mapelm = arena_run_tree_first(&bin->runs); - if (mapelm != NULL) { - arena_chunk_t *chunk; - size_t pageind; - arena_run_t *run; - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm); - pageind = ((((uintptr_t)mapelm - (uintptr_t)chunk->map) / - sizeof(arena_chunk_map_t))) + map_bias; - run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind - - arena_mapbits_small_runind_get(chunk, pageind)) << - LG_PAGE)); - return (run); - } - - return (NULL); -} - -static void -arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run) -{ - arena_chunk_t *chunk = CHUNK_ADDR2BASE(run); - size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE; - arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind); - - assert(arena_run_tree_search(&bin->runs, mapelm) == NULL); - - arena_run_tree_insert(&bin->runs, mapelm); -} - -static void -arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run) -{ - arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE; - arena_chunk_map_t *mapelm = arena_mapp_get(chunk, 
pageind); - - assert(arena_run_tree_search(&bin->runs, mapelm) != NULL); - - arena_run_tree_remove(&bin->runs, mapelm); -} - -static arena_run_t * -arena_bin_nonfull_run_tryget(arena_bin_t *bin) -{ - arena_run_t *run = arena_bin_runs_first(bin); - if (run != NULL) { - arena_bin_runs_remove(bin, run); - if (config_stats) - bin->stats.reruns++; - } - return (run); -} - -static arena_run_t * -arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin) -{ - arena_run_t *run; - size_t binind; - arena_bin_info_t *bin_info; - - /* Look for a usable run. */ - run = arena_bin_nonfull_run_tryget(bin); - if (run != NULL) - return (run); - /* No existing runs have any space available. */ - - binind = arena_bin_index(arena, bin); - bin_info = &arena_bin_info[binind]; - - /* Allocate a new run. */ - malloc_mutex_unlock(&bin->lock); - /******************************/ - malloc_mutex_lock(&arena->lock); - run = arena_run_alloc_small(arena, bin_info->run_size, binind); - if (run != NULL) { - bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run + - (uintptr_t)bin_info->bitmap_offset); - - /* Initialize run internals. */ - run->bin = bin; - run->nextind = 0; - run->nfree = bin_info->nregs; - bitmap_init(bitmap, &bin_info->bitmap_info); - } - malloc_mutex_unlock(&arena->lock); - /********************************/ - malloc_mutex_lock(&bin->lock); - if (run != NULL) { - if (config_stats) { - bin->stats.nruns++; - bin->stats.curruns++; - } - return (run); - } - - /* - * arena_run_alloc_small() failed, but another thread may have made - * sufficient memory available while this one dropped bin->lock above, - * so search one more time. - */ - run = arena_bin_nonfull_run_tryget(bin); - if (run != NULL) - return (run); - - return (NULL); -} - -/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */ -static void * -arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin) -{ - void *ret; - size_t binind; - arena_bin_info_t *bin_info; - arena_run_t *run; - - binind = arena_bin_index(arena, bin); - bin_info = &arena_bin_info[binind]; - bin->runcur = NULL; - run = arena_bin_nonfull_run_get(arena, bin); - if (bin->runcur != NULL && bin->runcur->nfree > 0) { - /* - * Another thread updated runcur while this one ran without the - * bin lock in arena_bin_nonfull_run_get(). - */ - assert(bin->runcur->nfree > 0); - ret = arena_run_reg_alloc(bin->runcur, bin_info); - if (run != NULL) { - arena_chunk_t *chunk; - - /* - * arena_run_alloc_small() may have allocated run, or - * it may have pulled run from the bin's run tree. - * Therefore it is unsafe to make any assumptions about - * how run has previously been used, and - * arena_bin_lower_run() must be called, as if a region - * were just deallocated from the run. 
- */ - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - if (run->nfree == bin_info->nregs) - arena_dalloc_bin_run(arena, chunk, run, bin); - else - arena_bin_lower_run(arena, chunk, run, bin); - } - return (ret); - } - - if (run == NULL) - return (NULL); - - bin->runcur = run; - - assert(bin->runcur->nfree > 0); - - return (arena_run_reg_alloc(bin->runcur, bin_info)); -} - -void -arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind, - uint64_t prof_accumbytes) -{ - unsigned i, nfill; - arena_bin_t *bin; - arena_run_t *run; - void *ptr; - - assert(tbin->ncached == 0); - - if (config_prof && arena_prof_accum(arena, prof_accumbytes)) - prof_idump(); - bin = &arena->bins[binind]; - malloc_mutex_lock(&bin->lock); - for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >> - tbin->lg_fill_div); i < nfill; i++) { - if ((run = bin->runcur) != NULL && run->nfree > 0) - ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]); - else - ptr = arena_bin_malloc_hard(arena, bin); - if (ptr == NULL) - break; - if (config_fill && opt_junk) { - arena_alloc_junk_small(ptr, &arena_bin_info[binind], - true); - } - /* Insert such that low regions get used first. */ - tbin->avail[nfill - 1 - i] = ptr; - } - if (config_stats) { - bin->stats.allocated += i * arena_bin_info[binind].reg_size; - bin->stats.nmalloc += i; - bin->stats.nrequests += tbin->tstats.nrequests; - bin->stats.nfills++; - tbin->tstats.nrequests = 0; - } - malloc_mutex_unlock(&bin->lock); - tbin->ncached = i; -} - -void -arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero) -{ - - if (zero) { - size_t redzone_size = bin_info->redzone_size; - memset((void *)((uintptr_t)ptr - redzone_size), 0xa5, - redzone_size); - memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5, - redzone_size); - } else { - memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5, - bin_info->reg_interval); - } -} - -#ifdef JEMALLOC_JET -#undef arena_redzone_corruption -#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption_impl) -#endif -static void -arena_redzone_corruption(void *ptr, size_t usize, bool after, - size_t offset, uint8_t byte) -{ - - malloc_printf(": Corrupt redzone %zu byte%s %s %p " - "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s", - after ? 
"after" : "before", ptr, usize, byte); -} -#ifdef JEMALLOC_JET -#undef arena_redzone_corruption -#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption) -arena_redzone_corruption_t *arena_redzone_corruption = - JEMALLOC_N(arena_redzone_corruption_impl); -#endif - -static void -arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset) -{ - size_t size = bin_info->reg_size; - size_t redzone_size = bin_info->redzone_size; - size_t i; - bool error = false; - - for (i = 1; i <= redzone_size; i++) { - uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i); - if (*byte != 0xa5) { - error = true; - arena_redzone_corruption(ptr, size, false, i, *byte); - if (reset) - *byte = 0xa5; - } - } - for (i = 0; i < redzone_size; i++) { - uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i); - if (*byte != 0xa5) { - error = true; - arena_redzone_corruption(ptr, size, true, i, *byte); - if (reset) - *byte = 0xa5; - } - } - if (opt_abort && error) - abort(); -} - -#ifdef JEMALLOC_JET -#undef arena_dalloc_junk_small -#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small_impl) -#endif -void -arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info) -{ - size_t redzone_size = bin_info->redzone_size; - - arena_redzones_validate(ptr, bin_info, false); - memset((void *)((uintptr_t)ptr - redzone_size), 0x5a, - bin_info->reg_interval); -} -#ifdef JEMALLOC_JET -#undef arena_dalloc_junk_small -#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small) -arena_dalloc_junk_small_t *arena_dalloc_junk_small = - JEMALLOC_N(arena_dalloc_junk_small_impl); -#endif - -void -arena_quarantine_junk_small(void *ptr, size_t usize) -{ - size_t binind; - arena_bin_info_t *bin_info; - cassert(config_fill); - assert(opt_junk); - assert(opt_quarantine); - assert(usize <= SMALL_MAXCLASS); - - binind = SMALL_SIZE2BIN(usize); - bin_info = &arena_bin_info[binind]; - arena_redzones_validate(ptr, bin_info, true); -} - -void * -arena_malloc_small(arena_t *arena, size_t size, bool zero) -{ - void *ret; - arena_bin_t *bin; - arena_run_t *run; - size_t binind; - - binind = SMALL_SIZE2BIN(size); - assert(binind < NBINS); - bin = &arena->bins[binind]; - size = arena_bin_info[binind].reg_size; - - malloc_mutex_lock(&bin->lock); - if ((run = bin->runcur) != NULL && run->nfree > 0) - ret = arena_run_reg_alloc(run, &arena_bin_info[binind]); - else - ret = arena_bin_malloc_hard(arena, bin); - - if (ret == NULL) { - malloc_mutex_unlock(&bin->lock); - return (NULL); - } - - if (config_stats) { - bin->stats.allocated += size; - bin->stats.nmalloc++; - bin->stats.nrequests++; - } - malloc_mutex_unlock(&bin->lock); - if (config_prof && isthreaded == false && arena_prof_accum(arena, size)) - prof_idump(); - - if (zero == false) { - if (config_fill) { - if (opt_junk) { - arena_alloc_junk_small(ret, - &arena_bin_info[binind], false); - } else if (opt_zero) - memset(ret, 0, size); - } - VALGRIND_MAKE_MEM_UNDEFINED(ret, size); - } else { - if (config_fill && opt_junk) { - arena_alloc_junk_small(ret, &arena_bin_info[binind], - true); - } - VALGRIND_MAKE_MEM_UNDEFINED(ret, size); - memset(ret, 0, size); - } - - return (ret); -} - -void * -arena_malloc_large(arena_t *arena, size_t size, bool zero) -{ - void *ret; - UNUSED bool idump; - - /* Large allocation. 
*/ - size = PAGE_CEILING(size); - malloc_mutex_lock(&arena->lock); - ret = (void *)arena_run_alloc_large(arena, size, zero); - if (ret == NULL) { - malloc_mutex_unlock(&arena->lock); - return (NULL); - } - if (config_stats) { - arena->stats.nmalloc_large++; - arena->stats.nrequests_large++; - arena->stats.allocated_large += size; - arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++; - arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++; - arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++; - } - if (config_prof) - idump = arena_prof_accum_locked(arena, size); - malloc_mutex_unlock(&arena->lock); - if (config_prof && idump) - prof_idump(); - - if (zero == false) { - if (config_fill) { - if (opt_junk) - memset(ret, 0xa5, size); - else if (opt_zero) - memset(ret, 0, size); - } - } - - return (ret); -} - -/* Only handles large allocations that require more than page alignment. */ -void * -arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero) -{ - void *ret; - size_t alloc_size, leadsize, trailsize; - arena_run_t *run; - arena_chunk_t *chunk; - - assert((size & PAGE_MASK) == 0); - - alignment = PAGE_CEILING(alignment); - alloc_size = size + alignment - PAGE; - - malloc_mutex_lock(&arena->lock); - run = arena_run_alloc_large(arena, alloc_size, false); - if (run == NULL) { - malloc_mutex_unlock(&arena->lock); - return (NULL); - } - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - - leadsize = ALIGNMENT_CEILING((uintptr_t)run, alignment) - - (uintptr_t)run; - assert(alloc_size >= leadsize + size); - trailsize = alloc_size - leadsize - size; - ret = (void *)((uintptr_t)run + leadsize); - if (leadsize != 0) { - arena_run_trim_head(arena, chunk, run, alloc_size, alloc_size - - leadsize); - } - if (trailsize != 0) { - arena_run_trim_tail(arena, chunk, ret, size + trailsize, size, - false); - } - arena_run_init_large(arena, (arena_run_t *)ret, size, zero); - - if (config_stats) { - arena->stats.nmalloc_large++; - arena->stats.nrequests_large++; - arena->stats.allocated_large += size; - arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++; - arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++; - arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++; - } - malloc_mutex_unlock(&arena->lock); - - if (config_fill && zero == false) { - if (opt_junk) - memset(ret, 0xa5, size); - else if (opt_zero) - memset(ret, 0, size); - } - return (ret); -} - -void -arena_prof_promoted(const void *ptr, size_t size) -{ - arena_chunk_t *chunk; - size_t pageind, binind; - - cassert(config_prof); - assert(ptr != NULL); - assert(CHUNK_ADDR2BASE(ptr) != ptr); - assert(isalloc(ptr, false) == PAGE); - assert(isalloc(ptr, true) == PAGE); - assert(size <= SMALL_MAXCLASS); - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - binind = SMALL_SIZE2BIN(size); - assert(binind < NBINS); - arena_mapbits_large_binind_set(chunk, pageind, binind); - - assert(isalloc(ptr, false) == PAGE); - assert(isalloc(ptr, true) == size); -} - -static void -arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run, - arena_bin_t *bin) -{ - - /* Dissociate run from bin. */ - if (run == bin->runcur) - bin->runcur = NULL; - else { - size_t binind = arena_bin_index(chunk->arena, bin); - arena_bin_info_t *bin_info = &arena_bin_info[binind]; - - if (bin_info->nregs != 1) { - /* - * This block's conditional is necessary because if the - * run only contains one region, then it never gets - * inserted into the non-full runs tree. 
- */ - arena_bin_runs_remove(bin, run); - } - } -} - -static void -arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, - arena_bin_t *bin) -{ - size_t binind; - arena_bin_info_t *bin_info; - size_t npages, run_ind, past; - - assert(run != bin->runcur); - assert(arena_run_tree_search(&bin->runs, - arena_mapp_get(chunk, ((uintptr_t)run-(uintptr_t)chunk)>>LG_PAGE)) - == NULL); - - binind = arena_bin_index(chunk->arena, run->bin); - bin_info = &arena_bin_info[binind]; - - malloc_mutex_unlock(&bin->lock); - /******************************/ - npages = bin_info->run_size >> LG_PAGE; - run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE); - past = (size_t)(PAGE_CEILING((uintptr_t)run + - (uintptr_t)bin_info->reg0_offset + (uintptr_t)(run->nextind * - bin_info->reg_interval - bin_info->redzone_size) - - (uintptr_t)chunk) >> LG_PAGE); - malloc_mutex_lock(&arena->lock); - - /* - * If the run was originally clean, and some pages were never touched, - * trim the clean pages before deallocating the dirty portion of the - * run. - */ - assert(arena_mapbits_dirty_get(chunk, run_ind) == - arena_mapbits_dirty_get(chunk, run_ind+npages-1)); - if (arena_mapbits_dirty_get(chunk, run_ind) == 0 && past - run_ind < - npages) { - /* Trim clean pages. Convert to large run beforehand. */ - assert(npages > 0); - arena_mapbits_large_set(chunk, run_ind, bin_info->run_size, 0); - arena_mapbits_large_set(chunk, run_ind+npages-1, 0, 0); - arena_run_trim_tail(arena, chunk, run, (npages << LG_PAGE), - ((past - run_ind) << LG_PAGE), false); - /* npages = past - run_ind; */ - } - arena_run_dalloc(arena, run, true, false); - malloc_mutex_unlock(&arena->lock); - /****************************/ - malloc_mutex_lock(&bin->lock); - if (config_stats) - bin->stats.curruns--; -} - -static void -arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, - arena_bin_t *bin) -{ - - /* - * Make sure that if bin->runcur is non-NULL, it refers to the lowest - * non-full run. It is okay to NULL runcur out rather than proactively - * keeping it pointing at the lowest non-full run. - */ - if ((uintptr_t)run < (uintptr_t)bin->runcur) { - /* Switch runcur. 
*/ - if (bin->runcur->nfree > 0) - arena_bin_runs_insert(bin, bin->runcur); - bin->runcur = run; - if (config_stats) - bin->stats.reruns++; - } else - arena_bin_runs_insert(bin, run); -} - -void -arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr, - arena_chunk_map_t *mapelm) -{ - size_t pageind; - arena_run_t *run; - arena_bin_t *bin; - arena_bin_info_t *bin_info; - size_t size, binind; - - pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind - - arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE)); - bin = run->bin; - binind = arena_ptr_small_binind_get(ptr, mapelm->bits); - bin_info = &arena_bin_info[binind]; - if (config_fill || config_stats) - size = bin_info->reg_size; - - if (config_fill && opt_junk) - arena_dalloc_junk_small(ptr, bin_info); - - arena_run_reg_dalloc(run, ptr); - if (run->nfree == bin_info->nregs) { - arena_dissociate_bin_run(chunk, run, bin); - arena_dalloc_bin_run(arena, chunk, run, bin); - } else if (run->nfree == 1 && run != bin->runcur) - arena_bin_lower_run(arena, chunk, run, bin); - - if (config_stats) { - bin->stats.allocated -= size; - bin->stats.ndalloc++; - } -} - -void -arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr, - size_t pageind, arena_chunk_map_t *mapelm) -{ - arena_run_t *run; - arena_bin_t *bin; - - run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind - - arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE)); - bin = run->bin; - malloc_mutex_lock(&bin->lock); - arena_dalloc_bin_locked(arena, chunk, ptr, mapelm); - malloc_mutex_unlock(&bin->lock); -} - -void -arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr, - size_t pageind) -{ - arena_chunk_map_t *mapelm; - - if (config_debug) { - /* arena_ptr_small_binind_get() does extra sanity checking. 
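The small-object deallocation paths above recover all metadata from the pointer itself: the chunk is found by masking the address down to the chunk boundary (CHUNK_ADDR2BASE), the page index by shifting the offset within the chunk, and the run start by subtracting the small-run index stored in the chunk map. A toy version of that arithmetic follows; it is an editorial sketch with made-up constants (the runind value in particular is pretended, since the real value comes from the map bits).

#include <stdint.h>
#include <stdio.h>

#define LG_PAGE    12                          /* 4 KiB pages */
#define LG_CHUNK   22                          /* 4 MiB chunks (example value) */
#define CHUNK_MASK ((1ul << LG_CHUNK) - 1)

int main(void) {
    uintptr_t ptr = 0x7f3a40605230;            /* some allocated region */
    uintptr_t chunk = ptr & ~CHUNK_MASK;       /* CHUNK_ADDR2BASE(ptr) */
    size_t pageind = (ptr - chunk) >> LG_PAGE;
    size_t runind = 3;                         /* pretend the map says "3 pages into the run" */
    uintptr_t run = chunk + ((pageind - runind) << LG_PAGE);

    printf("chunk %#lx, page %zu, run starts at %#lx\n",
           (unsigned long)chunk, pageind, (unsigned long)run);
    return 0;
}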
*/ - assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk, - pageind)) != BININD_INVALID); - } - mapelm = arena_mapp_get(chunk, pageind); - arena_dalloc_bin(arena, chunk, ptr, pageind, mapelm); -} - -#ifdef JEMALLOC_JET -#undef arena_dalloc_junk_large -#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large_impl) -#endif -static void -arena_dalloc_junk_large(void *ptr, size_t usize) -{ - - if (config_fill && opt_junk) - memset(ptr, 0x5a, usize); -} -#ifdef JEMALLOC_JET -#undef arena_dalloc_junk_large -#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large) -arena_dalloc_junk_large_t *arena_dalloc_junk_large = - JEMALLOC_N(arena_dalloc_junk_large_impl); -#endif - -void -arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr) -{ - - if (config_fill || config_stats) { - size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - size_t usize = arena_mapbits_large_size_get(chunk, pageind); - - arena_dalloc_junk_large(ptr, usize); - if (config_stats) { - arena->stats.ndalloc_large++; - arena->stats.allocated_large -= usize; - arena->stats.lstats[(usize >> LG_PAGE) - 1].ndalloc++; - arena->stats.lstats[(usize >> LG_PAGE) - 1].curruns--; - } - } - - arena_run_dalloc(arena, (arena_run_t *)ptr, true, false); -} - -void -arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr) -{ - - malloc_mutex_lock(&arena->lock); - arena_dalloc_large_locked(arena, chunk, ptr); - malloc_mutex_unlock(&arena->lock); -} - -static void -arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr, - size_t oldsize, size_t size) -{ - - assert(size < oldsize); - - /* - * Shrink the run, and make trailing pages available for other - * allocations. - */ - malloc_mutex_lock(&arena->lock); - arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size, - true); - if (config_stats) { - arena->stats.ndalloc_large++; - arena->stats.allocated_large -= oldsize; - arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++; - arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--; - - arena->stats.nmalloc_large++; - arena->stats.nrequests_large++; - arena->stats.allocated_large += size; - arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++; - arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++; - arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++; - } - malloc_mutex_unlock(&arena->lock); -} - -static bool -arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr, - size_t oldsize, size_t size, size_t extra, bool zero) -{ - size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - size_t npages = oldsize >> LG_PAGE; - size_t followsize; - - assert(oldsize == arena_mapbits_large_size_get(chunk, pageind)); - - /* Try to extend the run. */ - assert(size + extra > oldsize); - malloc_mutex_lock(&arena->lock); - if (pageind + npages < chunk_npages && - arena_mapbits_allocated_get(chunk, pageind+npages) == 0 && - (followsize = arena_mapbits_unallocated_size_get(chunk, - pageind+npages)) >= size - oldsize) { - /* - * The next run is available and sufficiently large. Split the - * following run, then merge the first part with the existing - * allocation. - */ - size_t flag_dirty; - size_t splitsize = (oldsize + followsize <= size + extra) - ? 
followsize : size + extra - oldsize; - arena_run_split_large(arena, (arena_run_t *)((uintptr_t)chunk + - ((pageind+npages) << LG_PAGE)), splitsize, zero); - - size = oldsize + splitsize; - npages = size >> LG_PAGE; - - /* - * Mark the extended run as dirty if either portion of the run - * was dirty before allocation. This is rather pedantic, - * because there's not actually any sequence of events that - * could cause the resulting run to be passed to - * arena_run_dalloc() with the dirty argument set to false - * (which is when dirty flag consistency would really matter). - */ - flag_dirty = arena_mapbits_dirty_get(chunk, pageind) | - arena_mapbits_dirty_get(chunk, pageind+npages-1); - arena_mapbits_large_set(chunk, pageind, size, flag_dirty); - arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty); - - if (config_stats) { - arena->stats.ndalloc_large++; - arena->stats.allocated_large -= oldsize; - arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++; - arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--; - - arena->stats.nmalloc_large++; - arena->stats.nrequests_large++; - arena->stats.allocated_large += size; - arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++; - arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++; - arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++; - } - malloc_mutex_unlock(&arena->lock); - return (false); - } - malloc_mutex_unlock(&arena->lock); - - return (true); -} - -#ifdef JEMALLOC_JET -#undef arena_ralloc_junk_large -#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large_impl) -#endif -static void -arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize) -{ - - if (config_fill && opt_junk) { - memset((void *)((uintptr_t)ptr + usize), 0x5a, - old_usize - usize); - } -} -#ifdef JEMALLOC_JET -#undef arena_ralloc_junk_large -#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large) -arena_ralloc_junk_large_t *arena_ralloc_junk_large = - JEMALLOC_N(arena_ralloc_junk_large_impl); -#endif - -/* - * Try to resize a large allocation, in order to avoid copying. This will - * always fail if growing an object, and the following run is already in use. - */ -static bool -arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra, - bool zero) -{ - size_t psize; - - psize = PAGE_CEILING(size + extra); - if (psize == oldsize) { - /* Same size class. */ - return (false); - } else { - arena_chunk_t *chunk; - arena_t *arena; - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - arena = chunk->arena; - - if (psize < oldsize) { - /* Fill before shrinking in order avoid a race. */ - arena_ralloc_junk_large(ptr, oldsize, psize); - arena_ralloc_large_shrink(arena, chunk, ptr, oldsize, - psize); - return (false); - } else { - bool ret = arena_ralloc_large_grow(arena, chunk, ptr, - oldsize, PAGE_CEILING(size), - psize - PAGE_CEILING(size), zero); - if (config_fill && ret == false && zero == false) { - if (opt_junk) { - memset((void *)((uintptr_t)ptr + - oldsize), 0xa5, isalloc(ptr, - config_prof) - oldsize); - } else if (opt_zero) { - memset((void *)((uintptr_t)ptr + - oldsize), 0, isalloc(ptr, - config_prof) - oldsize); - } - } - return (ret); - } - } -} - -bool -arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra, - bool zero) -{ - - /* - * Avoid moving the allocation if the size class can be left the same. 
- */ - if (oldsize <= arena_maxclass) { - if (oldsize <= SMALL_MAXCLASS) { - assert(arena_bin_info[SMALL_SIZE2BIN(oldsize)].reg_size - == oldsize); - if ((size + extra <= SMALL_MAXCLASS && - SMALL_SIZE2BIN(size + extra) == - SMALL_SIZE2BIN(oldsize)) || (size <= oldsize && - size + extra >= oldsize)) - return (false); - } else { - assert(size <= arena_maxclass); - if (size + extra > SMALL_MAXCLASS) { - if (arena_ralloc_large(ptr, oldsize, size, - extra, zero) == false) - return (false); - } - } - } - - /* Reallocation would require a move. */ - return (true); -} - -void * -arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size, - size_t extra, size_t alignment, bool zero, bool try_tcache_alloc, - bool try_tcache_dalloc) -{ - void *ret; - size_t copysize; - - /* Try to avoid moving the allocation. */ - if (arena_ralloc_no_move(ptr, oldsize, size, extra, zero) == false) - return (ptr); - - /* - * size and oldsize are different enough that we need to move the - * object. In that case, fall back to allocating new space and - * copying. - */ - if (alignment != 0) { - size_t usize = sa2u(size + extra, alignment); - if (usize == 0) - return (NULL); - ret = ipalloct(usize, alignment, zero, try_tcache_alloc, arena); - } else - ret = arena_malloc(arena, size + extra, zero, try_tcache_alloc); - - if (ret == NULL) { - if (extra == 0) - return (NULL); - /* Try again, this time without extra. */ - if (alignment != 0) { - size_t usize = sa2u(size, alignment); - if (usize == 0) - return (NULL); - ret = ipalloct(usize, alignment, zero, try_tcache_alloc, - arena); - } else - ret = arena_malloc(arena, size, zero, try_tcache_alloc); - - if (ret == NULL) - return (NULL); - } - - /* Junk/zero-filling were already done by ipalloc()/arena_malloc(). */ - - /* - * Copy at most size bytes (not size+extra), since the caller has no - * expectation that the extra bytes will be reliably preserved. - */ - copysize = (size < oldsize) ? 
size : oldsize; - VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize); - memcpy(ret, ptr, copysize); - iqalloct(ptr, try_tcache_dalloc); - return (ret); -} - -dss_prec_t -arena_dss_prec_get(arena_t *arena) -{ - dss_prec_t ret; - - malloc_mutex_lock(&arena->lock); - ret = arena->dss_prec; - malloc_mutex_unlock(&arena->lock); - return (ret); -} - -void -arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) -{ - - malloc_mutex_lock(&arena->lock); - arena->dss_prec = dss_prec; - malloc_mutex_unlock(&arena->lock); -} - -void -arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive, - size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats, - malloc_large_stats_t *lstats) -{ - unsigned i; - - malloc_mutex_lock(&arena->lock); - *dss = dss_prec_names[arena->dss_prec]; - *nactive += arena->nactive; - *ndirty += arena->ndirty; - - astats->mapped += arena->stats.mapped; - astats->npurge += arena->stats.npurge; - astats->nmadvise += arena->stats.nmadvise; - astats->purged += arena->stats.purged; - astats->allocated_large += arena->stats.allocated_large; - astats->nmalloc_large += arena->stats.nmalloc_large; - astats->ndalloc_large += arena->stats.ndalloc_large; - astats->nrequests_large += arena->stats.nrequests_large; - - for (i = 0; i < nlclasses; i++) { - lstats[i].nmalloc += arena->stats.lstats[i].nmalloc; - lstats[i].ndalloc += arena->stats.lstats[i].ndalloc; - lstats[i].nrequests += arena->stats.lstats[i].nrequests; - lstats[i].curruns += arena->stats.lstats[i].curruns; - } - malloc_mutex_unlock(&arena->lock); - - for (i = 0; i < NBINS; i++) { - arena_bin_t *bin = &arena->bins[i]; - - malloc_mutex_lock(&bin->lock); - bstats[i].allocated += bin->stats.allocated; - bstats[i].nmalloc += bin->stats.nmalloc; - bstats[i].ndalloc += bin->stats.ndalloc; - bstats[i].nrequests += bin->stats.nrequests; - if (config_tcache) { - bstats[i].nfills += bin->stats.nfills; - bstats[i].nflushes += bin->stats.nflushes; - } - bstats[i].nruns += bin->stats.nruns; - bstats[i].reruns += bin->stats.reruns; - bstats[i].curruns += bin->stats.curruns; - malloc_mutex_unlock(&bin->lock); - } -} - -bool -arena_new(arena_t *arena, unsigned ind) -{ - unsigned i; - arena_bin_t *bin; - - arena->ind = ind; - arena->nthreads = 0; - - if (malloc_mutex_init(&arena->lock)) - return (true); - - if (config_stats) { - memset(&arena->stats, 0, sizeof(arena_stats_t)); - arena->stats.lstats = - (malloc_large_stats_t *)base_alloc(nlclasses * - sizeof(malloc_large_stats_t)); - if (arena->stats.lstats == NULL) - return (true); - memset(arena->stats.lstats, 0, nlclasses * - sizeof(malloc_large_stats_t)); - if (config_tcache) - ql_new(&arena->tcache_ql); - } - - if (config_prof) - arena->prof_accumbytes = 0; - - arena->dss_prec = chunk_dss_prec_get(); - - /* Initialize chunks. */ - arena_chunk_dirty_new(&arena->chunks_dirty); - arena->spare = NULL; - - arena->nactive = 0; - arena->ndirty = 0; - arena->npurgatory = 0; - - arena_avail_tree_new(&arena->runs_avail); - - /* Initialize bins. */ - for (i = 0; i < NBINS; i++) { - bin = &arena->bins[i]; - if (malloc_mutex_init(&bin->lock)) - return (true); - bin->runcur = NULL; - arena_run_tree_new(&bin->runs); - if (config_stats) - memset(&bin->stats, 0, sizeof(malloc_bin_stats_t)); - } - - return (false); -} - -/* - * Calculate bin_info->run_size such that it meets the following constraints: - * - * *) bin_info->run_size >= min_run_size - * *) bin_info->run_size <= arena_maxclass - * *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed). 
- * *) bin_info->nregs <= RUN_MAXREGS - * - * bin_info->nregs, bin_info->bitmap_offset, and bin_info->reg0_offset are also - * calculated here, since these settings are all interdependent. - */ -static size_t -bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size) -{ - size_t pad_size; - size_t try_run_size, good_run_size; - uint32_t try_nregs, good_nregs; - uint32_t try_hdr_size, good_hdr_size; - uint32_t try_bitmap_offset, good_bitmap_offset; - uint32_t try_ctx0_offset, good_ctx0_offset; - uint32_t try_redzone0_offset, good_redzone0_offset; - - assert(min_run_size >= PAGE); - assert(min_run_size <= arena_maxclass); - - /* - * Determine redzone size based on minimum alignment and minimum - * redzone size. Add padding to the end of the run if it is needed to - * align the regions. The padding allows each redzone to be half the - * minimum alignment; without the padding, each redzone would have to - * be twice as large in order to maintain alignment. - */ - if (config_fill && opt_redzone) { - size_t align_min = ZU(1) << (ffs(bin_info->reg_size) - 1); - if (align_min <= REDZONE_MINSIZE) { - bin_info->redzone_size = REDZONE_MINSIZE; - pad_size = 0; - } else { - bin_info->redzone_size = align_min >> 1; - pad_size = bin_info->redzone_size; - } - } else { - bin_info->redzone_size = 0; - pad_size = 0; - } - bin_info->reg_interval = bin_info->reg_size + - (bin_info->redzone_size << 1); - - /* - * Calculate known-valid settings before entering the run_size - * expansion loop, so that the first part of the loop always copies - * valid settings. - * - * The do..while loop iteratively reduces the number of regions until - * the run header and the regions no longer overlap. A closed formula - * would be quite messy, since there is an interdependency between the - * header's mask length and the number of regions. - */ - try_run_size = min_run_size; - try_nregs = ((try_run_size - sizeof(arena_run_t)) / - bin_info->reg_interval) - + 1; /* Counter-act try_nregs-- in loop. */ - if (try_nregs > RUN_MAXREGS) { - try_nregs = RUN_MAXREGS - + 1; /* Counter-act try_nregs-- in loop. */ - } - do { - try_nregs--; - try_hdr_size = sizeof(arena_run_t); - /* Pad to a long boundary. */ - try_hdr_size = LONG_CEILING(try_hdr_size); - try_bitmap_offset = try_hdr_size; - /* Add space for bitmap. */ - try_hdr_size += bitmap_size(try_nregs); - if (config_prof && opt_prof && prof_promote == false) { - /* Pad to a quantum boundary. */ - try_hdr_size = QUANTUM_CEILING(try_hdr_size); - try_ctx0_offset = try_hdr_size; - /* Add space for one (prof_ctx_t *) per region. */ - try_hdr_size += try_nregs * sizeof(prof_ctx_t *); - } else - try_ctx0_offset = 0; - try_redzone0_offset = try_run_size - (try_nregs * - bin_info->reg_interval) - pad_size; - } while (try_hdr_size > try_redzone0_offset); - - /* run_size expansion loop. */ - do { - /* - * Copy valid settings before trying more aggressive settings. - */ - good_run_size = try_run_size; - good_nregs = try_nregs; - good_hdr_size = try_hdr_size; - good_bitmap_offset = try_bitmap_offset; - good_ctx0_offset = try_ctx0_offset; - good_redzone0_offset = try_redzone0_offset; - - /* Try more aggressive settings. */ - try_run_size += PAGE; - try_nregs = ((try_run_size - sizeof(arena_run_t) - pad_size) / - bin_info->reg_interval) - + 1; /* Counter-act try_nregs-- in loop. */ - if (try_nregs > RUN_MAXREGS) { - try_nregs = RUN_MAXREGS - + 1; /* Counter-act try_nregs-- in loop. */ - } - do { - try_nregs--; - try_hdr_size = sizeof(arena_run_t); - /* Pad to a long boundary. 
*/ - try_hdr_size = LONG_CEILING(try_hdr_size); - try_bitmap_offset = try_hdr_size; - /* Add space for bitmap. */ - try_hdr_size += bitmap_size(try_nregs); - if (config_prof && opt_prof && prof_promote == false) { - /* Pad to a quantum boundary. */ - try_hdr_size = QUANTUM_CEILING(try_hdr_size); - try_ctx0_offset = try_hdr_size; - /* - * Add space for one (prof_ctx_t *) per region. - */ - try_hdr_size += try_nregs * - sizeof(prof_ctx_t *); - } - try_redzone0_offset = try_run_size - (try_nregs * - bin_info->reg_interval) - pad_size; - } while (try_hdr_size > try_redzone0_offset); - } while (try_run_size <= arena_maxclass - && RUN_MAX_OVRHD * (bin_info->reg_interval << 3) > - RUN_MAX_OVRHD_RELAX - && (try_redzone0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size - && try_nregs < RUN_MAXREGS); - - assert(good_hdr_size <= good_redzone0_offset); - - /* Copy final settings. */ - bin_info->run_size = good_run_size; - bin_info->nregs = good_nregs; - bin_info->bitmap_offset = good_bitmap_offset; - bin_info->ctx0_offset = good_ctx0_offset; - bin_info->reg0_offset = good_redzone0_offset + bin_info->redzone_size; - - assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs - * bin_info->reg_interval) + pad_size == bin_info->run_size); - - return (good_run_size); -} - -static void -bin_info_init(void) -{ - arena_bin_info_t *bin_info; - size_t prev_run_size = PAGE; - -#define SIZE_CLASS(bin, delta, size) \ - bin_info = &arena_bin_info[bin]; \ - bin_info->reg_size = size; \ - prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);\ - bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs); - SIZE_CLASSES -#undef SIZE_CLASS -} - -void -arena_boot(void) -{ - size_t header_size; - unsigned i; - - /* - * Compute the header size such that it is large enough to contain the - * page map. The page map is biased to omit entries for the header - * itself, so some iteration is necessary to compute the map bias. - * - * 1) Compute safe header_size and map_bias values that include enough - * space for an unbiased page map. - * 2) Refine map_bias based on (1) to omit the header pages in the page - * map. The resulting map_bias may be one too small. - * 3) Refine map_bias based on (2). The result will be >= the result - * from (2), and will always be correct. 
- */ - map_bias = 0; - for (i = 0; i < 3; i++) { - header_size = offsetof(arena_chunk_t, map) + - (sizeof(arena_chunk_map_t) * (chunk_npages-map_bias)); - map_bias = (header_size >> LG_PAGE) + ((header_size & PAGE_MASK) - != 0); - } - assert(map_bias > 0); - - arena_maxclass = chunksize - (map_bias << LG_PAGE); - - bin_info_init(); -} - -void -arena_prefork(arena_t *arena) -{ - unsigned i; - - malloc_mutex_prefork(&arena->lock); - for (i = 0; i < NBINS; i++) - malloc_mutex_prefork(&arena->bins[i].lock); -} - -void -arena_postfork_parent(arena_t *arena) -{ - unsigned i; - - for (i = 0; i < NBINS; i++) - malloc_mutex_postfork_parent(&arena->bins[i].lock); - malloc_mutex_postfork_parent(&arena->lock); -} - -void -arena_postfork_child(arena_t *arena) -{ - unsigned i; - - for (i = 0; i < NBINS; i++) - malloc_mutex_postfork_child(&arena->bins[i].lock); - malloc_mutex_postfork_child(&arena->lock); -} diff --git a/lib/jemalloc-3.6.0/src/atomic.c b/lib/jemalloc-3.6.0/src/atomic.c deleted file mode 100644 index 77ee313..0000000 --- a/lib/jemalloc-3.6.0/src/atomic.c +++ /dev/null @@ -1,2 +0,0 @@ -#define JEMALLOC_ATOMIC_C_ -#include "jemalloc/internal/jemalloc_internal.h" diff --git a/lib/jemalloc-3.6.0/src/base.c b/lib/jemalloc-3.6.0/src/base.c deleted file mode 100644 index 4e62e8f..0000000 --- a/lib/jemalloc-3.6.0/src/base.c +++ /dev/null @@ -1,142 +0,0 @@ -#define JEMALLOC_BASE_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ -/* Data. */ - -static malloc_mutex_t base_mtx; - -/* - * Current pages that are being used for internal memory allocations. These - * pages are carved up in cacheline-size quanta, so that there is no chance of - * false cache line sharing. - */ -static void *base_pages; -static void *base_next_addr; -static void *base_past_addr; /* Addr immediately past base_pages. */ -static extent_node_t *base_nodes; - -/******************************************************************************/ -/* Function prototypes for non-inline static functions. */ - -static bool base_pages_alloc(size_t minsize); - -/******************************************************************************/ - -static bool -base_pages_alloc(size_t minsize) -{ - size_t csize; - bool zero; - - assert(minsize != 0); - csize = CHUNK_CEILING(minsize); - zero = false; - base_pages = chunk_alloc(csize, chunksize, true, &zero, - chunk_dss_prec_get()); - if (base_pages == NULL) - return (true); - base_next_addr = base_pages; - base_past_addr = (void *)((uintptr_t)base_pages + csize); - - return (false); -} - -void * -base_alloc(size_t size) -{ - void *ret; - size_t csize; - - /* Round size up to nearest multiple of the cacheline size. */ - csize = CACHELINE_CEILING(size); - - malloc_mutex_lock(&base_mtx); - /* Make sure there's enough space for the allocation. */ - if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) { - if (base_pages_alloc(csize)) { - malloc_mutex_unlock(&base_mtx); - return (NULL); - } - } - /* Allocate. 
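Returning to arena_boot() above: the comment explains that the header size and map_bias depend on each other, so the code iterates three times to reach a stable value. The following worked example is an editorial sketch with invented sizes (HEADER_FIXED, MAP_ENTRY, CHUNK_NPAGES are stand-ins, not jemalloc's real values); it only shows how the fixed point settles.

#include <stddef.h>
#include <stdio.h>

/* Hypothetical sizes, for illustration only. */
#define PAGE_SIZE    4096u
#define HEADER_FIXED 128u   /* stand-in for offsetof(arena_chunk_t, map) */
#define MAP_ENTRY    32u    /* stand-in for sizeof(arena_chunk_map_t) */
#define CHUNK_NPAGES 1024u  /* e.g. 4 MiB chunk with 4 KiB pages */

int main(void) {
    size_t map_bias = 0;
    /* Three iterations reach the fixed point described in arena_boot(). */
    for (int i = 0; i < 3; i++) {
        size_t header_size = HEADER_FIXED + MAP_ENTRY * (CHUNK_NPAGES - map_bias);
        map_bias = header_size / PAGE_SIZE + (header_size % PAGE_SIZE != 0);
        printf("pass %d: header %zu bytes -> map_bias %zu pages\n",
               i, header_size, map_bias);
    }
    return 0;
}

With these numbers the estimate goes 0 -> 9 -> 8 -> 8: the first pass over-counts because it assumes an unbiased map, and the later passes shrink the header accordingly, matching the three-step refinement the comment describes.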
*/ - ret = base_next_addr; - base_next_addr = (void *)((uintptr_t)base_next_addr + csize); - malloc_mutex_unlock(&base_mtx); - VALGRIND_MAKE_MEM_UNDEFINED(ret, csize); - - return (ret); -} - -void * -base_calloc(size_t number, size_t size) -{ - void *ret = base_alloc(number * size); - - if (ret != NULL) - memset(ret, 0, number * size); - - return (ret); -} - -extent_node_t * -base_node_alloc(void) -{ - extent_node_t *ret; - - malloc_mutex_lock(&base_mtx); - if (base_nodes != NULL) { - ret = base_nodes; - base_nodes = *(extent_node_t **)ret; - malloc_mutex_unlock(&base_mtx); - VALGRIND_MAKE_MEM_UNDEFINED(ret, sizeof(extent_node_t)); - } else { - malloc_mutex_unlock(&base_mtx); - ret = (extent_node_t *)base_alloc(sizeof(extent_node_t)); - } - - return (ret); -} - -void -base_node_dealloc(extent_node_t *node) -{ - - VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t)); - malloc_mutex_lock(&base_mtx); - *(extent_node_t **)node = base_nodes; - base_nodes = node; - malloc_mutex_unlock(&base_mtx); -} - -bool -base_boot(void) -{ - - base_nodes = NULL; - if (malloc_mutex_init(&base_mtx)) - return (true); - - return (false); -} - -void -base_prefork(void) -{ - - malloc_mutex_prefork(&base_mtx); -} - -void -base_postfork_parent(void) -{ - - malloc_mutex_postfork_parent(&base_mtx); -} - -void -base_postfork_child(void) -{ - - malloc_mutex_postfork_child(&base_mtx); -} diff --git a/lib/jemalloc-3.6.0/src/bitmap.c b/lib/jemalloc-3.6.0/src/bitmap.c deleted file mode 100644 index e2bd907..0000000 --- a/lib/jemalloc-3.6.0/src/bitmap.c +++ /dev/null @@ -1,90 +0,0 @@ -#define JEMALLOC_BITMAP_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ -/* Function prototypes for non-inline static functions. */ - -static size_t bits2groups(size_t nbits); - -/******************************************************************************/ - -static size_t -bits2groups(size_t nbits) -{ - - return ((nbits >> LG_BITMAP_GROUP_NBITS) + - !!(nbits & BITMAP_GROUP_NBITS_MASK)); -} - -void -bitmap_info_init(bitmap_info_t *binfo, size_t nbits) -{ - unsigned i; - size_t group_count; - - assert(nbits > 0); - assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS)); - - /* - * Compute the number of groups necessary to store nbits bits, and - * progressively work upward through the levels until reaching a level - * that requires only one group. - */ - binfo->levels[0].group_offset = 0; - group_count = bits2groups(nbits); - for (i = 1; group_count > 1; i++) { - assert(i < BITMAP_MAX_LEVELS); - binfo->levels[i].group_offset = binfo->levels[i-1].group_offset - + group_count; - group_count = bits2groups(group_count); - } - binfo->levels[i].group_offset = binfo->levels[i-1].group_offset - + group_count; - binfo->nlevels = i; - binfo->nbits = nbits; -} - -size_t -bitmap_info_ngroups(const bitmap_info_t *binfo) -{ - - return (binfo->levels[binfo->nlevels].group_offset << LG_SIZEOF_BITMAP); -} - -size_t -bitmap_size(size_t nbits) -{ - bitmap_info_t binfo; - - bitmap_info_init(&binfo, nbits); - return (bitmap_info_ngroups(&binfo)); -} - -void -bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo) -{ - size_t extra; - unsigned i; - - /* - * Bits are actually inverted with regard to the external bitmap - * interface, so the bitmap starts out with all 1 bits, except for - * trailing unused bits (if any). Note that each group uses bit 0 to - * correspond to the first logical bit in the group, so extra bits - * are the most significant bits of the last group. 
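base_alloc() above is a simple bump allocator over chunk-sized slabs: every request is rounded up to a cacheline multiple so that internal allocations never share a cache line, and a new chunk is grabbed only when the current one runs out. A stripped-down editorial sketch of the same idea (fixed buffer instead of chunks, no locking, names invented):

#include <stddef.h>

#define CACHELINE 64u

static unsigned char base_pages[1 << 20];   /* pretend chunk */
static size_t        base_next;             /* bump offset into base_pages */

/* Round size up to the next cacheline multiple, then bump-allocate. */
static void *base_alloc_sketch(size_t size) {
    size_t csize = (size + CACHELINE - 1) & ~(size_t)(CACHELINE - 1);
    if (base_next + csize > sizeof(base_pages))
        return NULL;                        /* real code allocates a new chunk here */
    void *ret = base_pages + base_next;
    base_next += csize;
    return ret;
}

int main(void) {
    void *a = base_alloc_sketch(10);        /* occupies 64 bytes */
    void *b = base_alloc_sketch(100);       /* occupies 128 bytes */
    return (a != NULL && b != NULL) ? 0 : 1;
}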
- */ - memset(bitmap, 0xffU, binfo->levels[binfo->nlevels].group_offset << - LG_SIZEOF_BITMAP); - extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK)) - & BITMAP_GROUP_NBITS_MASK; - if (extra != 0) - bitmap[binfo->levels[1].group_offset - 1] >>= extra; - for (i = 1; i < binfo->nlevels; i++) { - size_t group_count = binfo->levels[i].group_offset - - binfo->levels[i-1].group_offset; - extra = (BITMAP_GROUP_NBITS - (group_count & - BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK; - if (extra != 0) - bitmap[binfo->levels[i+1].group_offset - 1] >>= extra; - } -} diff --git a/lib/jemalloc-3.6.0/src/chunk.c b/lib/jemalloc-3.6.0/src/chunk.c deleted file mode 100644 index 90ab116..0000000 --- a/lib/jemalloc-3.6.0/src/chunk.c +++ /dev/null @@ -1,395 +0,0 @@ -#define JEMALLOC_CHUNK_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ -/* Data. */ - -const char *opt_dss = DSS_DEFAULT; -size_t opt_lg_chunk = LG_CHUNK_DEFAULT; - -malloc_mutex_t chunks_mtx; -chunk_stats_t stats_chunks; - -/* - * Trees of chunks that were previously allocated (trees differ only in node - * ordering). These are used when allocating chunks, in an attempt to re-use - * address space. Depending on function, different tree orderings are needed, - * which is why there are two trees with the same contents. - */ -static extent_tree_t chunks_szad_mmap; -static extent_tree_t chunks_ad_mmap; -static extent_tree_t chunks_szad_dss; -static extent_tree_t chunks_ad_dss; - -rtree_t *chunks_rtree; - -/* Various chunk-related settings. */ -size_t chunksize; -size_t chunksize_mask; /* (chunksize - 1). */ -size_t chunk_npages; -size_t map_bias; -size_t arena_maxclass; /* Max size class for arenas. */ - -/******************************************************************************/ -/* Function prototypes for non-inline static functions. */ - -static void *chunk_recycle(extent_tree_t *chunks_szad, - extent_tree_t *chunks_ad, size_t size, size_t alignment, bool base, - bool *zero); -static void chunk_record(extent_tree_t *chunks_szad, - extent_tree_t *chunks_ad, void *chunk, size_t size); - -/******************************************************************************/ - -static void * -chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size, - size_t alignment, bool base, bool *zero) -{ - void *ret; - extent_node_t *node; - extent_node_t key; - size_t alloc_size, leadsize, trailsize; - bool zeroed; - - if (base) { - /* - * This function may need to call base_node_{,de}alloc(), but - * the current chunk allocation request is on behalf of the - * base allocator. Avoid deadlock (and if that weren't an - * issue, potential for infinite recursion) by returning NULL. - */ - return (NULL); - } - - alloc_size = size + alignment - chunksize; - /* Beware size_t wrap-around. */ - if (alloc_size < size) - return (NULL); - key.addr = NULL; - key.size = alloc_size; - malloc_mutex_lock(&chunks_mtx); - node = extent_tree_szad_nsearch(chunks_szad, &key); - if (node == NULL) { - malloc_mutex_unlock(&chunks_mtx); - return (NULL); - } - leadsize = ALIGNMENT_CEILING((uintptr_t)node->addr, alignment) - - (uintptr_t)node->addr; - assert(node->size >= leadsize + size); - trailsize = node->size - leadsize - size; - ret = (void *)((uintptr_t)node->addr + leadsize); - zeroed = node->zeroed; - if (zeroed) - *zero = true; - /* Remove node from the tree. 
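The bitmap code above (bits2groups, bitmap_info_init) builds a multi-level summary: level 0 needs ceil(nbits / GROUP_NBITS) machine-word groups, level 1 summarises those groups, and so on until a single group covers everything. The short editorial sketch below just prints that level structure for an invented region count; it is not the removed code.

#include <stddef.h>
#include <stdio.h>

#define LG_GROUP_NBITS 6                    /* 64-bit bitmap groups */
#define GROUP_NBITS    (1u << LG_GROUP_NBITS)

/* ceil(nbits / GROUP_NBITS), as in bits2groups() above. */
static size_t bits2groups(size_t nbits) {
    return (nbits >> LG_GROUP_NBITS) + ((nbits & (GROUP_NBITS - 1)) != 0);
}

int main(void) {
    size_t nbits = 500;                     /* e.g. 500 regions in a run */
    size_t level = 0;
    /* Walk up the levels until a single group summarises everything. */
    for (size_t groups = bits2groups(nbits); ; groups = bits2groups(groups)) {
        printf("level %zu: %zu group(s)\n", level++, groups);
        if (groups == 1)
            break;
    }
    return 0;
}

For 500 bits this prints 8 groups at level 0 and 1 group at level 1, mirroring the group_offset bookkeeping in bitmap_info_init().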
*/ - extent_tree_szad_remove(chunks_szad, node); - extent_tree_ad_remove(chunks_ad, node); - if (leadsize != 0) { - /* Insert the leading space as a smaller chunk. */ - node->size = leadsize; - extent_tree_szad_insert(chunks_szad, node); - extent_tree_ad_insert(chunks_ad, node); - node = NULL; - } - if (trailsize != 0) { - /* Insert the trailing space as a smaller chunk. */ - if (node == NULL) { - /* - * An additional node is required, but - * base_node_alloc() can cause a new base chunk to be - * allocated. Drop chunks_mtx in order to avoid - * deadlock, and if node allocation fails, deallocate - * the result before returning an error. - */ - malloc_mutex_unlock(&chunks_mtx); - node = base_node_alloc(); - if (node == NULL) { - chunk_dealloc(ret, size, true); - return (NULL); - } - malloc_mutex_lock(&chunks_mtx); - } - node->addr = (void *)((uintptr_t)(ret) + size); - node->size = trailsize; - node->zeroed = zeroed; - extent_tree_szad_insert(chunks_szad, node); - extent_tree_ad_insert(chunks_ad, node); - node = NULL; - } - malloc_mutex_unlock(&chunks_mtx); - - if (node != NULL) - base_node_dealloc(node); - if (*zero) { - if (zeroed == false) - memset(ret, 0, size); - else if (config_debug) { - size_t i; - size_t *p = (size_t *)(uintptr_t)ret; - - VALGRIND_MAKE_MEM_DEFINED(ret, size); - for (i = 0; i < size / sizeof(size_t); i++) - assert(p[i] == 0); - } - } - return (ret); -} - -/* - * If the caller specifies (*zero == false), it is still possible to receive - * zeroed memory, in which case *zero is toggled to true. arena_chunk_alloc() - * takes advantage of this to avoid demanding zeroed chunks, but taking - * advantage of them if they are returned. - */ -void * -chunk_alloc(size_t size, size_t alignment, bool base, bool *zero, - dss_prec_t dss_prec) -{ - void *ret; - - assert(size != 0); - assert((size & chunksize_mask) == 0); - assert(alignment != 0); - assert((alignment & chunksize_mask) == 0); - - /* "primary" dss. */ - if (config_dss && dss_prec == dss_prec_primary) { - if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size, - alignment, base, zero)) != NULL) - goto label_return; - if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL) - goto label_return; - } - /* mmap. */ - if ((ret = chunk_recycle(&chunks_szad_mmap, &chunks_ad_mmap, size, - alignment, base, zero)) != NULL) - goto label_return; - if ((ret = chunk_alloc_mmap(size, alignment, zero)) != NULL) - goto label_return; - /* "secondary" dss. */ - if (config_dss && dss_prec == dss_prec_secondary) { - if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size, - alignment, base, zero)) != NULL) - goto label_return; - if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL) - goto label_return; - } - - /* All strategies for allocation failed. 
*/ - ret = NULL; -label_return: - if (ret != NULL) { - if (config_ivsalloc && base == false) { - if (rtree_set(chunks_rtree, (uintptr_t)ret, 1)) { - chunk_dealloc(ret, size, true); - return (NULL); - } - } - if (config_stats || config_prof) { - bool gdump; - malloc_mutex_lock(&chunks_mtx); - if (config_stats) - stats_chunks.nchunks += (size / chunksize); - stats_chunks.curchunks += (size / chunksize); - if (stats_chunks.curchunks > stats_chunks.highchunks) { - stats_chunks.highchunks = - stats_chunks.curchunks; - if (config_prof) - gdump = true; - } else if (config_prof) - gdump = false; - malloc_mutex_unlock(&chunks_mtx); - if (config_prof && opt_prof && opt_prof_gdump && gdump) - prof_gdump(); - } - if (config_valgrind) - VALGRIND_MAKE_MEM_UNDEFINED(ret, size); - } - assert(CHUNK_ADDR2BASE(ret) == ret); - return (ret); -} - -static void -chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk, - size_t size) -{ - bool unzeroed; - extent_node_t *xnode, *node, *prev, *xprev, key; - - unzeroed = pages_purge(chunk, size); - VALGRIND_MAKE_MEM_NOACCESS(chunk, size); - - /* - * Allocate a node before acquiring chunks_mtx even though it might not - * be needed, because base_node_alloc() may cause a new base chunk to - * be allocated, which could cause deadlock if chunks_mtx were already - * held. - */ - xnode = base_node_alloc(); - /* Use xprev to implement conditional deferred deallocation of prev. */ - xprev = NULL; - - malloc_mutex_lock(&chunks_mtx); - key.addr = (void *)((uintptr_t)chunk + size); - node = extent_tree_ad_nsearch(chunks_ad, &key); - /* Try to coalesce forward. */ - if (node != NULL && node->addr == key.addr) { - /* - * Coalesce chunk with the following address range. This does - * not change the position within chunks_ad, so only - * remove/insert from/into chunks_szad. - */ - extent_tree_szad_remove(chunks_szad, node); - node->addr = chunk; - node->size += size; - node->zeroed = (node->zeroed && (unzeroed == false)); - extent_tree_szad_insert(chunks_szad, node); - } else { - /* Coalescing forward failed, so insert a new node. */ - if (xnode == NULL) { - /* - * base_node_alloc() failed, which is an exceedingly - * unlikely failure. Leak chunk; its pages have - * already been purged, so this is only a virtual - * memory leak. - */ - goto label_return; - } - node = xnode; - xnode = NULL; /* Prevent deallocation below. */ - node->addr = chunk; - node->size = size; - node->zeroed = (unzeroed == false); - extent_tree_ad_insert(chunks_ad, node); - extent_tree_szad_insert(chunks_szad, node); - } - - /* Try to coalesce backward. */ - prev = extent_tree_ad_prev(chunks_ad, node); - if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) == - chunk) { - /* - * Coalesce chunk with the previous address range. This does - * not change the position within chunks_ad, so only - * remove/insert node from/into chunks_szad. - */ - extent_tree_szad_remove(chunks_szad, prev); - extent_tree_ad_remove(chunks_ad, prev); - - extent_tree_szad_remove(chunks_szad, node); - node->addr = prev->addr; - node->size += prev->size; - node->zeroed = (node->zeroed && prev->zeroed); - extent_tree_szad_insert(chunks_szad, node); - - xprev = prev; - } - -label_return: - malloc_mutex_unlock(&chunks_mtx); - /* - * Deallocate xnode and/or xprev after unlocking chunks_mtx in order to - * avoid potential deadlock. 
- */ - if (xnode != NULL) - base_node_dealloc(xnode); - if (xprev != NULL) - base_node_dealloc(xprev); -} - -void -chunk_unmap(void *chunk, size_t size) -{ - assert(chunk != NULL); - assert(CHUNK_ADDR2BASE(chunk) == chunk); - assert(size != 0); - assert((size & chunksize_mask) == 0); - - if (config_dss && chunk_in_dss(chunk)) - chunk_record(&chunks_szad_dss, &chunks_ad_dss, chunk, size); - else if (chunk_dealloc_mmap(chunk, size)) - chunk_record(&chunks_szad_mmap, &chunks_ad_mmap, chunk, size); -} - -void -chunk_dealloc(void *chunk, size_t size, bool unmap) -{ - - assert(chunk != NULL); - assert(CHUNK_ADDR2BASE(chunk) == chunk); - assert(size != 0); - assert((size & chunksize_mask) == 0); - - if (config_ivsalloc) - rtree_set(chunks_rtree, (uintptr_t)chunk, 0); - if (config_stats || config_prof) { - malloc_mutex_lock(&chunks_mtx); - assert(stats_chunks.curchunks >= (size / chunksize)); - stats_chunks.curchunks -= (size / chunksize); - malloc_mutex_unlock(&chunks_mtx); - } - - if (unmap) - chunk_unmap(chunk, size); -} - -bool -chunk_boot(void) -{ - - /* Set variables according to the value of opt_lg_chunk. */ - chunksize = (ZU(1) << opt_lg_chunk); - assert(chunksize >= PAGE); - chunksize_mask = chunksize - 1; - chunk_npages = (chunksize >> LG_PAGE); - - if (config_stats || config_prof) { - if (malloc_mutex_init(&chunks_mtx)) - return (true); - memset(&stats_chunks, 0, sizeof(chunk_stats_t)); - } - if (config_dss && chunk_dss_boot()) - return (true); - extent_tree_szad_new(&chunks_szad_mmap); - extent_tree_ad_new(&chunks_ad_mmap); - extent_tree_szad_new(&chunks_szad_dss); - extent_tree_ad_new(&chunks_ad_dss); - if (config_ivsalloc) { - chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) - - opt_lg_chunk, base_alloc, NULL); - if (chunks_rtree == NULL) - return (true); - } - - return (false); -} - -void -chunk_prefork(void) -{ - - malloc_mutex_prefork(&chunks_mtx); - if (config_ivsalloc) - rtree_prefork(chunks_rtree); - chunk_dss_prefork(); -} - -void -chunk_postfork_parent(void) -{ - - chunk_dss_postfork_parent(); - if (config_ivsalloc) - rtree_postfork_parent(chunks_rtree); - malloc_mutex_postfork_parent(&chunks_mtx); -} - -void -chunk_postfork_child(void) -{ - - chunk_dss_postfork_child(); - if (config_ivsalloc) - rtree_postfork_child(chunks_rtree); - malloc_mutex_postfork_child(&chunks_mtx); -} diff --git a/lib/jemalloc-3.6.0/src/chunk_dss.c b/lib/jemalloc-3.6.0/src/chunk_dss.c deleted file mode 100644 index 510bb8b..0000000 --- a/lib/jemalloc-3.6.0/src/chunk_dss.c +++ /dev/null @@ -1,198 +0,0 @@ -#define JEMALLOC_CHUNK_DSS_C_ -#include "jemalloc/internal/jemalloc_internal.h" -/******************************************************************************/ -/* Data. */ - -const char *dss_prec_names[] = { - "disabled", - "primary", - "secondary", - "N/A" -}; - -/* Current dss precedence default, used when creating new arenas. */ -static dss_prec_t dss_prec_default = DSS_PREC_DEFAULT; - -/* - * Protects sbrk() calls. This avoids malloc races among threads, though it - * does not protect against races with threads that call sbrk() directly. - */ -static malloc_mutex_t dss_mtx; - -/* Base address of the DSS. */ -static void *dss_base; -/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */ -static void *dss_prev; -/* Current upper limit on DSS addresses. 
*/ -static void *dss_max; - -/******************************************************************************/ - -static void * -chunk_dss_sbrk(intptr_t increment) -{ - -#ifdef JEMALLOC_HAVE_SBRK - return (sbrk(increment)); -#else - not_implemented(); - return (NULL); -#endif -} - -dss_prec_t -chunk_dss_prec_get(void) -{ - dss_prec_t ret; - - if (config_dss == false) - return (dss_prec_disabled); - malloc_mutex_lock(&dss_mtx); - ret = dss_prec_default; - malloc_mutex_unlock(&dss_mtx); - return (ret); -} - -bool -chunk_dss_prec_set(dss_prec_t dss_prec) -{ - - if (config_dss == false) - return (true); - malloc_mutex_lock(&dss_mtx); - dss_prec_default = dss_prec; - malloc_mutex_unlock(&dss_mtx); - return (false); -} - -void * -chunk_alloc_dss(size_t size, size_t alignment, bool *zero) -{ - void *ret; - - cassert(config_dss); - assert(size > 0 && (size & chunksize_mask) == 0); - assert(alignment > 0 && (alignment & chunksize_mask) == 0); - - /* - * sbrk() uses a signed increment argument, so take care not to - * interpret a huge allocation request as a negative increment. - */ - if ((intptr_t)size < 0) - return (NULL); - - malloc_mutex_lock(&dss_mtx); - if (dss_prev != (void *)-1) { - size_t gap_size, cpad_size; - void *cpad, *dss_next; - intptr_t incr; - - /* - * The loop is necessary to recover from races with other - * threads that are using the DSS for something other than - * malloc. - */ - do { - /* Get the current end of the DSS. */ - dss_max = chunk_dss_sbrk(0); - /* - * Calculate how much padding is necessary to - * chunk-align the end of the DSS. - */ - gap_size = (chunksize - CHUNK_ADDR2OFFSET(dss_max)) & - chunksize_mask; - /* - * Compute how much chunk-aligned pad space (if any) is - * necessary to satisfy alignment. This space can be - * recycled for later use. - */ - cpad = (void *)((uintptr_t)dss_max + gap_size); - ret = (void *)ALIGNMENT_CEILING((uintptr_t)dss_max, - alignment); - cpad_size = (uintptr_t)ret - (uintptr_t)cpad; - dss_next = (void *)((uintptr_t)ret + size); - if ((uintptr_t)ret < (uintptr_t)dss_max || - (uintptr_t)dss_next < (uintptr_t)dss_max) { - /* Wrap-around. */ - malloc_mutex_unlock(&dss_mtx); - return (NULL); - } - incr = gap_size + cpad_size + size; - dss_prev = chunk_dss_sbrk(incr); - if (dss_prev == dss_max) { - /* Success. 
*/ - dss_max = dss_next; - malloc_mutex_unlock(&dss_mtx); - if (cpad_size != 0) - chunk_unmap(cpad, cpad_size); - if (*zero) { - VALGRIND_MAKE_MEM_UNDEFINED(ret, size); - memset(ret, 0, size); - } - return (ret); - } - } while (dss_prev != (void *)-1); - } - malloc_mutex_unlock(&dss_mtx); - - return (NULL); -} - -bool -chunk_in_dss(void *chunk) -{ - bool ret; - - cassert(config_dss); - - malloc_mutex_lock(&dss_mtx); - if ((uintptr_t)chunk >= (uintptr_t)dss_base - && (uintptr_t)chunk < (uintptr_t)dss_max) - ret = true; - else - ret = false; - malloc_mutex_unlock(&dss_mtx); - - return (ret); -} - -bool -chunk_dss_boot(void) -{ - - cassert(config_dss); - - if (malloc_mutex_init(&dss_mtx)) - return (true); - dss_base = chunk_dss_sbrk(0); - dss_prev = dss_base; - dss_max = dss_base; - - return (false); -} - -void -chunk_dss_prefork(void) -{ - - if (config_dss) - malloc_mutex_prefork(&dss_mtx); -} - -void -chunk_dss_postfork_parent(void) -{ - - if (config_dss) - malloc_mutex_postfork_parent(&dss_mtx); -} - -void -chunk_dss_postfork_child(void) -{ - - if (config_dss) - malloc_mutex_postfork_child(&dss_mtx); -} - -/******************************************************************************/ diff --git a/lib/jemalloc-3.6.0/src/chunk_mmap.c b/lib/jemalloc-3.6.0/src/chunk_mmap.c deleted file mode 100644 index 2056d79..0000000 --- a/lib/jemalloc-3.6.0/src/chunk_mmap.c +++ /dev/null @@ -1,210 +0,0 @@ -#define JEMALLOC_CHUNK_MMAP_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ -/* Function prototypes for non-inline static functions. */ - -static void *pages_map(void *addr, size_t size); -static void pages_unmap(void *addr, size_t size); -static void *chunk_alloc_mmap_slow(size_t size, size_t alignment, - bool *zero); - -/******************************************************************************/ - -static void * -pages_map(void *addr, size_t size) -{ - void *ret; - - assert(size != 0); - -#ifdef _WIN32 - /* - * If VirtualAlloc can't allocate at the given address when one is - * given, it fails and returns NULL. - */ - ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE, - PAGE_READWRITE); -#else - /* - * We don't use MAP_FIXED here, because it can cause the *replacement* - * of existing mappings, and we only want to create new mappings. - */ - ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, - -1, 0); - assert(ret != NULL); - - if (ret == MAP_FAILED) - ret = NULL; - else if (addr != NULL && ret != addr) { - /* - * We succeeded in mapping memory, but not in the right place. 
- */ - if (munmap(ret, size) == -1) { - char buf[BUFERROR_BUF]; - - buferror(get_errno(), buf, sizeof(buf)); - malloc_printf(": Error in " -#ifdef _WIN32 - "VirtualFree" -#else - "munmap" -#endif - "(): %s\n", buf); - if (opt_abort) - abort(); - } -} - -static void * -pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size) -{ - void *ret = (void *)((uintptr_t)addr + leadsize); - - assert(alloc_size >= leadsize + size); -#ifdef _WIN32 - { - void *new_addr; - - pages_unmap(addr, alloc_size); - new_addr = pages_map(ret, size); - if (new_addr == ret) - return (ret); - if (new_addr) - pages_unmap(new_addr, size); - return (NULL); - } -#else - { - size_t trailsize = alloc_size - leadsize - size; - - if (leadsize != 0) - pages_unmap(addr, leadsize); - if (trailsize != 0) - pages_unmap((void *)((uintptr_t)ret + size), trailsize); - return (ret); - } -#endif -} - -bool -pages_purge(void *addr, size_t length) -{ - bool unzeroed; - -#ifdef _WIN32 - VirtualAlloc(addr, length, MEM_RESET, PAGE_READWRITE); - unzeroed = true; -#else -# ifdef JEMALLOC_PURGE_MADVISE_DONTNEED -# define JEMALLOC_MADV_PURGE MADV_DONTNEED -# define JEMALLOC_MADV_ZEROS true -# elif defined(JEMALLOC_PURGE_MADVISE_FREE) -# define JEMALLOC_MADV_PURGE MADV_FREE -# define JEMALLOC_MADV_ZEROS false -# else -# error "No method defined for purging unused dirty pages." -# endif - int err = madvise(addr, length, JEMALLOC_MADV_PURGE); - unzeroed = (JEMALLOC_MADV_ZEROS == false || err != 0); -# undef JEMALLOC_MADV_PURGE -# undef JEMALLOC_MADV_ZEROS -#endif - return (unzeroed); -} - -static void * -chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero) -{ - void *ret, *pages; - size_t alloc_size, leadsize; - - alloc_size = size + alignment - PAGE; - /* Beware size_t wrap-around. */ - if (alloc_size < size) - return (NULL); - do { - pages = pages_map(NULL, alloc_size); - if (pages == NULL) - return (NULL); - leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) - - (uintptr_t)pages; - ret = pages_trim(pages, alloc_size, leadsize, size); - } while (ret == NULL); - - assert(ret != NULL); - *zero = true; - return (ret); -} - -void * -chunk_alloc_mmap(size_t size, size_t alignment, bool *zero) -{ - void *ret; - size_t offset; - - /* - * Ideally, there would be a way to specify alignment to mmap() (like - * NetBSD has), but in the absence of such a feature, we have to work - * hard to efficiently create aligned mappings. The reliable, but - * slow method is to create a mapping that is over-sized, then trim the - * excess. However, that always results in one or two calls to - * pages_unmap(). - * - * Optimistically try mapping precisely the right amount before falling - * back to the slow method, with the expectation that the optimistic - * approach works most of the time. 
- */ - - assert(alignment != 0); - assert((alignment & chunksize_mask) == 0); - - ret = pages_map(NULL, size); - if (ret == NULL) - return (NULL); - offset = ALIGNMENT_ADDR2OFFSET(ret, alignment); - if (offset != 0) { - pages_unmap(ret, size); - return (chunk_alloc_mmap_slow(size, alignment, zero)); - } - - assert(ret != NULL); - *zero = true; - return (ret); -} - -bool -chunk_dealloc_mmap(void *chunk, size_t size) -{ - - if (config_munmap) - pages_unmap(chunk, size); - - return (config_munmap == false); -} diff --git a/lib/jemalloc-3.6.0/src/ckh.c b/lib/jemalloc-3.6.0/src/ckh.c deleted file mode 100644 index 04c5296..0000000 --- a/lib/jemalloc-3.6.0/src/ckh.c +++ /dev/null @@ -1,563 +0,0 @@ -/* - ******************************************************************************* - * Implementation of (2^1+,2) cuckoo hashing, where 2^1+ indicates that each - * hash bucket contains 2^n cells, for n >= 1, and 2 indicates that two hash - * functions are employed. The original cuckoo hashing algorithm was described - * in: - * - * Pagh, R., F.F. Rodler (2004) Cuckoo Hashing. Journal of Algorithms - * 51(2):122-144. - * - * Generalization of cuckoo hashing was discussed in: - * - * Erlingsson, U., M. Manasse, F. McSherry (2006) A cool and practical - * alternative to traditional hash tables. In Proceedings of the 7th - * Workshop on Distributed Data and Structures (WDAS'06), Santa Clara, CA, - * January 2006. - * - * This implementation uses precisely two hash functions because that is the - * fewest that can work, and supporting multiple hashes is an implementation - * burden. Here is a reproduction of Figure 1 from Erlingsson et al. (2006) - * that shows approximate expected maximum load factors for various - * configurations: - * - * | #cells/bucket | - * #hashes | 1 | 2 | 4 | 8 | - * --------+-------+-------+-------+-------+ - * 1 | 0.006 | 0.006 | 0.03 | 0.12 | - * 2 | 0.49 | 0.86 |>0.93< |>0.96< | - * 3 | 0.91 | 0.97 | 0.98 | 0.999 | - * 4 | 0.97 | 0.99 | 0.999 | | - * - * The number of cells per bucket is chosen such that a bucket fits in one cache - * line. So, on 32- and 64-bit systems, we use (8,2) and (4,2) cuckoo hashing, - * respectively. - * - ******************************************************************************/ -#define JEMALLOC_CKH_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ -/* Function prototypes for non-inline static functions. */ - -static bool ckh_grow(ckh_t *ckh); -static void ckh_shrink(ckh_t *ckh); - -/******************************************************************************/ - -/* - * Search bucket for key and return the cell number if found; SIZE_T_MAX - * otherwise. - */ -JEMALLOC_INLINE_C size_t -ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key) -{ - ckhc_t *cell; - unsigned i; - - for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) { - cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i]; - if (cell->key != NULL && ckh->keycomp(key, cell->key)) - return ((bucket << LG_CKH_BUCKET_CELLS) + i); - } - - return (SIZE_T_MAX); -} - -/* - * Search table for key and return cell number if found; SIZE_T_MAX otherwise. - */ -JEMALLOC_INLINE_C size_t -ckh_isearch(ckh_t *ckh, const void *key) -{ - size_t hashes[2], bucket, cell; - - assert(ckh != NULL); - - ckh->hash(key, hashes); - - /* Search primary bucket. 
*/ - bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1); - cell = ckh_bucket_search(ckh, bucket, key); - if (cell != SIZE_T_MAX) - return (cell); - - /* Search secondary bucket. */ - bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1); - cell = ckh_bucket_search(ckh, bucket, key); - return (cell); -} - -JEMALLOC_INLINE_C bool -ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key, - const void *data) -{ - ckhc_t *cell; - unsigned offset, i; - - /* - * Cycle through the cells in the bucket, starting at a random position. - * The randomness avoids worst-case search overhead as buckets fill up. - */ - prng32(offset, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C); - for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) { - cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + - ((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))]; - if (cell->key == NULL) { - cell->key = key; - cell->data = data; - ckh->count++; - return (false); - } - } - - return (true); -} - -/* - * No space is available in bucket. Randomly evict an item, then try to find an - * alternate location for that item. Iteratively repeat this - * eviction/relocation procedure until either success or detection of an - * eviction/relocation bucket cycle. - */ -JEMALLOC_INLINE_C bool -ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey, - void const **argdata) -{ - const void *key, *data, *tkey, *tdata; - ckhc_t *cell; - size_t hashes[2], bucket, tbucket; - unsigned i; - - bucket = argbucket; - key = *argkey; - data = *argdata; - while (true) { - /* - * Choose a random item within the bucket to evict. This is - * critical to correct function, because without (eventually) - * evicting all items within a bucket during iteration, it - * would be possible to get stuck in an infinite loop if there - * were an item for which both hashes indicated the same - * bucket. - */ - prng32(i, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C); - cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i]; - assert(cell->key != NULL); - - /* Swap cell->{key,data} and {key,data} (evict). */ - tkey = cell->key; tdata = cell->data; - cell->key = key; cell->data = data; - key = tkey; data = tdata; - -#ifdef CKH_COUNT - ckh->nrelocs++; -#endif - - /* Find the alternate bucket for the evicted item. */ - ckh->hash(key, hashes); - tbucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1); - if (tbucket == bucket) { - tbucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - - 1); - /* - * It may be that (tbucket == bucket) still, if the - * item's hashes both indicate this bucket. However, - * we are guaranteed to eventually escape this bucket - * during iteration, assuming pseudo-random item - * selection (true randomness would make infinite - * looping a remote possibility). The reason we can - * never get trapped forever is that there are two - * cases: - * - * 1) This bucket == argbucket, so we will quickly - * detect an eviction cycle and terminate. - * 2) An item was evicted to this bucket from another, - * which means that at least one item in this bucket - * has hashes that indicate distinct buckets. - */ - } - /* Check for a cycle. 
*/ - if (tbucket == argbucket) { - *argkey = key; - *argdata = data; - return (true); - } - - bucket = tbucket; - if (ckh_try_bucket_insert(ckh, bucket, key, data) == false) - return (false); - } -} - -JEMALLOC_INLINE_C bool -ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) -{ - size_t hashes[2], bucket; - const void *key = *argkey; - const void *data = *argdata; - - ckh->hash(key, hashes); - - /* Try to insert in primary bucket. */ - bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1); - if (ckh_try_bucket_insert(ckh, bucket, key, data) == false) - return (false); - - /* Try to insert in secondary bucket. */ - bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1); - if (ckh_try_bucket_insert(ckh, bucket, key, data) == false) - return (false); - - /* - * Try to find a place for this item via iterative eviction/relocation. - */ - return (ckh_evict_reloc_insert(ckh, bucket, argkey, argdata)); -} - -/* - * Try to rebuild the hash table from scratch by inserting all items from the - * old table into the new. - */ -JEMALLOC_INLINE_C bool -ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) -{ - size_t count, i, nins; - const void *key, *data; - - count = ckh->count; - ckh->count = 0; - for (i = nins = 0; nins < count; i++) { - if (aTab[i].key != NULL) { - key = aTab[i].key; - data = aTab[i].data; - if (ckh_try_insert(ckh, &key, &data)) { - ckh->count = count; - return (true); - } - nins++; - } - } - - return (false); -} - -static bool -ckh_grow(ckh_t *ckh) -{ - bool ret; - ckhc_t *tab, *ttab; - size_t lg_curcells; - unsigned lg_prevbuckets; - -#ifdef CKH_COUNT - ckh->ngrows++; -#endif - - /* - * It is possible (though unlikely, given well behaved hashes) that the - * table will have to be doubled more than once in order to create a - * usable table. - */ - lg_prevbuckets = ckh->lg_curbuckets; - lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS; - while (true) { - size_t usize; - - lg_curcells++; - usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); - if (usize == 0) { - ret = true; - goto label_return; - } - tab = (ckhc_t *)ipalloc(usize, CACHELINE, true); - if (tab == NULL) { - ret = true; - goto label_return; - } - /* Swap in new table. */ - ttab = ckh->tab; - ckh->tab = tab; - tab = ttab; - ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS; - - if (ckh_rebuild(ckh, tab) == false) { - idalloc(tab); - break; - } - - /* Rebuilding failed, so back out partially rebuilt table. */ - idalloc(ckh->tab); - ckh->tab = tab; - ckh->lg_curbuckets = lg_prevbuckets; - } - - ret = false; -label_return: - return (ret); -} - -static void -ckh_shrink(ckh_t *ckh) -{ - ckhc_t *tab, *ttab; - size_t lg_curcells, usize; - unsigned lg_prevbuckets; - - /* - * It is possible (though unlikely, given well behaved hashes) that the - * table rebuild will fail. - */ - lg_prevbuckets = ckh->lg_curbuckets; - lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1; - usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); - if (usize == 0) - return; - tab = (ckhc_t *)ipalloc(usize, CACHELINE, true); - if (tab == NULL) { - /* - * An OOM error isn't worth propagating, since it doesn't - * prevent this or future operations from proceeding. - */ - return; - } - /* Swap in new table. */ - ttab = ckh->tab; - ckh->tab = tab; - tab = ttab; - ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS; - - if (ckh_rebuild(ckh, tab) == false) { - idalloc(tab); -#ifdef CKH_COUNT - ckh->nshrinks++; -#endif - return; - } - - /* Rebuilding failed, so back out partially rebuilt table. 
*/ - idalloc(ckh->tab); - ckh->tab = tab; - ckh->lg_curbuckets = lg_prevbuckets; -#ifdef CKH_COUNT - ckh->nshrinkfails++; -#endif -} - -bool -ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp) -{ - bool ret; - size_t mincells, usize; - unsigned lg_mincells; - - assert(minitems > 0); - assert(hash != NULL); - assert(keycomp != NULL); - -#ifdef CKH_COUNT - ckh->ngrows = 0; - ckh->nshrinks = 0; - ckh->nshrinkfails = 0; - ckh->ninserts = 0; - ckh->nrelocs = 0; -#endif - ckh->prng_state = 42; /* Value doesn't really matter. */ - ckh->count = 0; - - /* - * Find the minimum power of 2 that is large enough to fit aBaseCount - * entries. We are using (2+,2) cuckoo hashing, which has an expected - * maximum load factor of at least ~0.86, so 0.75 is a conservative load - * factor that will typically allow 2^aLgMinItems to fit without ever - * growing the table. - */ - assert(LG_CKH_BUCKET_CELLS > 0); - mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2; - for (lg_mincells = LG_CKH_BUCKET_CELLS; - (ZU(1) << lg_mincells) < mincells; - lg_mincells++) - ; /* Do nothing. */ - ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS; - ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS; - ckh->hash = hash; - ckh->keycomp = keycomp; - - usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE); - if (usize == 0) { - ret = true; - goto label_return; - } - ckh->tab = (ckhc_t *)ipalloc(usize, CACHELINE, true); - if (ckh->tab == NULL) { - ret = true; - goto label_return; - } - - ret = false; -label_return: - return (ret); -} - -void -ckh_delete(ckh_t *ckh) -{ - - assert(ckh != NULL); - -#ifdef CKH_VERBOSE - malloc_printf( - "%s(%p): ngrows: %"PRIu64", nshrinks: %"PRIu64"," - " nshrinkfails: %"PRIu64", ninserts: %"PRIu64"," - " nrelocs: %"PRIu64"\n", __func__, ckh, - (unsigned long long)ckh->ngrows, - (unsigned long long)ckh->nshrinks, - (unsigned long long)ckh->nshrinkfails, - (unsigned long long)ckh->ninserts, - (unsigned long long)ckh->nrelocs); -#endif - - idalloc(ckh->tab); - if (config_debug) - memset(ckh, 0x5a, sizeof(ckh_t)); -} - -size_t -ckh_count(ckh_t *ckh) -{ - - assert(ckh != NULL); - - return (ckh->count); -} - -bool -ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data) -{ - size_t i, ncells; - - for (i = *tabind, ncells = (ZU(1) << (ckh->lg_curbuckets + - LG_CKH_BUCKET_CELLS)); i < ncells; i++) { - if (ckh->tab[i].key != NULL) { - if (key != NULL) - *key = (void *)ckh->tab[i].key; - if (data != NULL) - *data = (void *)ckh->tab[i].data; - *tabind = i + 1; - return (false); - } - } - - return (true); -} - -bool -ckh_insert(ckh_t *ckh, const void *key, const void *data) -{ - bool ret; - - assert(ckh != NULL); - assert(ckh_search(ckh, key, NULL, NULL)); - -#ifdef CKH_COUNT - ckh->ninserts++; -#endif - - while (ckh_try_insert(ckh, &key, &data)) { - if (ckh_grow(ckh)) { - ret = true; - goto label_return; - } - } - - ret = false; -label_return: - return (ret); -} - -bool -ckh_remove(ckh_t *ckh, const void *searchkey, void **key, void **data) -{ - size_t cell; - - assert(ckh != NULL); - - cell = ckh_isearch(ckh, searchkey); - if (cell != SIZE_T_MAX) { - if (key != NULL) - *key = (void *)ckh->tab[cell].key; - if (data != NULL) - *data = (void *)ckh->tab[cell].data; - ckh->tab[cell].key = NULL; - ckh->tab[cell].data = NULL; /* Not necessary. */ - - ckh->count--; - /* Try to halve the table if it is less than 1/4 full. 
*/ - if (ckh->count < (ZU(1) << (ckh->lg_curbuckets - + LG_CKH_BUCKET_CELLS - 2)) && ckh->lg_curbuckets - > ckh->lg_minbuckets) { - /* Ignore error due to OOM. */ - ckh_shrink(ckh); - } - - return (false); - } - - return (true); -} - -bool -ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data) -{ - size_t cell; - - assert(ckh != NULL); - - cell = ckh_isearch(ckh, searchkey); - if (cell != SIZE_T_MAX) { - if (key != NULL) - *key = (void *)ckh->tab[cell].key; - if (data != NULL) - *data = (void *)ckh->tab[cell].data; - return (false); - } - - return (true); -} - -void -ckh_string_hash(const void *key, size_t r_hash[2]) -{ - - hash(key, strlen((const char *)key), 0x94122f33U, r_hash); -} - -bool -ckh_string_keycomp(const void *k1, const void *k2) -{ - - assert(k1 != NULL); - assert(k2 != NULL); - - return (strcmp((char *)k1, (char *)k2) ? false : true); -} - -void -ckh_pointer_hash(const void *key, size_t r_hash[2]) -{ - union { - const void *v; - size_t i; - } u; - - assert(sizeof(u.v) == sizeof(u.i)); - u.v = key; - hash(&u.i, sizeof(u.i), 0xd983396eU, r_hash); -} - -bool -ckh_pointer_keycomp(const void *k1, const void *k2) -{ - - return ((k1 == k2) ? true : false); -} diff --git a/lib/jemalloc-3.6.0/src/ctl.c b/lib/jemalloc-3.6.0/src/ctl.c deleted file mode 100644 index cc2c5ae..0000000 --- a/lib/jemalloc-3.6.0/src/ctl.c +++ /dev/null @@ -1,1684 +0,0 @@ -#define JEMALLOC_CTL_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ -/* Data. */ - -/* - * ctl_mtx protects the following: - * - ctl_stats.* - * - opt_prof_active - */ -static malloc_mutex_t ctl_mtx; -static bool ctl_initialized; -static uint64_t ctl_epoch; -static ctl_stats_t ctl_stats; - -/******************************************************************************/ -/* Helpers for named and indexed nodes. */ - -static inline const ctl_named_node_t * -ctl_named_node(const ctl_node_t *node) -{ - - return ((node->named) ? (const ctl_named_node_t *)node : NULL); -} - -static inline const ctl_named_node_t * -ctl_named_children(const ctl_named_node_t *node, int index) -{ - const ctl_named_node_t *children = ctl_named_node(node->children); - - return (children ? &children[index] : NULL); -} - -static inline const ctl_indexed_node_t * -ctl_indexed_node(const ctl_node_t *node) -{ - - return ((node->named == false) ? (const ctl_indexed_node_t *)node : - NULL); -} - -/******************************************************************************/ -/* Function prototypes for non-inline static functions. 
*/ - -#define CTL_PROTO(n) \ -static int n##_ctl(const size_t *mib, size_t miblen, void *oldp, \ - size_t *oldlenp, void *newp, size_t newlen); - -#define INDEX_PROTO(n) \ -static const ctl_named_node_t *n##_index(const size_t *mib, \ - size_t miblen, size_t i); - -static bool ctl_arena_init(ctl_arena_stats_t *astats); -static void ctl_arena_clear(ctl_arena_stats_t *astats); -static void ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, - arena_t *arena); -static void ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, - ctl_arena_stats_t *astats); -static void ctl_arena_refresh(arena_t *arena, unsigned i); -static bool ctl_grow(void); -static void ctl_refresh(void); -static bool ctl_init(void); -static int ctl_lookup(const char *name, ctl_node_t const **nodesp, - size_t *mibp, size_t *depthp); - -CTL_PROTO(version) -CTL_PROTO(epoch) -CTL_PROTO(thread_tcache_enabled) -CTL_PROTO(thread_tcache_flush) -CTL_PROTO(thread_arena) -CTL_PROTO(thread_allocated) -CTL_PROTO(thread_allocatedp) -CTL_PROTO(thread_deallocated) -CTL_PROTO(thread_deallocatedp) -CTL_PROTO(config_debug) -CTL_PROTO(config_dss) -CTL_PROTO(config_fill) -CTL_PROTO(config_lazy_lock) -CTL_PROTO(config_mremap) -CTL_PROTO(config_munmap) -CTL_PROTO(config_prof) -CTL_PROTO(config_prof_libgcc) -CTL_PROTO(config_prof_libunwind) -CTL_PROTO(config_stats) -CTL_PROTO(config_tcache) -CTL_PROTO(config_tls) -CTL_PROTO(config_utrace) -CTL_PROTO(config_valgrind) -CTL_PROTO(config_xmalloc) -CTL_PROTO(opt_abort) -CTL_PROTO(opt_dss) -CTL_PROTO(opt_lg_chunk) -CTL_PROTO(opt_narenas) -CTL_PROTO(opt_lg_dirty_mult) -CTL_PROTO(opt_stats_print) -CTL_PROTO(opt_junk) -CTL_PROTO(opt_zero) -CTL_PROTO(opt_quarantine) -CTL_PROTO(opt_redzone) -CTL_PROTO(opt_utrace) -CTL_PROTO(opt_valgrind) -CTL_PROTO(opt_xmalloc) -CTL_PROTO(opt_tcache) -CTL_PROTO(opt_lg_tcache_max) -CTL_PROTO(opt_prof) -CTL_PROTO(opt_prof_prefix) -CTL_PROTO(opt_prof_active) -CTL_PROTO(opt_lg_prof_sample) -CTL_PROTO(opt_lg_prof_interval) -CTL_PROTO(opt_prof_gdump) -CTL_PROTO(opt_prof_final) -CTL_PROTO(opt_prof_leak) -CTL_PROTO(opt_prof_accum) -CTL_PROTO(arena_i_purge) -static void arena_purge(unsigned arena_ind); -CTL_PROTO(arena_i_dss) -INDEX_PROTO(arena_i) -CTL_PROTO(arenas_bin_i_size) -CTL_PROTO(arenas_bin_i_nregs) -CTL_PROTO(arenas_bin_i_run_size) -INDEX_PROTO(arenas_bin_i) -CTL_PROTO(arenas_lrun_i_size) -INDEX_PROTO(arenas_lrun_i) -CTL_PROTO(arenas_narenas) -CTL_PROTO(arenas_initialized) -CTL_PROTO(arenas_quantum) -CTL_PROTO(arenas_page) -CTL_PROTO(arenas_tcache_max) -CTL_PROTO(arenas_nbins) -CTL_PROTO(arenas_nhbins) -CTL_PROTO(arenas_nlruns) -CTL_PROTO(arenas_purge) -CTL_PROTO(arenas_extend) -CTL_PROTO(prof_active) -CTL_PROTO(prof_dump) -CTL_PROTO(prof_interval) -CTL_PROTO(stats_chunks_current) -CTL_PROTO(stats_chunks_total) -CTL_PROTO(stats_chunks_high) -CTL_PROTO(stats_huge_allocated) -CTL_PROTO(stats_huge_nmalloc) -CTL_PROTO(stats_huge_ndalloc) -CTL_PROTO(stats_arenas_i_small_allocated) -CTL_PROTO(stats_arenas_i_small_nmalloc) -CTL_PROTO(stats_arenas_i_small_ndalloc) -CTL_PROTO(stats_arenas_i_small_nrequests) -CTL_PROTO(stats_arenas_i_large_allocated) -CTL_PROTO(stats_arenas_i_large_nmalloc) -CTL_PROTO(stats_arenas_i_large_ndalloc) -CTL_PROTO(stats_arenas_i_large_nrequests) -CTL_PROTO(stats_arenas_i_bins_j_allocated) -CTL_PROTO(stats_arenas_i_bins_j_nmalloc) -CTL_PROTO(stats_arenas_i_bins_j_ndalloc) -CTL_PROTO(stats_arenas_i_bins_j_nrequests) -CTL_PROTO(stats_arenas_i_bins_j_nfills) -CTL_PROTO(stats_arenas_i_bins_j_nflushes) -CTL_PROTO(stats_arenas_i_bins_j_nruns) 
-CTL_PROTO(stats_arenas_i_bins_j_nreruns) -CTL_PROTO(stats_arenas_i_bins_j_curruns) -INDEX_PROTO(stats_arenas_i_bins_j) -CTL_PROTO(stats_arenas_i_lruns_j_nmalloc) -CTL_PROTO(stats_arenas_i_lruns_j_ndalloc) -CTL_PROTO(stats_arenas_i_lruns_j_nrequests) -CTL_PROTO(stats_arenas_i_lruns_j_curruns) -INDEX_PROTO(stats_arenas_i_lruns_j) -CTL_PROTO(stats_arenas_i_nthreads) -CTL_PROTO(stats_arenas_i_dss) -CTL_PROTO(stats_arenas_i_pactive) -CTL_PROTO(stats_arenas_i_pdirty) -CTL_PROTO(stats_arenas_i_mapped) -CTL_PROTO(stats_arenas_i_npurge) -CTL_PROTO(stats_arenas_i_nmadvise) -CTL_PROTO(stats_arenas_i_purged) -INDEX_PROTO(stats_arenas_i) -CTL_PROTO(stats_cactive) -CTL_PROTO(stats_allocated) -CTL_PROTO(stats_active) -CTL_PROTO(stats_mapped) - -/******************************************************************************/ -/* mallctl tree. */ - -/* Maximum tree depth. */ -#define CTL_MAX_DEPTH 6 - -#define NAME(n) {true}, n -#define CHILD(t, c) \ - sizeof(c##_node) / sizeof(ctl_##t##_node_t), \ - (ctl_node_t *)c##_node, \ - NULL -#define CTL(c) 0, NULL, c##_ctl - -/* - * Only handles internal indexed nodes, since there are currently no external - * ones. - */ -#define INDEX(i) {false}, i##_index - -static const ctl_named_node_t tcache_node[] = { - {NAME("enabled"), CTL(thread_tcache_enabled)}, - {NAME("flush"), CTL(thread_tcache_flush)} -}; - -static const ctl_named_node_t thread_node[] = { - {NAME("arena"), CTL(thread_arena)}, - {NAME("allocated"), CTL(thread_allocated)}, - {NAME("allocatedp"), CTL(thread_allocatedp)}, - {NAME("deallocated"), CTL(thread_deallocated)}, - {NAME("deallocatedp"), CTL(thread_deallocatedp)}, - {NAME("tcache"), CHILD(named, tcache)} -}; - -static const ctl_named_node_t config_node[] = { - {NAME("debug"), CTL(config_debug)}, - {NAME("dss"), CTL(config_dss)}, - {NAME("fill"), CTL(config_fill)}, - {NAME("lazy_lock"), CTL(config_lazy_lock)}, - {NAME("mremap"), CTL(config_mremap)}, - {NAME("munmap"), CTL(config_munmap)}, - {NAME("prof"), CTL(config_prof)}, - {NAME("prof_libgcc"), CTL(config_prof_libgcc)}, - {NAME("prof_libunwind"), CTL(config_prof_libunwind)}, - {NAME("stats"), CTL(config_stats)}, - {NAME("tcache"), CTL(config_tcache)}, - {NAME("tls"), CTL(config_tls)}, - {NAME("utrace"), CTL(config_utrace)}, - {NAME("valgrind"), CTL(config_valgrind)}, - {NAME("xmalloc"), CTL(config_xmalloc)} -}; - -static const ctl_named_node_t opt_node[] = { - {NAME("abort"), CTL(opt_abort)}, - {NAME("dss"), CTL(opt_dss)}, - {NAME("lg_chunk"), CTL(opt_lg_chunk)}, - {NAME("narenas"), CTL(opt_narenas)}, - {NAME("lg_dirty_mult"), CTL(opt_lg_dirty_mult)}, - {NAME("stats_print"), CTL(opt_stats_print)}, - {NAME("junk"), CTL(opt_junk)}, - {NAME("zero"), CTL(opt_zero)}, - {NAME("quarantine"), CTL(opt_quarantine)}, - {NAME("redzone"), CTL(opt_redzone)}, - {NAME("utrace"), CTL(opt_utrace)}, - {NAME("valgrind"), CTL(opt_valgrind)}, - {NAME("xmalloc"), CTL(opt_xmalloc)}, - {NAME("tcache"), CTL(opt_tcache)}, - {NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)}, - {NAME("prof"), CTL(opt_prof)}, - {NAME("prof_prefix"), CTL(opt_prof_prefix)}, - {NAME("prof_active"), CTL(opt_prof_active)}, - {NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)}, - {NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)}, - {NAME("prof_gdump"), CTL(opt_prof_gdump)}, - {NAME("prof_final"), CTL(opt_prof_final)}, - {NAME("prof_leak"), CTL(opt_prof_leak)}, - {NAME("prof_accum"), CTL(opt_prof_accum)} -}; - -static const ctl_named_node_t arena_i_node[] = { - {NAME("purge"), CTL(arena_i_purge)}, - {NAME("dss"), CTL(arena_i_dss)} -}; -static 
const ctl_named_node_t super_arena_i_node[] = { - {NAME(""), CHILD(named, arena_i)} -}; - -static const ctl_indexed_node_t arena_node[] = { - {INDEX(arena_i)} -}; - -static const ctl_named_node_t arenas_bin_i_node[] = { - {NAME("size"), CTL(arenas_bin_i_size)}, - {NAME("nregs"), CTL(arenas_bin_i_nregs)}, - {NAME("run_size"), CTL(arenas_bin_i_run_size)} -}; -static const ctl_named_node_t super_arenas_bin_i_node[] = { - {NAME(""), CHILD(named, arenas_bin_i)} -}; - -static const ctl_indexed_node_t arenas_bin_node[] = { - {INDEX(arenas_bin_i)} -}; - -static const ctl_named_node_t arenas_lrun_i_node[] = { - {NAME("size"), CTL(arenas_lrun_i_size)} -}; -static const ctl_named_node_t super_arenas_lrun_i_node[] = { - {NAME(""), CHILD(named, arenas_lrun_i)} -}; - -static const ctl_indexed_node_t arenas_lrun_node[] = { - {INDEX(arenas_lrun_i)} -}; - -static const ctl_named_node_t arenas_node[] = { - {NAME("narenas"), CTL(arenas_narenas)}, - {NAME("initialized"), CTL(arenas_initialized)}, - {NAME("quantum"), CTL(arenas_quantum)}, - {NAME("page"), CTL(arenas_page)}, - {NAME("tcache_max"), CTL(arenas_tcache_max)}, - {NAME("nbins"), CTL(arenas_nbins)}, - {NAME("nhbins"), CTL(arenas_nhbins)}, - {NAME("bin"), CHILD(indexed, arenas_bin)}, - {NAME("nlruns"), CTL(arenas_nlruns)}, - {NAME("lrun"), CHILD(indexed, arenas_lrun)}, - {NAME("purge"), CTL(arenas_purge)}, - {NAME("extend"), CTL(arenas_extend)} -}; - -static const ctl_named_node_t prof_node[] = { - {NAME("active"), CTL(prof_active)}, - {NAME("dump"), CTL(prof_dump)}, - {NAME("interval"), CTL(prof_interval)} -}; - -static const ctl_named_node_t stats_chunks_node[] = { - {NAME("current"), CTL(stats_chunks_current)}, - {NAME("total"), CTL(stats_chunks_total)}, - {NAME("high"), CTL(stats_chunks_high)} -}; - -static const ctl_named_node_t stats_huge_node[] = { - {NAME("allocated"), CTL(stats_huge_allocated)}, - {NAME("nmalloc"), CTL(stats_huge_nmalloc)}, - {NAME("ndalloc"), CTL(stats_huge_ndalloc)} -}; - -static const ctl_named_node_t stats_arenas_i_small_node[] = { - {NAME("allocated"), CTL(stats_arenas_i_small_allocated)}, - {NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)}, - {NAME("ndalloc"), CTL(stats_arenas_i_small_ndalloc)}, - {NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)} -}; - -static const ctl_named_node_t stats_arenas_i_large_node[] = { - {NAME("allocated"), CTL(stats_arenas_i_large_allocated)}, - {NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)}, - {NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)}, - {NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)} -}; - -static const ctl_named_node_t stats_arenas_i_bins_j_node[] = { - {NAME("allocated"), CTL(stats_arenas_i_bins_j_allocated)}, - {NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)}, - {NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)}, - {NAME("nrequests"), CTL(stats_arenas_i_bins_j_nrequests)}, - {NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)}, - {NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)}, - {NAME("nruns"), CTL(stats_arenas_i_bins_j_nruns)}, - {NAME("nreruns"), CTL(stats_arenas_i_bins_j_nreruns)}, - {NAME("curruns"), CTL(stats_arenas_i_bins_j_curruns)} -}; -static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = { - {NAME(""), CHILD(named, stats_arenas_i_bins_j)} -}; - -static const ctl_indexed_node_t stats_arenas_i_bins_node[] = { - {INDEX(stats_arenas_i_bins_j)} -}; - -static const ctl_named_node_t stats_arenas_i_lruns_j_node[] = { - {NAME("nmalloc"), CTL(stats_arenas_i_lruns_j_nmalloc)}, - {NAME("ndalloc"), 
CTL(stats_arenas_i_lruns_j_ndalloc)}, - {NAME("nrequests"), CTL(stats_arenas_i_lruns_j_nrequests)}, - {NAME("curruns"), CTL(stats_arenas_i_lruns_j_curruns)} -}; -static const ctl_named_node_t super_stats_arenas_i_lruns_j_node[] = { - {NAME(""), CHILD(named, stats_arenas_i_lruns_j)} -}; - -static const ctl_indexed_node_t stats_arenas_i_lruns_node[] = { - {INDEX(stats_arenas_i_lruns_j)} -}; - -static const ctl_named_node_t stats_arenas_i_node[] = { - {NAME("nthreads"), CTL(stats_arenas_i_nthreads)}, - {NAME("dss"), CTL(stats_arenas_i_dss)}, - {NAME("pactive"), CTL(stats_arenas_i_pactive)}, - {NAME("pdirty"), CTL(stats_arenas_i_pdirty)}, - {NAME("mapped"), CTL(stats_arenas_i_mapped)}, - {NAME("npurge"), CTL(stats_arenas_i_npurge)}, - {NAME("nmadvise"), CTL(stats_arenas_i_nmadvise)}, - {NAME("purged"), CTL(stats_arenas_i_purged)}, - {NAME("small"), CHILD(named, stats_arenas_i_small)}, - {NAME("large"), CHILD(named, stats_arenas_i_large)}, - {NAME("bins"), CHILD(indexed, stats_arenas_i_bins)}, - {NAME("lruns"), CHILD(indexed, stats_arenas_i_lruns)} -}; -static const ctl_named_node_t super_stats_arenas_i_node[] = { - {NAME(""), CHILD(named, stats_arenas_i)} -}; - -static const ctl_indexed_node_t stats_arenas_node[] = { - {INDEX(stats_arenas_i)} -}; - -static const ctl_named_node_t stats_node[] = { - {NAME("cactive"), CTL(stats_cactive)}, - {NAME("allocated"), CTL(stats_allocated)}, - {NAME("active"), CTL(stats_active)}, - {NAME("mapped"), CTL(stats_mapped)}, - {NAME("chunks"), CHILD(named, stats_chunks)}, - {NAME("huge"), CHILD(named, stats_huge)}, - {NAME("arenas"), CHILD(indexed, stats_arenas)} -}; - -static const ctl_named_node_t root_node[] = { - {NAME("version"), CTL(version)}, - {NAME("epoch"), CTL(epoch)}, - {NAME("thread"), CHILD(named, thread)}, - {NAME("config"), CHILD(named, config)}, - {NAME("opt"), CHILD(named, opt)}, - {NAME("arena"), CHILD(indexed, arena)}, - {NAME("arenas"), CHILD(named, arenas)}, - {NAME("prof"), CHILD(named, prof)}, - {NAME("stats"), CHILD(named, stats)} -}; -static const ctl_named_node_t super_root_node[] = { - {NAME(""), CHILD(named, root)} -}; - -#undef NAME -#undef CHILD -#undef CTL -#undef INDEX - -/******************************************************************************/ - -static bool -ctl_arena_init(ctl_arena_stats_t *astats) -{ - - if (astats->lstats == NULL) { - astats->lstats = (malloc_large_stats_t *)base_alloc(nlclasses * - sizeof(malloc_large_stats_t)); - if (astats->lstats == NULL) - return (true); - } - - return (false); -} - -static void -ctl_arena_clear(ctl_arena_stats_t *astats) -{ - - astats->dss = dss_prec_names[dss_prec_limit]; - astats->pactive = 0; - astats->pdirty = 0; - if (config_stats) { - memset(&astats->astats, 0, sizeof(arena_stats_t)); - astats->allocated_small = 0; - astats->nmalloc_small = 0; - astats->ndalloc_small = 0; - astats->nrequests_small = 0; - memset(astats->bstats, 0, NBINS * sizeof(malloc_bin_stats_t)); - memset(astats->lstats, 0, nlclasses * - sizeof(malloc_large_stats_t)); - } -} - -static void -ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena) -{ - unsigned i; - - arena_stats_merge(arena, &cstats->dss, &cstats->pactive, - &cstats->pdirty, &cstats->astats, cstats->bstats, cstats->lstats); - - for (i = 0; i < NBINS; i++) { - cstats->allocated_small += cstats->bstats[i].allocated; - cstats->nmalloc_small += cstats->bstats[i].nmalloc; - cstats->ndalloc_small += cstats->bstats[i].ndalloc; - cstats->nrequests_small += cstats->bstats[i].nrequests; - } -} - -static void 
-ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats) -{ - unsigned i; - - sstats->pactive += astats->pactive; - sstats->pdirty += astats->pdirty; - - sstats->astats.mapped += astats->astats.mapped; - sstats->astats.npurge += astats->astats.npurge; - sstats->astats.nmadvise += astats->astats.nmadvise; - sstats->astats.purged += astats->astats.purged; - - sstats->allocated_small += astats->allocated_small; - sstats->nmalloc_small += astats->nmalloc_small; - sstats->ndalloc_small += astats->ndalloc_small; - sstats->nrequests_small += astats->nrequests_small; - - sstats->astats.allocated_large += astats->astats.allocated_large; - sstats->astats.nmalloc_large += astats->astats.nmalloc_large; - sstats->astats.ndalloc_large += astats->astats.ndalloc_large; - sstats->astats.nrequests_large += astats->astats.nrequests_large; - - for (i = 0; i < nlclasses; i++) { - sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc; - sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc; - sstats->lstats[i].nrequests += astats->lstats[i].nrequests; - sstats->lstats[i].curruns += astats->lstats[i].curruns; - } - - for (i = 0; i < NBINS; i++) { - sstats->bstats[i].allocated += astats->bstats[i].allocated; - sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc; - sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc; - sstats->bstats[i].nrequests += astats->bstats[i].nrequests; - if (config_tcache) { - sstats->bstats[i].nfills += astats->bstats[i].nfills; - sstats->bstats[i].nflushes += - astats->bstats[i].nflushes; - } - sstats->bstats[i].nruns += astats->bstats[i].nruns; - sstats->bstats[i].reruns += astats->bstats[i].reruns; - sstats->bstats[i].curruns += astats->bstats[i].curruns; - } -} - -static void -ctl_arena_refresh(arena_t *arena, unsigned i) -{ - ctl_arena_stats_t *astats = &ctl_stats.arenas[i]; - ctl_arena_stats_t *sstats = &ctl_stats.arenas[ctl_stats.narenas]; - - ctl_arena_clear(astats); - - sstats->nthreads += astats->nthreads; - if (config_stats) { - ctl_arena_stats_amerge(astats, arena); - /* Merge into sum stats as well. */ - ctl_arena_stats_smerge(sstats, astats); - } else { - astats->pactive += arena->nactive; - astats->pdirty += arena->ndirty; - /* Merge into sum stats as well. */ - sstats->pactive += arena->nactive; - sstats->pdirty += arena->ndirty; - } -} - -static bool -ctl_grow(void) -{ - ctl_arena_stats_t *astats; - arena_t **tarenas; - - /* Allocate extended arena stats and arenas arrays. */ - astats = (ctl_arena_stats_t *)imalloc((ctl_stats.narenas + 2) * - sizeof(ctl_arena_stats_t)); - if (astats == NULL) - return (true); - tarenas = (arena_t **)imalloc((ctl_stats.narenas + 1) * - sizeof(arena_t *)); - if (tarenas == NULL) { - idalloc(astats); - return (true); - } - - /* Initialize the new astats element. */ - memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) * - sizeof(ctl_arena_stats_t)); - memset(&astats[ctl_stats.narenas + 1], 0, sizeof(ctl_arena_stats_t)); - if (ctl_arena_init(&astats[ctl_stats.narenas + 1])) { - idalloc(tarenas); - idalloc(astats); - return (true); - } - /* Swap merged stats to their new location. */ - { - ctl_arena_stats_t tstats; - memcpy(&tstats, &astats[ctl_stats.narenas], - sizeof(ctl_arena_stats_t)); - memcpy(&astats[ctl_stats.narenas], - &astats[ctl_stats.narenas + 1], sizeof(ctl_arena_stats_t)); - memcpy(&astats[ctl_stats.narenas + 1], &tstats, - sizeof(ctl_arena_stats_t)); - } - /* Initialize the new arenas element. 
*/ - tarenas[ctl_stats.narenas] = NULL; - { - arena_t **arenas_old = arenas; - /* - * Swap extended arenas array into place. Although ctl_mtx - * protects this function from other threads extending the - * array, it does not protect from other threads mutating it - * (i.e. initializing arenas and setting array elements to - * point to them). Therefore, array copying must happen under - * the protection of arenas_lock. - */ - malloc_mutex_lock(&arenas_lock); - arenas = tarenas; - memcpy(arenas, arenas_old, ctl_stats.narenas * - sizeof(arena_t *)); - narenas_total++; - arenas_extend(narenas_total - 1); - malloc_mutex_unlock(&arenas_lock); - /* - * Deallocate arenas_old only if it came from imalloc() (not - * base_alloc()). - */ - if (ctl_stats.narenas != narenas_auto) - idalloc(arenas_old); - } - ctl_stats.arenas = astats; - ctl_stats.narenas++; - - return (false); -} - -static void -ctl_refresh(void) -{ - unsigned i; - VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas); - - if (config_stats) { - malloc_mutex_lock(&chunks_mtx); - ctl_stats.chunks.current = stats_chunks.curchunks; - ctl_stats.chunks.total = stats_chunks.nchunks; - ctl_stats.chunks.high = stats_chunks.highchunks; - malloc_mutex_unlock(&chunks_mtx); - - malloc_mutex_lock(&huge_mtx); - ctl_stats.huge.allocated = huge_allocated; - ctl_stats.huge.nmalloc = huge_nmalloc; - ctl_stats.huge.ndalloc = huge_ndalloc; - malloc_mutex_unlock(&huge_mtx); - } - - /* - * Clear sum stats, since they will be merged into by - * ctl_arena_refresh(). - */ - ctl_stats.arenas[ctl_stats.narenas].nthreads = 0; - ctl_arena_clear(&ctl_stats.arenas[ctl_stats.narenas]); - - malloc_mutex_lock(&arenas_lock); - memcpy(tarenas, arenas, sizeof(arena_t *) * ctl_stats.narenas); - for (i = 0; i < ctl_stats.narenas; i++) { - if (arenas[i] != NULL) - ctl_stats.arenas[i].nthreads = arenas[i]->nthreads; - else - ctl_stats.arenas[i].nthreads = 0; - } - malloc_mutex_unlock(&arenas_lock); - for (i = 0; i < ctl_stats.narenas; i++) { - bool initialized = (tarenas[i] != NULL); - - ctl_stats.arenas[i].initialized = initialized; - if (initialized) - ctl_arena_refresh(tarenas[i], i); - } - - if (config_stats) { - ctl_stats.allocated = - ctl_stats.arenas[ctl_stats.narenas].allocated_small - + ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large - + ctl_stats.huge.allocated; - ctl_stats.active = - (ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE) - + ctl_stats.huge.allocated; - ctl_stats.mapped = (ctl_stats.chunks.current << opt_lg_chunk); - } - - ctl_epoch++; -} - -static bool -ctl_init(void) -{ - bool ret; - - malloc_mutex_lock(&ctl_mtx); - if (ctl_initialized == false) { - /* - * Allocate space for one extra arena stats element, which - * contains summed stats across all arenas. - */ - assert(narenas_auto == narenas_total_get()); - ctl_stats.narenas = narenas_auto; - ctl_stats.arenas = (ctl_arena_stats_t *)base_alloc( - (ctl_stats.narenas + 1) * sizeof(ctl_arena_stats_t)); - if (ctl_stats.arenas == NULL) { - ret = true; - goto label_return; - } - memset(ctl_stats.arenas, 0, (ctl_stats.narenas + 1) * - sizeof(ctl_arena_stats_t)); - - /* - * Initialize all stats structures, regardless of whether they - * ever get used. Lazy initialization would allow errors to - * cause inconsistent state to be viewable by the application. 
- */ - if (config_stats) { - unsigned i; - for (i = 0; i <= ctl_stats.narenas; i++) { - if (ctl_arena_init(&ctl_stats.arenas[i])) { - ret = true; - goto label_return; - } - } - } - ctl_stats.arenas[ctl_stats.narenas].initialized = true; - - ctl_epoch = 0; - ctl_refresh(); - ctl_initialized = true; - } - - ret = false; -label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); -} - -static int -ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp, - size_t *depthp) -{ - int ret; - const char *elm, *tdot, *dot; - size_t elen, i, j; - const ctl_named_node_t *node; - - elm = name; - /* Equivalent to strchrnul(). */ - dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : strchr(elm, '\0'); - elen = (size_t)((uintptr_t)dot - (uintptr_t)elm); - if (elen == 0) { - ret = ENOENT; - goto label_return; - } - node = super_root_node; - for (i = 0; i < *depthp; i++) { - assert(node); - assert(node->nchildren > 0); - if (ctl_named_node(node->children) != NULL) { - const ctl_named_node_t *pnode = node; - - /* Children are named. */ - for (j = 0; j < node->nchildren; j++) { - const ctl_named_node_t *child = - ctl_named_children(node, j); - if (strlen(child->name) == elen && - strncmp(elm, child->name, elen) == 0) { - node = child; - if (nodesp != NULL) - nodesp[i] = - (const ctl_node_t *)node; - mibp[i] = j; - break; - } - } - if (node == pnode) { - ret = ENOENT; - goto label_return; - } - } else { - uintmax_t index; - const ctl_indexed_node_t *inode; - - /* Children are indexed. */ - index = malloc_strtoumax(elm, NULL, 10); - if (index == UINTMAX_MAX || index > SIZE_T_MAX) { - ret = ENOENT; - goto label_return; - } - - inode = ctl_indexed_node(node->children); - node = inode->index(mibp, *depthp, (size_t)index); - if (node == NULL) { - ret = ENOENT; - goto label_return; - } - - if (nodesp != NULL) - nodesp[i] = (const ctl_node_t *)node; - mibp[i] = (size_t)index; - } - - if (node->ctl != NULL) { - /* Terminal node. */ - if (*dot != '\0') { - /* - * The name contains more elements than are - * in this path through the tree. - */ - ret = ENOENT; - goto label_return; - } - /* Complete lookup successful. */ - *depthp = i + 1; - break; - } - - /* Update elm. */ - if (*dot == '\0') { - /* No more elements. */ - ret = ENOENT; - goto label_return; - } - elm = &dot[1]; - dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : - strchr(elm, '\0'); - elen = (size_t)((uintptr_t)dot - (uintptr_t)elm); - } - - ret = 0; -label_return: - return (ret); -} - -int -ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp, - size_t newlen) -{ - int ret; - size_t depth; - ctl_node_t const *nodes[CTL_MAX_DEPTH]; - size_t mib[CTL_MAX_DEPTH]; - const ctl_named_node_t *node; - - if (ctl_initialized == false && ctl_init()) { - ret = EAGAIN; - goto label_return; - } - - depth = CTL_MAX_DEPTH; - ret = ctl_lookup(name, nodes, mib, &depth); - if (ret != 0) - goto label_return; - - node = ctl_named_node(nodes[depth-1]); - if (node != NULL && node->ctl) - ret = node->ctl(mib, depth, oldp, oldlenp, newp, newlen); - else { - /* The name refers to a partial path through the ctl tree. 
*/ - ret = ENOENT; - } - -label_return: - return(ret); -} - -int -ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp) -{ - int ret; - - if (ctl_initialized == false && ctl_init()) { - ret = EAGAIN; - goto label_return; - } - - ret = ctl_lookup(name, NULL, mibp, miblenp); -label_return: - return(ret); -} - -int -ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ - int ret; - const ctl_named_node_t *node; - size_t i; - - if (ctl_initialized == false && ctl_init()) { - ret = EAGAIN; - goto label_return; - } - - /* Iterate down the tree. */ - node = super_root_node; - for (i = 0; i < miblen; i++) { - assert(node); - assert(node->nchildren > 0); - if (ctl_named_node(node->children) != NULL) { - /* Children are named. */ - if (node->nchildren <= mib[i]) { - ret = ENOENT; - goto label_return; - } - node = ctl_named_children(node, mib[i]); - } else { - const ctl_indexed_node_t *inode; - - /* Indexed element. */ - inode = ctl_indexed_node(node->children); - node = inode->index(mib, miblen, mib[i]); - if (node == NULL) { - ret = ENOENT; - goto label_return; - } - } - } - - /* Call the ctl function. */ - if (node && node->ctl) - ret = node->ctl(mib, miblen, oldp, oldlenp, newp, newlen); - else { - /* Partial MIB. */ - ret = ENOENT; - } - -label_return: - return(ret); -} - -bool -ctl_boot(void) -{ - - if (malloc_mutex_init(&ctl_mtx)) - return (true); - - ctl_initialized = false; - - return (false); -} - -void -ctl_prefork(void) -{ - - malloc_mutex_prefork(&ctl_mtx); -} - -void -ctl_postfork_parent(void) -{ - - malloc_mutex_postfork_parent(&ctl_mtx); -} - -void -ctl_postfork_child(void) -{ - - malloc_mutex_postfork_child(&ctl_mtx); -} - -/******************************************************************************/ -/* *_ctl() functions. */ - -#define READONLY() do { \ - if (newp != NULL || newlen != 0) { \ - ret = EPERM; \ - goto label_return; \ - } \ -} while (0) - -#define WRITEONLY() do { \ - if (oldp != NULL || oldlenp != NULL) { \ - ret = EPERM; \ - goto label_return; \ - } \ -} while (0) - -#define READ(v, t) do { \ - if (oldp != NULL && oldlenp != NULL) { \ - if (*oldlenp != sizeof(t)) { \ - size_t copylen = (sizeof(t) <= *oldlenp) \ - ? sizeof(t) : *oldlenp; \ - memcpy(oldp, (void *)&(v), copylen); \ - ret = EINVAL; \ - goto label_return; \ - } else \ - *(t *)oldp = (v); \ - } \ -} while (0) - -#define WRITE(v, t) do { \ - if (newp != NULL) { \ - if (newlen != sizeof(t)) { \ - ret = EINVAL; \ - goto label_return; \ - } \ - (v) = *(t *)newp; \ - } \ -} while (0) - -/* - * There's a lot of code duplication in the following macros due to limitations - * in how nested cpp macros are expanded. 
- */ -#define CTL_RO_CLGEN(c, l, n, v, t) \ -static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ -{ \ - int ret; \ - t oldval; \ - \ - if ((c) == false) \ - return (ENOENT); \ - if (l) \ - malloc_mutex_lock(&ctl_mtx); \ - READONLY(); \ - oldval = (v); \ - READ(oldval, t); \ - \ - ret = 0; \ -label_return: \ - if (l) \ - malloc_mutex_unlock(&ctl_mtx); \ - return (ret); \ -} - -#define CTL_RO_CGEN(c, n, v, t) \ -static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ -{ \ - int ret; \ - t oldval; \ - \ - if ((c) == false) \ - return (ENOENT); \ - malloc_mutex_lock(&ctl_mtx); \ - READONLY(); \ - oldval = (v); \ - READ(oldval, t); \ - \ - ret = 0; \ -label_return: \ - malloc_mutex_unlock(&ctl_mtx); \ - return (ret); \ -} - -#define CTL_RO_GEN(n, v, t) \ -static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ -{ \ - int ret; \ - t oldval; \ - \ - malloc_mutex_lock(&ctl_mtx); \ - READONLY(); \ - oldval = (v); \ - READ(oldval, t); \ - \ - ret = 0; \ -label_return: \ - malloc_mutex_unlock(&ctl_mtx); \ - return (ret); \ -} - -/* - * ctl_mtx is not acquired, under the assumption that no pertinent data will - * mutate during the call. - */ -#define CTL_RO_NL_CGEN(c, n, v, t) \ -static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ -{ \ - int ret; \ - t oldval; \ - \ - if ((c) == false) \ - return (ENOENT); \ - READONLY(); \ - oldval = (v); \ - READ(oldval, t); \ - \ - ret = 0; \ -label_return: \ - return (ret); \ -} - -#define CTL_RO_NL_GEN(n, v, t) \ -static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ -{ \ - int ret; \ - t oldval; \ - \ - READONLY(); \ - oldval = (v); \ - READ(oldval, t); \ - \ - ret = 0; \ -label_return: \ - return (ret); \ -} - -#define CTL_RO_BOOL_CONFIG_GEN(n) \ -static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ -{ \ - int ret; \ - bool oldval; \ - \ - READONLY(); \ - oldval = n; \ - READ(oldval, bool); \ - \ - ret = 0; \ -label_return: \ - return (ret); \ -} - -/******************************************************************************/ - -CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *) - -static int -epoch_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ - int ret; - UNUSED uint64_t newval; - - malloc_mutex_lock(&ctl_mtx); - WRITE(newval, uint64_t); - if (newp != NULL) - ctl_refresh(); - READ(ctl_epoch, uint64_t); - - ret = 0; -label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); -} - -/******************************************************************************/ - -CTL_RO_BOOL_CONFIG_GEN(config_debug) -CTL_RO_BOOL_CONFIG_GEN(config_dss) -CTL_RO_BOOL_CONFIG_GEN(config_fill) -CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock) -CTL_RO_BOOL_CONFIG_GEN(config_mremap) -CTL_RO_BOOL_CONFIG_GEN(config_munmap) -CTL_RO_BOOL_CONFIG_GEN(config_prof) -CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc) -CTL_RO_BOOL_CONFIG_GEN(config_prof_libunwind) -CTL_RO_BOOL_CONFIG_GEN(config_stats) -CTL_RO_BOOL_CONFIG_GEN(config_tcache) -CTL_RO_BOOL_CONFIG_GEN(config_tls) -CTL_RO_BOOL_CONFIG_GEN(config_utrace) -CTL_RO_BOOL_CONFIG_GEN(config_valgrind) -CTL_RO_BOOL_CONFIG_GEN(config_xmalloc) - 
-/******************************************************************************/ - -CTL_RO_NL_GEN(opt_abort, opt_abort, bool) -CTL_RO_NL_GEN(opt_dss, opt_dss, const char *) -CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t) -CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t) -CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t) -CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool) -CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, bool) -CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t) -CTL_RO_NL_CGEN(config_fill, opt_redzone, opt_redzone, bool) -CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool) -CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool) -CTL_RO_NL_CGEN(config_valgrind, opt_valgrind, opt_valgrind, bool) -CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool) -CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool) -CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_max, opt_lg_tcache_max, ssize_t) -CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool) -CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *) -CTL_RO_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) /* Mutable. */ -CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t) -CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool) -CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t) -CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool) -CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool) -CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool) - -/******************************************************************************/ - -static int -thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ - int ret; - unsigned newind, oldind; - - malloc_mutex_lock(&ctl_mtx); - newind = oldind = choose_arena(NULL)->ind; - WRITE(newind, unsigned); - READ(oldind, unsigned); - if (newind != oldind) { - arena_t *arena; - - if (newind >= ctl_stats.narenas) { - /* New arena index is out of range. */ - ret = EFAULT; - goto label_return; - } - - /* Initialize arena if necessary. */ - malloc_mutex_lock(&arenas_lock); - if ((arena = arenas[newind]) == NULL && (arena = - arenas_extend(newind)) == NULL) { - malloc_mutex_unlock(&arenas_lock); - ret = EAGAIN; - goto label_return; - } - assert(arena == arenas[newind]); - arenas[oldind]->nthreads--; - arenas[newind]->nthreads++; - malloc_mutex_unlock(&arenas_lock); - - /* Set new arena association. 
*/ - if (config_tcache) { - tcache_t *tcache; - if ((uintptr_t)(tcache = *tcache_tsd_get()) > - (uintptr_t)TCACHE_STATE_MAX) { - tcache_arena_dissociate(tcache); - tcache_arena_associate(tcache, arena); - } - } - arenas_tsd_set(&arena); - } - - ret = 0; -label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); -} - -CTL_RO_NL_CGEN(config_stats, thread_allocated, - thread_allocated_tsd_get()->allocated, uint64_t) -CTL_RO_NL_CGEN(config_stats, thread_allocatedp, - &thread_allocated_tsd_get()->allocated, uint64_t *) -CTL_RO_NL_CGEN(config_stats, thread_deallocated, - thread_allocated_tsd_get()->deallocated, uint64_t) -CTL_RO_NL_CGEN(config_stats, thread_deallocatedp, - &thread_allocated_tsd_get()->deallocated, uint64_t *) - -static int -thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ - int ret; - bool oldval; - - if (config_tcache == false) - return (ENOENT); - - oldval = tcache_enabled_get(); - if (newp != NULL) { - if (newlen != sizeof(bool)) { - ret = EINVAL; - goto label_return; - } - tcache_enabled_set(*(bool *)newp); - } - READ(oldval, bool); - - ret = 0; -label_return: - return (ret); -} - -static int -thread_tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ - int ret; - - if (config_tcache == false) - return (ENOENT); - - READONLY(); - WRITEONLY(); - - tcache_flush(); - - ret = 0; -label_return: - return (ret); -} - -/******************************************************************************/ - -/* ctl_mutex must be held during execution of this function. */ -static void -arena_purge(unsigned arena_ind) -{ - VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas); - - malloc_mutex_lock(&arenas_lock); - memcpy(tarenas, arenas, sizeof(arena_t *) * ctl_stats.narenas); - malloc_mutex_unlock(&arenas_lock); - - if (arena_ind == ctl_stats.narenas) { - unsigned i; - for (i = 0; i < ctl_stats.narenas; i++) { - if (tarenas[i] != NULL) - arena_purge_all(tarenas[i]); - } - } else { - assert(arena_ind < ctl_stats.narenas); - if (tarenas[arena_ind] != NULL) - arena_purge_all(tarenas[arena_ind]); - } -} - -static int -arena_i_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ - int ret; - - READONLY(); - WRITEONLY(); - malloc_mutex_lock(&ctl_mtx); - arena_purge(mib[1]); - malloc_mutex_unlock(&ctl_mtx); - - ret = 0; -label_return: - return (ret); -} - -static int -arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ - int ret, i; - bool match, err; - const char *dss; - unsigned arena_ind = mib[1]; - dss_prec_t dss_prec_old = dss_prec_limit; - dss_prec_t dss_prec = dss_prec_limit; - - malloc_mutex_lock(&ctl_mtx); - WRITE(dss, const char *); - match = false; - for (i = 0; i < dss_prec_limit; i++) { - if (strcmp(dss_prec_names[i], dss) == 0) { - dss_prec = i; - match = true; - break; - } - } - if (match == false) { - ret = EINVAL; - goto label_return; - } - - if (arena_ind < ctl_stats.narenas) { - arena_t *arena = arenas[arena_ind]; - if (arena != NULL) { - dss_prec_old = arena_dss_prec_get(arena); - arena_dss_prec_set(arena, dss_prec); - err = false; - } else - err = true; - } else { - dss_prec_old = chunk_dss_prec_get(); - err = chunk_dss_prec_set(dss_prec); - } - dss = dss_prec_names[dss_prec_old]; - READ(dss, const char *); - if (err) { - ret = EFAULT; - goto label_return; - } - - ret = 0; -label_return: - malloc_mutex_unlock(&ctl_mtx); - return 
(ret); -} - -static const ctl_named_node_t * -arena_i_index(const size_t *mib, size_t miblen, size_t i) -{ - const ctl_named_node_t * ret; - - malloc_mutex_lock(&ctl_mtx); - if (i > ctl_stats.narenas) { - ret = NULL; - goto label_return; - } - - ret = super_arena_i_node; -label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); -} - -/******************************************************************************/ - -static int -arenas_narenas_ctl(const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ - int ret; - unsigned narenas; - - malloc_mutex_lock(&ctl_mtx); - READONLY(); - if (*oldlenp != sizeof(unsigned)) { - ret = EINVAL; - goto label_return; - } - narenas = ctl_stats.narenas; - READ(narenas, unsigned); - - ret = 0; -label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); -} - -static int -arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ - int ret; - unsigned nread, i; - - malloc_mutex_lock(&ctl_mtx); - READONLY(); - if (*oldlenp != ctl_stats.narenas * sizeof(bool)) { - ret = EINVAL; - nread = (*oldlenp < ctl_stats.narenas * sizeof(bool)) - ? (*oldlenp / sizeof(bool)) : ctl_stats.narenas; - } else { - ret = 0; - nread = ctl_stats.narenas; - } - - for (i = 0; i < nread; i++) - ((bool *)oldp)[i] = ctl_stats.arenas[i].initialized; - -label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); -} - -CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t) -CTL_RO_NL_GEN(arenas_page, PAGE, size_t) -CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t) -CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned) -CTL_RO_NL_CGEN(config_tcache, arenas_nhbins, nhbins, unsigned) -CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t) -CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t) -CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t) -static const ctl_named_node_t * -arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i) -{ - - if (i > NBINS) - return (NULL); - return (super_arenas_bin_i_node); -} - -CTL_RO_NL_GEN(arenas_nlruns, nlclasses, size_t) -CTL_RO_NL_GEN(arenas_lrun_i_size, ((mib[2]+1) << LG_PAGE), size_t) -static const ctl_named_node_t * -arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i) -{ - - if (i > nlclasses) - return (NULL); - return (super_arenas_lrun_i_node); -} - -static int -arenas_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ - int ret; - unsigned arena_ind; - - malloc_mutex_lock(&ctl_mtx); - WRITEONLY(); - arena_ind = UINT_MAX; - WRITE(arena_ind, unsigned); - if (newp != NULL && arena_ind >= ctl_stats.narenas) - ret = EFAULT; - else { - if (arena_ind == UINT_MAX) - arena_ind = ctl_stats.narenas; - arena_purge(arena_ind); - ret = 0; - } - -label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); -} - -static int -arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ - int ret; - unsigned narenas; - - malloc_mutex_lock(&ctl_mtx); - READONLY(); - if (ctl_grow()) { - ret = EAGAIN; - goto label_return; - } - narenas = ctl_stats.narenas - 1; - READ(narenas, unsigned); - - ret = 0; -label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); -} - -/******************************************************************************/ - -static int -prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t 
newlen) -{ - int ret; - bool oldval; - - if (config_prof == false) - return (ENOENT); - - malloc_mutex_lock(&ctl_mtx); /* Protect opt_prof_active. */ - oldval = opt_prof_active; - if (newp != NULL) { - /* - * The memory barriers will tend to make opt_prof_active - * propagate faster on systems with weak memory ordering. - */ - mb_write(); - WRITE(opt_prof_active, bool); - mb_write(); - } - READ(oldval, bool); - - ret = 0; -label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); -} - -static int -prof_dump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ - int ret; - const char *filename = NULL; - - if (config_prof == false) - return (ENOENT); - - WRITEONLY(); - WRITE(filename, const char *); - - if (prof_mdump(filename)) { - ret = EFAULT; - goto label_return; - } - - ret = 0; -label_return: - return (ret); -} - -CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t) - -/******************************************************************************/ - -CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *) -CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t) -CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t) -CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t) - -CTL_RO_CGEN(config_stats, stats_chunks_current, ctl_stats.chunks.current, - size_t) -CTL_RO_CGEN(config_stats, stats_chunks_total, ctl_stats.chunks.total, uint64_t) -CTL_RO_CGEN(config_stats, stats_chunks_high, ctl_stats.chunks.high, size_t) -CTL_RO_CGEN(config_stats, stats_huge_allocated, huge_allocated, size_t) -CTL_RO_CGEN(config_stats, stats_huge_nmalloc, huge_nmalloc, uint64_t) -CTL_RO_CGEN(config_stats, stats_huge_ndalloc, huge_ndalloc, uint64_t) - -CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *) -CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned) -CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t) -CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_mapped, - ctl_stats.arenas[mib[2]].astats.mapped, size_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_npurge, - ctl_stats.arenas[mib[2]].astats.npurge, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise, - ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_purged, - ctl_stats.arenas[mib[2]].astats.purged, uint64_t) - -CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated, - ctl_stats.arenas[mib[2]].allocated_small, size_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc, - ctl_stats.arenas[mib[2]].nmalloc_small, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc, - ctl_stats.arenas[mib[2]].ndalloc_small, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests, - ctl_stats.arenas[mib[2]].nrequests_small, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated, - ctl_stats.arenas[mib[2]].astats.allocated_large, size_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc, - ctl_stats.arenas[mib[2]].astats.nmalloc_large, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc, - ctl_stats.arenas[mib[2]].astats.ndalloc_large, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests, - ctl_stats.arenas[mib[2]].astats.nrequests_large, uint64_t) - -CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_allocated, - ctl_stats.arenas[mib[2]].bstats[mib[4]].allocated, size_t) 
-CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc, - ctl_stats.arenas[mib[2]].bstats[mib[4]].nmalloc, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc, - ctl_stats.arenas[mib[2]].bstats[mib[4]].ndalloc, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests, - ctl_stats.arenas[mib[2]].bstats[mib[4]].nrequests, uint64_t) -CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nfills, - ctl_stats.arenas[mib[2]].bstats[mib[4]].nfills, uint64_t) -CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nflushes, - ctl_stats.arenas[mib[2]].bstats[mib[4]].nflushes, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nruns, - ctl_stats.arenas[mib[2]].bstats[mib[4]].nruns, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreruns, - ctl_stats.arenas[mib[2]].bstats[mib[4]].reruns, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns, - ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t) - -static const ctl_named_node_t * -stats_arenas_i_bins_j_index(const size_t *mib, size_t miblen, size_t j) -{ - - if (j > NBINS) - return (NULL); - return (super_stats_arenas_i_bins_j_node); -} - -CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nmalloc, - ctl_stats.arenas[mib[2]].lstats[mib[4]].nmalloc, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_ndalloc, - ctl_stats.arenas[mib[2]].lstats[mib[4]].ndalloc, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nrequests, - ctl_stats.arenas[mib[2]].lstats[mib[4]].nrequests, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns, - ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t) - -static const ctl_named_node_t * -stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j) -{ - - if (j > nlclasses) - return (NULL); - return (super_stats_arenas_i_lruns_j_node); -} - -static const ctl_named_node_t * -stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i) -{ - const ctl_named_node_t * ret; - - malloc_mutex_lock(&ctl_mtx); - if (i > ctl_stats.narenas || ctl_stats.arenas[i].initialized == false) { - ret = NULL; - goto label_return; - } - - ret = super_stats_arenas_i_node; -label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); -} diff --git a/lib/jemalloc-3.6.0/src/extent.c b/lib/jemalloc-3.6.0/src/extent.c deleted file mode 100644 index 8c09b48..0000000 --- a/lib/jemalloc-3.6.0/src/extent.c +++ /dev/null @@ -1,39 +0,0 @@ -#define JEMALLOC_EXTENT_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ - -static inline int -extent_szad_comp(extent_node_t *a, extent_node_t *b) -{ - int ret; - size_t a_size = a->size; - size_t b_size = b->size; - - ret = (a_size > b_size) - (a_size < b_size); - if (ret == 0) { - uintptr_t a_addr = (uintptr_t)a->addr; - uintptr_t b_addr = (uintptr_t)b->addr; - - ret = (a_addr > b_addr) - (a_addr < b_addr); - } - - return (ret); -} - -/* Generate red-black tree functions. */ -rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, link_szad, - extent_szad_comp) - -static inline int -extent_ad_comp(extent_node_t *a, extent_node_t *b) -{ - uintptr_t a_addr = (uintptr_t)a->addr; - uintptr_t b_addr = (uintptr_t)b->addr; - - return ((a_addr > b_addr) - (a_addr < b_addr)); -} - -/* Generate red-black tree functions. 
*/ -rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, link_ad, - extent_ad_comp) diff --git a/lib/jemalloc-3.6.0/src/hash.c b/lib/jemalloc-3.6.0/src/hash.c deleted file mode 100644 index cfa4da0..0000000 --- a/lib/jemalloc-3.6.0/src/hash.c +++ /dev/null @@ -1,2 +0,0 @@ -#define JEMALLOC_HASH_C_ -#include "jemalloc/internal/jemalloc_internal.h" diff --git a/lib/jemalloc-3.6.0/src/huge.c b/lib/jemalloc-3.6.0/src/huge.c deleted file mode 100644 index d72f213..0000000 --- a/lib/jemalloc-3.6.0/src/huge.c +++ /dev/null @@ -1,347 +0,0 @@ -#define JEMALLOC_HUGE_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ -/* Data. */ - -uint64_t huge_nmalloc; -uint64_t huge_ndalloc; -size_t huge_allocated; - -malloc_mutex_t huge_mtx; - -/******************************************************************************/ - -/* Tree of chunks that are stand-alone huge allocations. */ -static extent_tree_t huge; - -void * -huge_malloc(size_t size, bool zero, dss_prec_t dss_prec) -{ - - return (huge_palloc(size, chunksize, zero, dss_prec)); -} - -void * -huge_palloc(size_t size, size_t alignment, bool zero, dss_prec_t dss_prec) -{ - void *ret; - size_t csize; - extent_node_t *node; - bool is_zeroed; - - /* Allocate one or more contiguous chunks for this request. */ - - csize = CHUNK_CEILING(size); - if (csize == 0) { - /* size is large enough to cause size_t wrap-around. */ - return (NULL); - } - - /* Allocate an extent node with which to track the chunk. */ - node = base_node_alloc(); - if (node == NULL) - return (NULL); - - /* - * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that - * it is possible to make correct junk/zero fill decisions below. - */ - is_zeroed = zero; - ret = chunk_alloc(csize, alignment, false, &is_zeroed, dss_prec); - if (ret == NULL) { - base_node_dealloc(node); - return (NULL); - } - - /* Insert node into huge. */ - node->addr = ret; - node->size = csize; - - malloc_mutex_lock(&huge_mtx); - extent_tree_ad_insert(&huge, node); - if (config_stats) { - stats_cactive_add(csize); - huge_nmalloc++; - huge_allocated += csize; - } - malloc_mutex_unlock(&huge_mtx); - - if (config_fill && zero == false) { - if (opt_junk) - memset(ret, 0xa5, csize); - else if (opt_zero && is_zeroed == false) - memset(ret, 0, csize); - } - - return (ret); -} - -bool -huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra) -{ - - /* - * Avoid moving the allocation if the size class can be left the same. - */ - if (oldsize > arena_maxclass - && CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size) - && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) { - assert(CHUNK_CEILING(oldsize) == oldsize); - return (false); - } - - /* Reallocation would require a move. */ - return (true); -} - -void * -huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra, - size_t alignment, bool zero, bool try_tcache_dalloc, dss_prec_t dss_prec) -{ - void *ret; - size_t copysize; - - /* Try to avoid moving the allocation. */ - if (huge_ralloc_no_move(ptr, oldsize, size, extra) == false) - return (ptr); - - /* - * size and oldsize are different enough that we need to use a - * different size class. In that case, fall back to allocating new - * space and copying. - */ - if (alignment > chunksize) - ret = huge_palloc(size + extra, alignment, zero, dss_prec); - else - ret = huge_malloc(size + extra, zero, dss_prec); - - if (ret == NULL) { - if (extra == 0) - return (NULL); - /* Try again, this time without extra. 
*/ - if (alignment > chunksize) - ret = huge_palloc(size, alignment, zero, dss_prec); - else - ret = huge_malloc(size, zero, dss_prec); - - if (ret == NULL) - return (NULL); - } - - /* - * Copy at most size bytes (not size+extra), since the caller has no - * expectation that the extra bytes will be reliably preserved. - */ - copysize = (size < oldsize) ? size : oldsize; - -#ifdef JEMALLOC_MREMAP - /* - * Use mremap(2) if this is a huge-->huge reallocation, and neither the - * source nor the destination are in dss. - */ - if (oldsize >= chunksize && (config_dss == false || (chunk_in_dss(ptr) - == false && chunk_in_dss(ret) == false))) { - size_t newsize = huge_salloc(ret); - - /* - * Remove ptr from the tree of huge allocations before - * performing the remap operation, in order to avoid the - * possibility of another thread acquiring that mapping before - * this one removes it from the tree. - */ - huge_dalloc(ptr, false); - if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED, - ret) == MAP_FAILED) { - /* - * Assuming no chunk management bugs in the allocator, - * the only documented way an error can occur here is - * if the application changed the map type for a - * portion of the old allocation. This is firmly in - * undefined behavior territory, so write a diagnostic - * message, and optionally abort. - */ - char buf[BUFERROR_BUF]; - - buferror(get_errno(), buf, sizeof(buf)); - malloc_printf(": Error in mremap(): %s\n", - buf); - if (opt_abort) - abort(); - memcpy(ret, ptr, copysize); - chunk_dealloc_mmap(ptr, oldsize); - } else if (config_fill && zero == false && opt_junk && oldsize - < newsize) { - /* - * mremap(2) clobbers the original mapping, so - * junk/zero filling is not preserved. There is no - * need to zero fill here, since any trailing - * uninititialized memory is demand-zeroed by the - * kernel, but junk filling must be redone. - */ - memset(ret + oldsize, 0xa5, newsize - oldsize); - } - } else -#endif - { - memcpy(ret, ptr, copysize); - iqalloct(ptr, try_tcache_dalloc); - } - return (ret); -} - -#ifdef JEMALLOC_JET -#undef huge_dalloc_junk -#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl) -#endif -static void -huge_dalloc_junk(void *ptr, size_t usize) -{ - - if (config_fill && config_dss && opt_junk) { - /* - * Only bother junk filling if the chunk isn't about to be - * unmapped. - */ - if (config_munmap == false || (config_dss && chunk_in_dss(ptr))) - memset(ptr, 0x5a, usize); - } -} -#ifdef JEMALLOC_JET -#undef huge_dalloc_junk -#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk) -huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl); -#endif - -void -huge_dalloc(void *ptr, bool unmap) -{ - extent_node_t *node, key; - - malloc_mutex_lock(&huge_mtx); - - /* Extract from tree of huge allocations. */ - key.addr = ptr; - node = extent_tree_ad_search(&huge, &key); - assert(node != NULL); - assert(node->addr == ptr); - extent_tree_ad_remove(&huge, node); - - if (config_stats) { - stats_cactive_sub(node->size); - huge_ndalloc++; - huge_allocated -= node->size; - } - - malloc_mutex_unlock(&huge_mtx); - - if (unmap) - huge_dalloc_junk(node->addr, node->size); - - chunk_dealloc(node->addr, node->size, unmap); - - base_node_dealloc(node); -} - -size_t -huge_salloc(const void *ptr) -{ - size_t ret; - extent_node_t *node, key; - - malloc_mutex_lock(&huge_mtx); - - /* Extract from tree of huge allocations. 
*/ - key.addr = __DECONST(void *, ptr); - node = extent_tree_ad_search(&huge, &key); - assert(node != NULL); - - ret = node->size; - - malloc_mutex_unlock(&huge_mtx); - - return (ret); -} - -dss_prec_t -huge_dss_prec_get(arena_t *arena) -{ - - return (arena_dss_prec_get(choose_arena(arena))); -} - -prof_ctx_t * -huge_prof_ctx_get(const void *ptr) -{ - prof_ctx_t *ret; - extent_node_t *node, key; - - malloc_mutex_lock(&huge_mtx); - - /* Extract from tree of huge allocations. */ - key.addr = __DECONST(void *, ptr); - node = extent_tree_ad_search(&huge, &key); - assert(node != NULL); - - ret = node->prof_ctx; - - malloc_mutex_unlock(&huge_mtx); - - return (ret); -} - -void -huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx) -{ - extent_node_t *node, key; - - malloc_mutex_lock(&huge_mtx); - - /* Extract from tree of huge allocations. */ - key.addr = __DECONST(void *, ptr); - node = extent_tree_ad_search(&huge, &key); - assert(node != NULL); - - node->prof_ctx = ctx; - - malloc_mutex_unlock(&huge_mtx); -} - -bool -huge_boot(void) -{ - - /* Initialize chunks data. */ - if (malloc_mutex_init(&huge_mtx)) - return (true); - extent_tree_ad_new(&huge); - - if (config_stats) { - huge_nmalloc = 0; - huge_ndalloc = 0; - huge_allocated = 0; - } - - return (false); -} - -void -huge_prefork(void) -{ - - malloc_mutex_prefork(&huge_mtx); -} - -void -huge_postfork_parent(void) -{ - - malloc_mutex_postfork_parent(&huge_mtx); -} - -void -huge_postfork_child(void) -{ - - malloc_mutex_postfork_child(&huge_mtx); -} diff --git a/lib/jemalloc-3.6.0/src/jemalloc.c b/lib/jemalloc-3.6.0/src/jemalloc.c deleted file mode 100644 index 204778b..0000000 --- a/lib/jemalloc-3.6.0/src/jemalloc.c +++ /dev/null @@ -1,2111 +0,0 @@ -#define JEMALLOC_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ -/* Data. */ - -malloc_tsd_data(, arenas, arena_t *, NULL) -malloc_tsd_data(, thread_allocated, thread_allocated_t, - THREAD_ALLOCATED_INITIALIZER) - -/* Runtime configuration options. */ -const char *je_malloc_conf; -bool opt_abort = -#ifdef JEMALLOC_DEBUG - true -#else - false -#endif - ; -bool opt_junk = -#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) - true -#else - false -#endif - ; -size_t opt_quarantine = ZU(0); -bool opt_redzone = false; -bool opt_utrace = false; -bool opt_valgrind = false; -bool opt_xmalloc = false; -bool opt_zero = false; -size_t opt_narenas = 0; - -unsigned ncpus; - -malloc_mutex_t arenas_lock; -arena_t **arenas; -unsigned narenas_total; -unsigned narenas_auto; - -/* Set to true once the allocator has been initialized. */ -static bool malloc_initialized = false; - -#ifdef JEMALLOC_THREADED_INIT -/* Used to let the initializing thread recursively allocate. */ -# define NO_INITIALIZER ((unsigned long)0) -# define INITIALIZER pthread_self() -# define IS_INITIALIZER (malloc_initializer == pthread_self()) -static pthread_t malloc_initializer = NO_INITIALIZER; -#else -# define NO_INITIALIZER false -# define INITIALIZER true -# define IS_INITIALIZER malloc_initializer -static bool malloc_initializer = NO_INITIALIZER; -#endif - -/* Used to avoid initialization races. 
*/ -#ifdef _WIN32 -static malloc_mutex_t init_lock; - -JEMALLOC_ATTR(constructor) -static void WINAPI -_init_init_lock(void) -{ - - malloc_mutex_init(&init_lock); -} - -#ifdef _MSC_VER -# pragma section(".CRT$XCU", read) -JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used) -static const void (WINAPI *init_init_lock)(void) = _init_init_lock; -#endif - -#else -static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER; -#endif - -typedef struct { - void *p; /* Input pointer (as in realloc(p, s)). */ - size_t s; /* Request size. */ - void *r; /* Result pointer. */ -} malloc_utrace_t; - -#ifdef JEMALLOC_UTRACE -# define UTRACE(a, b, c) do { \ - if (opt_utrace) { \ - int utrace_serrno = errno; \ - malloc_utrace_t ut; \ - ut.p = (a); \ - ut.s = (b); \ - ut.r = (c); \ - utrace(&ut, sizeof(ut)); \ - errno = utrace_serrno; \ - } \ -} while (0) -#else -# define UTRACE(a, b, c) -#endif - -/******************************************************************************/ -/* - * Function prototypes for static functions that are referenced prior to - * definition. - */ - -static bool malloc_init_hard(void); - -/******************************************************************************/ -/* - * Begin miscellaneous support functions. - */ - -/* Create a new arena and insert it into the arenas array at index ind. */ -arena_t * -arenas_extend(unsigned ind) -{ - arena_t *ret; - - ret = (arena_t *)base_alloc(sizeof(arena_t)); - if (ret != NULL && arena_new(ret, ind) == false) { - arenas[ind] = ret; - return (ret); - } - /* Only reached if there is an OOM error. */ - - /* - * OOM here is quite inconvenient to propagate, since dealing with it - * would require a check for failure in the fast path. Instead, punt - * by using arenas[0]. In practice, this is an extremely unlikely - * failure. - */ - malloc_write(": Error initializing arena\n"); - if (opt_abort) - abort(); - - return (arenas[0]); -} - -/* Slow path, called only by choose_arena(). */ -arena_t * -choose_arena_hard(void) -{ - arena_t *ret; - - if (narenas_auto > 1) { - unsigned i, choose, first_null; - - choose = 0; - first_null = narenas_auto; - malloc_mutex_lock(&arenas_lock); - assert(arenas[0] != NULL); - for (i = 1; i < narenas_auto; i++) { - if (arenas[i] != NULL) { - /* - * Choose the first arena that has the lowest - * number of threads assigned to it. - */ - if (arenas[i]->nthreads < - arenas[choose]->nthreads) - choose = i; - } else if (first_null == narenas_auto) { - /* - * Record the index of the first uninitialized - * arena, in case all extant arenas are in use. - * - * NB: It is possible for there to be - * discontinuities in terms of initialized - * versus uninitialized arenas, due to the - * "thread.arena" mallctl. - */ - first_null = i; - } - } - - if (arenas[choose]->nthreads == 0 - || first_null == narenas_auto) { - /* - * Use an unloaded arena, or the least loaded arena if - * all arenas are already initialized. - */ - ret = arenas[choose]; - } else { - /* Initialize a new arena. */ - ret = arenas_extend(first_null); - } - ret->nthreads++; - malloc_mutex_unlock(&arenas_lock); - } else { - ret = arenas[0]; - malloc_mutex_lock(&arenas_lock); - ret->nthreads++; - malloc_mutex_unlock(&arenas_lock); - } - - arenas_tsd_set(&ret); - - return (ret); -} - -static void -stats_print_atexit(void) -{ - - if (config_tcache && config_stats) { - unsigned narenas, i; - - /* - * Merge stats from extant threads. This is racy, since - * individual threads do not lock when recording tcache stats - * events. 
As a consequence, the final stats may be slightly - * out of date by the time they are reported, if other threads - * continue to allocate. - */ - for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { - arena_t *arena = arenas[i]; - if (arena != NULL) { - tcache_t *tcache; - - /* - * tcache_stats_merge() locks bins, so if any - * code is introduced that acquires both arena - * and bin locks in the opposite order, - * deadlocks may result. - */ - malloc_mutex_lock(&arena->lock); - ql_foreach(tcache, &arena->tcache_ql, link) { - tcache_stats_merge(tcache, arena); - } - malloc_mutex_unlock(&arena->lock); - } - } - } - je_malloc_stats_print(NULL, NULL, NULL); -} - -/* - * End miscellaneous support functions. - */ -/******************************************************************************/ -/* - * Begin initialization functions. - */ - -static unsigned -malloc_ncpus(void) -{ - long result; - -#ifdef _WIN32 - SYSTEM_INFO si; - GetSystemInfo(&si); - result = si.dwNumberOfProcessors; -#else - result = sysconf(_SC_NPROCESSORS_ONLN); -#endif - return ((result == -1) ? 1 : (unsigned)result); -} - -void -arenas_cleanup(void *arg) -{ - arena_t *arena = *(arena_t **)arg; - - malloc_mutex_lock(&arenas_lock); - arena->nthreads--; - malloc_mutex_unlock(&arenas_lock); -} - -JEMALLOC_ALWAYS_INLINE_C void -malloc_thread_init(void) -{ - - /* - * TSD initialization can't be safely done as a side effect of - * deallocation, because it is possible for a thread to do nothing but - * deallocate its TLS data via free(), in which case writing to TLS - * would cause write-after-free memory corruption. The quarantine - * facility *only* gets used as a side effect of deallocation, so make - * a best effort attempt at initializing its TSD by hooking all - * allocation events. 
- */ - if (config_fill && opt_quarantine) - quarantine_alloc_hook(); -} - -JEMALLOC_ALWAYS_INLINE_C bool -malloc_init(void) -{ - - if (malloc_initialized == false && malloc_init_hard()) - return (true); - malloc_thread_init(); - - return (false); -} - -static bool -malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p, - char const **v_p, size_t *vlen_p) -{ - bool accept; - const char *opts = *opts_p; - - *k_p = opts; - - for (accept = false; accept == false;) { - switch (*opts) { - case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': - case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': - case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': - case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': - case 'Y': case 'Z': - case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': - case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': - case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': - case 's': case 't': case 'u': case 'v': case 'w': case 'x': - case 'y': case 'z': - case '0': case '1': case '2': case '3': case '4': case '5': - case '6': case '7': case '8': case '9': - case '_': - opts++; - break; - case ':': - opts++; - *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p; - *v_p = opts; - accept = true; - break; - case '\0': - if (opts != *opts_p) { - malloc_write(": Conf string ends " - "with key\n"); - } - return (true); - default: - malloc_write(": Malformed conf string\n"); - return (true); - } - } - - for (accept = false; accept == false;) { - switch (*opts) { - case ',': - opts++; - /* - * Look ahead one character here, because the next time - * this function is called, it will assume that end of - * input has been cleanly reached if no input remains, - * but we have optimistically already consumed the - * comma if one exists. - */ - if (*opts == '\0') { - malloc_write(": Conf string ends " - "with comma\n"); - } - *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p; - accept = true; - break; - case '\0': - *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p; - accept = true; - break; - default: - opts++; - break; - } - } - - *opts_p = opts; - return (false); -} - -static void -malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v, - size_t vlen) -{ - - malloc_printf(": %s: %.*s:%.*s\n", msg, (int)klen, k, - (int)vlen, v); -} - -static void -malloc_conf_init(void) -{ - unsigned i; - char buf[PATH_MAX + 1]; - const char *opts, *k, *v; - size_t klen, vlen; - - /* - * Automatically configure valgrind before processing options. The - * valgrind option remains in jemalloc 3.x for compatibility reasons. - */ - if (config_valgrind) { - opt_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false; - if (config_fill && opt_valgrind) { - opt_junk = false; - assert(opt_zero == false); - opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT; - opt_redzone = true; - } - if (config_tcache && opt_valgrind) - opt_tcache = false; - } - - for (i = 0; i < 3; i++) { - /* Get runtime configuration. */ - switch (i) { - case 0: - if (je_malloc_conf != NULL) { - /* - * Use options that were compiled into the - * program. - */ - opts = je_malloc_conf; - } else { - /* No configuration specified. */ - buf[0] = '\0'; - opts = buf; - } - break; - case 1: { - int linklen = 0; -#ifndef _WIN32 - int saved_errno = errno; - const char *linkname = -# ifdef JEMALLOC_PREFIX - "/etc/"JEMALLOC_PREFIX"malloc.conf" -# else - "/etc/malloc.conf" -# endif - ; - - /* - * Try to use the contents of the "/etc/malloc.conf" - * symbolic link's name. 
- */ - linklen = readlink(linkname, buf, sizeof(buf) - 1); - if (linklen == -1) { - /* No configuration specified. */ - linklen = 0; - /* restore errno */ - set_errno(saved_errno); - } -#endif - buf[linklen] = '\0'; - opts = buf; - break; - } case 2: { - const char *envname = -#ifdef JEMALLOC_PREFIX - JEMALLOC_CPREFIX"MALLOC_CONF" -#else - "MALLOC_CONF" -#endif - ; - - if ((opts = getenv(envname)) != NULL) { - /* - * Do nothing; opts is already initialized to - * the value of the MALLOC_CONF environment - * variable. - */ - } else { - /* No configuration specified. */ - buf[0] = '\0'; - opts = buf; - } - break; - } default: - not_reached(); - buf[0] = '\0'; - opts = buf; - } - - while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v, - &vlen) == false) { -#define CONF_HANDLE_BOOL(o, n) \ - if (sizeof(n)-1 == klen && strncmp(n, k, \ - klen) == 0) { \ - if (strncmp("true", v, vlen) == 0 && \ - vlen == sizeof("true")-1) \ - o = true; \ - else if (strncmp("false", v, vlen) == \ - 0 && vlen == sizeof("false")-1) \ - o = false; \ - else { \ - malloc_conf_error( \ - "Invalid conf value", \ - k, klen, v, vlen); \ - } \ - continue; \ - } -#define CONF_HANDLE_SIZE_T(o, n, min, max, clip) \ - if (sizeof(n)-1 == klen && strncmp(n, k, \ - klen) == 0) { \ - uintmax_t um; \ - char *end; \ - \ - set_errno(0); \ - um = malloc_strtoumax(v, &end, 0); \ - if (get_errno() != 0 || (uintptr_t)end -\ - (uintptr_t)v != vlen) { \ - malloc_conf_error( \ - "Invalid conf value", \ - k, klen, v, vlen); \ - } else if (clip) { \ - if (min != 0 && um < min) \ - o = min; \ - else if (um > max) \ - o = max; \ - else \ - o = um; \ - } else { \ - if ((min != 0 && um < min) || \ - um > max) { \ - malloc_conf_error( \ - "Out-of-range " \ - "conf value", \ - k, klen, v, vlen); \ - } else \ - o = um; \ - } \ - continue; \ - } -#define CONF_HANDLE_SSIZE_T(o, n, min, max) \ - if (sizeof(n)-1 == klen && strncmp(n, k, \ - klen) == 0) { \ - long l; \ - char *end; \ - \ - set_errno(0); \ - l = strtol(v, &end, 0); \ - if (get_errno() != 0 || (uintptr_t)end -\ - (uintptr_t)v != vlen) { \ - malloc_conf_error( \ - "Invalid conf value", \ - k, klen, v, vlen); \ - } else if (l < (ssize_t)min || l > \ - (ssize_t)max) { \ - malloc_conf_error( \ - "Out-of-range conf value", \ - k, klen, v, vlen); \ - } else \ - o = l; \ - continue; \ - } -#define CONF_HANDLE_CHAR_P(o, n, d) \ - if (sizeof(n)-1 == klen && strncmp(n, k, \ - klen) == 0) { \ - size_t cpylen = (vlen <= \ - sizeof(o)-1) ? vlen : \ - sizeof(o)-1; \ - strncpy(o, v, cpylen); \ - o[cpylen] = '\0'; \ - continue; \ - } - - CONF_HANDLE_BOOL(opt_abort, "abort") - /* - * Chunks always require at least one header page, plus - * one data page in the absence of redzones, or three - * pages in the presence of redzones. In order to - * simplify options processing, fix the limit based on - * config_fill. - */ - CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE + - (config_fill ? 
2 : 1), (sizeof(size_t) << 3) - 1, - true) - if (strncmp("dss", k, klen) == 0) { - int i; - bool match = false; - for (i = 0; i < dss_prec_limit; i++) { - if (strncmp(dss_prec_names[i], v, vlen) - == 0) { - if (chunk_dss_prec_set(i)) { - malloc_conf_error( - "Error setting dss", - k, klen, v, vlen); - } else { - opt_dss = - dss_prec_names[i]; - match = true; - break; - } - } - } - if (match == false) { - malloc_conf_error("Invalid conf value", - k, klen, v, vlen); - } - continue; - } - CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1, - SIZE_T_MAX, false) - CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult", - -1, (sizeof(size_t) << 3) - 1) - CONF_HANDLE_BOOL(opt_stats_print, "stats_print") - if (config_fill) { - CONF_HANDLE_BOOL(opt_junk, "junk") - CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine", - 0, SIZE_T_MAX, false) - CONF_HANDLE_BOOL(opt_redzone, "redzone") - CONF_HANDLE_BOOL(opt_zero, "zero") - } - if (config_utrace) { - CONF_HANDLE_BOOL(opt_utrace, "utrace") - } - if (config_valgrind) { - CONF_HANDLE_BOOL(opt_valgrind, "valgrind") - } - if (config_xmalloc) { - CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc") - } - if (config_tcache) { - CONF_HANDLE_BOOL(opt_tcache, "tcache") - CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, - "lg_tcache_max", -1, - (sizeof(size_t) << 3) - 1) - } - if (config_prof) { - CONF_HANDLE_BOOL(opt_prof, "prof") - CONF_HANDLE_CHAR_P(opt_prof_prefix, - "prof_prefix", "jeprof") - CONF_HANDLE_BOOL(opt_prof_active, "prof_active") - CONF_HANDLE_SSIZE_T(opt_lg_prof_sample, - "lg_prof_sample", 0, - (sizeof(uint64_t) << 3) - 1) - CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum") - CONF_HANDLE_SSIZE_T(opt_lg_prof_interval, - "lg_prof_interval", -1, - (sizeof(uint64_t) << 3) - 1) - CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump") - CONF_HANDLE_BOOL(opt_prof_final, "prof_final") - CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak") - } - malloc_conf_error("Invalid conf pair", k, klen, v, - vlen); -#undef CONF_HANDLE_BOOL -#undef CONF_HANDLE_SIZE_T -#undef CONF_HANDLE_SSIZE_T -#undef CONF_HANDLE_CHAR_P - } - } -} - -static bool -malloc_init_hard(void) -{ - arena_t *init_arenas[1]; - - malloc_mutex_lock(&init_lock); - if (malloc_initialized || IS_INITIALIZER) { - /* - * Another thread initialized the allocator before this one - * acquired init_lock, or this thread is the initializing - * thread, and it is recursively allocating. - */ - malloc_mutex_unlock(&init_lock); - return (false); - } -#ifdef JEMALLOC_THREADED_INIT - if (malloc_initializer != NO_INITIALIZER && IS_INITIALIZER == false) { - /* Busy-wait until the initializing thread completes. */ - do { - malloc_mutex_unlock(&init_lock); - CPU_SPINWAIT; - malloc_mutex_lock(&init_lock); - } while (malloc_initialized == false); - malloc_mutex_unlock(&init_lock); - return (false); - } -#endif - malloc_initializer = INITIALIZER; - - malloc_tsd_boot(); - if (config_prof) - prof_boot0(); - - malloc_conf_init(); - - if (opt_stats_print) { - /* Print statistics at exit. 
*/ - if (atexit(stats_print_atexit) != 0) { - malloc_write(": Error in atexit()\n"); - if (opt_abort) - abort(); - } - } - - if (base_boot()) { - malloc_mutex_unlock(&init_lock); - return (true); - } - - if (chunk_boot()) { - malloc_mutex_unlock(&init_lock); - return (true); - } - - if (ctl_boot()) { - malloc_mutex_unlock(&init_lock); - return (true); - } - - if (config_prof) - prof_boot1(); - - arena_boot(); - - if (config_tcache && tcache_boot0()) { - malloc_mutex_unlock(&init_lock); - return (true); - } - - if (huge_boot()) { - malloc_mutex_unlock(&init_lock); - return (true); - } - - if (malloc_mutex_init(&arenas_lock)) { - malloc_mutex_unlock(&init_lock); - return (true); - } - - /* - * Create enough scaffolding to allow recursive allocation in - * malloc_ncpus(). - */ - narenas_total = narenas_auto = 1; - arenas = init_arenas; - memset(arenas, 0, sizeof(arena_t *) * narenas_auto); - - /* - * Initialize one arena here. The rest are lazily created in - * choose_arena_hard(). - */ - arenas_extend(0); - if (arenas[0] == NULL) { - malloc_mutex_unlock(&init_lock); - return (true); - } - - /* Initialize allocation counters before any allocations can occur. */ - if (config_stats && thread_allocated_tsd_boot()) { - malloc_mutex_unlock(&init_lock); - return (true); - } - - if (arenas_tsd_boot()) { - malloc_mutex_unlock(&init_lock); - return (true); - } - - if (config_tcache && tcache_boot1()) { - malloc_mutex_unlock(&init_lock); - return (true); - } - - if (config_fill && quarantine_boot()) { - malloc_mutex_unlock(&init_lock); - return (true); - } - - if (config_prof && prof_boot2()) { - malloc_mutex_unlock(&init_lock); - return (true); - } - - malloc_mutex_unlock(&init_lock); - /**********************************************************************/ - /* Recursive allocation may follow. */ - - ncpus = malloc_ncpus(); - -#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \ - && !defined(_WIN32)) - /* LinuxThreads's pthread_atfork() allocates. */ - if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent, - jemalloc_postfork_child) != 0) { - malloc_write(": Error in pthread_atfork()\n"); - if (opt_abort) - abort(); - } -#endif - - /* Done recursively allocating. */ - /**********************************************************************/ - malloc_mutex_lock(&init_lock); - - if (mutex_boot()) { - malloc_mutex_unlock(&init_lock); - return (true); - } - - if (opt_narenas == 0) { - /* - * For SMP systems, create more than one arena per CPU by - * default. - */ - if (ncpus > 1) - opt_narenas = ncpus << 2; - else - opt_narenas = 1; - } - narenas_auto = opt_narenas; - /* - * Make sure that the arenas array can be allocated. In practice, this - * limit is enough to allow the allocator to function, but the ctl - * machinery will fail to allocate memory at far lower limits. - */ - if (narenas_auto > chunksize / sizeof(arena_t *)) { - narenas_auto = chunksize / sizeof(arena_t *); - malloc_printf(": Reducing narenas to limit (%d)\n", - narenas_auto); - } - narenas_total = narenas_auto; - - /* Allocate and initialize arenas. */ - arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas_total); - if (arenas == NULL) { - malloc_mutex_unlock(&init_lock); - return (true); - } - /* - * Zero the array. In practice, this should always be pre-zeroed, - * since it was just mmap()ed, but let's be sure. - */ - memset(arenas, 0, sizeof(arena_t *) * narenas_total); - /* Copy the pointer to the one arena that was already initialized. 
*/ - arenas[0] = init_arenas[0]; - - malloc_initialized = true; - malloc_mutex_unlock(&init_lock); - - return (false); -} - -/* - * End initialization functions. - */ -/******************************************************************************/ -/* - * Begin malloc(3)-compatible functions. - */ - -static void * -imalloc_prof_sample(size_t usize, prof_thr_cnt_t *cnt) -{ - void *p; - - if (cnt == NULL) - return (NULL); - if (prof_promote && usize <= SMALL_MAXCLASS) { - p = imalloc(SMALL_MAXCLASS+1); - if (p == NULL) - return (NULL); - arena_prof_promoted(p, usize); - } else - p = imalloc(usize); - - return (p); -} - -JEMALLOC_ALWAYS_INLINE_C void * -imalloc_prof(size_t usize, prof_thr_cnt_t *cnt) -{ - void *p; - - if ((uintptr_t)cnt != (uintptr_t)1U) - p = imalloc_prof_sample(usize, cnt); - else - p = imalloc(usize); - if (p == NULL) - return (NULL); - prof_malloc(p, usize, cnt); - - return (p); -} - -/* - * MALLOC_BODY() is a macro rather than a function because its contents are in - * the fast path, but inlining would cause reliability issues when determining - * how many frames to discard from heap profiling backtraces. - */ -#define MALLOC_BODY(ret, size, usize) do { \ - if (malloc_init()) \ - ret = NULL; \ - else { \ - if (config_prof && opt_prof) { \ - prof_thr_cnt_t *cnt; \ - \ - usize = s2u(size); \ - /* \ - * Call PROF_ALLOC_PREP() here rather than in \ - * imalloc_prof() so that imalloc_prof() can be \ - * inlined without introducing uncertainty \ - * about the number of backtrace frames to \ - * ignore. imalloc_prof() is in the fast path \ - * when heap profiling is enabled, so inlining \ - * is critical to performance. (For \ - * consistency all callers of PROF_ALLOC_PREP() \ - * are structured similarly, even though e.g. \ - * realloc() isn't called enough for inlining \ - * to be critical.) 
\ - */ \ - PROF_ALLOC_PREP(1, usize, cnt); \ - ret = imalloc_prof(usize, cnt); \ - } else { \ - if (config_stats || (config_valgrind && \ - opt_valgrind)) \ - usize = s2u(size); \ - ret = imalloc(size); \ - } \ - } \ -} while (0) - -void * -je_malloc(size_t size) -{ - void *ret; - size_t usize JEMALLOC_CC_SILENCE_INIT(0); - - if (size == 0) - size = 1; - - MALLOC_BODY(ret, size, usize); - - if (ret == NULL) { - if (config_xmalloc && opt_xmalloc) { - malloc_write(": Error in malloc(): " - "out of memory\n"); - abort(); - } - set_errno(ENOMEM); - } - if (config_stats && ret != NULL) { - assert(usize == isalloc(ret, config_prof)); - thread_allocated_tsd_get()->allocated += usize; - } - UTRACE(0, size, ret); - JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false); - return (ret); -} - -static void * -imemalign_prof_sample(size_t alignment, size_t usize, prof_thr_cnt_t *cnt) -{ - void *p; - - if (cnt == NULL) - return (NULL); - if (prof_promote && usize <= SMALL_MAXCLASS) { - assert(sa2u(SMALL_MAXCLASS+1, alignment) != 0); - p = ipalloc(sa2u(SMALL_MAXCLASS+1, alignment), alignment, - false); - if (p == NULL) - return (NULL); - arena_prof_promoted(p, usize); - } else - p = ipalloc(usize, alignment, false); - - return (p); -} - -JEMALLOC_ALWAYS_INLINE_C void * -imemalign_prof(size_t alignment, size_t usize, prof_thr_cnt_t *cnt) -{ - void *p; - - if ((uintptr_t)cnt != (uintptr_t)1U) - p = imemalign_prof_sample(alignment, usize, cnt); - else - p = ipalloc(usize, alignment, false); - if (p == NULL) - return (NULL); - prof_malloc(p, usize, cnt); - - return (p); -} - -JEMALLOC_ATTR(nonnull(1)) -#ifdef JEMALLOC_PROF -/* - * Avoid any uncertainty as to how many backtrace frames to ignore in - * PROF_ALLOC_PREP(). - */ -JEMALLOC_NOINLINE -#endif -static int -imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment) -{ - int ret; - size_t usize; - void *result; - - assert(min_alignment != 0); - - if (malloc_init()) { - result = NULL; - goto label_oom; - } else { - if (size == 0) - size = 1; - - /* Make sure that alignment is a large enough power of 2. 
*/ - if (((alignment - 1) & alignment) != 0 - || (alignment < min_alignment)) { - if (config_xmalloc && opt_xmalloc) { - malloc_write(": Error allocating " - "aligned memory: invalid alignment\n"); - abort(); - } - result = NULL; - ret = EINVAL; - goto label_return; - } - - usize = sa2u(size, alignment); - if (usize == 0) { - result = NULL; - goto label_oom; - } - - if (config_prof && opt_prof) { - prof_thr_cnt_t *cnt; - - PROF_ALLOC_PREP(2, usize, cnt); - result = imemalign_prof(alignment, usize, cnt); - } else - result = ipalloc(usize, alignment, false); - if (result == NULL) - goto label_oom; - } - - *memptr = result; - ret = 0; -label_return: - if (config_stats && result != NULL) { - assert(usize == isalloc(result, config_prof)); - thread_allocated_tsd_get()->allocated += usize; - } - UTRACE(0, size, result); - return (ret); -label_oom: - assert(result == NULL); - if (config_xmalloc && opt_xmalloc) { - malloc_write(": Error allocating aligned memory: " - "out of memory\n"); - abort(); - } - ret = ENOMEM; - goto label_return; -} - -int -je_posix_memalign(void **memptr, size_t alignment, size_t size) -{ - int ret = imemalign(memptr, alignment, size, sizeof(void *)); - JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr, - config_prof), false); - return (ret); -} - -void * -je_aligned_alloc(size_t alignment, size_t size) -{ - void *ret; - int err; - - if ((err = imemalign(&ret, alignment, size, 1)) != 0) { - ret = NULL; - set_errno(err); - } - JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof), - false); - return (ret); -} - -static void * -icalloc_prof_sample(size_t usize, prof_thr_cnt_t *cnt) -{ - void *p; - - if (cnt == NULL) - return (NULL); - if (prof_promote && usize <= SMALL_MAXCLASS) { - p = icalloc(SMALL_MAXCLASS+1); - if (p == NULL) - return (NULL); - arena_prof_promoted(p, usize); - } else - p = icalloc(usize); - - return (p); -} - -JEMALLOC_ALWAYS_INLINE_C void * -icalloc_prof(size_t usize, prof_thr_cnt_t *cnt) -{ - void *p; - - if ((uintptr_t)cnt != (uintptr_t)1U) - p = icalloc_prof_sample(usize, cnt); - else - p = icalloc(usize); - if (p == NULL) - return (NULL); - prof_malloc(p, usize, cnt); - - return (p); -} - -void * -je_calloc(size_t num, size_t size) -{ - void *ret; - size_t num_size; - size_t usize JEMALLOC_CC_SILENCE_INIT(0); - - if (malloc_init()) { - num_size = 0; - ret = NULL; - goto label_return; - } - - num_size = num * size; - if (num_size == 0) { - if (num == 0 || size == 0) - num_size = 1; - else { - ret = NULL; - goto label_return; - } - /* - * Try to avoid division here. We know that it isn't possible to - * overflow during multiplication if neither operand uses any of the - * most significant half of the bits in a size_t. - */ - } else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2))) - && (num_size / size != num)) { - /* size_t overflow. 
*/ - ret = NULL; - goto label_return; - } - - if (config_prof && opt_prof) { - prof_thr_cnt_t *cnt; - - usize = s2u(num_size); - PROF_ALLOC_PREP(1, usize, cnt); - ret = icalloc_prof(usize, cnt); - } else { - if (config_stats || (config_valgrind && opt_valgrind)) - usize = s2u(num_size); - ret = icalloc(num_size); - } - -label_return: - if (ret == NULL) { - if (config_xmalloc && opt_xmalloc) { - malloc_write(": Error in calloc(): out of " - "memory\n"); - abort(); - } - set_errno(ENOMEM); - } - if (config_stats && ret != NULL) { - assert(usize == isalloc(ret, config_prof)); - thread_allocated_tsd_get()->allocated += usize; - } - UTRACE(0, num_size, ret); - JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true); - return (ret); -} - -static void * -irealloc_prof_sample(void *oldptr, size_t usize, prof_thr_cnt_t *cnt) -{ - void *p; - - if (cnt == NULL) - return (NULL); - if (prof_promote && usize <= SMALL_MAXCLASS) { - p = iralloc(oldptr, SMALL_MAXCLASS+1, 0, 0, false); - if (p == NULL) - return (NULL); - arena_prof_promoted(p, usize); - } else - p = iralloc(oldptr, usize, 0, 0, false); - - return (p); -} - -JEMALLOC_ALWAYS_INLINE_C void * -irealloc_prof(void *oldptr, size_t old_usize, size_t usize, prof_thr_cnt_t *cnt) -{ - void *p; - prof_ctx_t *old_ctx; - - old_ctx = prof_ctx_get(oldptr); - if ((uintptr_t)cnt != (uintptr_t)1U) - p = irealloc_prof_sample(oldptr, usize, cnt); - else - p = iralloc(oldptr, usize, 0, 0, false); - if (p == NULL) - return (NULL); - prof_realloc(p, usize, cnt, old_usize, old_ctx); - - return (p); -} - -JEMALLOC_INLINE_C void -ifree(void *ptr) -{ - size_t usize; - UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0); - - assert(ptr != NULL); - assert(malloc_initialized || IS_INITIALIZER); - - if (config_prof && opt_prof) { - usize = isalloc(ptr, config_prof); - prof_free(ptr, usize); - } else if (config_stats || config_valgrind) - usize = isalloc(ptr, config_prof); - if (config_stats) - thread_allocated_tsd_get()->deallocated += usize; - if (config_valgrind && opt_valgrind) - rzsize = p2rz(ptr); - iqalloc(ptr); - JEMALLOC_VALGRIND_FREE(ptr, rzsize); -} - -void * -je_realloc(void *ptr, size_t size) -{ - void *ret; - size_t usize JEMALLOC_CC_SILENCE_INIT(0); - size_t old_usize = 0; - UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); - - if (size == 0) { - if (ptr != NULL) { - /* realloc(ptr, 0) is equivalent to free(ptr). */ - UTRACE(ptr, 0, 0); - ifree(ptr); - return (NULL); - } - size = 1; - } - - if (ptr != NULL) { - assert(malloc_initialized || IS_INITIALIZER); - malloc_thread_init(); - - if ((config_prof && opt_prof) || config_stats || - (config_valgrind && opt_valgrind)) - old_usize = isalloc(ptr, config_prof); - if (config_valgrind && opt_valgrind) - old_rzsize = config_prof ? p2rz(ptr) : u2rz(old_usize); - - if (config_prof && opt_prof) { - prof_thr_cnt_t *cnt; - - usize = s2u(size); - PROF_ALLOC_PREP(1, usize, cnt); - ret = irealloc_prof(ptr, old_usize, usize, cnt); - } else { - if (config_stats || (config_valgrind && opt_valgrind)) - usize = s2u(size); - ret = iralloc(ptr, size, 0, 0, false); - } - } else { - /* realloc(NULL, size) is equivalent to malloc(size). 
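The overflow guard in the deleted je_calloc above avoids a division on the common path: if neither num nor size touches the most significant half of size_t's bits, the product cannot overflow, so the division-based check only runs when at least one operand is large. A minimal standalone sketch of the same test (hypothetical checked_mul helper, not part of the jemalloc sources):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Store num * size in *out and return true unless the product overflows. */
    static bool
    checked_mul(size_t num, size_t size, size_t *out)
    {
        /* Mask selecting the most significant half of size_t's bits. */
        const size_t high_half = SIZE_MAX << (sizeof(size_t) * 4);
        size_t prod = num * size;

        if (((num | size) & high_half) != 0 && size != 0 && prod / size != num)
            return false;   /* size_t overflow */
        *out = prod;
        return true;
    }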
*/ - MALLOC_BODY(ret, size, usize); - } - - if (ret == NULL) { - if (config_xmalloc && opt_xmalloc) { - malloc_write(": Error in realloc(): " - "out of memory\n"); - abort(); - } - set_errno(ENOMEM); - } - if (config_stats && ret != NULL) { - thread_allocated_t *ta; - assert(usize == isalloc(ret, config_prof)); - ta = thread_allocated_tsd_get(); - ta->allocated += usize; - ta->deallocated += old_usize; - } - UTRACE(ptr, size, ret); - JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_usize, old_rzsize, - false); - return (ret); -} - -void -je_free(void *ptr) -{ - - UTRACE(ptr, 0, 0); - if (ptr != NULL) - ifree(ptr); -} - -/* - * End malloc(3)-compatible functions. - */ -/******************************************************************************/ -/* - * Begin non-standard override functions. - */ - -#ifdef JEMALLOC_OVERRIDE_MEMALIGN -void * -je_memalign(size_t alignment, size_t size) -{ - void *ret JEMALLOC_CC_SILENCE_INIT(NULL); - imemalign(&ret, alignment, size, 1); - JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false); - return (ret); -} -#endif - -#ifdef JEMALLOC_OVERRIDE_VALLOC -void * -je_valloc(size_t size) -{ - void *ret JEMALLOC_CC_SILENCE_INIT(NULL); - imemalign(&ret, PAGE, size, 1); - JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false); - return (ret); -} -#endif - -/* - * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has - * #define je_malloc malloc - */ -#define malloc_is_malloc 1 -#define is_malloc_(a) malloc_is_ ## a -#define is_malloc(a) is_malloc_(a) - -#if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__)) -/* - * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible - * to inconsistently reference libc's malloc(3)-compatible functions - * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541). - * - * These definitions interpose hooks in glibc. The functions are actually - * passed an extra argument for the caller return address, which will be - * ignored. - */ -JEMALLOC_EXPORT void (* __free_hook)(void *ptr) = je_free; -JEMALLOC_EXPORT void *(* __malloc_hook)(size_t size) = je_malloc; -JEMALLOC_EXPORT void *(* __realloc_hook)(void *ptr, size_t size) = je_realloc; -JEMALLOC_EXPORT void *(* __memalign_hook)(size_t alignment, size_t size) = - je_memalign; -#endif - -/* - * End non-standard override functions. - */ -/******************************************************************************/ -/* - * Begin non-standard functions. - */ - -JEMALLOC_ALWAYS_INLINE_C void * -imallocx(size_t usize, size_t alignment, bool zero, bool try_tcache, - arena_t *arena) -{ - - assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize, - alignment))); - - if (alignment != 0) - return (ipalloct(usize, alignment, zero, try_tcache, arena)); - else if (zero) - return (icalloct(usize, try_tcache, arena)); - else - return (imalloct(usize, try_tcache, arena)); -} - -static void * -imallocx_prof_sample(size_t usize, size_t alignment, bool zero, bool try_tcache, - arena_t *arena, prof_thr_cnt_t *cnt) -{ - void *p; - - if (cnt == NULL) - return (NULL); - if (prof_promote && usize <= SMALL_MAXCLASS) { - size_t usize_promoted = (alignment == 0) ? 
- s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1, alignment); - assert(usize_promoted != 0); - p = imallocx(usize_promoted, alignment, zero, try_tcache, - arena); - if (p == NULL) - return (NULL); - arena_prof_promoted(p, usize); - } else - p = imallocx(usize, alignment, zero, try_tcache, arena); - - return (p); -} - -JEMALLOC_ALWAYS_INLINE_C void * -imallocx_prof(size_t usize, size_t alignment, bool zero, bool try_tcache, - arena_t *arena, prof_thr_cnt_t *cnt) -{ - void *p; - - if ((uintptr_t)cnt != (uintptr_t)1U) { - p = imallocx_prof_sample(usize, alignment, zero, try_tcache, - arena, cnt); - } else - p = imallocx(usize, alignment, zero, try_tcache, arena); - if (p == NULL) - return (NULL); - prof_malloc(p, usize, cnt); - - return (p); -} - -void * -je_mallocx(size_t size, int flags) -{ - void *p; - size_t usize; - size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK) - & (SIZE_T_MAX-1)); - bool zero = flags & MALLOCX_ZERO; - unsigned arena_ind = ((unsigned)(flags >> 8)) - 1; - arena_t *arena; - bool try_tcache; - - assert(size != 0); - - if (malloc_init()) - goto label_oom; - - if (arena_ind != UINT_MAX) { - arena = arenas[arena_ind]; - try_tcache = false; - } else { - arena = NULL; - try_tcache = true; - } - - usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment); - assert(usize != 0); - - if (config_prof && opt_prof) { - prof_thr_cnt_t *cnt; - - PROF_ALLOC_PREP(1, usize, cnt); - p = imallocx_prof(usize, alignment, zero, try_tcache, arena, - cnt); - } else - p = imallocx(usize, alignment, zero, try_tcache, arena); - if (p == NULL) - goto label_oom; - - if (config_stats) { - assert(usize == isalloc(p, config_prof)); - thread_allocated_tsd_get()->allocated += usize; - } - UTRACE(0, size, p); - JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero); - return (p); -label_oom: - if (config_xmalloc && opt_xmalloc) { - malloc_write(": Error in mallocx(): out of memory\n"); - abort(); - } - UTRACE(0, size, 0); - return (NULL); -} - -static void * -irallocx_prof_sample(void *oldptr, size_t size, size_t alignment, size_t usize, - bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena, - prof_thr_cnt_t *cnt) -{ - void *p; - - if (cnt == NULL) - return (NULL); - if (prof_promote && usize <= SMALL_MAXCLASS) { - p = iralloct(oldptr, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >= - size) ? 0 : size - (SMALL_MAXCLASS+1), alignment, zero, - try_tcache_alloc, try_tcache_dalloc, arena); - if (p == NULL) - return (NULL); - arena_prof_promoted(p, usize); - } else { - p = iralloct(oldptr, size, 0, alignment, zero, - try_tcache_alloc, try_tcache_dalloc, arena); - } - - return (p); -} - -JEMALLOC_ALWAYS_INLINE_C void * -irallocx_prof(void *oldptr, size_t old_usize, size_t size, size_t alignment, - size_t *usize, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, - arena_t *arena, prof_thr_cnt_t *cnt) -{ - void *p; - prof_ctx_t *old_ctx; - - old_ctx = prof_ctx_get(oldptr); - if ((uintptr_t)cnt != (uintptr_t)1U) - p = irallocx_prof_sample(oldptr, size, alignment, *usize, zero, - try_tcache_alloc, try_tcache_dalloc, arena, cnt); - else { - p = iralloct(oldptr, size, 0, alignment, zero, - try_tcache_alloc, try_tcache_dalloc, arena); - } - if (p == NULL) - return (NULL); - - if (p == oldptr && alignment != 0) { - /* - * The allocation did not move, so it is possible that the size - * class is smaller than would guarantee the requested - * alignment, and that the alignment constraint was - * serendipitously satisfied. 
Additionally, old_usize may not - * be the same as the current usize because of in-place large - * reallocation. Therefore, query the actual value of usize. - */ - *usize = isalloc(p, config_prof); - } - prof_realloc(p, *usize, cnt, old_usize, old_ctx); - - return (p); -} - -void * -je_rallocx(void *ptr, size_t size, int flags) -{ - void *p; - size_t usize, old_usize; - UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); - size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK) - & (SIZE_T_MAX-1)); - bool zero = flags & MALLOCX_ZERO; - unsigned arena_ind = ((unsigned)(flags >> 8)) - 1; - bool try_tcache_alloc, try_tcache_dalloc; - arena_t *arena; - - assert(ptr != NULL); - assert(size != 0); - assert(malloc_initialized || IS_INITIALIZER); - malloc_thread_init(); - - if (arena_ind != UINT_MAX) { - arena_chunk_t *chunk; - try_tcache_alloc = false; - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - try_tcache_dalloc = (chunk == ptr || chunk->arena != - arenas[arena_ind]); - arena = arenas[arena_ind]; - } else { - try_tcache_alloc = true; - try_tcache_dalloc = true; - arena = NULL; - } - - if ((config_prof && opt_prof) || config_stats || - (config_valgrind && opt_valgrind)) - old_usize = isalloc(ptr, config_prof); - if (config_valgrind && opt_valgrind) - old_rzsize = u2rz(old_usize); - - if (config_prof && opt_prof) { - prof_thr_cnt_t *cnt; - - usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment); - assert(usize != 0); - PROF_ALLOC_PREP(1, usize, cnt); - p = irallocx_prof(ptr, old_usize, size, alignment, &usize, zero, - try_tcache_alloc, try_tcache_dalloc, arena, cnt); - if (p == NULL) - goto label_oom; - } else { - p = iralloct(ptr, size, 0, alignment, zero, try_tcache_alloc, - try_tcache_dalloc, arena); - if (p == NULL) - goto label_oom; - if (config_stats || (config_valgrind && opt_valgrind)) - usize = isalloc(p, config_prof); - } - - if (config_stats) { - thread_allocated_t *ta; - ta = thread_allocated_tsd_get(); - ta->allocated += usize; - ta->deallocated += old_usize; - } - UTRACE(ptr, size, p); - JEMALLOC_VALGRIND_REALLOC(p, usize, ptr, old_usize, old_rzsize, zero); - return (p); -label_oom: - if (config_xmalloc && opt_xmalloc) { - malloc_write(": Error in rallocx(): out of memory\n"); - abort(); - } - UTRACE(ptr, size, 0); - return (NULL); -} - -JEMALLOC_ALWAYS_INLINE_C size_t -ixallocx_helper(void *ptr, size_t old_usize, size_t size, size_t extra, - size_t alignment, bool zero, arena_t *arena) -{ - size_t usize; - - if (ixalloc(ptr, size, extra, alignment, zero)) - return (old_usize); - usize = isalloc(ptr, config_prof); - - return (usize); -} - -static size_t -ixallocx_prof_sample(void *ptr, size_t old_usize, size_t size, size_t extra, - size_t alignment, size_t max_usize, bool zero, arena_t *arena, - prof_thr_cnt_t *cnt) -{ - size_t usize; - - if (cnt == NULL) - return (old_usize); - /* Use minimum usize to determine whether promotion may happen. */ - if (prof_promote && ((alignment == 0) ? s2u(size) : sa2u(size, - alignment)) <= SMALL_MAXCLASS) { - if (ixalloc(ptr, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >= - size+extra) ? 
0 : size+extra - (SMALL_MAXCLASS+1), - alignment, zero)) - return (old_usize); - usize = isalloc(ptr, config_prof); - if (max_usize < PAGE) - arena_prof_promoted(ptr, usize); - } else { - usize = ixallocx_helper(ptr, old_usize, size, extra, alignment, - zero, arena); - } - - return (usize); -} - -JEMALLOC_ALWAYS_INLINE_C size_t -ixallocx_prof(void *ptr, size_t old_usize, size_t size, size_t extra, - size_t alignment, size_t max_usize, bool zero, arena_t *arena, - prof_thr_cnt_t *cnt) -{ - size_t usize; - prof_ctx_t *old_ctx; - - old_ctx = prof_ctx_get(ptr); - if ((uintptr_t)cnt != (uintptr_t)1U) { - usize = ixallocx_prof_sample(ptr, old_usize, size, extra, - alignment, zero, max_usize, arena, cnt); - } else { - usize = ixallocx_helper(ptr, old_usize, size, extra, alignment, - zero, arena); - } - if (usize == old_usize) - return (usize); - prof_realloc(ptr, usize, cnt, old_usize, old_ctx); - - return (usize); -} - -size_t -je_xallocx(void *ptr, size_t size, size_t extra, int flags) -{ - size_t usize, old_usize; - UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); - size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK) - & (SIZE_T_MAX-1)); - bool zero = flags & MALLOCX_ZERO; - unsigned arena_ind = ((unsigned)(flags >> 8)) - 1; - arena_t *arena; - - assert(ptr != NULL); - assert(size != 0); - assert(SIZE_T_MAX - size >= extra); - assert(malloc_initialized || IS_INITIALIZER); - malloc_thread_init(); - - if (arena_ind != UINT_MAX) - arena = arenas[arena_ind]; - else - arena = NULL; - - old_usize = isalloc(ptr, config_prof); - if (config_valgrind && opt_valgrind) - old_rzsize = u2rz(old_usize); - - if (config_prof && opt_prof) { - prof_thr_cnt_t *cnt; - /* - * usize isn't knowable before ixalloc() returns when extra is - * non-zero. Therefore, compute its maximum possible value and - * use that in PROF_ALLOC_PREP() to decide whether to capture a - * backtrace. prof_realloc() will use the actual usize to - * decide whether to sample. - */ - size_t max_usize = (alignment == 0) ? 
s2u(size+extra) : - sa2u(size+extra, alignment); - PROF_ALLOC_PREP(1, max_usize, cnt); - usize = ixallocx_prof(ptr, old_usize, size, extra, alignment, - max_usize, zero, arena, cnt); - } else { - usize = ixallocx_helper(ptr, old_usize, size, extra, alignment, - zero, arena); - } - if (usize == old_usize) - goto label_not_resized; - - if (config_stats) { - thread_allocated_t *ta; - ta = thread_allocated_tsd_get(); - ta->allocated += usize; - ta->deallocated += old_usize; - } - JEMALLOC_VALGRIND_REALLOC(ptr, usize, ptr, old_usize, old_rzsize, zero); -label_not_resized: - UTRACE(ptr, size, ptr); - return (usize); -} - -size_t -je_sallocx(const void *ptr, int flags) -{ - size_t usize; - - assert(malloc_initialized || IS_INITIALIZER); - malloc_thread_init(); - - if (config_ivsalloc) - usize = ivsalloc(ptr, config_prof); - else { - assert(ptr != NULL); - usize = isalloc(ptr, config_prof); - } - - return (usize); -} - -void -je_dallocx(void *ptr, int flags) -{ - size_t usize; - UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0); - unsigned arena_ind = ((unsigned)(flags >> 8)) - 1; - bool try_tcache; - - assert(ptr != NULL); - assert(malloc_initialized || IS_INITIALIZER); - - if (arena_ind != UINT_MAX) { - arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - try_tcache = (chunk == ptr || chunk->arena != - arenas[arena_ind]); - } else - try_tcache = true; - - UTRACE(ptr, 0, 0); - if (config_stats || config_valgrind) - usize = isalloc(ptr, config_prof); - if (config_prof && opt_prof) { - if (config_stats == false && config_valgrind == false) - usize = isalloc(ptr, config_prof); - prof_free(ptr, usize); - } - if (config_stats) - thread_allocated_tsd_get()->deallocated += usize; - if (config_valgrind && opt_valgrind) - rzsize = p2rz(ptr); - iqalloct(ptr, try_tcache); - JEMALLOC_VALGRIND_FREE(ptr, rzsize); -} - -size_t -je_nallocx(size_t size, int flags) -{ - size_t usize; - size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK) - & (SIZE_T_MAX-1)); - - assert(size != 0); - - if (malloc_init()) - return (0); - - usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment); - assert(usize != 0); - return (usize); -} - -int -je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, - size_t newlen) -{ - - if (malloc_init()) - return (EAGAIN); - - return (ctl_byname(name, oldp, oldlenp, newp, newlen)); -} - -int -je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) -{ - - if (malloc_init()) - return (EAGAIN); - - return (ctl_nametomib(name, mibp, miblenp)); -} - -int -je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ - - if (malloc_init()) - return (EAGAIN); - - return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen)); -} - -void -je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque, - const char *opts) -{ - - stats_print(write_cb, cbopaque, opts); -} - -size_t -je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) -{ - size_t ret; - - assert(malloc_initialized || IS_INITIALIZER); - malloc_thread_init(); - - if (config_ivsalloc) - ret = ivsalloc(ptr, config_prof); - else - ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0; - - return (ret); -} - -/* - * End non-standard functions. - */ -/******************************************************************************/ -/* - * Begin experimental functions. 
- */ -#ifdef JEMALLOC_EXPERIMENTAL - -int -je_allocm(void **ptr, size_t *rsize, size_t size, int flags) -{ - void *p; - - assert(ptr != NULL); - - p = je_mallocx(size, flags); - if (p == NULL) - return (ALLOCM_ERR_OOM); - if (rsize != NULL) - *rsize = isalloc(p, config_prof); - *ptr = p; - return (ALLOCM_SUCCESS); -} - -int -je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags) -{ - int ret; - bool no_move = flags & ALLOCM_NO_MOVE; - - assert(ptr != NULL); - assert(*ptr != NULL); - assert(size != 0); - assert(SIZE_T_MAX - size >= extra); - - if (no_move) { - size_t usize = je_xallocx(*ptr, size, extra, flags); - ret = (usize >= size) ? ALLOCM_SUCCESS : ALLOCM_ERR_NOT_MOVED; - if (rsize != NULL) - *rsize = usize; - } else { - void *p = je_rallocx(*ptr, size+extra, flags); - if (p != NULL) { - *ptr = p; - ret = ALLOCM_SUCCESS; - } else - ret = ALLOCM_ERR_OOM; - if (rsize != NULL) - *rsize = isalloc(*ptr, config_prof); - } - return (ret); -} - -int -je_sallocm(const void *ptr, size_t *rsize, int flags) -{ - - assert(rsize != NULL); - *rsize = je_sallocx(ptr, flags); - return (ALLOCM_SUCCESS); -} - -int -je_dallocm(void *ptr, int flags) -{ - - je_dallocx(ptr, flags); - return (ALLOCM_SUCCESS); -} - -int -je_nallocm(size_t *rsize, size_t size, int flags) -{ - size_t usize; - - usize = je_nallocx(size, flags); - if (usize == 0) - return (ALLOCM_ERR_OOM); - if (rsize != NULL) - *rsize = usize; - return (ALLOCM_SUCCESS); -} - -#endif -/* - * End experimental functions. - */ -/******************************************************************************/ -/* - * The following functions are used by threading libraries for protection of - * malloc during fork(). - */ - -/* - * If an application creates a thread before doing any allocation in the main - * thread, then calls fork(2) in the main thread followed by memory allocation - * in the child process, a race can occur that results in deadlock within the - * child: the main thread may have forked while the created thread had - * partially initialized the allocator. Ordinarily jemalloc prevents - * fork/malloc races via the following functions it registers during - * initialization using pthread_atfork(), but of course that does no good if - * the allocator isn't fully initialized at fork time. The following library - * constructor is a partial solution to this problem. It may still possible to - * trigger the deadlock described above, but doing so would involve forking via - * a library constructor that runs before jemalloc's runs. - */ -JEMALLOC_ATTR(constructor) -static void -jemalloc_constructor(void) -{ - - malloc_init(); -} - -#ifndef JEMALLOC_MUTEX_INIT_CB -void -jemalloc_prefork(void) -#else -JEMALLOC_EXPORT void -_malloc_prefork(void) -#endif -{ - unsigned i; - -#ifdef JEMALLOC_MUTEX_INIT_CB - if (malloc_initialized == false) - return; -#endif - assert(malloc_initialized); - - /* Acquire all mutexes in a safe order. */ - ctl_prefork(); - prof_prefork(); - malloc_mutex_prefork(&arenas_lock); - for (i = 0; i < narenas_total; i++) { - if (arenas[i] != NULL) - arena_prefork(arenas[i]); - } - chunk_prefork(); - base_prefork(); - huge_prefork(); -} - -#ifndef JEMALLOC_MUTEX_INIT_CB -void -jemalloc_postfork_parent(void) -#else -JEMALLOC_EXPORT void -_malloc_postfork(void) -#endif -{ - unsigned i; - -#ifdef JEMALLOC_MUTEX_INIT_CB - if (malloc_initialized == false) - return; -#endif - assert(malloc_initialized); - - /* Release all mutexes, now that fork() has completed. 
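The prefork/postfork handlers above are registered with pthread_atfork() during jemalloc's initialization, as the long comment notes, so every allocator mutex is acquired before fork() and released or reinitialized afterwards in parent and child. A single-mutex sketch of that registration pattern, illustrative only and with hypothetical names:

    #include <pthread.h>

    static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

    static void prefork(void)         { pthread_mutex_lock(&big_lock); }
    static void postfork_parent(void) { pthread_mutex_unlock(&big_lock); }
    static void
    postfork_child(void)
    {
        /* A real allocator may reinitialize the mutex here instead, as the
         * deleted malloc_mutex_postfork_child() does on some platforms. */
        pthread_mutex_unlock(&big_lock);
    }

    /* Call once from the library's init path. */
    static void
    install_fork_handlers(void)
    {
        pthread_atfork(prefork, postfork_parent, postfork_child);
    }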
*/ - huge_postfork_parent(); - base_postfork_parent(); - chunk_postfork_parent(); - for (i = 0; i < narenas_total; i++) { - if (arenas[i] != NULL) - arena_postfork_parent(arenas[i]); - } - malloc_mutex_postfork_parent(&arenas_lock); - prof_postfork_parent(); - ctl_postfork_parent(); -} - -void -jemalloc_postfork_child(void) -{ - unsigned i; - - assert(malloc_initialized); - - /* Release all mutexes, now that fork() has completed. */ - huge_postfork_child(); - base_postfork_child(); - chunk_postfork_child(); - for (i = 0; i < narenas_total; i++) { - if (arenas[i] != NULL) - arena_postfork_child(arenas[i]); - } - malloc_mutex_postfork_child(&arenas_lock); - prof_postfork_child(); - ctl_postfork_child(); -} - -/******************************************************************************/ -/* - * The following functions are used for TLS allocation/deallocation in static - * binaries on FreeBSD. The primary difference between these and i[mcd]alloc() - * is that these avoid accessing TLS variables. - */ - -static void * -a0alloc(size_t size, bool zero) -{ - - if (malloc_init()) - return (NULL); - - if (size == 0) - size = 1; - - if (size <= arena_maxclass) - return (arena_malloc(arenas[0], size, zero, false)); - else - return (huge_malloc(size, zero, huge_dss_prec_get(arenas[0]))); -} - -void * -a0malloc(size_t size) -{ - - return (a0alloc(size, false)); -} - -void * -a0calloc(size_t num, size_t size) -{ - - return (a0alloc(num * size, true)); -} - -void -a0free(void *ptr) -{ - arena_chunk_t *chunk; - - if (ptr == NULL) - return; - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - if (chunk != ptr) - arena_dalloc(chunk->arena, chunk, ptr, false); - else - huge_dalloc(ptr, true); -} - -/******************************************************************************/ diff --git a/lib/jemalloc-3.6.0/src/mb.c b/lib/jemalloc-3.6.0/src/mb.c deleted file mode 100644 index dc2c0a2..0000000 --- a/lib/jemalloc-3.6.0/src/mb.c +++ /dev/null @@ -1,2 +0,0 @@ -#define JEMALLOC_MB_C_ -#include "jemalloc/internal/jemalloc_internal.h" diff --git a/lib/jemalloc-3.6.0/src/mutex.c b/lib/jemalloc-3.6.0/src/mutex.c deleted file mode 100644 index 788eca3..0000000 --- a/lib/jemalloc-3.6.0/src/mutex.c +++ /dev/null @@ -1,149 +0,0 @@ -#define JEMALLOC_MUTEX_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32) -#include -#endif - -#ifndef _CRT_SPINCOUNT -#define _CRT_SPINCOUNT 4000 -#endif - -/******************************************************************************/ -/* Data. */ - -#ifdef JEMALLOC_LAZY_LOCK -bool isthreaded = false; -#endif -#ifdef JEMALLOC_MUTEX_INIT_CB -static bool postpone_init = true; -static malloc_mutex_t *postponed_mutexes = NULL; -#endif - -#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32) -static void pthread_create_once(void); -#endif - -/******************************************************************************/ -/* - * We intercept pthread_create() calls in order to toggle isthreaded if the - * process goes multi-threaded. 
- */ - -#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32) -static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *, - void *(*)(void *), void *__restrict); - -static void -pthread_create_once(void) -{ - - pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create"); - if (pthread_create_fptr == NULL) { - malloc_write(": Error in dlsym(RTLD_NEXT, " - "\"pthread_create\")\n"); - abort(); - } - - isthreaded = true; -} - -JEMALLOC_EXPORT int -pthread_create(pthread_t *__restrict thread, - const pthread_attr_t *__restrict attr, void *(*start_routine)(void *), - void *__restrict arg) -{ - static pthread_once_t once_control = PTHREAD_ONCE_INIT; - - pthread_once(&once_control, pthread_create_once); - - return (pthread_create_fptr(thread, attr, start_routine, arg)); -} -#endif - -/******************************************************************************/ - -#ifdef JEMALLOC_MUTEX_INIT_CB -JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex, - void *(calloc_cb)(size_t, size_t)); -#endif - -bool -malloc_mutex_init(malloc_mutex_t *mutex) -{ - -#ifdef _WIN32 - if (!InitializeCriticalSectionAndSpinCount(&mutex->lock, - _CRT_SPINCOUNT)) - return (true); -#elif (defined(JEMALLOC_OSSPIN)) - mutex->lock = 0; -#elif (defined(JEMALLOC_MUTEX_INIT_CB)) - if (postpone_init) { - mutex->postponed_next = postponed_mutexes; - postponed_mutexes = mutex; - } else { - if (_pthread_mutex_init_calloc_cb(&mutex->lock, base_calloc) != - 0) - return (true); - } -#else - pthread_mutexattr_t attr; - - if (pthread_mutexattr_init(&attr) != 0) - return (true); - pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE); - if (pthread_mutex_init(&mutex->lock, &attr) != 0) { - pthread_mutexattr_destroy(&attr); - return (true); - } - pthread_mutexattr_destroy(&attr); -#endif - return (false); -} - -void -malloc_mutex_prefork(malloc_mutex_t *mutex) -{ - - malloc_mutex_lock(mutex); -} - -void -malloc_mutex_postfork_parent(malloc_mutex_t *mutex) -{ - - malloc_mutex_unlock(mutex); -} - -void -malloc_mutex_postfork_child(malloc_mutex_t *mutex) -{ - -#ifdef JEMALLOC_MUTEX_INIT_CB - malloc_mutex_unlock(mutex); -#else - if (malloc_mutex_init(mutex)) { - malloc_printf(": Error re-initializing mutex in " - "child\n"); - if (opt_abort) - abort(); - } -#endif -} - -bool -mutex_boot(void) -{ - -#ifdef JEMALLOC_MUTEX_INIT_CB - postpone_init = false; - while (postponed_mutexes != NULL) { - if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock, - base_calloc) != 0) - return (true); - postponed_mutexes = postponed_mutexes->postponed_next; - } -#endif - return (false); -} diff --git a/lib/jemalloc-3.6.0/src/prof.c b/lib/jemalloc-3.6.0/src/prof.c deleted file mode 100644 index 7722b7b..0000000 --- a/lib/jemalloc-3.6.0/src/prof.c +++ /dev/null @@ -1,1420 +0,0 @@ -#define JEMALLOC_PROF_C_ -#include "jemalloc/internal/jemalloc_internal.h" -/******************************************************************************/ - -#ifdef JEMALLOC_PROF_LIBUNWIND -#define UNW_LOCAL_ONLY -#include -#endif - -#ifdef JEMALLOC_PROF_LIBGCC -#include -#endif - -/******************************************************************************/ -/* Data. 
*/ - -malloc_tsd_data(, prof_tdata, prof_tdata_t *, NULL) - -bool opt_prof = false; -bool opt_prof_active = true; -size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT; -ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT; -bool opt_prof_gdump = false; -bool opt_prof_final = true; -bool opt_prof_leak = false; -bool opt_prof_accum = false; -char opt_prof_prefix[ - /* Minimize memory bloat for non-prof builds. */ -#ifdef JEMALLOC_PROF - PATH_MAX + -#endif - 1]; - -uint64_t prof_interval = 0; -bool prof_promote; - -/* - * Table of mutexes that are shared among ctx's. These are leaf locks, so - * there is no problem with using them for more than one ctx at the same time. - * The primary motivation for this sharing though is that ctx's are ephemeral, - * and destroying mutexes causes complications for systems that allocate when - * creating/destroying mutexes. - */ -static malloc_mutex_t *ctx_locks; -static unsigned cum_ctxs; /* Atomic counter. */ - -/* - * Global hash of (prof_bt_t *)-->(prof_ctx_t *). This is the master data - * structure that knows about all backtraces currently captured. - */ -static ckh_t bt2ctx; -static malloc_mutex_t bt2ctx_mtx; - -static malloc_mutex_t prof_dump_seq_mtx; -static uint64_t prof_dump_seq; -static uint64_t prof_dump_iseq; -static uint64_t prof_dump_mseq; -static uint64_t prof_dump_useq; - -/* - * This buffer is rather large for stack allocation, so use a single buffer for - * all profile dumps. - */ -static malloc_mutex_t prof_dump_mtx; -static char prof_dump_buf[ - /* Minimize memory bloat for non-prof builds. */ -#ifdef JEMALLOC_PROF - PROF_DUMP_BUFSIZE -#else - 1 -#endif -]; -static unsigned prof_dump_buf_end; -static int prof_dump_fd; - -/* Do not dump any profiles until bootstrapping is complete. */ -static bool prof_booted = false; - -/******************************************************************************/ - -void -bt_init(prof_bt_t *bt, void **vec) -{ - - cassert(config_prof); - - bt->vec = vec; - bt->len = 0; -} - -static void -bt_destroy(prof_bt_t *bt) -{ - - cassert(config_prof); - - idalloc(bt); -} - -static prof_bt_t * -bt_dup(prof_bt_t *bt) -{ - prof_bt_t *ret; - - cassert(config_prof); - - /* - * Create a single allocation that has space for vec immediately - * following the prof_bt_t structure. The backtraces that get - * stored in the backtrace caches are copied from stack-allocated - * temporary variables, so size is known at creation time. Making this - * a contiguous object improves cache locality. 
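The bt_dup comment above describes packing the prof_bt_t header and its vec array into a single allocation, with vec pointing just past the quantum-rounded header, so a cached backtrace is one contiguous object. A standalone sketch of that layout using plain malloc and hypothetical names, without jemalloc's quantum rounding:

    #include <stdlib.h>
    #include <string.h>

    typedef struct {
        void   **vec;   /* points into the tail of this same allocation */
        unsigned len;
    } bt_copy_t;

    static bt_copy_t *
    bt_copy(void *const *frames, unsigned len)
    {
        /* One allocation: header immediately followed by the frame array. */
        bt_copy_t *ret = malloc(sizeof(*ret) + len * sizeof(void *));
        if (ret == NULL)
            return NULL;
        ret->vec = (void **)(ret + 1);
        memcpy(ret->vec, frames, len * sizeof(void *));
        ret->len = len;
        return ret;
    }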
- */ - ret = (prof_bt_t *)imalloc(QUANTUM_CEILING(sizeof(prof_bt_t)) + - (bt->len * sizeof(void *))); - if (ret == NULL) - return (NULL); - ret->vec = (void **)((uintptr_t)ret + - QUANTUM_CEILING(sizeof(prof_bt_t))); - memcpy(ret->vec, bt->vec, bt->len * sizeof(void *)); - ret->len = bt->len; - - return (ret); -} - -static inline void -prof_enter(prof_tdata_t *prof_tdata) -{ - - cassert(config_prof); - - assert(prof_tdata->enq == false); - prof_tdata->enq = true; - - malloc_mutex_lock(&bt2ctx_mtx); -} - -static inline void -prof_leave(prof_tdata_t *prof_tdata) -{ - bool idump, gdump; - - cassert(config_prof); - - malloc_mutex_unlock(&bt2ctx_mtx); - - assert(prof_tdata->enq); - prof_tdata->enq = false; - idump = prof_tdata->enq_idump; - prof_tdata->enq_idump = false; - gdump = prof_tdata->enq_gdump; - prof_tdata->enq_gdump = false; - - if (idump) - prof_idump(); - if (gdump) - prof_gdump(); -} - -#ifdef JEMALLOC_PROF_LIBUNWIND -void -prof_backtrace(prof_bt_t *bt, unsigned nignore) -{ - unw_context_t uc; - unw_cursor_t cursor; - unsigned i; - int err; - - cassert(config_prof); - assert(bt->len == 0); - assert(bt->vec != NULL); - - unw_getcontext(&uc); - unw_init_local(&cursor, &uc); - - /* Throw away (nignore+1) stack frames, if that many exist. */ - for (i = 0; i < nignore + 1; i++) { - err = unw_step(&cursor); - if (err <= 0) - return; - } - - /* - * Iterate over stack frames until there are no more, or until no space - * remains in bt. - */ - for (i = 0; i < PROF_BT_MAX; i++) { - unw_get_reg(&cursor, UNW_REG_IP, (unw_word_t *)&bt->vec[i]); - bt->len++; - err = unw_step(&cursor); - if (err <= 0) - break; - } -} -#elif (defined(JEMALLOC_PROF_LIBGCC)) -static _Unwind_Reason_Code -prof_unwind_init_callback(struct _Unwind_Context *context, void *arg) -{ - - cassert(config_prof); - - return (_URC_NO_REASON); -} - -static _Unwind_Reason_Code -prof_unwind_callback(struct _Unwind_Context *context, void *arg) -{ - prof_unwind_data_t *data = (prof_unwind_data_t *)arg; - - cassert(config_prof); - - if (data->nignore > 0) - data->nignore--; - else { - data->bt->vec[data->bt->len] = (void *)_Unwind_GetIP(context); - data->bt->len++; - if (data->bt->len == data->max) - return (_URC_END_OF_STACK); - } - - return (_URC_NO_REASON); -} - -void -prof_backtrace(prof_bt_t *bt, unsigned nignore) -{ - prof_unwind_data_t data = {bt, nignore, PROF_BT_MAX}; - - cassert(config_prof); - - _Unwind_Backtrace(prof_unwind_callback, &data); -} -#elif (defined(JEMALLOC_PROF_GCC)) -void -prof_backtrace(prof_bt_t *bt, unsigned nignore) -{ -#define BT_FRAME(i) \ - if ((i) < nignore + PROF_BT_MAX) { \ - void *p; \ - if (__builtin_frame_address(i) == 0) \ - return; \ - p = __builtin_return_address(i); \ - if (p == NULL) \ - return; \ - if (i >= nignore) { \ - bt->vec[(i) - nignore] = p; \ - bt->len = (i) - nignore + 1; \ - } \ - } else \ - return; - - cassert(config_prof); - assert(nignore <= 3); - - BT_FRAME(0) - BT_FRAME(1) - BT_FRAME(2) - BT_FRAME(3) - BT_FRAME(4) - BT_FRAME(5) - BT_FRAME(6) - BT_FRAME(7) - BT_FRAME(8) - BT_FRAME(9) - - BT_FRAME(10) - BT_FRAME(11) - BT_FRAME(12) - BT_FRAME(13) - BT_FRAME(14) - BT_FRAME(15) - BT_FRAME(16) - BT_FRAME(17) - BT_FRAME(18) - BT_FRAME(19) - - BT_FRAME(20) - BT_FRAME(21) - BT_FRAME(22) - BT_FRAME(23) - BT_FRAME(24) - BT_FRAME(25) - BT_FRAME(26) - BT_FRAME(27) - BT_FRAME(28) - BT_FRAME(29) - - BT_FRAME(30) - BT_FRAME(31) - BT_FRAME(32) - BT_FRAME(33) - BT_FRAME(34) - BT_FRAME(35) - BT_FRAME(36) - BT_FRAME(37) - BT_FRAME(38) - BT_FRAME(39) - - BT_FRAME(40) - BT_FRAME(41) - 
BT_FRAME(42) - BT_FRAME(43) - BT_FRAME(44) - BT_FRAME(45) - BT_FRAME(46) - BT_FRAME(47) - BT_FRAME(48) - BT_FRAME(49) - - BT_FRAME(50) - BT_FRAME(51) - BT_FRAME(52) - BT_FRAME(53) - BT_FRAME(54) - BT_FRAME(55) - BT_FRAME(56) - BT_FRAME(57) - BT_FRAME(58) - BT_FRAME(59) - - BT_FRAME(60) - BT_FRAME(61) - BT_FRAME(62) - BT_FRAME(63) - BT_FRAME(64) - BT_FRAME(65) - BT_FRAME(66) - BT_FRAME(67) - BT_FRAME(68) - BT_FRAME(69) - - BT_FRAME(70) - BT_FRAME(71) - BT_FRAME(72) - BT_FRAME(73) - BT_FRAME(74) - BT_FRAME(75) - BT_FRAME(76) - BT_FRAME(77) - BT_FRAME(78) - BT_FRAME(79) - - BT_FRAME(80) - BT_FRAME(81) - BT_FRAME(82) - BT_FRAME(83) - BT_FRAME(84) - BT_FRAME(85) - BT_FRAME(86) - BT_FRAME(87) - BT_FRAME(88) - BT_FRAME(89) - - BT_FRAME(90) - BT_FRAME(91) - BT_FRAME(92) - BT_FRAME(93) - BT_FRAME(94) - BT_FRAME(95) - BT_FRAME(96) - BT_FRAME(97) - BT_FRAME(98) - BT_FRAME(99) - - BT_FRAME(100) - BT_FRAME(101) - BT_FRAME(102) - BT_FRAME(103) - BT_FRAME(104) - BT_FRAME(105) - BT_FRAME(106) - BT_FRAME(107) - BT_FRAME(108) - BT_FRAME(109) - - BT_FRAME(110) - BT_FRAME(111) - BT_FRAME(112) - BT_FRAME(113) - BT_FRAME(114) - BT_FRAME(115) - BT_FRAME(116) - BT_FRAME(117) - BT_FRAME(118) - BT_FRAME(119) - - BT_FRAME(120) - BT_FRAME(121) - BT_FRAME(122) - BT_FRAME(123) - BT_FRAME(124) - BT_FRAME(125) - BT_FRAME(126) - BT_FRAME(127) - - /* Extras to compensate for nignore. */ - BT_FRAME(128) - BT_FRAME(129) - BT_FRAME(130) -#undef BT_FRAME -} -#else -void -prof_backtrace(prof_bt_t *bt, unsigned nignore) -{ - - cassert(config_prof); - not_reached(); -} -#endif - -static malloc_mutex_t * -prof_ctx_mutex_choose(void) -{ - unsigned nctxs = atomic_add_u(&cum_ctxs, 1); - - return (&ctx_locks[(nctxs - 1) % PROF_NCTX_LOCKS]); -} - -static void -prof_ctx_init(prof_ctx_t *ctx, prof_bt_t *bt) -{ - - ctx->bt = bt; - ctx->lock = prof_ctx_mutex_choose(); - /* - * Set nlimbo to 1, in order to avoid a race condition with - * prof_ctx_merge()/prof_ctx_destroy(). - */ - ctx->nlimbo = 1; - ql_elm_new(ctx, dump_link); - memset(&ctx->cnt_merged, 0, sizeof(prof_cnt_t)); - ql_new(&ctx->cnts_ql); -} - -static void -prof_ctx_destroy(prof_ctx_t *ctx) -{ - prof_tdata_t *prof_tdata; - - cassert(config_prof); - - /* - * Check that ctx is still unused by any thread cache before destroying - * it. prof_lookup() increments ctx->nlimbo in order to avoid a race - * condition with this function, as does prof_ctx_merge() in order to - * avoid a race between the main body of prof_ctx_merge() and entry - * into this function. - */ - prof_tdata = prof_tdata_get(false); - assert((uintptr_t)prof_tdata > (uintptr_t)PROF_TDATA_STATE_MAX); - prof_enter(prof_tdata); - malloc_mutex_lock(ctx->lock); - if (ql_first(&ctx->cnts_ql) == NULL && ctx->cnt_merged.curobjs == 0 && - ctx->nlimbo == 1) { - assert(ctx->cnt_merged.curbytes == 0); - assert(ctx->cnt_merged.accumobjs == 0); - assert(ctx->cnt_merged.accumbytes == 0); - /* Remove ctx from bt2ctx. */ - if (ckh_remove(&bt2ctx, ctx->bt, NULL, NULL)) - not_reached(); - prof_leave(prof_tdata); - /* Destroy ctx. */ - malloc_mutex_unlock(ctx->lock); - bt_destroy(ctx->bt); - idalloc(ctx); - } else { - /* - * Compensate for increment in prof_ctx_merge() or - * prof_lookup(). - */ - ctx->nlimbo--; - malloc_mutex_unlock(ctx->lock); - prof_leave(prof_tdata); - } -} - -static void -prof_ctx_merge(prof_ctx_t *ctx, prof_thr_cnt_t *cnt) -{ - bool destroy; - - cassert(config_prof); - - /* Merge cnt stats and detach from ctx. 
*/ - malloc_mutex_lock(ctx->lock); - ctx->cnt_merged.curobjs += cnt->cnts.curobjs; - ctx->cnt_merged.curbytes += cnt->cnts.curbytes; - ctx->cnt_merged.accumobjs += cnt->cnts.accumobjs; - ctx->cnt_merged.accumbytes += cnt->cnts.accumbytes; - ql_remove(&ctx->cnts_ql, cnt, cnts_link); - if (opt_prof_accum == false && ql_first(&ctx->cnts_ql) == NULL && - ctx->cnt_merged.curobjs == 0 && ctx->nlimbo == 0) { - /* - * Increment ctx->nlimbo in order to keep another thread from - * winning the race to destroy ctx while this one has ctx->lock - * dropped. Without this, it would be possible for another - * thread to: - * - * 1) Sample an allocation associated with ctx. - * 2) Deallocate the sampled object. - * 3) Successfully prof_ctx_destroy(ctx). - * - * The result would be that ctx no longer exists by the time - * this thread accesses it in prof_ctx_destroy(). - */ - ctx->nlimbo++; - destroy = true; - } else - destroy = false; - malloc_mutex_unlock(ctx->lock); - if (destroy) - prof_ctx_destroy(ctx); -} - -static bool -prof_lookup_global(prof_bt_t *bt, prof_tdata_t *prof_tdata, void **p_btkey, - prof_ctx_t **p_ctx, bool *p_new_ctx) -{ - union { - prof_ctx_t *p; - void *v; - } ctx; - union { - prof_bt_t *p; - void *v; - } btkey; - bool new_ctx; - - prof_enter(prof_tdata); - if (ckh_search(&bt2ctx, bt, &btkey.v, &ctx.v)) { - /* bt has never been seen before. Insert it. */ - ctx.v = imalloc(sizeof(prof_ctx_t)); - if (ctx.v == NULL) { - prof_leave(prof_tdata); - return (true); - } - btkey.p = bt_dup(bt); - if (btkey.v == NULL) { - prof_leave(prof_tdata); - idalloc(ctx.v); - return (true); - } - prof_ctx_init(ctx.p, btkey.p); - if (ckh_insert(&bt2ctx, btkey.v, ctx.v)) { - /* OOM. */ - prof_leave(prof_tdata); - idalloc(btkey.v); - idalloc(ctx.v); - return (true); - } - new_ctx = true; - } else { - /* - * Increment nlimbo, in order to avoid a race condition with - * prof_ctx_merge()/prof_ctx_destroy(). - */ - malloc_mutex_lock(ctx.p->lock); - ctx.p->nlimbo++; - malloc_mutex_unlock(ctx.p->lock); - new_ctx = false; - } - prof_leave(prof_tdata); - - *p_btkey = btkey.v; - *p_ctx = ctx.p; - *p_new_ctx = new_ctx; - return (false); -} - -prof_thr_cnt_t * -prof_lookup(prof_bt_t *bt) -{ - union { - prof_thr_cnt_t *p; - void *v; - } ret; - prof_tdata_t *prof_tdata; - - cassert(config_prof); - - prof_tdata = prof_tdata_get(false); - if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) - return (NULL); - - if (ckh_search(&prof_tdata->bt2cnt, bt, NULL, &ret.v)) { - void *btkey; - prof_ctx_t *ctx; - bool new_ctx; - - /* - * This thread's cache lacks bt. Look for it in the global - * cache. - */ - if (prof_lookup_global(bt, prof_tdata, &btkey, &ctx, &new_ctx)) - return (NULL); - - /* Link a prof_thd_cnt_t into ctx for this thread. */ - if (ckh_count(&prof_tdata->bt2cnt) == PROF_TCMAX) { - assert(ckh_count(&prof_tdata->bt2cnt) > 0); - /* - * Flush the least recently used cnt in order to keep - * bt2cnt from becoming too large. - */ - ret.p = ql_last(&prof_tdata->lru_ql, lru_link); - assert(ret.v != NULL); - if (ckh_remove(&prof_tdata->bt2cnt, ret.p->ctx->bt, - NULL, NULL)) - not_reached(); - ql_remove(&prof_tdata->lru_ql, ret.p, lru_link); - prof_ctx_merge(ret.p->ctx, ret.p); - /* ret can now be re-used. */ - } else { - assert(ckh_count(&prof_tdata->bt2cnt) < PROF_TCMAX); - /* Allocate and partially initialize a new cnt. 
*/ - ret.v = imalloc(sizeof(prof_thr_cnt_t)); - if (ret.p == NULL) { - if (new_ctx) - prof_ctx_destroy(ctx); - return (NULL); - } - ql_elm_new(ret.p, cnts_link); - ql_elm_new(ret.p, lru_link); - } - /* Finish initializing ret. */ - ret.p->ctx = ctx; - ret.p->epoch = 0; - memset(&ret.p->cnts, 0, sizeof(prof_cnt_t)); - if (ckh_insert(&prof_tdata->bt2cnt, btkey, ret.v)) { - if (new_ctx) - prof_ctx_destroy(ctx); - idalloc(ret.v); - return (NULL); - } - ql_head_insert(&prof_tdata->lru_ql, ret.p, lru_link); - malloc_mutex_lock(ctx->lock); - ql_tail_insert(&ctx->cnts_ql, ret.p, cnts_link); - ctx->nlimbo--; - malloc_mutex_unlock(ctx->lock); - } else { - /* Move ret to the front of the LRU. */ - ql_remove(&prof_tdata->lru_ql, ret.p, lru_link); - ql_head_insert(&prof_tdata->lru_ql, ret.p, lru_link); - } - - return (ret.p); -} - -#ifdef JEMALLOC_JET -size_t -prof_bt_count(void) -{ - size_t bt_count; - prof_tdata_t *prof_tdata; - - prof_tdata = prof_tdata_get(false); - if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) - return (0); - - prof_enter(prof_tdata); - bt_count = ckh_count(&bt2ctx); - prof_leave(prof_tdata); - - return (bt_count); -} -#endif - -#ifdef JEMALLOC_JET -#undef prof_dump_open -#define prof_dump_open JEMALLOC_N(prof_dump_open_impl) -#endif -static int -prof_dump_open(bool propagate_err, const char *filename) -{ - int fd; - - fd = creat(filename, 0644); - if (fd == -1 && propagate_err == false) { - malloc_printf(": creat(\"%s\"), 0644) failed\n", - filename); - if (opt_abort) - abort(); - } - - return (fd); -} -#ifdef JEMALLOC_JET -#undef prof_dump_open -#define prof_dump_open JEMALLOC_N(prof_dump_open) -prof_dump_open_t *prof_dump_open = JEMALLOC_N(prof_dump_open_impl); -#endif - -static bool -prof_dump_flush(bool propagate_err) -{ - bool ret = false; - ssize_t err; - - cassert(config_prof); - - err = write(prof_dump_fd, prof_dump_buf, prof_dump_buf_end); - if (err == -1) { - if (propagate_err == false) { - malloc_write(": write() failed during heap " - "profile flush\n"); - if (opt_abort) - abort(); - } - ret = true; - } - prof_dump_buf_end = 0; - - return (ret); -} - -static bool -prof_dump_close(bool propagate_err) -{ - bool ret; - - assert(prof_dump_fd != -1); - ret = prof_dump_flush(propagate_err); - close(prof_dump_fd); - prof_dump_fd = -1; - - return (ret); -} - -static bool -prof_dump_write(bool propagate_err, const char *s) -{ - unsigned i, slen, n; - - cassert(config_prof); - - i = 0; - slen = strlen(s); - while (i < slen) { - /* Flush the buffer if it is full. */ - if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) - if (prof_dump_flush(propagate_err) && propagate_err) - return (true); - - if (prof_dump_buf_end + slen <= PROF_DUMP_BUFSIZE) { - /* Finish writing. */ - n = slen - i; - } else { - /* Write as much of s as will fit. */ - n = PROF_DUMP_BUFSIZE - prof_dump_buf_end; - } - memcpy(&prof_dump_buf[prof_dump_buf_end], &s[i], n); - prof_dump_buf_end += n; - i += n; - } - - return (false); -} - -JEMALLOC_ATTR(format(printf, 2, 3)) -static bool -prof_dump_printf(bool propagate_err, const char *format, ...) 
-{ - bool ret; - va_list ap; - char buf[PROF_PRINTF_BUFSIZE]; - - va_start(ap, format); - malloc_vsnprintf(buf, sizeof(buf), format, ap); - va_end(ap); - ret = prof_dump_write(propagate_err, buf); - - return (ret); -} - -static void -prof_dump_ctx_prep(prof_ctx_t *ctx, prof_cnt_t *cnt_all, size_t *leak_nctx, - prof_ctx_list_t *ctx_ql) -{ - prof_thr_cnt_t *thr_cnt; - prof_cnt_t tcnt; - - cassert(config_prof); - - malloc_mutex_lock(ctx->lock); - - /* - * Increment nlimbo so that ctx won't go away before dump. - * Additionally, link ctx into the dump list so that it is included in - * prof_dump()'s second pass. - */ - ctx->nlimbo++; - ql_tail_insert(ctx_ql, ctx, dump_link); - - memcpy(&ctx->cnt_summed, &ctx->cnt_merged, sizeof(prof_cnt_t)); - ql_foreach(thr_cnt, &ctx->cnts_ql, cnts_link) { - volatile unsigned *epoch = &thr_cnt->epoch; - - while (true) { - unsigned epoch0 = *epoch; - - /* Make sure epoch is even. */ - if (epoch0 & 1U) - continue; - - memcpy(&tcnt, &thr_cnt->cnts, sizeof(prof_cnt_t)); - - /* Terminate if epoch didn't change while reading. */ - if (*epoch == epoch0) - break; - } - - ctx->cnt_summed.curobjs += tcnt.curobjs; - ctx->cnt_summed.curbytes += tcnt.curbytes; - if (opt_prof_accum) { - ctx->cnt_summed.accumobjs += tcnt.accumobjs; - ctx->cnt_summed.accumbytes += tcnt.accumbytes; - } - } - - if (ctx->cnt_summed.curobjs != 0) - (*leak_nctx)++; - - /* Add to cnt_all. */ - cnt_all->curobjs += ctx->cnt_summed.curobjs; - cnt_all->curbytes += ctx->cnt_summed.curbytes; - if (opt_prof_accum) { - cnt_all->accumobjs += ctx->cnt_summed.accumobjs; - cnt_all->accumbytes += ctx->cnt_summed.accumbytes; - } - - malloc_mutex_unlock(ctx->lock); -} - -static bool -prof_dump_header(bool propagate_err, const prof_cnt_t *cnt_all) -{ - - if (opt_lg_prof_sample == 0) { - if (prof_dump_printf(propagate_err, - "heap profile: %"PRId64": %"PRId64 - " [%"PRIu64": %"PRIu64"] @ heapprofile\n", - cnt_all->curobjs, cnt_all->curbytes, - cnt_all->accumobjs, cnt_all->accumbytes)) - return (true); - } else { - if (prof_dump_printf(propagate_err, - "heap profile: %"PRId64": %"PRId64 - " [%"PRIu64": %"PRIu64"] @ heap_v2/%"PRIu64"\n", - cnt_all->curobjs, cnt_all->curbytes, - cnt_all->accumobjs, cnt_all->accumbytes, - ((uint64_t)1U << opt_lg_prof_sample))) - return (true); - } - - return (false); -} - -static void -prof_dump_ctx_cleanup_locked(prof_ctx_t *ctx, prof_ctx_list_t *ctx_ql) -{ - - ctx->nlimbo--; - ql_remove(ctx_ql, ctx, dump_link); -} - -static void -prof_dump_ctx_cleanup(prof_ctx_t *ctx, prof_ctx_list_t *ctx_ql) -{ - - malloc_mutex_lock(ctx->lock); - prof_dump_ctx_cleanup_locked(ctx, ctx_ql); - malloc_mutex_unlock(ctx->lock); -} - -static bool -prof_dump_ctx(bool propagate_err, prof_ctx_t *ctx, const prof_bt_t *bt, - prof_ctx_list_t *ctx_ql) -{ - bool ret; - unsigned i; - - cassert(config_prof); - - /* - * Current statistics can sum to 0 as a result of unmerged per thread - * statistics. Additionally, interval- and growth-triggered dumps can - * occur between the time a ctx is created and when its statistics are - * filled in. Avoid dumping any ctx that is an artifact of either - * implementation detail. 
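The epoch loop in prof_dump_ctx_prep above is a seqlock-style read: the owning thread is expected to bump epoch to an odd value before updating its counters and back to an even value afterwards (that writer side is not in this hunk), so the dumping thread retries until it copies the counters while the epoch stays even and unchanged. A generic sketch of the pairing, with hypothetical names and without the memory fences a production implementation would add:

    typedef struct {
        volatile unsigned epoch;    /* even = stable, odd = update in progress */
        unsigned long     value;    /* stand-in for the counter payload */
    } snapshot_t;

    /* Writer: runs only on the owning thread. */
    static void
    snapshot_write(snapshot_t *s, unsigned long v)
    {
        s->epoch++;     /* odd: readers will retry */
        s->value = v;
        s->epoch++;     /* even again: snapshot is consistent */
    }

    /* Reader: copy the payload, retrying if a write raced with the copy. */
    static unsigned long
    snapshot_read(const snapshot_t *s)
    {
        unsigned long v;

        for (;;) {
            unsigned e0 = s->epoch;
            if (e0 & 1U)
                continue;           /* writer in progress */
            v = s->value;
            if (s->epoch == e0)
                break;              /* no intervening write */
        }
        return v;
    }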
- */ - malloc_mutex_lock(ctx->lock); - if ((opt_prof_accum == false && ctx->cnt_summed.curobjs == 0) || - (opt_prof_accum && ctx->cnt_summed.accumobjs == 0)) { - assert(ctx->cnt_summed.curobjs == 0); - assert(ctx->cnt_summed.curbytes == 0); - assert(ctx->cnt_summed.accumobjs == 0); - assert(ctx->cnt_summed.accumbytes == 0); - ret = false; - goto label_return; - } - - if (prof_dump_printf(propagate_err, "%"PRId64": %"PRId64 - " [%"PRIu64": %"PRIu64"] @", - ctx->cnt_summed.curobjs, ctx->cnt_summed.curbytes, - ctx->cnt_summed.accumobjs, ctx->cnt_summed.accumbytes)) { - ret = true; - goto label_return; - } - - for (i = 0; i < bt->len; i++) { - if (prof_dump_printf(propagate_err, " %#"PRIxPTR, - (uintptr_t)bt->vec[i])) { - ret = true; - goto label_return; - } - } - - if (prof_dump_write(propagate_err, "\n")) { - ret = true; - goto label_return; - } - - ret = false; -label_return: - prof_dump_ctx_cleanup_locked(ctx, ctx_ql); - malloc_mutex_unlock(ctx->lock); - return (ret); -} - -static bool -prof_dump_maps(bool propagate_err) -{ - bool ret; - int mfd; - char filename[PATH_MAX + 1]; - - cassert(config_prof); -#ifdef __FreeBSD__ - malloc_snprintf(filename, sizeof(filename), "/proc/curproc/map"); -#else - malloc_snprintf(filename, sizeof(filename), "/proc/%d/maps", - (int)getpid()); -#endif - mfd = open(filename, O_RDONLY); - if (mfd != -1) { - ssize_t nread; - - if (prof_dump_write(propagate_err, "\nMAPPED_LIBRARIES:\n") && - propagate_err) { - ret = true; - goto label_return; - } - nread = 0; - do { - prof_dump_buf_end += nread; - if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) { - /* Make space in prof_dump_buf before read(). */ - if (prof_dump_flush(propagate_err) && - propagate_err) { - ret = true; - goto label_return; - } - } - nread = read(mfd, &prof_dump_buf[prof_dump_buf_end], - PROF_DUMP_BUFSIZE - prof_dump_buf_end); - } while (nread > 0); - } else { - ret = true; - goto label_return; - } - - ret = false; -label_return: - if (mfd != -1) - close(mfd); - return (ret); -} - -static void -prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_nctx, - const char *filename) -{ - - if (cnt_all->curbytes != 0) { - malloc_printf(": Leak summary: %"PRId64" byte%s, %" - PRId64" object%s, %zu context%s\n", - cnt_all->curbytes, (cnt_all->curbytes != 1) ? "s" : "", - cnt_all->curobjs, (cnt_all->curobjs != 1) ? "s" : "", - leak_nctx, (leak_nctx != 1) ? "s" : ""); - malloc_printf( - ": Run pprof on \"%s\" for leak detail\n", - filename); - } -} - -static bool -prof_dump(bool propagate_err, const char *filename, bool leakcheck) -{ - prof_tdata_t *prof_tdata; - prof_cnt_t cnt_all; - size_t tabind; - union { - prof_ctx_t *p; - void *v; - } ctx; - size_t leak_nctx; - prof_ctx_list_t ctx_ql; - - cassert(config_prof); - - prof_tdata = prof_tdata_get(false); - if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) - return (true); - - malloc_mutex_lock(&prof_dump_mtx); - - /* Merge per thread profile stats, and sum them in cnt_all. */ - memset(&cnt_all, 0, sizeof(prof_cnt_t)); - leak_nctx = 0; - ql_new(&ctx_ql); - prof_enter(prof_tdata); - for (tabind = 0; ckh_iter(&bt2ctx, &tabind, NULL, &ctx.v) == false;) - prof_dump_ctx_prep(ctx.p, &cnt_all, &leak_nctx, &ctx_ql); - prof_leave(prof_tdata); - - /* Create dump file. */ - if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1) - goto label_open_close_error; - - /* Dump profile header. */ - if (prof_dump_header(propagate_err, &cnt_all)) - goto label_write_error; - - /* Dump per ctx profile stats. 
*/ - while ((ctx.p = ql_first(&ctx_ql)) != NULL) { - if (prof_dump_ctx(propagate_err, ctx.p, ctx.p->bt, &ctx_ql)) - goto label_write_error; - } - - /* Dump /proc//maps if possible. */ - if (prof_dump_maps(propagate_err)) - goto label_write_error; - - if (prof_dump_close(propagate_err)) - goto label_open_close_error; - - malloc_mutex_unlock(&prof_dump_mtx); - - if (leakcheck) - prof_leakcheck(&cnt_all, leak_nctx, filename); - - return (false); -label_write_error: - prof_dump_close(propagate_err); -label_open_close_error: - while ((ctx.p = ql_first(&ctx_ql)) != NULL) - prof_dump_ctx_cleanup(ctx.p, &ctx_ql); - malloc_mutex_unlock(&prof_dump_mtx); - return (true); -} - -#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1) -#define VSEQ_INVALID UINT64_C(0xffffffffffffffff) -static void -prof_dump_filename(char *filename, char v, int64_t vseq) -{ - - cassert(config_prof); - - if (vseq != VSEQ_INVALID) { - /* "...v.heap" */ - malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, - "%s.%d.%"PRIu64".%c%"PRId64".heap", - opt_prof_prefix, (int)getpid(), prof_dump_seq, v, vseq); - } else { - /* "....heap" */ - malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, - "%s.%d.%"PRIu64".%c.heap", - opt_prof_prefix, (int)getpid(), prof_dump_seq, v); - } - prof_dump_seq++; -} - -static void -prof_fdump(void) -{ - char filename[DUMP_FILENAME_BUFSIZE]; - - cassert(config_prof); - - if (prof_booted == false) - return; - - if (opt_prof_final && opt_prof_prefix[0] != '\0') { - malloc_mutex_lock(&prof_dump_seq_mtx); - prof_dump_filename(filename, 'f', VSEQ_INVALID); - malloc_mutex_unlock(&prof_dump_seq_mtx); - prof_dump(false, filename, opt_prof_leak); - } -} - -void -prof_idump(void) -{ - prof_tdata_t *prof_tdata; - char filename[PATH_MAX + 1]; - - cassert(config_prof); - - if (prof_booted == false) - return; - prof_tdata = prof_tdata_get(false); - if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) - return; - if (prof_tdata->enq) { - prof_tdata->enq_idump = true; - return; - } - - if (opt_prof_prefix[0] != '\0') { - malloc_mutex_lock(&prof_dump_seq_mtx); - prof_dump_filename(filename, 'i', prof_dump_iseq); - prof_dump_iseq++; - malloc_mutex_unlock(&prof_dump_seq_mtx); - prof_dump(false, filename, false); - } -} - -bool -prof_mdump(const char *filename) -{ - char filename_buf[DUMP_FILENAME_BUFSIZE]; - - cassert(config_prof); - - if (opt_prof == false || prof_booted == false) - return (true); - - if (filename == NULL) { - /* No filename specified, so automatically generate one. 
*/ - if (opt_prof_prefix[0] == '\0') - return (true); - malloc_mutex_lock(&prof_dump_seq_mtx); - prof_dump_filename(filename_buf, 'm', prof_dump_mseq); - prof_dump_mseq++; - malloc_mutex_unlock(&prof_dump_seq_mtx); - filename = filename_buf; - } - return (prof_dump(true, filename, false)); -} - -void -prof_gdump(void) -{ - prof_tdata_t *prof_tdata; - char filename[DUMP_FILENAME_BUFSIZE]; - - cassert(config_prof); - - if (prof_booted == false) - return; - prof_tdata = prof_tdata_get(false); - if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) - return; - if (prof_tdata->enq) { - prof_tdata->enq_gdump = true; - return; - } - - if (opt_prof_prefix[0] != '\0') { - malloc_mutex_lock(&prof_dump_seq_mtx); - prof_dump_filename(filename, 'u', prof_dump_useq); - prof_dump_useq++; - malloc_mutex_unlock(&prof_dump_seq_mtx); - prof_dump(false, filename, false); - } -} - -static void -prof_bt_hash(const void *key, size_t r_hash[2]) -{ - prof_bt_t *bt = (prof_bt_t *)key; - - cassert(config_prof); - - hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash); -} - -static bool -prof_bt_keycomp(const void *k1, const void *k2) -{ - const prof_bt_t *bt1 = (prof_bt_t *)k1; - const prof_bt_t *bt2 = (prof_bt_t *)k2; - - cassert(config_prof); - - if (bt1->len != bt2->len) - return (false); - return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0); -} - -prof_tdata_t * -prof_tdata_init(void) -{ - prof_tdata_t *prof_tdata; - - cassert(config_prof); - - /* Initialize an empty cache for this thread. */ - prof_tdata = (prof_tdata_t *)imalloc(sizeof(prof_tdata_t)); - if (prof_tdata == NULL) - return (NULL); - - if (ckh_new(&prof_tdata->bt2cnt, PROF_CKH_MINITEMS, - prof_bt_hash, prof_bt_keycomp)) { - idalloc(prof_tdata); - return (NULL); - } - ql_new(&prof_tdata->lru_ql); - - prof_tdata->vec = imalloc(sizeof(void *) * PROF_BT_MAX); - if (prof_tdata->vec == NULL) { - ckh_delete(&prof_tdata->bt2cnt); - idalloc(prof_tdata); - return (NULL); - } - - prof_tdata->prng_state = 0; - prof_tdata->threshold = 0; - prof_tdata->accum = 0; - - prof_tdata->enq = false; - prof_tdata->enq_idump = false; - prof_tdata->enq_gdump = false; - - prof_tdata_tsd_set(&prof_tdata); - - return (prof_tdata); -} - -void -prof_tdata_cleanup(void *arg) -{ - prof_thr_cnt_t *cnt; - prof_tdata_t *prof_tdata = *(prof_tdata_t **)arg; - - cassert(config_prof); - - if (prof_tdata == PROF_TDATA_STATE_REINCARNATED) { - /* - * Another destructor deallocated memory after this destructor - * was called. Reset prof_tdata to PROF_TDATA_STATE_PURGATORY - * in order to receive another callback. - */ - prof_tdata = PROF_TDATA_STATE_PURGATORY; - prof_tdata_tsd_set(&prof_tdata); - } else if (prof_tdata == PROF_TDATA_STATE_PURGATORY) { - /* - * The previous time this destructor was called, we set the key - * to PROF_TDATA_STATE_PURGATORY so that other destructors - * wouldn't cause re-creation of the prof_tdata. This time, do - * nothing, so that the destructor will not be called again. - */ - } else if (prof_tdata != NULL) { - /* - * Delete the hash table. All of its contents can still be - * iterated over via the LRU. - */ - ckh_delete(&prof_tdata->bt2cnt); - /* - * Iteratively merge cnt's into the global stats and delete - * them. 
- */ - while ((cnt = ql_last(&prof_tdata->lru_ql, lru_link)) != NULL) { - ql_remove(&prof_tdata->lru_ql, cnt, lru_link); - prof_ctx_merge(cnt->ctx, cnt); - idalloc(cnt); - } - idalloc(prof_tdata->vec); - idalloc(prof_tdata); - prof_tdata = PROF_TDATA_STATE_PURGATORY; - prof_tdata_tsd_set(&prof_tdata); - } -} - -void -prof_boot0(void) -{ - - cassert(config_prof); - - memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT, - sizeof(PROF_PREFIX_DEFAULT)); -} - -void -prof_boot1(void) -{ - - cassert(config_prof); - - /* - * opt_prof and prof_promote must be in their final state before any - * arenas are initialized, so this function must be executed early. - */ - - if (opt_prof_leak && opt_prof == false) { - /* - * Enable opt_prof, but in such a way that profiles are never - * automatically dumped. - */ - opt_prof = true; - opt_prof_gdump = false; - } else if (opt_prof) { - if (opt_lg_prof_interval >= 0) { - prof_interval = (((uint64_t)1U) << - opt_lg_prof_interval); - } - } - - prof_promote = (opt_prof && opt_lg_prof_sample > LG_PAGE); -} - -bool -prof_boot2(void) -{ - - cassert(config_prof); - - if (opt_prof) { - unsigned i; - - if (ckh_new(&bt2ctx, PROF_CKH_MINITEMS, prof_bt_hash, - prof_bt_keycomp)) - return (true); - if (malloc_mutex_init(&bt2ctx_mtx)) - return (true); - if (prof_tdata_tsd_boot()) { - malloc_write( - ": Error in pthread_key_create()\n"); - abort(); - } - - if (malloc_mutex_init(&prof_dump_seq_mtx)) - return (true); - if (malloc_mutex_init(&prof_dump_mtx)) - return (true); - - if (atexit(prof_fdump) != 0) { - malloc_write(": Error in atexit()\n"); - if (opt_abort) - abort(); - } - - ctx_locks = (malloc_mutex_t *)base_alloc(PROF_NCTX_LOCKS * - sizeof(malloc_mutex_t)); - if (ctx_locks == NULL) - return (true); - for (i = 0; i < PROF_NCTX_LOCKS; i++) { - if (malloc_mutex_init(&ctx_locks[i])) - return (true); - } - } - -#ifdef JEMALLOC_PROF_LIBGCC - /* - * Cause the backtracing machinery to allocate its internal state - * before enabling profiling. - */ - _Unwind_Backtrace(prof_unwind_init_callback, NULL); -#endif - - prof_booted = true; - - return (false); -} - -void -prof_prefork(void) -{ - - if (opt_prof) { - unsigned i; - - malloc_mutex_prefork(&bt2ctx_mtx); - malloc_mutex_prefork(&prof_dump_seq_mtx); - for (i = 0; i < PROF_NCTX_LOCKS; i++) - malloc_mutex_prefork(&ctx_locks[i]); - } -} - -void -prof_postfork_parent(void) -{ - - if (opt_prof) { - unsigned i; - - for (i = 0; i < PROF_NCTX_LOCKS; i++) - malloc_mutex_postfork_parent(&ctx_locks[i]); - malloc_mutex_postfork_parent(&prof_dump_seq_mtx); - malloc_mutex_postfork_parent(&bt2ctx_mtx); - } -} - -void -prof_postfork_child(void) -{ - - if (opt_prof) { - unsigned i; - - for (i = 0; i < PROF_NCTX_LOCKS; i++) - malloc_mutex_postfork_child(&ctx_locks[i]); - malloc_mutex_postfork_child(&prof_dump_seq_mtx); - malloc_mutex_postfork_child(&bt2ctx_mtx); - } -} - -/******************************************************************************/ diff --git a/lib/jemalloc-3.6.0/src/quarantine.c b/lib/jemalloc-3.6.0/src/quarantine.c deleted file mode 100644 index 5431511..0000000 --- a/lib/jemalloc-3.6.0/src/quarantine.c +++ /dev/null @@ -1,199 +0,0 @@ -#define JEMALLOC_QUARANTINE_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/* - * quarantine pointers close to NULL are used to encode state information that - * is used for cleaning up during thread shutdown. 
- */ -#define QUARANTINE_STATE_REINCARNATED ((quarantine_t *)(uintptr_t)1) -#define QUARANTINE_STATE_PURGATORY ((quarantine_t *)(uintptr_t)2) -#define QUARANTINE_STATE_MAX QUARANTINE_STATE_PURGATORY - -/******************************************************************************/ -/* Data. */ - -malloc_tsd_data(, quarantine, quarantine_t *, NULL) - -/******************************************************************************/ -/* Function prototypes for non-inline static functions. */ - -static quarantine_t *quarantine_grow(quarantine_t *quarantine); -static void quarantine_drain_one(quarantine_t *quarantine); -static void quarantine_drain(quarantine_t *quarantine, size_t upper_bound); - -/******************************************************************************/ - -quarantine_t * -quarantine_init(size_t lg_maxobjs) -{ - quarantine_t *quarantine; - - quarantine = (quarantine_t *)imalloc(offsetof(quarantine_t, objs) + - ((ZU(1) << lg_maxobjs) * sizeof(quarantine_obj_t))); - if (quarantine == NULL) - return (NULL); - quarantine->curbytes = 0; - quarantine->curobjs = 0; - quarantine->first = 0; - quarantine->lg_maxobjs = lg_maxobjs; - - quarantine_tsd_set(&quarantine); - - return (quarantine); -} - -static quarantine_t * -quarantine_grow(quarantine_t *quarantine) -{ - quarantine_t *ret; - - ret = quarantine_init(quarantine->lg_maxobjs + 1); - if (ret == NULL) { - quarantine_drain_one(quarantine); - return (quarantine); - } - - ret->curbytes = quarantine->curbytes; - ret->curobjs = quarantine->curobjs; - if (quarantine->first + quarantine->curobjs <= (ZU(1) << - quarantine->lg_maxobjs)) { - /* objs ring buffer data are contiguous. */ - memcpy(ret->objs, &quarantine->objs[quarantine->first], - quarantine->curobjs * sizeof(quarantine_obj_t)); - } else { - /* objs ring buffer data wrap around. */ - size_t ncopy_a = (ZU(1) << quarantine->lg_maxobjs) - - quarantine->first; - size_t ncopy_b = quarantine->curobjs - ncopy_a; - - memcpy(ret->objs, &quarantine->objs[quarantine->first], ncopy_a - * sizeof(quarantine_obj_t)); - memcpy(&ret->objs[ncopy_a], quarantine->objs, ncopy_b * - sizeof(quarantine_obj_t)); - } - idalloc(quarantine); - - return (ret); -} - -static void -quarantine_drain_one(quarantine_t *quarantine) -{ - quarantine_obj_t *obj = &quarantine->objs[quarantine->first]; - assert(obj->usize == isalloc(obj->ptr, config_prof)); - idalloc(obj->ptr); - quarantine->curbytes -= obj->usize; - quarantine->curobjs--; - quarantine->first = (quarantine->first + 1) & ((ZU(1) << - quarantine->lg_maxobjs) - 1); -} - -static void -quarantine_drain(quarantine_t *quarantine, size_t upper_bound) -{ - - while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0) - quarantine_drain_one(quarantine); -} - -void -quarantine(void *ptr) -{ - quarantine_t *quarantine; - size_t usize = isalloc(ptr, config_prof); - - cassert(config_fill); - assert(opt_quarantine); - - quarantine = *quarantine_tsd_get(); - if ((uintptr_t)quarantine <= (uintptr_t)QUARANTINE_STATE_MAX) { - if (quarantine == QUARANTINE_STATE_PURGATORY) { - /* - * Make a note that quarantine() was called after - * quarantine_cleanup() was called. - */ - quarantine = QUARANTINE_STATE_REINCARNATED; - quarantine_tsd_set(&quarantine); - } - idalloc(ptr); - return; - } - /* - * Drain one or more objects if the quarantine size limit would be - * exceeded by appending ptr. - */ - if (quarantine->curbytes + usize > opt_quarantine) { - size_t upper_bound = (opt_quarantine >= usize) ? 
opt_quarantine - - usize : 0; - quarantine_drain(quarantine, upper_bound); - } - /* Grow the quarantine ring buffer if it's full. */ - if (quarantine->curobjs == (ZU(1) << quarantine->lg_maxobjs)) - quarantine = quarantine_grow(quarantine); - /* quarantine_grow() must free a slot if it fails to grow. */ - assert(quarantine->curobjs < (ZU(1) << quarantine->lg_maxobjs)); - /* Append ptr if its size doesn't exceed the quarantine size. */ - if (quarantine->curbytes + usize <= opt_quarantine) { - size_t offset = (quarantine->first + quarantine->curobjs) & - ((ZU(1) << quarantine->lg_maxobjs) - 1); - quarantine_obj_t *obj = &quarantine->objs[offset]; - obj->ptr = ptr; - obj->usize = usize; - quarantine->curbytes += usize; - quarantine->curobjs++; - if (config_fill && opt_junk) { - /* - * Only do redzone validation if Valgrind isn't in - * operation. - */ - if ((config_valgrind == false || opt_valgrind == false) - && usize <= SMALL_MAXCLASS) - arena_quarantine_junk_small(ptr, usize); - else - memset(ptr, 0x5a, usize); - } - } else { - assert(quarantine->curbytes == 0); - idalloc(ptr); - } -} - -void -quarantine_cleanup(void *arg) -{ - quarantine_t *quarantine = *(quarantine_t **)arg; - - if (quarantine == QUARANTINE_STATE_REINCARNATED) { - /* - * Another destructor deallocated memory after this destructor - * was called. Reset quarantine to QUARANTINE_STATE_PURGATORY - * in order to receive another callback. - */ - quarantine = QUARANTINE_STATE_PURGATORY; - quarantine_tsd_set(&quarantine); - } else if (quarantine == QUARANTINE_STATE_PURGATORY) { - /* - * The previous time this destructor was called, we set the key - * to QUARANTINE_STATE_PURGATORY so that other destructors - * wouldn't cause re-creation of the quarantine. This time, do - * nothing, so that the destructor will not be called again. 
- */ - } else if (quarantine != NULL) { - quarantine_drain(quarantine, 0); - idalloc(quarantine); - quarantine = QUARANTINE_STATE_PURGATORY; - quarantine_tsd_set(&quarantine); - } -} - -bool -quarantine_boot(void) -{ - - cassert(config_fill); - - if (quarantine_tsd_boot()) - return (true); - - return (false); -} diff --git a/lib/jemalloc-3.6.0/src/rtree.c b/lib/jemalloc-3.6.0/src/rtree.c deleted file mode 100644 index 205957a..0000000 --- a/lib/jemalloc-3.6.0/src/rtree.c +++ /dev/null @@ -1,105 +0,0 @@ -#define JEMALLOC_RTREE_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -rtree_t * -rtree_new(unsigned bits, rtree_alloc_t *alloc, rtree_dalloc_t *dalloc) -{ - rtree_t *ret; - unsigned bits_per_level, bits_in_leaf, height, i; - - assert(bits > 0 && bits <= (sizeof(uintptr_t) << 3)); - - bits_per_level = ffs(pow2_ceil((RTREE_NODESIZE / sizeof(void *)))) - 1; - bits_in_leaf = ffs(pow2_ceil((RTREE_NODESIZE / sizeof(uint8_t)))) - 1; - if (bits > bits_in_leaf) { - height = 1 + (bits - bits_in_leaf) / bits_per_level; - if ((height-1) * bits_per_level + bits_in_leaf != bits) - height++; - } else { - height = 1; - } - assert((height-1) * bits_per_level + bits_in_leaf >= bits); - - ret = (rtree_t*)alloc(offsetof(rtree_t, level2bits) + - (sizeof(unsigned) * height)); - if (ret == NULL) - return (NULL); - memset(ret, 0, offsetof(rtree_t, level2bits) + (sizeof(unsigned) * - height)); - - ret->alloc = alloc; - ret->dalloc = dalloc; - if (malloc_mutex_init(&ret->mutex)) { - if (dalloc != NULL) - dalloc(ret); - return (NULL); - } - ret->height = height; - if (height > 1) { - if ((height-1) * bits_per_level + bits_in_leaf > bits) { - ret->level2bits[0] = (bits - bits_in_leaf) % - bits_per_level; - } else - ret->level2bits[0] = bits_per_level; - for (i = 1; i < height-1; i++) - ret->level2bits[i] = bits_per_level; - ret->level2bits[height-1] = bits_in_leaf; - } else - ret->level2bits[0] = bits; - - ret->root = (void**)alloc(sizeof(void *) << ret->level2bits[0]); - if (ret->root == NULL) { - if (dalloc != NULL) - dalloc(ret); - return (NULL); - } - memset(ret->root, 0, sizeof(void *) << ret->level2bits[0]); - - return (ret); -} - -static void -rtree_delete_subtree(rtree_t *rtree, void **node, unsigned level) -{ - - if (level < rtree->height - 1) { - size_t nchildren, i; - - nchildren = ZU(1) << rtree->level2bits[level]; - for (i = 0; i < nchildren; i++) { - void **child = (void **)node[i]; - if (child != NULL) - rtree_delete_subtree(rtree, child, level + 1); - } - } - rtree->dalloc(node); -} - -void -rtree_delete(rtree_t *rtree) -{ - - rtree_delete_subtree(rtree, rtree->root, 0); - rtree->dalloc(rtree); -} - -void -rtree_prefork(rtree_t *rtree) -{ - - malloc_mutex_prefork(&rtree->mutex); -} - -void -rtree_postfork_parent(rtree_t *rtree) -{ - - malloc_mutex_postfork_parent(&rtree->mutex); -} - -void -rtree_postfork_child(rtree_t *rtree) -{ - - malloc_mutex_postfork_child(&rtree->mutex); -} diff --git a/lib/jemalloc-3.6.0/src/stats.c b/lib/jemalloc-3.6.0/src/stats.c deleted file mode 100644 index bef2ab3..0000000 --- a/lib/jemalloc-3.6.0/src/stats.c +++ /dev/null @@ -1,549 +0,0 @@ -#define JEMALLOC_STATS_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -#define CTL_GET(n, v, t) do { \ - size_t sz = sizeof(t); \ - xmallctl(n, v, &sz, NULL, 0); \ -} while (0) - -#define CTL_I_GET(n, v, t) do { \ - size_t mib[6]; \ - size_t miblen = sizeof(mib) / sizeof(size_t); \ - size_t sz = sizeof(t); \ - xmallctlnametomib(n, mib, &miblen); \ - mib[2] = i; \ - xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \ -} 
while (0) - -#define CTL_J_GET(n, v, t) do { \ - size_t mib[6]; \ - size_t miblen = sizeof(mib) / sizeof(size_t); \ - size_t sz = sizeof(t); \ - xmallctlnametomib(n, mib, &miblen); \ - mib[2] = j; \ - xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \ -} while (0) - -#define CTL_IJ_GET(n, v, t) do { \ - size_t mib[6]; \ - size_t miblen = sizeof(mib) / sizeof(size_t); \ - size_t sz = sizeof(t); \ - xmallctlnametomib(n, mib, &miblen); \ - mib[2] = i; \ - mib[4] = j; \ - xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \ -} while (0) - -/******************************************************************************/ -/* Data. */ - -bool opt_stats_print = false; - -size_t stats_cactive = 0; - -/******************************************************************************/ -/* Function prototypes for non-inline static functions. */ - -static void stats_arena_bins_print(void (*write_cb)(void *, const char *), - void *cbopaque, unsigned i); -static void stats_arena_lruns_print(void (*write_cb)(void *, const char *), - void *cbopaque, unsigned i); -static void stats_arena_print(void (*write_cb)(void *, const char *), - void *cbopaque, unsigned i, bool bins, bool large); - -/******************************************************************************/ - -static void -stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque, - unsigned i) -{ - size_t page; - bool config_tcache; - unsigned nbins, j, gap_start; - - CTL_GET("arenas.page", &page, size_t); - - CTL_GET("config.tcache", &config_tcache, bool); - if (config_tcache) { - malloc_cprintf(write_cb, cbopaque, - "bins: bin size regs pgs allocated nmalloc" - " ndalloc nrequests nfills nflushes" - " newruns reruns curruns\n"); - } else { - malloc_cprintf(write_cb, cbopaque, - "bins: bin size regs pgs allocated nmalloc" - " ndalloc newruns reruns curruns\n"); - } - CTL_GET("arenas.nbins", &nbins, unsigned); - for (j = 0, gap_start = UINT_MAX; j < nbins; j++) { - uint64_t nruns; - - CTL_IJ_GET("stats.arenas.0.bins.0.nruns", &nruns, uint64_t); - if (nruns == 0) { - if (gap_start == UINT_MAX) - gap_start = j; - } else { - size_t reg_size, run_size, allocated; - uint32_t nregs; - uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes; - uint64_t reruns; - size_t curruns; - - if (gap_start != UINT_MAX) { - if (j > gap_start + 1) { - /* Gap of more than one size class. */ - malloc_cprintf(write_cb, cbopaque, - "[%u..%u]\n", gap_start, - j - 1); - } else { - /* Gap of one size class. 
*/ - malloc_cprintf(write_cb, cbopaque, - "[%u]\n", gap_start); - } - gap_start = UINT_MAX; - } - CTL_J_GET("arenas.bin.0.size", ®_size, size_t); - CTL_J_GET("arenas.bin.0.nregs", &nregs, uint32_t); - CTL_J_GET("arenas.bin.0.run_size", &run_size, size_t); - CTL_IJ_GET("stats.arenas.0.bins.0.allocated", - &allocated, size_t); - CTL_IJ_GET("stats.arenas.0.bins.0.nmalloc", - &nmalloc, uint64_t); - CTL_IJ_GET("stats.arenas.0.bins.0.ndalloc", - &ndalloc, uint64_t); - if (config_tcache) { - CTL_IJ_GET("stats.arenas.0.bins.0.nrequests", - &nrequests, uint64_t); - CTL_IJ_GET("stats.arenas.0.bins.0.nfills", - &nfills, uint64_t); - CTL_IJ_GET("stats.arenas.0.bins.0.nflushes", - &nflushes, uint64_t); - } - CTL_IJ_GET("stats.arenas.0.bins.0.nreruns", &reruns, - uint64_t); - CTL_IJ_GET("stats.arenas.0.bins.0.curruns", &curruns, - size_t); - if (config_tcache) { - malloc_cprintf(write_cb, cbopaque, - "%13u %5zu %4u %3zu %12zu %12"PRIu64 - " %12"PRIu64" %12"PRIu64" %12"PRIu64 - " %12"PRIu64" %12"PRIu64" %12"PRIu64 - " %12zu\n", - j, reg_size, nregs, run_size / page, - allocated, nmalloc, ndalloc, nrequests, - nfills, nflushes, nruns, reruns, curruns); - } else { - malloc_cprintf(write_cb, cbopaque, - "%13u %5zu %4u %3zu %12zu %12"PRIu64 - " %12"PRIu64" %12"PRIu64" %12"PRIu64 - " %12zu\n", - j, reg_size, nregs, run_size / page, - allocated, nmalloc, ndalloc, nruns, reruns, - curruns); - } - } - } - if (gap_start != UINT_MAX) { - if (j > gap_start + 1) { - /* Gap of more than one size class. */ - malloc_cprintf(write_cb, cbopaque, "[%u..%u]\n", - gap_start, j - 1); - } else { - /* Gap of one size class. */ - malloc_cprintf(write_cb, cbopaque, "[%u]\n", gap_start); - } - } -} - -static void -stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque, - unsigned i) -{ - size_t page, nlruns, j; - ssize_t gap_start; - - CTL_GET("arenas.page", &page, size_t); - - malloc_cprintf(write_cb, cbopaque, - "large: size pages nmalloc ndalloc nrequests" - " curruns\n"); - CTL_GET("arenas.nlruns", &nlruns, size_t); - for (j = 0, gap_start = -1; j < nlruns; j++) { - uint64_t nmalloc, ndalloc, nrequests; - size_t run_size, curruns; - - CTL_IJ_GET("stats.arenas.0.lruns.0.nmalloc", &nmalloc, - uint64_t); - CTL_IJ_GET("stats.arenas.0.lruns.0.ndalloc", &ndalloc, - uint64_t); - CTL_IJ_GET("stats.arenas.0.lruns.0.nrequests", &nrequests, - uint64_t); - if (nrequests == 0) { - if (gap_start == -1) - gap_start = j; - } else { - CTL_J_GET("arenas.lrun.0.size", &run_size, size_t); - CTL_IJ_GET("stats.arenas.0.lruns.0.curruns", &curruns, - size_t); - if (gap_start != -1) { - malloc_cprintf(write_cb, cbopaque, "[%zu]\n", - j - gap_start); - gap_start = -1; - } - malloc_cprintf(write_cb, cbopaque, - "%13zu %5zu %12"PRIu64" %12"PRIu64" %12"PRIu64 - " %12zu\n", - run_size, run_size / page, nmalloc, ndalloc, - nrequests, curruns); - } - } - if (gap_start != -1) - malloc_cprintf(write_cb, cbopaque, "[%zu]\n", j - gap_start); -} - -static void -stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque, - unsigned i, bool bins, bool large) -{ - unsigned nthreads; - const char *dss; - size_t page, pactive, pdirty, mapped; - uint64_t npurge, nmadvise, purged; - size_t small_allocated; - uint64_t small_nmalloc, small_ndalloc, small_nrequests; - size_t large_allocated; - uint64_t large_nmalloc, large_ndalloc, large_nrequests; - - CTL_GET("arenas.page", &page, size_t); - - CTL_I_GET("stats.arenas.0.nthreads", &nthreads, unsigned); - malloc_cprintf(write_cb, cbopaque, - "assigned threads: %u\n", nthreads); - 
CTL_I_GET("stats.arenas.0.dss", &dss, const char *); - malloc_cprintf(write_cb, cbopaque, "dss allocation precedence: %s\n", - dss); - CTL_I_GET("stats.arenas.0.pactive", &pactive, size_t); - CTL_I_GET("stats.arenas.0.pdirty", &pdirty, size_t); - CTL_I_GET("stats.arenas.0.npurge", &npurge, uint64_t); - CTL_I_GET("stats.arenas.0.nmadvise", &nmadvise, uint64_t); - CTL_I_GET("stats.arenas.0.purged", &purged, uint64_t); - malloc_cprintf(write_cb, cbopaque, - "dirty pages: %zu:%zu active:dirty, %"PRIu64" sweep%s," - " %"PRIu64" madvise%s, %"PRIu64" purged\n", - pactive, pdirty, npurge, npurge == 1 ? "" : "s", - nmadvise, nmadvise == 1 ? "" : "s", purged); - - malloc_cprintf(write_cb, cbopaque, - " allocated nmalloc ndalloc nrequests\n"); - CTL_I_GET("stats.arenas.0.small.allocated", &small_allocated, size_t); - CTL_I_GET("stats.arenas.0.small.nmalloc", &small_nmalloc, uint64_t); - CTL_I_GET("stats.arenas.0.small.ndalloc", &small_ndalloc, uint64_t); - CTL_I_GET("stats.arenas.0.small.nrequests", &small_nrequests, uint64_t); - malloc_cprintf(write_cb, cbopaque, - "small: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n", - small_allocated, small_nmalloc, small_ndalloc, small_nrequests); - CTL_I_GET("stats.arenas.0.large.allocated", &large_allocated, size_t); - CTL_I_GET("stats.arenas.0.large.nmalloc", &large_nmalloc, uint64_t); - CTL_I_GET("stats.arenas.0.large.ndalloc", &large_ndalloc, uint64_t); - CTL_I_GET("stats.arenas.0.large.nrequests", &large_nrequests, uint64_t); - malloc_cprintf(write_cb, cbopaque, - "large: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n", - large_allocated, large_nmalloc, large_ndalloc, large_nrequests); - malloc_cprintf(write_cb, cbopaque, - "total: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n", - small_allocated + large_allocated, - small_nmalloc + large_nmalloc, - small_ndalloc + large_ndalloc, - small_nrequests + large_nrequests); - malloc_cprintf(write_cb, cbopaque, "active: %12zu\n", pactive * page); - CTL_I_GET("stats.arenas.0.mapped", &mapped, size_t); - malloc_cprintf(write_cb, cbopaque, "mapped: %12zu\n", mapped); - - if (bins) - stats_arena_bins_print(write_cb, cbopaque, i); - if (large) - stats_arena_lruns_print(write_cb, cbopaque, i); -} - -void -stats_print(void (*write_cb)(void *, const char *), void *cbopaque, - const char *opts) -{ - int err; - uint64_t epoch; - size_t u64sz; - bool general = true; - bool merged = true; - bool unmerged = true; - bool bins = true; - bool large = true; - - /* - * Refresh stats, in case mallctl() was called by the application. - * - * Check for OOM here, since refreshing the ctl cache can trigger - * allocation. In practice, none of the subsequent mallctl()-related - * calls in this function will cause OOM if this one succeeds. 
- * */ - epoch = 1; - u64sz = sizeof(uint64_t); - err = je_mallctl("epoch", &epoch, &u64sz, &epoch, sizeof(uint64_t)); - if (err != 0) { - if (err == EAGAIN) { - malloc_write(": Memory allocation failure in " - "mallctl(\"epoch\", ...)\n"); - return; - } - malloc_write(": Failure in mallctl(\"epoch\", " - "...)\n"); - abort(); - } - - if (opts != NULL) { - unsigned i; - - for (i = 0; opts[i] != '\0'; i++) { - switch (opts[i]) { - case 'g': - general = false; - break; - case 'm': - merged = false; - break; - case 'a': - unmerged = false; - break; - case 'b': - bins = false; - break; - case 'l': - large = false; - break; - default:; - } - } - } - - malloc_cprintf(write_cb, cbopaque, - "___ Begin jemalloc statistics ___\n"); - if (general) { - int err; - const char *cpv; - bool bv; - unsigned uv; - ssize_t ssv; - size_t sv, bsz, ssz, sssz, cpsz; - - bsz = sizeof(bool); - ssz = sizeof(size_t); - sssz = sizeof(ssize_t); - cpsz = sizeof(const char *); - - CTL_GET("version", &cpv, const char *); - malloc_cprintf(write_cb, cbopaque, "Version: %s\n", cpv); - CTL_GET("config.debug", &bv, bool); - malloc_cprintf(write_cb, cbopaque, "Assertions %s\n", - bv ? "enabled" : "disabled"); - -#define OPT_WRITE_BOOL(n) \ - if ((err = je_mallctl("opt."#n, &bv, &bsz, NULL, 0)) \ - == 0) { \ - malloc_cprintf(write_cb, cbopaque, \ - " opt."#n": %s\n", bv ? "true" : "false"); \ - } -#define OPT_WRITE_SIZE_T(n) \ - if ((err = je_mallctl("opt."#n, &sv, &ssz, NULL, 0)) \ - == 0) { \ - malloc_cprintf(write_cb, cbopaque, \ - " opt."#n": %zu\n", sv); \ - } -#define OPT_WRITE_SSIZE_T(n) \ - if ((err = je_mallctl("opt."#n, &ssv, &sssz, NULL, 0)) \ - == 0) { \ - malloc_cprintf(write_cb, cbopaque, \ - " opt."#n": %zd\n", ssv); \ - } -#define OPT_WRITE_CHAR_P(n) \ - if ((err = je_mallctl("opt."#n, &cpv, &cpsz, NULL, 0)) \ - == 0) { \ - malloc_cprintf(write_cb, cbopaque, \ - " opt."#n": \"%s\"\n", cpv); \ - } - - malloc_cprintf(write_cb, cbopaque, - "Run-time option settings:\n"); - OPT_WRITE_BOOL(abort) - OPT_WRITE_SIZE_T(lg_chunk) - OPT_WRITE_CHAR_P(dss) - OPT_WRITE_SIZE_T(narenas) - OPT_WRITE_SSIZE_T(lg_dirty_mult) - OPT_WRITE_BOOL(stats_print) - OPT_WRITE_BOOL(junk) - OPT_WRITE_SIZE_T(quarantine) - OPT_WRITE_BOOL(redzone) - OPT_WRITE_BOOL(zero) - OPT_WRITE_BOOL(utrace) - OPT_WRITE_BOOL(valgrind) - OPT_WRITE_BOOL(xmalloc) - OPT_WRITE_BOOL(tcache) - OPT_WRITE_SSIZE_T(lg_tcache_max) - OPT_WRITE_BOOL(prof) - OPT_WRITE_CHAR_P(prof_prefix) - OPT_WRITE_BOOL(prof_active) - OPT_WRITE_SSIZE_T(lg_prof_sample) - OPT_WRITE_BOOL(prof_accum) - OPT_WRITE_SSIZE_T(lg_prof_interval) - OPT_WRITE_BOOL(prof_gdump) - OPT_WRITE_BOOL(prof_final) - OPT_WRITE_BOOL(prof_leak) - -#undef OPT_WRITE_BOOL -#undef OPT_WRITE_SIZE_T -#undef OPT_WRITE_SSIZE_T -#undef OPT_WRITE_CHAR_P - - malloc_cprintf(write_cb, cbopaque, "CPUs: %u\n", ncpus); - - CTL_GET("arenas.narenas", &uv, unsigned); - malloc_cprintf(write_cb, cbopaque, "Arenas: %u\n", uv); - - malloc_cprintf(write_cb, cbopaque, "Pointer size: %zu\n", - sizeof(void *)); - - CTL_GET("arenas.quantum", &sv, size_t); - malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n", sv); - - CTL_GET("arenas.page", &sv, size_t); - malloc_cprintf(write_cb, cbopaque, "Page size: %zu\n", sv); - - CTL_GET("opt.lg_dirty_mult", &ssv, ssize_t); - if (ssv >= 0) { - malloc_cprintf(write_cb, cbopaque, - "Min active:dirty page ratio per arena: %u:1\n", - (1U << ssv)); - } else { - malloc_cprintf(write_cb, cbopaque, - "Min active:dirty page ratio per arena: N/A\n"); - } - if ((err = je_mallctl("arenas.tcache_max", &sv, &ssz, 
NULL, 0)) - == 0) { - malloc_cprintf(write_cb, cbopaque, - "Maximum thread-cached size class: %zu\n", sv); - } - if ((err = je_mallctl("opt.prof", &bv, &bsz, NULL, 0)) == 0 && - bv) { - CTL_GET("opt.lg_prof_sample", &sv, size_t); - malloc_cprintf(write_cb, cbopaque, - "Average profile sample interval: %"PRIu64 - " (2^%zu)\n", (((uint64_t)1U) << sv), sv); - - CTL_GET("opt.lg_prof_interval", &ssv, ssize_t); - if (ssv >= 0) { - malloc_cprintf(write_cb, cbopaque, - "Average profile dump interval: %"PRIu64 - " (2^%zd)\n", - (((uint64_t)1U) << ssv), ssv); - } else { - malloc_cprintf(write_cb, cbopaque, - "Average profile dump interval: N/A\n"); - } - } - CTL_GET("opt.lg_chunk", &sv, size_t); - malloc_cprintf(write_cb, cbopaque, "Chunk size: %zu (2^%zu)\n", - (ZU(1) << sv), sv); - } - - if (config_stats) { - size_t *cactive; - size_t allocated, active, mapped; - size_t chunks_current, chunks_high; - uint64_t chunks_total; - size_t huge_allocated; - uint64_t huge_nmalloc, huge_ndalloc; - - CTL_GET("stats.cactive", &cactive, size_t *); - CTL_GET("stats.allocated", &allocated, size_t); - CTL_GET("stats.active", &active, size_t); - CTL_GET("stats.mapped", &mapped, size_t); - malloc_cprintf(write_cb, cbopaque, - "Allocated: %zu, active: %zu, mapped: %zu\n", - allocated, active, mapped); - malloc_cprintf(write_cb, cbopaque, - "Current active ceiling: %zu\n", atomic_read_z(cactive)); - - /* Print chunk stats. */ - CTL_GET("stats.chunks.total", &chunks_total, uint64_t); - CTL_GET("stats.chunks.high", &chunks_high, size_t); - CTL_GET("stats.chunks.current", &chunks_current, size_t); - malloc_cprintf(write_cb, cbopaque, "chunks: nchunks " - "highchunks curchunks\n"); - malloc_cprintf(write_cb, cbopaque, - " %13"PRIu64" %12zu %12zu\n", - chunks_total, chunks_high, chunks_current); - - /* Print huge stats. */ - CTL_GET("stats.huge.nmalloc", &huge_nmalloc, uint64_t); - CTL_GET("stats.huge.ndalloc", &huge_ndalloc, uint64_t); - CTL_GET("stats.huge.allocated", &huge_allocated, size_t); - malloc_cprintf(write_cb, cbopaque, - "huge: nmalloc ndalloc allocated\n"); - malloc_cprintf(write_cb, cbopaque, - " %12"PRIu64" %12"PRIu64" %12zu\n", - huge_nmalloc, huge_ndalloc, huge_allocated); - - if (merged) { - unsigned narenas; - - CTL_GET("arenas.narenas", &narenas, unsigned); - { - VARIABLE_ARRAY(bool, initialized, narenas); - size_t isz; - unsigned i, ninitialized; - - isz = sizeof(bool) * narenas; - xmallctl("arenas.initialized", initialized, - &isz, NULL, 0); - for (i = ninitialized = 0; i < narenas; i++) { - if (initialized[i]) - ninitialized++; - } - - if (ninitialized > 1 || unmerged == false) { - /* Print merged arena stats. */ - malloc_cprintf(write_cb, cbopaque, - "\nMerged arenas stats:\n"); - stats_arena_print(write_cb, cbopaque, - narenas, bins, large); - } - } - } - - if (unmerged) { - unsigned narenas; - - /* Print stats for each arena. 
*/ - - CTL_GET("arenas.narenas", &narenas, unsigned); - { - VARIABLE_ARRAY(bool, initialized, narenas); - size_t isz; - unsigned i; - - isz = sizeof(bool) * narenas; - xmallctl("arenas.initialized", initialized, - &isz, NULL, 0); - - for (i = 0; i < narenas; i++) { - if (initialized[i]) { - malloc_cprintf(write_cb, - cbopaque, - "\narenas[%u]:\n", i); - stats_arena_print(write_cb, - cbopaque, i, bins, large); - } - } - } - } - } - malloc_cprintf(write_cb, cbopaque, "--- End jemalloc statistics ---\n"); -} diff --git a/lib/jemalloc-3.6.0/src/tcache.c b/lib/jemalloc-3.6.0/src/tcache.c deleted file mode 100644 index 6de9296..0000000 --- a/lib/jemalloc-3.6.0/src/tcache.c +++ /dev/null @@ -1,479 +0,0 @@ -#define JEMALLOC_TCACHE_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ -/* Data. */ - -malloc_tsd_data(, tcache, tcache_t *, NULL) -malloc_tsd_data(, tcache_enabled, tcache_enabled_t, tcache_enabled_default) - -bool opt_tcache = true; -ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT; - -tcache_bin_info_t *tcache_bin_info; -static unsigned stack_nelms; /* Total stack elms per tcache. */ - -size_t nhbins; -size_t tcache_maxclass; - -/******************************************************************************/ - -size_t tcache_salloc(const void *ptr) -{ - - return (arena_salloc(ptr, false)); -} - -void -tcache_event_hard(tcache_t *tcache) -{ - size_t binind = tcache->next_gc_bin; - tcache_bin_t *tbin = &tcache->tbins[binind]; - tcache_bin_info_t *tbin_info = &tcache_bin_info[binind]; - - if (tbin->low_water > 0) { - /* - * Flush (ceiling) 3/4 of the objects below the low water mark. - */ - if (binind < NBINS) { - tcache_bin_flush_small(tbin, binind, tbin->ncached - - tbin->low_water + (tbin->low_water >> 2), tcache); - } else { - tcache_bin_flush_large(tbin, binind, tbin->ncached - - tbin->low_water + (tbin->low_water >> 2), tcache); - } - /* - * Reduce fill count by 2X. Limit lg_fill_div such that the - * fill count is always at least 1. - */ - if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1)) >= 1) - tbin->lg_fill_div++; - } else if (tbin->low_water < 0) { - /* - * Increase fill count by 2X. Make sure lg_fill_div stays - * greater than 0. - */ - if (tbin->lg_fill_div > 1) - tbin->lg_fill_div--; - } - tbin->low_water = tbin->ncached; - - tcache->next_gc_bin++; - if (tcache->next_gc_bin == nhbins) - tcache->next_gc_bin = 0; - tcache->ev_cnt = 0; -} - -void * -tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin, size_t binind) -{ - void *ret; - - arena_tcache_fill_small(tcache->arena, tbin, binind, - config_prof ? tcache->prof_accumbytes : 0); - if (config_prof) - tcache->prof_accumbytes = 0; - ret = tcache_alloc_easy(tbin); - - return (ret); -} - -void -tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem, - tcache_t *tcache) -{ - void *ptr; - unsigned i, nflush, ndeferred; - bool merged_stats = false; - - assert(binind < NBINS); - assert(rem <= tbin->ncached); - - for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) { - /* Lock the arena bin associated with the first object. 
*/ - arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE( - tbin->avail[0]); - arena_t *arena = chunk->arena; - arena_bin_t *bin = &arena->bins[binind]; - - if (config_prof && arena == tcache->arena) { - if (arena_prof_accum(arena, tcache->prof_accumbytes)) - prof_idump(); - tcache->prof_accumbytes = 0; - } - - malloc_mutex_lock(&bin->lock); - if (config_stats && arena == tcache->arena) { - assert(merged_stats == false); - merged_stats = true; - bin->stats.nflushes++; - bin->stats.nrequests += tbin->tstats.nrequests; - tbin->tstats.nrequests = 0; - } - ndeferred = 0; - for (i = 0; i < nflush; i++) { - ptr = tbin->avail[i]; - assert(ptr != NULL); - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - if (chunk->arena == arena) { - size_t pageind = ((uintptr_t)ptr - - (uintptr_t)chunk) >> LG_PAGE; - arena_chunk_map_t *mapelm = - arena_mapp_get(chunk, pageind); - if (config_fill && opt_junk) { - arena_alloc_junk_small(ptr, - &arena_bin_info[binind], true); - } - arena_dalloc_bin_locked(arena, chunk, ptr, - mapelm); - } else { - /* - * This object was allocated via a different - * arena bin than the one that is currently - * locked. Stash the object, so that it can be - * handled in a future pass. - */ - tbin->avail[ndeferred] = ptr; - ndeferred++; - } - } - malloc_mutex_unlock(&bin->lock); - } - if (config_stats && merged_stats == false) { - /* - * The flush loop didn't happen to flush to this thread's - * arena, so the stats didn't get merged. Manually do so now. - */ - arena_bin_t *bin = &tcache->arena->bins[binind]; - malloc_mutex_lock(&bin->lock); - bin->stats.nflushes++; - bin->stats.nrequests += tbin->tstats.nrequests; - tbin->tstats.nrequests = 0; - malloc_mutex_unlock(&bin->lock); - } - - memmove(tbin->avail, &tbin->avail[tbin->ncached - rem], - rem * sizeof(void *)); - tbin->ncached = rem; - if ((int)tbin->ncached < tbin->low_water) - tbin->low_water = tbin->ncached; -} - -void -tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem, - tcache_t *tcache) -{ - void *ptr; - unsigned i, nflush, ndeferred; - bool merged_stats = false; - - assert(binind < nhbins); - assert(rem <= tbin->ncached); - - for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) { - /* Lock the arena associated with the first object. */ - arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE( - tbin->avail[0]); - arena_t *arena = chunk->arena; - UNUSED bool idump; - - if (config_prof) - idump = false; - malloc_mutex_lock(&arena->lock); - if ((config_prof || config_stats) && arena == tcache->arena) { - if (config_prof) { - idump = arena_prof_accum_locked(arena, - tcache->prof_accumbytes); - tcache->prof_accumbytes = 0; - } - if (config_stats) { - merged_stats = true; - arena->stats.nrequests_large += - tbin->tstats.nrequests; - arena->stats.lstats[binind - NBINS].nrequests += - tbin->tstats.nrequests; - tbin->tstats.nrequests = 0; - } - } - ndeferred = 0; - for (i = 0; i < nflush; i++) { - ptr = tbin->avail[i]; - assert(ptr != NULL); - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - if (chunk->arena == arena) - arena_dalloc_large_locked(arena, chunk, ptr); - else { - /* - * This object was allocated via a different - * arena than the one that is currently locked. - * Stash the object, so that it can be handled - * in a future pass. 
- */ - tbin->avail[ndeferred] = ptr; - ndeferred++; - } - } - malloc_mutex_unlock(&arena->lock); - if (config_prof && idump) - prof_idump(); - } - if (config_stats && merged_stats == false) { - /* - * The flush loop didn't happen to flush to this thread's - * arena, so the stats didn't get merged. Manually do so now. - */ - arena_t *arena = tcache->arena; - malloc_mutex_lock(&arena->lock); - arena->stats.nrequests_large += tbin->tstats.nrequests; - arena->stats.lstats[binind - NBINS].nrequests += - tbin->tstats.nrequests; - tbin->tstats.nrequests = 0; - malloc_mutex_unlock(&arena->lock); - } - - memmove(tbin->avail, &tbin->avail[tbin->ncached - rem], - rem * sizeof(void *)); - tbin->ncached = rem; - if ((int)tbin->ncached < tbin->low_water) - tbin->low_water = tbin->ncached; -} - -void -tcache_arena_associate(tcache_t *tcache, arena_t *arena) -{ - - if (config_stats) { - /* Link into list of extant tcaches. */ - malloc_mutex_lock(&arena->lock); - ql_elm_new(tcache, link); - ql_tail_insert(&arena->tcache_ql, tcache, link); - malloc_mutex_unlock(&arena->lock); - } - tcache->arena = arena; -} - -void -tcache_arena_dissociate(tcache_t *tcache) -{ - - if (config_stats) { - /* Unlink from list of extant tcaches. */ - malloc_mutex_lock(&tcache->arena->lock); - ql_remove(&tcache->arena->tcache_ql, tcache, link); - tcache_stats_merge(tcache, tcache->arena); - malloc_mutex_unlock(&tcache->arena->lock); - } -} - -tcache_t * -tcache_create(arena_t *arena) -{ - tcache_t *tcache; - size_t size, stack_offset; - unsigned i; - - size = offsetof(tcache_t, tbins) + (sizeof(tcache_bin_t) * nhbins); - /* Naturally align the pointer stacks. */ - size = PTR_CEILING(size); - stack_offset = size; - size += stack_nelms * sizeof(void *); - /* - * Round up to the nearest multiple of the cacheline size, in order to - * avoid the possibility of false cacheline sharing. - * - * That this works relies on the same logic as in ipalloc(), but we - * cannot directly call ipalloc() here due to tcache bootstrapping - * issues. 
- */ - size = (size + CACHELINE_MASK) & (-CACHELINE); - - if (size <= SMALL_MAXCLASS) - tcache = (tcache_t *)arena_malloc_small(arena, size, true); - else if (size <= tcache_maxclass) - tcache = (tcache_t *)arena_malloc_large(arena, size, true); - else - tcache = (tcache_t *)icalloct(size, false, arena); - - if (tcache == NULL) - return (NULL); - - tcache_arena_associate(tcache, arena); - - assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0); - for (i = 0; i < nhbins; i++) { - tcache->tbins[i].lg_fill_div = 1; - tcache->tbins[i].avail = (void **)((uintptr_t)tcache + - (uintptr_t)stack_offset); - stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *); - } - - tcache_tsd_set(&tcache); - - return (tcache); -} - -void -tcache_destroy(tcache_t *tcache) -{ - unsigned i; - size_t tcache_size; - - tcache_arena_dissociate(tcache); - - for (i = 0; i < NBINS; i++) { - tcache_bin_t *tbin = &tcache->tbins[i]; - tcache_bin_flush_small(tbin, i, 0, tcache); - - if (config_stats && tbin->tstats.nrequests != 0) { - arena_t *arena = tcache->arena; - arena_bin_t *bin = &arena->bins[i]; - malloc_mutex_lock(&bin->lock); - bin->stats.nrequests += tbin->tstats.nrequests; - malloc_mutex_unlock(&bin->lock); - } - } - - for (; i < nhbins; i++) { - tcache_bin_t *tbin = &tcache->tbins[i]; - tcache_bin_flush_large(tbin, i, 0, tcache); - - if (config_stats && tbin->tstats.nrequests != 0) { - arena_t *arena = tcache->arena; - malloc_mutex_lock(&arena->lock); - arena->stats.nrequests_large += tbin->tstats.nrequests; - arena->stats.lstats[i - NBINS].nrequests += - tbin->tstats.nrequests; - malloc_mutex_unlock(&arena->lock); - } - } - - if (config_prof && tcache->prof_accumbytes > 0 && - arena_prof_accum(tcache->arena, tcache->prof_accumbytes)) - prof_idump(); - - tcache_size = arena_salloc(tcache, false); - if (tcache_size <= SMALL_MAXCLASS) { - arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache); - arena_t *arena = chunk->arena; - size_t pageind = ((uintptr_t)tcache - (uintptr_t)chunk) >> - LG_PAGE; - arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind); - - arena_dalloc_bin(arena, chunk, tcache, pageind, mapelm); - } else if (tcache_size <= tcache_maxclass) { - arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache); - arena_t *arena = chunk->arena; - - arena_dalloc_large(arena, chunk, tcache); - } else - idalloct(tcache, false); -} - -void -tcache_thread_cleanup(void *arg) -{ - tcache_t *tcache = *(tcache_t **)arg; - - if (tcache == TCACHE_STATE_DISABLED) { - /* Do nothing. */ - } else if (tcache == TCACHE_STATE_REINCARNATED) { - /* - * Another destructor called an allocator function after this - * destructor was called. Reset tcache to - * TCACHE_STATE_PURGATORY in order to receive another callback. - */ - tcache = TCACHE_STATE_PURGATORY; - tcache_tsd_set(&tcache); - } else if (tcache == TCACHE_STATE_PURGATORY) { - /* - * The previous time this destructor was called, we set the key - * to TCACHE_STATE_PURGATORY so that other destructors wouldn't - * cause re-creation of the tcache. This time, do nothing, so - * that the destructor will not be called again. - */ - } else if (tcache != NULL) { - assert(tcache != TCACHE_STATE_PURGATORY); - tcache_destroy(tcache); - tcache = TCACHE_STATE_PURGATORY; - tcache_tsd_set(&tcache); - } -} - -/* Caller must own arena->lock. */ -void -tcache_stats_merge(tcache_t *tcache, arena_t *arena) -{ - unsigned i; - - cassert(config_stats); - - /* Merge and reset tcache stats. 
*/ - for (i = 0; i < NBINS; i++) { - arena_bin_t *bin = &arena->bins[i]; - tcache_bin_t *tbin = &tcache->tbins[i]; - malloc_mutex_lock(&bin->lock); - bin->stats.nrequests += tbin->tstats.nrequests; - malloc_mutex_unlock(&bin->lock); - tbin->tstats.nrequests = 0; - } - - for (; i < nhbins; i++) { - malloc_large_stats_t *lstats = &arena->stats.lstats[i - NBINS]; - tcache_bin_t *tbin = &tcache->tbins[i]; - arena->stats.nrequests_large += tbin->tstats.nrequests; - lstats->nrequests += tbin->tstats.nrequests; - tbin->tstats.nrequests = 0; - } -} - -bool -tcache_boot0(void) -{ - unsigned i; - - /* - * If necessary, clamp opt_lg_tcache_max, now that arena_maxclass is - * known. - */ - if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS) - tcache_maxclass = SMALL_MAXCLASS; - else if ((1U << opt_lg_tcache_max) > arena_maxclass) - tcache_maxclass = arena_maxclass; - else - tcache_maxclass = (1U << opt_lg_tcache_max); - - nhbins = NBINS + (tcache_maxclass >> LG_PAGE); - - /* Initialize tcache_bin_info. */ - tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins * - sizeof(tcache_bin_info_t)); - if (tcache_bin_info == NULL) - return (true); - stack_nelms = 0; - for (i = 0; i < NBINS; i++) { - if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MAX) { - tcache_bin_info[i].ncached_max = - (arena_bin_info[i].nregs << 1); - } else { - tcache_bin_info[i].ncached_max = - TCACHE_NSLOTS_SMALL_MAX; - } - stack_nelms += tcache_bin_info[i].ncached_max; - } - for (; i < nhbins; i++) { - tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE; - stack_nelms += tcache_bin_info[i].ncached_max; - } - - return (false); -} - -bool -tcache_boot1(void) -{ - - if (tcache_tsd_boot() || tcache_enabled_tsd_boot()) - return (true); - - return (false); -} diff --git a/lib/jemalloc-3.6.0/src/tsd.c b/lib/jemalloc-3.6.0/src/tsd.c deleted file mode 100644 index 700caab..0000000 --- a/lib/jemalloc-3.6.0/src/tsd.c +++ /dev/null @@ -1,141 +0,0 @@ -#define JEMALLOC_TSD_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ -/* Data. */ - -static unsigned ncleanups; -static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX]; - -/******************************************************************************/ - -void * -malloc_tsd_malloc(size_t size) -{ - - /* Avoid choose_arena() in order to dodge bootstrapping issues. 
*/ - return (arena_malloc(arenas[0], size, false, false)); -} - -void -malloc_tsd_dalloc(void *wrapper) -{ - - idalloct(wrapper, false); -} - -void -malloc_tsd_no_cleanup(void *arg) -{ - - not_reached(); -} - -#if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32) -#ifndef _WIN32 -JEMALLOC_EXPORT -#endif -void -_malloc_thread_cleanup(void) -{ - bool pending[MALLOC_TSD_CLEANUPS_MAX], again; - unsigned i; - - for (i = 0; i < ncleanups; i++) - pending[i] = true; - - do { - again = false; - for (i = 0; i < ncleanups; i++) { - if (pending[i]) { - pending[i] = cleanups[i](); - if (pending[i]) - again = true; - } - } - } while (again); -} -#endif - -void -malloc_tsd_cleanup_register(bool (*f)(void)) -{ - - assert(ncleanups < MALLOC_TSD_CLEANUPS_MAX); - cleanups[ncleanups] = f; - ncleanups++; -} - -void -malloc_tsd_boot(void) -{ - - ncleanups = 0; -} - -#ifdef _WIN32 -static BOOL WINAPI -_tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) -{ - - switch (fdwReason) { -#ifdef JEMALLOC_LAZY_LOCK - case DLL_THREAD_ATTACH: - isthreaded = true; - break; -#endif - case DLL_THREAD_DETACH: - _malloc_thread_cleanup(); - break; - default: - break; - } - return (true); -} - -#ifdef _MSC_VER -# ifdef _M_IX86 -# pragma comment(linker, "/INCLUDE:__tls_used") -# else -# pragma comment(linker, "/INCLUDE:_tls_used") -# endif -# pragma section(".CRT$XLY",long,read) -#endif -JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used) -static const BOOL (WINAPI *tls_callback)(HINSTANCE hinstDLL, - DWORD fdwReason, LPVOID lpvReserved) = _tls_callback; -#endif - -#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ - !defined(_WIN32)) -void * -tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block) -{ - pthread_t self = pthread_self(); - tsd_init_block_t *iter; - - /* Check whether this thread has already inserted into the list. */ - malloc_mutex_lock(&head->lock); - ql_foreach(iter, &head->blocks, link) { - if (iter->thread == self) { - malloc_mutex_unlock(&head->lock); - return (iter->data); - } - } - /* Insert block into list. */ - ql_elm_new(block, link); - block->thread = self; - ql_tail_insert(&head->blocks, block, link); - malloc_mutex_unlock(&head->lock); - return (NULL); -} - -void -tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block) -{ - - malloc_mutex_lock(&head->lock); - ql_remove(&head->blocks, block, link); - malloc_mutex_unlock(&head->lock); -} -#endif diff --git a/lib/jemalloc-3.6.0/src/util.c b/lib/jemalloc-3.6.0/src/util.c deleted file mode 100644 index 93a19fd..0000000 --- a/lib/jemalloc-3.6.0/src/util.c +++ /dev/null @@ -1,648 +0,0 @@ -#define assert(e) do { \ - if (config_debug && !(e)) { \ - malloc_write(": Failed assertion\n"); \ - abort(); \ - } \ -} while (0) - -#define not_reached() do { \ - if (config_debug) { \ - malloc_write(": Unreachable code reached\n"); \ - abort(); \ - } \ -} while (0) - -#define not_implemented() do { \ - if (config_debug) { \ - malloc_write(": Not implemented\n"); \ - abort(); \ - } \ -} while (0) - -#define JEMALLOC_UTIL_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ -/* Function prototypes for non-inline static functions. 
*/ - -static void wrtmessage(void *cbopaque, const char *s); -#define U2S_BUFSIZE ((1U << (LG_SIZEOF_INTMAX_T + 3)) + 1) -static char *u2s(uintmax_t x, unsigned base, bool uppercase, char *s, - size_t *slen_p); -#define D2S_BUFSIZE (1 + U2S_BUFSIZE) -static char *d2s(intmax_t x, char sign, char *s, size_t *slen_p); -#define O2S_BUFSIZE (1 + U2S_BUFSIZE) -static char *o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p); -#define X2S_BUFSIZE (2 + U2S_BUFSIZE) -static char *x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, - size_t *slen_p); - -/******************************************************************************/ - -/* malloc_message() setup. */ -static void -wrtmessage(void *cbopaque, const char *s) -{ - -#ifdef SYS_write - /* - * Use syscall(2) rather than write(2) when possible in order to avoid - * the possibility of memory allocation within libc. This is necessary - * on FreeBSD; most operating systems do not have this problem though. - */ - UNUSED int result = syscall(SYS_write, STDERR_FILENO, s, strlen(s)); -#else - UNUSED int result = write(STDERR_FILENO, s, strlen(s)); -#endif -} - -JEMALLOC_EXPORT void (*je_malloc_message)(void *, const char *s); - -/* - * Wrapper around malloc_message() that avoids the need for - * je_malloc_message(...) throughout the code. - */ -void -malloc_write(const char *s) -{ - - if (je_malloc_message != NULL) - je_malloc_message(NULL, s); - else - wrtmessage(NULL, s); -} - -/* - * glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so - * provide a wrapper. - */ -int -buferror(int err, char *buf, size_t buflen) -{ - -#ifdef _WIN32 - FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, GetLastError(), 0, - (LPSTR)buf, buflen, NULL); - return (0); -#elif defined(_GNU_SOURCE) - char *b = strerror_r(err, buf, buflen); - if (b != buf) { - strncpy(buf, b, buflen); - buf[buflen-1] = '\0'; - } - return (0); -#else - return (strerror_r(err, buf, buflen)); -#endif -} - -uintmax_t -malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) -{ - uintmax_t ret, digit; - int b; - bool neg; - const char *p, *ns; - - p = nptr; - if (base < 0 || base == 1 || base > 36) { - ns = p; - set_errno(EINVAL); - ret = UINTMAX_MAX; - goto label_return; - } - b = base; - - /* Swallow leading whitespace and get sign, if any. */ - neg = false; - while (true) { - switch (*p) { - case '\t': case '\n': case '\v': case '\f': case '\r': case ' ': - p++; - break; - case '-': - neg = true; - /* Fall through. */ - case '+': - p++; - /* Fall through. */ - default: - goto label_prefix; - } - } - - /* Get prefix, if any. */ - label_prefix: - /* - * Note where the first non-whitespace/sign character is so that it is - * possible to tell whether any digits are consumed (e.g., " 0" vs. - * " -x"). - */ - ns = p; - if (*p == '0') { - switch (p[1]) { - case '0': case '1': case '2': case '3': case '4': case '5': - case '6': case '7': - if (b == 0) - b = 8; - if (b == 8) - p++; - break; - case 'X': case 'x': - switch (p[2]) { - case '0': case '1': case '2': case '3': case '4': - case '5': case '6': case '7': case '8': case '9': - case 'A': case 'B': case 'C': case 'D': case 'E': - case 'F': - case 'a': case 'b': case 'c': case 'd': case 'e': - case 'f': - if (b == 0) - b = 16; - if (b == 16) - p += 2; - break; - default: - break; - } - break; - default: - p++; - ret = 0; - goto label_return; - } - } - if (b == 0) - b = 10; - - /* Convert. 
*/ - ret = 0; - while ((*p >= '0' && *p <= '9' && (digit = *p - '0') < b) - || (*p >= 'A' && *p <= 'Z' && (digit = 10 + *p - 'A') < b) - || (*p >= 'a' && *p <= 'z' && (digit = 10 + *p - 'a') < b)) { - uintmax_t pret = ret; - ret *= b; - ret += digit; - if (ret < pret) { - /* Overflow. */ - set_errno(ERANGE); - ret = UINTMAX_MAX; - goto label_return; - } - p++; - } - if (neg) - ret = -ret; - - if (p == ns) { - /* No conversion performed. */ - set_errno(EINVAL); - ret = UINTMAX_MAX; - goto label_return; - } - -label_return: - if (endptr != NULL) { - if (p == ns) { - /* No characters were converted. */ - *endptr = (char *)nptr; - } else - *endptr = (char *)p; - } - return (ret); -} - -static char * -u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p) -{ - unsigned i; - - i = U2S_BUFSIZE - 1; - s[i] = '\0'; - switch (base) { - case 10: - do { - i--; - s[i] = "0123456789"[x % (uint64_t)10]; - x /= (uint64_t)10; - } while (x > 0); - break; - case 16: { - const char *digits = (uppercase) - ? "0123456789ABCDEF" - : "0123456789abcdef"; - - do { - i--; - s[i] = digits[x & 0xf]; - x >>= 4; - } while (x > 0); - break; - } default: { - const char *digits = (uppercase) - ? "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" - : "0123456789abcdefghijklmnopqrstuvwxyz"; - - assert(base >= 2 && base <= 36); - do { - i--; - s[i] = digits[x % (uint64_t)base]; - x /= (uint64_t)base; - } while (x > 0); - }} - - *slen_p = U2S_BUFSIZE - 1 - i; - return (&s[i]); -} - -static char * -d2s(intmax_t x, char sign, char *s, size_t *slen_p) -{ - bool neg; - - if ((neg = (x < 0))) - x = -x; - s = u2s(x, 10, false, s, slen_p); - if (neg) - sign = '-'; - switch (sign) { - case '-': - if (neg == false) - break; - /* Fall through. */ - case ' ': - case '+': - s--; - (*slen_p)++; - *s = sign; - break; - default: not_reached(); - } - return (s); -} - -static char * -o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p) -{ - - s = u2s(x, 8, false, s, slen_p); - if (alt_form && *s != '0') { - s--; - (*slen_p)++; - *s = '0'; - } - return (s); -} - -static char * -x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p) -{ - - s = u2s(x, 16, uppercase, s, slen_p); - if (alt_form) { - s -= 2; - (*slen_p) += 2; - memcpy(s, uppercase ? "0X" : "0x", 2); - } - return (s); -} - -int -malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) -{ - int ret; - size_t i; - const char *f; - -#define APPEND_C(c) do { \ - if (i < size) \ - str[i] = (c); \ - i++; \ -} while (0) -#define APPEND_S(s, slen) do { \ - if (i < size) { \ - size_t cpylen = (slen <= size - i) ? slen : size - i; \ - memcpy(&str[i], s, cpylen); \ - } \ - i += slen; \ -} while (0) -#define APPEND_PADDED_S(s, slen, width, left_justify) do { \ - /* Left padding. */ \ - size_t pad_len = (width == -1) ? 0 : ((slen < (size_t)width) ? \ - (size_t)width - slen : 0); \ - if (left_justify == false && pad_len != 0) { \ - size_t j; \ - for (j = 0; j < pad_len; j++) \ - APPEND_C(' '); \ - } \ - /* Value. */ \ - APPEND_S(s, slen); \ - /* Right padding. */ \ - if (left_justify && pad_len != 0) { \ - size_t j; \ - for (j = 0; j < pad_len; j++) \ - APPEND_C(' '); \ - } \ -} while (0) -#define GET_ARG_NUMERIC(val, len) do { \ - switch (len) { \ - case '?': \ - val = va_arg(ap, int); \ - break; \ - case '?' 
| 0x80: \ - val = va_arg(ap, unsigned int); \ - break; \ - case 'l': \ - val = va_arg(ap, long); \ - break; \ - case 'l' | 0x80: \ - val = va_arg(ap, unsigned long); \ - break; \ - case 'q': \ - val = va_arg(ap, long long); \ - break; \ - case 'q' | 0x80: \ - val = va_arg(ap, unsigned long long); \ - break; \ - case 'j': \ - val = va_arg(ap, intmax_t); \ - break; \ - case 'j' | 0x80: \ - val = va_arg(ap, uintmax_t); \ - break; \ - case 't': \ - val = va_arg(ap, ptrdiff_t); \ - break; \ - case 'z': \ - val = va_arg(ap, ssize_t); \ - break; \ - case 'z' | 0x80: \ - val = va_arg(ap, size_t); \ - break; \ - case 'p': /* Synthetic; used for %p. */ \ - val = va_arg(ap, uintptr_t); \ - break; \ - default: not_reached(); \ - } \ -} while (0) - - i = 0; - f = format; - while (true) { - switch (*f) { - case '\0': goto label_out; - case '%': { - bool alt_form = false; - bool left_justify = false; - bool plus_space = false; - bool plus_plus = false; - int prec = -1; - int width = -1; - unsigned char len = '?'; - - f++; - /* Flags. */ - while (true) { - switch (*f) { - case '#': - assert(alt_form == false); - alt_form = true; - break; - case '-': - assert(left_justify == false); - left_justify = true; - break; - case ' ': - assert(plus_space == false); - plus_space = true; - break; - case '+': - assert(plus_plus == false); - plus_plus = true; - break; - default: goto label_width; - } - f++; - } - /* Width. */ - label_width: - switch (*f) { - case '*': - width = va_arg(ap, int); - f++; - if (width < 0) { - left_justify = true; - width = -width; - } - break; - case '0': case '1': case '2': case '3': case '4': - case '5': case '6': case '7': case '8': case '9': { - uintmax_t uwidth; - set_errno(0); - uwidth = malloc_strtoumax(f, (char **)&f, 10); - assert(uwidth != UINTMAX_MAX || get_errno() != - ERANGE); - width = (int)uwidth; - break; - } default: - break; - } - /* Width/precision separator. */ - if (*f == '.') - f++; - else - goto label_length; - /* Precision. */ - switch (*f) { - case '*': - prec = va_arg(ap, int); - f++; - break; - case '0': case '1': case '2': case '3': case '4': - case '5': case '6': case '7': case '8': case '9': { - uintmax_t uprec; - set_errno(0); - uprec = malloc_strtoumax(f, (char **)&f, 10); - assert(uprec != UINTMAX_MAX || get_errno() != - ERANGE); - prec = (int)uprec; - break; - } - default: break; - } - /* Length. */ - label_length: - switch (*f) { - case 'l': - f++; - if (*f == 'l') { - len = 'q'; - f++; - } else - len = 'l'; - break; - case 'q': case 'j': case 't': case 'z': - len = *f; - f++; - break; - default: break; - } - /* Conversion specifier. */ - switch (*f) { - char *s; - size_t slen; - case '%': - /* %% */ - APPEND_C(*f); - f++; - break; - case 'd': case 'i': { - intmax_t val JEMALLOC_CC_SILENCE_INIT(0); - char buf[D2S_BUFSIZE]; - - GET_ARG_NUMERIC(val, len); - s = d2s(val, (plus_plus ? '+' : (plus_space ? 
- ' ' : '-')), buf, &slen); - APPEND_PADDED_S(s, slen, width, left_justify); - f++; - break; - } case 'o': { - uintmax_t val JEMALLOC_CC_SILENCE_INIT(0); - char buf[O2S_BUFSIZE]; - - GET_ARG_NUMERIC(val, len | 0x80); - s = o2s(val, alt_form, buf, &slen); - APPEND_PADDED_S(s, slen, width, left_justify); - f++; - break; - } case 'u': { - uintmax_t val JEMALLOC_CC_SILENCE_INIT(0); - char buf[U2S_BUFSIZE]; - - GET_ARG_NUMERIC(val, len | 0x80); - s = u2s(val, 10, false, buf, &slen); - APPEND_PADDED_S(s, slen, width, left_justify); - f++; - break; - } case 'x': case 'X': { - uintmax_t val JEMALLOC_CC_SILENCE_INIT(0); - char buf[X2S_BUFSIZE]; - - GET_ARG_NUMERIC(val, len | 0x80); - s = x2s(val, alt_form, *f == 'X', buf, &slen); - APPEND_PADDED_S(s, slen, width, left_justify); - f++; - break; - } case 'c': { - unsigned char val; - char buf[2]; - - assert(len == '?' || len == 'l'); - assert_not_implemented(len != 'l'); - val = va_arg(ap, int); - buf[0] = val; - buf[1] = '\0'; - APPEND_PADDED_S(buf, 1, width, left_justify); - f++; - break; - } case 's': - assert(len == '?' || len == 'l'); - assert_not_implemented(len != 'l'); - s = va_arg(ap, char *); - slen = (prec < 0) ? strlen(s) : prec; - APPEND_PADDED_S(s, slen, width, left_justify); - f++; - break; - case 'p': { - uintmax_t val; - char buf[X2S_BUFSIZE]; - - GET_ARG_NUMERIC(val, 'p'); - s = x2s(val, true, false, buf, &slen); - APPEND_PADDED_S(s, slen, width, left_justify); - f++; - break; - } default: not_reached(); - } - break; - } default: { - APPEND_C(*f); - f++; - break; - }} - } - label_out: - if (i < size) - str[i] = '\0'; - else - str[size - 1] = '\0'; - ret = i; - -#undef APPEND_C -#undef APPEND_S -#undef APPEND_PADDED_S -#undef GET_ARG_NUMERIC - return (ret); -} - -JEMALLOC_ATTR(format(printf, 3, 4)) -int -malloc_snprintf(char *str, size_t size, const char *format, ...) -{ - int ret; - va_list ap; - - va_start(ap, format); - ret = malloc_vsnprintf(str, size, format, ap); - va_end(ap); - - return (ret); -} - -void -malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque, - const char *format, va_list ap) -{ - char buf[MALLOC_PRINTF_BUFSIZE]; - - if (write_cb == NULL) { - /* - * The caller did not provide an alternate write_cb callback - * function, so use the default one. malloc_write() is an - * inline function, so use malloc_message() directly here. - */ - write_cb = (je_malloc_message != NULL) ? je_malloc_message : - wrtmessage; - cbopaque = NULL; - } - - malloc_vsnprintf(buf, sizeof(buf), format, ap); - write_cb(cbopaque, buf); -} - -/* - * Print to a callback function in such a way as to (hopefully) avoid memory - * allocation. - */ -JEMALLOC_ATTR(format(printf, 3, 4)) -void -malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque, - const char *format, ...) -{ - va_list ap; - - va_start(ap, format); - malloc_vcprintf(write_cb, cbopaque, format, ap); - va_end(ap); -} - -/* Print to stderr in such a way as to avoid memory allocation. */ -JEMALLOC_ATTR(format(printf, 1, 2)) -void -malloc_printf(const char *format, ...) -{ - va_list ap; - - va_start(ap, format); - malloc_vcprintf(NULL, NULL, format, ap); - va_end(ap); -} diff --git a/lib/jemalloc-3.6.0/src/zone.c b/lib/jemalloc-3.6.0/src/zone.c deleted file mode 100644 index e0302ef..0000000 --- a/lib/jemalloc-3.6.0/src/zone.c +++ /dev/null @@ -1,258 +0,0 @@ -#include "jemalloc/internal/jemalloc_internal.h" -#ifndef JEMALLOC_ZONE -# error "This source file is for zones on Darwin (OS X)." 
-#endif - -/* - * The malloc_default_purgeable_zone function is only available on >= 10.6. - * We need to check whether it is present at runtime, thus the weak_import. - */ -extern malloc_zone_t *malloc_default_purgeable_zone(void) -JEMALLOC_ATTR(weak_import); - -/******************************************************************************/ -/* Data. */ - -static malloc_zone_t zone; -static struct malloc_introspection_t zone_introspect; - -/******************************************************************************/ -/* Function prototypes for non-inline static functions. */ - -static size_t zone_size(malloc_zone_t *zone, void *ptr); -static void *zone_malloc(malloc_zone_t *zone, size_t size); -static void *zone_calloc(malloc_zone_t *zone, size_t num, size_t size); -static void *zone_valloc(malloc_zone_t *zone, size_t size); -static void zone_free(malloc_zone_t *zone, void *ptr); -static void *zone_realloc(malloc_zone_t *zone, void *ptr, size_t size); -#if (JEMALLOC_ZONE_VERSION >= 5) -static void *zone_memalign(malloc_zone_t *zone, size_t alignment, -#endif -#if (JEMALLOC_ZONE_VERSION >= 6) - size_t size); -static void zone_free_definite_size(malloc_zone_t *zone, void *ptr, - size_t size); -#endif -static void *zone_destroy(malloc_zone_t *zone); -static size_t zone_good_size(malloc_zone_t *zone, size_t size); -static void zone_force_lock(malloc_zone_t *zone); -static void zone_force_unlock(malloc_zone_t *zone); - -/******************************************************************************/ -/* - * Functions. - */ - -static size_t -zone_size(malloc_zone_t *zone, void *ptr) -{ - - /* - * There appear to be places within Darwin (such as setenv(3)) that - * cause calls to this function with pointers that *no* zone owns. If - * we knew that all pointers were owned by *some* zone, we could split - * our zone into two parts, and use one as the default allocator and - * the other as the default deallocator/reallocator. Since that will - * not work in practice, we must check all pointers to assure that they - * reside within a mapped chunk before determining size. - */ - return (ivsalloc(ptr, config_prof)); -} - -static void * -zone_malloc(malloc_zone_t *zone, size_t size) -{ - - return (je_malloc(size)); -} - -static void * -zone_calloc(malloc_zone_t *zone, size_t num, size_t size) -{ - - return (je_calloc(num, size)); -} - -static void * -zone_valloc(malloc_zone_t *zone, size_t size) -{ - void *ret = NULL; /* Assignment avoids useless compiler warning. */ - - je_posix_memalign(&ret, PAGE, size); - - return (ret); -} - -static void -zone_free(malloc_zone_t *zone, void *ptr) -{ - - if (ivsalloc(ptr, config_prof) != 0) { - je_free(ptr); - return; - } - - free(ptr); -} - -static void * -zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) -{ - - if (ivsalloc(ptr, config_prof) != 0) - return (je_realloc(ptr, size)); - - return (realloc(ptr, size)); -} - -#if (JEMALLOC_ZONE_VERSION >= 5) -static void * -zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size) -{ - void *ret = NULL; /* Assignment avoids useless compiler warning. */ - - je_posix_memalign(&ret, alignment, size); - - return (ret); -} -#endif - -#if (JEMALLOC_ZONE_VERSION >= 6) -static void -zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size) -{ - - if (ivsalloc(ptr, config_prof) != 0) { - assert(ivsalloc(ptr, config_prof) == size); - je_free(ptr); - return; - } - - free(ptr); -} -#endif - -static void * -zone_destroy(malloc_zone_t *zone) -{ - - /* This function should never be called. 
*/ - not_reached(); - return (NULL); -} - -static size_t -zone_good_size(malloc_zone_t *zone, size_t size) -{ - - if (size == 0) - size = 1; - return (s2u(size)); -} - -static void -zone_force_lock(malloc_zone_t *zone) -{ - - if (isthreaded) - jemalloc_prefork(); -} - -static void -zone_force_unlock(malloc_zone_t *zone) -{ - - if (isthreaded) - jemalloc_postfork_parent(); -} - -JEMALLOC_ATTR(constructor) -void -register_zone(void) -{ - - /* - * If something else replaced the system default zone allocator, don't - * register jemalloc's. - */ - malloc_zone_t *default_zone = malloc_default_zone(); - if (!default_zone->zone_name || - strcmp(default_zone->zone_name, "DefaultMallocZone") != 0) { - return; - } - - zone.size = (void *)zone_size; - zone.malloc = (void *)zone_malloc; - zone.calloc = (void *)zone_calloc; - zone.valloc = (void *)zone_valloc; - zone.free = (void *)zone_free; - zone.realloc = (void *)zone_realloc; - zone.destroy = (void *)zone_destroy; - zone.zone_name = "jemalloc_zone"; - zone.batch_malloc = NULL; - zone.batch_free = NULL; - zone.introspect = &zone_introspect; - zone.version = JEMALLOC_ZONE_VERSION; -#if (JEMALLOC_ZONE_VERSION >= 5) - zone.memalign = zone_memalign; -#endif -#if (JEMALLOC_ZONE_VERSION >= 6) - zone.free_definite_size = zone_free_definite_size; -#endif -#if (JEMALLOC_ZONE_VERSION >= 8) - zone.pressure_relief = NULL; -#endif - - zone_introspect.enumerator = NULL; - zone_introspect.good_size = (void *)zone_good_size; - zone_introspect.check = NULL; - zone_introspect.print = NULL; - zone_introspect.log = NULL; - zone_introspect.force_lock = (void *)zone_force_lock; - zone_introspect.force_unlock = (void *)zone_force_unlock; - zone_introspect.statistics = NULL; -#if (JEMALLOC_ZONE_VERSION >= 6) - zone_introspect.zone_locked = NULL; -#endif -#if (JEMALLOC_ZONE_VERSION >= 7) - zone_introspect.enable_discharge_checking = NULL; - zone_introspect.disable_discharge_checking = NULL; - zone_introspect.discharge = NULL; -#ifdef __BLOCKS__ - zone_introspect.enumerate_discharged_pointers = NULL; -#else - zone_introspect.enumerate_unavailable_without_blocks = NULL; -#endif -#endif - - /* - * The default purgeable zone is created lazily by OSX's libc. It uses - * the default zone when it is created for "small" allocations - * (< 15 KiB), but assumes the default zone is a scalable_zone. This - * obviously fails when the default zone is the jemalloc zone, so - * malloc_default_purgeable_zone is called beforehand so that the - * default purgeable zone is created when the default zone is still - * a scalable_zone. As purgeable zones only exist on >= 10.6, we need - * to check for the existence of malloc_default_purgeable_zone() at - * run time. - */ - if (malloc_default_purgeable_zone != NULL) - malloc_default_purgeable_zone(); - - /* Register the custom zone. At this point it won't be the default. */ - malloc_zone_register(&zone); - - /* - * Unregister and reregister the default zone. On OSX >= 10.6, - * unregistering takes the last registered zone and places it at the - * location of the specified zone. Unregistering the default zone thus - * makes the last registered one the default. On OSX < 10.6, - * unregistering shifts all registered zones. The first registered zone - * then becomes the default. 
- */ - do { - default_zone = malloc_default_zone(); - malloc_zone_unregister(default_zone); - malloc_zone_register(default_zone); - } while (malloc_default_zone() != &zone); -} diff --git a/lib/jemalloc-3.6.0/test/include/test/SFMT-alti.h b/lib/jemalloc-3.6.0/test/include/test/SFMT-alti.h deleted file mode 100644 index 0005df6..0000000 --- a/lib/jemalloc-3.6.0/test/include/test/SFMT-alti.h +++ /dev/null @@ -1,186 +0,0 @@ -/* - * This file derives from SFMT 1.3.3 - * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was - * released under the terms of the following license: - * - * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima - * University. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * * Neither the name of the Hiroshima University nor the names of - * its contributors may be used to endorse or promote products - * derived from this software without specific prior written - * permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -/** - * @file SFMT-alti.h - * - * @brief SIMD oriented Fast Mersenne Twister(SFMT) - * pseudorandom number generator - * - * @author Mutsuo Saito (Hiroshima University) - * @author Makoto Matsumoto (Hiroshima University) - * - * Copyright (C) 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima - * University. All rights reserved. - * - * The new BSD License is applied to this software. - * see LICENSE.txt - */ - -#ifndef SFMT_ALTI_H -#define SFMT_ALTI_H - -/** - * This function represents the recursion formula in AltiVec and BIG ENDIAN. 
- * @param a a 128-bit part of the interal state array - * @param b a 128-bit part of the interal state array - * @param c a 128-bit part of the interal state array - * @param d a 128-bit part of the interal state array - * @return output - */ -JEMALLOC_ALWAYS_INLINE -vector unsigned int vec_recursion(vector unsigned int a, - vector unsigned int b, - vector unsigned int c, - vector unsigned int d) { - - const vector unsigned int sl1 = ALTI_SL1; - const vector unsigned int sr1 = ALTI_SR1; -#ifdef ONLY64 - const vector unsigned int mask = ALTI_MSK64; - const vector unsigned char perm_sl = ALTI_SL2_PERM64; - const vector unsigned char perm_sr = ALTI_SR2_PERM64; -#else - const vector unsigned int mask = ALTI_MSK; - const vector unsigned char perm_sl = ALTI_SL2_PERM; - const vector unsigned char perm_sr = ALTI_SR2_PERM; -#endif - vector unsigned int v, w, x, y, z; - x = vec_perm(a, (vector unsigned int)perm_sl, perm_sl); - v = a; - y = vec_sr(b, sr1); - z = vec_perm(c, (vector unsigned int)perm_sr, perm_sr); - w = vec_sl(d, sl1); - z = vec_xor(z, w); - y = vec_and(y, mask); - v = vec_xor(v, x); - z = vec_xor(z, y); - z = vec_xor(z, v); - return z; -} - -/** - * This function fills the internal state array with pseudorandom - * integers. - */ -JEMALLOC_INLINE void gen_rand_all(sfmt_t *ctx) { - int i; - vector unsigned int r, r1, r2; - - r1 = ctx->sfmt[N - 2].s; - r2 = ctx->sfmt[N - 1].s; - for (i = 0; i < N - POS1; i++) { - r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1].s, r1, r2); - ctx->sfmt[i].s = r; - r1 = r2; - r2 = r; - } - for (; i < N; i++) { - r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1 - N].s, r1, r2); - ctx->sfmt[i].s = r; - r1 = r2; - r2 = r; - } -} - -/** - * This function fills the user-specified array with pseudorandom - * integers. - * - * @param array an 128-bit array to be filled by pseudorandom numbers. - * @param size number of 128-bit pesudorandom numbers to be generated. - */ -JEMALLOC_INLINE void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { - int i, j; - vector unsigned int r, r1, r2; - - r1 = ctx->sfmt[N - 2].s; - r2 = ctx->sfmt[N - 1].s; - for (i = 0; i < N - POS1; i++) { - r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1].s, r1, r2); - array[i].s = r; - r1 = r2; - r2 = r; - } - for (; i < N; i++) { - r = vec_recursion(ctx->sfmt[i].s, array[i + POS1 - N].s, r1, r2); - array[i].s = r; - r1 = r2; - r2 = r; - } - /* main loop */ - for (; i < size - N; i++) { - r = vec_recursion(array[i - N].s, array[i + POS1 - N].s, r1, r2); - array[i].s = r; - r1 = r2; - r2 = r; - } - for (j = 0; j < 2 * N - size; j++) { - ctx->sfmt[j].s = array[j + size - N].s; - } - for (; i < size; i++) { - r = vec_recursion(array[i - N].s, array[i + POS1 - N].s, r1, r2); - array[i].s = r; - ctx->sfmt[j++].s = r; - r1 = r2; - r2 = r; - } -} - -#ifndef ONLY64 -#if defined(__APPLE__) -#define ALTI_SWAP (vector unsigned char) \ - (4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11) -#else -#define ALTI_SWAP {4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11} -#endif -/** - * This function swaps high and low 32-bit of 64-bit integers in user - * specified array. - * - * @param array an 128-bit array to be swaped. - * @param size size of 128-bit array. 
- */ -JEMALLOC_INLINE void swap(w128_t *array, int size) { - int i; - const vector unsigned char perm = ALTI_SWAP; - - for (i = 0; i < size; i++) { - array[i].s = vec_perm(array[i].s, (vector unsigned int)perm, perm); - } -} -#endif - -#endif diff --git a/lib/jemalloc-3.6.0/test/include/test/SFMT-params.h b/lib/jemalloc-3.6.0/test/include/test/SFMT-params.h deleted file mode 100644 index ade6622..0000000 --- a/lib/jemalloc-3.6.0/test/include/test/SFMT-params.h +++ /dev/null @@ -1,132 +0,0 @@ -/* - * This file derives from SFMT 1.3.3 - * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was - * released under the terms of the following license: - * - * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima - * University. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * * Neither the name of the Hiroshima University nor the names of - * its contributors may be used to endorse or promote products - * derived from this software without specific prior written - * permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -#ifndef SFMT_PARAMS_H -#define SFMT_PARAMS_H - -#if !defined(MEXP) -#ifdef __GNUC__ - #warning "MEXP is not defined. I assume MEXP is 19937." -#endif - #define MEXP 19937 -#endif -/*----------------- - BASIC DEFINITIONS - -----------------*/ -/** Mersenne Exponent. The period of the sequence - * is a multiple of 2^MEXP-1. - * #define MEXP 19937 */ -/** SFMT generator has an internal state array of 128-bit integers, - * and N is its size. */ -#define N (MEXP / 128 + 1) -/** N32 is the size of internal state array when regarded as an array - * of 32-bit integers.*/ -#define N32 (N * 4) -/** N64 is the size of internal state array when regarded as an array - * of 64-bit integers.*/ -#define N64 (N * 2) - -/*---------------------- - the parameters of SFMT - following definitions are in paramsXXXX.h file. - ----------------------*/ -/** the pick up position of the array. -#define POS1 122 -*/ - -/** the parameter of shift left as four 32-bit registers. -#define SL1 18 - */ - -/** the parameter of shift left as one 128-bit register. - * The 128-bit integer is shifted by (SL2 * 8) bits. -#define SL2 1 -*/ - -/** the parameter of shift right as four 32-bit registers. 
-#define SR1 11 -*/ - -/** the parameter of shift right as one 128-bit register. - * The 128-bit integer is shifted by (SL2 * 8) bits. -#define SR2 1 -*/ - -/** A bitmask, used in the recursion. These parameters are introduced - * to break symmetry of SIMD. -#define MSK1 0xdfffffefU -#define MSK2 0xddfecb7fU -#define MSK3 0xbffaffffU -#define MSK4 0xbffffff6U -*/ - -/** These definitions are part of a 128-bit period certification vector. -#define PARITY1 0x00000001U -#define PARITY2 0x00000000U -#define PARITY3 0x00000000U -#define PARITY4 0xc98e126aU -*/ - -#if MEXP == 607 - #include "test/SFMT-params607.h" -#elif MEXP == 1279 - #include "test/SFMT-params1279.h" -#elif MEXP == 2281 - #include "test/SFMT-params2281.h" -#elif MEXP == 4253 - #include "test/SFMT-params4253.h" -#elif MEXP == 11213 - #include "test/SFMT-params11213.h" -#elif MEXP == 19937 - #include "test/SFMT-params19937.h" -#elif MEXP == 44497 - #include "test/SFMT-params44497.h" -#elif MEXP == 86243 - #include "test/SFMT-params86243.h" -#elif MEXP == 132049 - #include "test/SFMT-params132049.h" -#elif MEXP == 216091 - #include "test/SFMT-params216091.h" -#else -#ifdef __GNUC__ - #error "MEXP is not valid." - #undef MEXP -#else - #undef MEXP -#endif - -#endif - -#endif /* SFMT_PARAMS_H */ diff --git a/lib/jemalloc-3.6.0/test/include/test/SFMT-params11213.h b/lib/jemalloc-3.6.0/test/include/test/SFMT-params11213.h deleted file mode 100644 index 2994bd2..0000000 --- a/lib/jemalloc-3.6.0/test/include/test/SFMT-params11213.h +++ /dev/null @@ -1,81 +0,0 @@ -/* - * This file derives from SFMT 1.3.3 - * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was - * released under the terms of the following license: - * - * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima - * University. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * * Neither the name of the Hiroshima University nor the names of - * its contributors may be used to endorse or promote products - * derived from this software without specific prior written - * permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ -#ifndef SFMT_PARAMS11213_H -#define SFMT_PARAMS11213_H - -#define POS1 68 -#define SL1 14 -#define SL2 3 -#define SR1 7 -#define SR2 3 -#define MSK1 0xeffff7fbU -#define MSK2 0xffffffefU -#define MSK3 0xdfdfbfffU -#define MSK4 0x7fffdbfdU -#define PARITY1 0x00000001U -#define PARITY2 0x00000000U -#define PARITY3 0xe8148000U -#define PARITY4 0xd0c7afa3U - - -/* PARAMETERS FOR ALTIVEC */ -#if defined(__APPLE__) /* For OSX */ - #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) - #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) - #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) - #define ALTI_MSK64 \ - (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) - #define ALTI_SL2_PERM \ - (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) - #define ALTI_SL2_PERM64 \ - (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) - #define ALTI_SR2_PERM \ - (vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12) - #define ALTI_SR2_PERM64 \ - (vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12) -#else /* For OTHER OSs(Linux?) */ - #define ALTI_SL1 {SL1, SL1, SL1, SL1} - #define ALTI_SR1 {SR1, SR1, SR1, SR1} - #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} - #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} - #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} - #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} - #define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12} - #define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12} -#endif /* For OSX */ -#define IDSTR "SFMT-11213:68-14-3-7-3:effff7fb-ffffffef-dfdfbfff-7fffdbfd" - -#endif /* SFMT_PARAMS11213_H */ diff --git a/lib/jemalloc-3.6.0/test/include/test/SFMT-params1279.h b/lib/jemalloc-3.6.0/test/include/test/SFMT-params1279.h deleted file mode 100644 index d7959f9..0000000 --- a/lib/jemalloc-3.6.0/test/include/test/SFMT-params1279.h +++ /dev/null @@ -1,81 +0,0 @@ -/* - * This file derives from SFMT 1.3.3 - * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was - * released under the terms of the following license: - * - * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima - * University. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * * Neither the name of the Hiroshima University nor the names of - * its contributors may be used to endorse or promote products - * derived from this software without specific prior written - * permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -#ifndef SFMT_PARAMS1279_H -#define SFMT_PARAMS1279_H - -#define POS1 7 -#define SL1 14 -#define SL2 3 -#define SR1 5 -#define SR2 1 -#define MSK1 0xf7fefffdU -#define MSK2 0x7fefcfffU -#define MSK3 0xaff3ef3fU -#define MSK4 0xb5ffff7fU -#define PARITY1 0x00000001U -#define PARITY2 0x00000000U -#define PARITY3 0x00000000U -#define PARITY4 0x20000000U - - -/* PARAMETERS FOR ALTIVEC */ -#if defined(__APPLE__) /* For OSX */ - #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) - #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) - #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) - #define ALTI_MSK64 \ - (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) - #define ALTI_SL2_PERM \ - (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) - #define ALTI_SL2_PERM64 \ - (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) - #define ALTI_SR2_PERM \ - (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) - #define ALTI_SR2_PERM64 \ - (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) -#else /* For OTHER OSs(Linux?) */ - #define ALTI_SL1 {SL1, SL1, SL1, SL1} - #define ALTI_SR1 {SR1, SR1, SR1, SR1} - #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} - #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} - #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} - #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} - #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} - #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} -#endif /* For OSX */ -#define IDSTR "SFMT-1279:7-14-3-5-1:f7fefffd-7fefcfff-aff3ef3f-b5ffff7f" - -#endif /* SFMT_PARAMS1279_H */ diff --git a/lib/jemalloc-3.6.0/test/include/test/SFMT-params132049.h b/lib/jemalloc-3.6.0/test/include/test/SFMT-params132049.h deleted file mode 100644 index a1dcec3..0000000 --- a/lib/jemalloc-3.6.0/test/include/test/SFMT-params132049.h +++ /dev/null @@ -1,81 +0,0 @@ -/* - * This file derives from SFMT 1.3.3 - * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was - * released under the terms of the following license: - * - * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima - * University. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * * Neither the name of the Hiroshima University nor the names of - * its contributors may be used to endorse or promote products - * derived from this software without specific prior written - * permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -#ifndef SFMT_PARAMS132049_H -#define SFMT_PARAMS132049_H - -#define POS1 110 -#define SL1 19 -#define SL2 1 -#define SR1 21 -#define SR2 1 -#define MSK1 0xffffbb5fU -#define MSK2 0xfb6ebf95U -#define MSK3 0xfffefffaU -#define MSK4 0xcff77fffU -#define PARITY1 0x00000001U -#define PARITY2 0x00000000U -#define PARITY3 0xcb520000U -#define PARITY4 0xc7e91c7dU - - -/* PARAMETERS FOR ALTIVEC */ -#if defined(__APPLE__) /* For OSX */ - #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) - #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) - #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) - #define ALTI_MSK64 \ - (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) - #define ALTI_SL2_PERM \ - (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8) - #define ALTI_SL2_PERM64 \ - (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0) - #define ALTI_SR2_PERM \ - (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) - #define ALTI_SR2_PERM64 \ - (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) -#else /* For OTHER OSs(Linux?) */ - #define ALTI_SL1 {SL1, SL1, SL1, SL1} - #define ALTI_SR1 {SR1, SR1, SR1, SR1} - #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} - #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} - #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8} - #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0} - #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} - #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} -#endif /* For OSX */ -#define IDSTR "SFMT-132049:110-19-1-21-1:ffffbb5f-fb6ebf95-fffefffa-cff77fff" - -#endif /* SFMT_PARAMS132049_H */ diff --git a/lib/jemalloc-3.6.0/test/include/test/SFMT-params19937.h b/lib/jemalloc-3.6.0/test/include/test/SFMT-params19937.h deleted file mode 100644 index fb92b4c..0000000 --- a/lib/jemalloc-3.6.0/test/include/test/SFMT-params19937.h +++ /dev/null @@ -1,81 +0,0 @@ -/* - * This file derives from SFMT 1.3.3 - * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was - * released under the terms of the following license: - * - * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima - * University. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. 
- * * Neither the name of the Hiroshima University nor the names of - * its contributors may be used to endorse or promote products - * derived from this software without specific prior written - * permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -#ifndef SFMT_PARAMS19937_H -#define SFMT_PARAMS19937_H - -#define POS1 122 -#define SL1 18 -#define SL2 1 -#define SR1 11 -#define SR2 1 -#define MSK1 0xdfffffefU -#define MSK2 0xddfecb7fU -#define MSK3 0xbffaffffU -#define MSK4 0xbffffff6U -#define PARITY1 0x00000001U -#define PARITY2 0x00000000U -#define PARITY3 0x00000000U -#define PARITY4 0x13c9e684U - - -/* PARAMETERS FOR ALTIVEC */ -#if defined(__APPLE__) /* For OSX */ - #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) - #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) - #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) - #define ALTI_MSK64 \ - (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) - #define ALTI_SL2_PERM \ - (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8) - #define ALTI_SL2_PERM64 \ - (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0) - #define ALTI_SR2_PERM \ - (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) - #define ALTI_SR2_PERM64 \ - (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) -#else /* For OTHER OSs(Linux?) */ - #define ALTI_SL1 {SL1, SL1, SL1, SL1} - #define ALTI_SR1 {SR1, SR1, SR1, SR1} - #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} - #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} - #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8} - #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0} - #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} - #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} -#endif /* For OSX */ -#define IDSTR "SFMT-19937:122-18-1-11-1:dfffffef-ddfecb7f-bffaffff-bffffff6" - -#endif /* SFMT_PARAMS19937_H */ diff --git a/lib/jemalloc-3.6.0/test/include/test/SFMT-params216091.h b/lib/jemalloc-3.6.0/test/include/test/SFMT-params216091.h deleted file mode 100644 index 125ce28..0000000 --- a/lib/jemalloc-3.6.0/test/include/test/SFMT-params216091.h +++ /dev/null @@ -1,81 +0,0 @@ -/* - * This file derives from SFMT 1.3.3 - * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was - * released under the terms of the following license: - * - * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima - * University. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * * Neither the name of the Hiroshima University nor the names of - * its contributors may be used to endorse or promote products - * derived from this software without specific prior written - * permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -#ifndef SFMT_PARAMS216091_H -#define SFMT_PARAMS216091_H - -#define POS1 627 -#define SL1 11 -#define SL2 3 -#define SR1 10 -#define SR2 1 -#define MSK1 0xbff7bff7U -#define MSK2 0xbfffffffU -#define MSK3 0xbffffa7fU -#define MSK4 0xffddfbfbU -#define PARITY1 0xf8000001U -#define PARITY2 0x89e80709U -#define PARITY3 0x3bd2b64bU -#define PARITY4 0x0c64b1e4U - - -/* PARAMETERS FOR ALTIVEC */ -#if defined(__APPLE__) /* For OSX */ - #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) - #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) - #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) - #define ALTI_MSK64 \ - (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) - #define ALTI_SL2_PERM \ - (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) - #define ALTI_SL2_PERM64 \ - (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) - #define ALTI_SR2_PERM \ - (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) - #define ALTI_SR2_PERM64 \ - (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) -#else /* For OTHER OSs(Linux?) */ - #define ALTI_SL1 {SL1, SL1, SL1, SL1} - #define ALTI_SR1 {SR1, SR1, SR1, SR1} - #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} - #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} - #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} - #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} - #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} - #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} -#endif /* For OSX */ -#define IDSTR "SFMT-216091:627-11-3-10-1:bff7bff7-bfffffff-bffffa7f-ffddfbfb" - -#endif /* SFMT_PARAMS216091_H */ diff --git a/lib/jemalloc-3.6.0/test/include/test/SFMT-params2281.h b/lib/jemalloc-3.6.0/test/include/test/SFMT-params2281.h deleted file mode 100644 index 0ef85c4..0000000 --- a/lib/jemalloc-3.6.0/test/include/test/SFMT-params2281.h +++ /dev/null @@ -1,81 +0,0 @@ -/* - * This file derives from SFMT 1.3.3 - * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was - * released under the terms of the following license: - * - * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima - * University. All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * * Neither the name of the Hiroshima University nor the names of - * its contributors may be used to endorse or promote products - * derived from this software without specific prior written - * permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -#ifndef SFMT_PARAMS2281_H -#define SFMT_PARAMS2281_H - -#define POS1 12 -#define SL1 19 -#define SL2 1 -#define SR1 5 -#define SR2 1 -#define MSK1 0xbff7ffbfU -#define MSK2 0xfdfffffeU -#define MSK3 0xf7ffef7fU -#define MSK4 0xf2f7cbbfU -#define PARITY1 0x00000001U -#define PARITY2 0x00000000U -#define PARITY3 0x00000000U -#define PARITY4 0x41dfa600U - - -/* PARAMETERS FOR ALTIVEC */ -#if defined(__APPLE__) /* For OSX */ - #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) - #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) - #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) - #define ALTI_MSK64 \ - (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) - #define ALTI_SL2_PERM \ - (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8) - #define ALTI_SL2_PERM64 \ - (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0) - #define ALTI_SR2_PERM \ - (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) - #define ALTI_SR2_PERM64 \ - (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) -#else /* For OTHER OSs(Linux?) 
*/ - #define ALTI_SL1 {SL1, SL1, SL1, SL1} - #define ALTI_SR1 {SR1, SR1, SR1, SR1} - #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} - #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} - #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8} - #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0} - #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} - #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} -#endif /* For OSX */ -#define IDSTR "SFMT-2281:12-19-1-5-1:bff7ffbf-fdfffffe-f7ffef7f-f2f7cbbf" - -#endif /* SFMT_PARAMS2281_H */ diff --git a/lib/jemalloc-3.6.0/test/include/test/SFMT-params4253.h b/lib/jemalloc-3.6.0/test/include/test/SFMT-params4253.h deleted file mode 100644 index 9f07bc6..0000000 --- a/lib/jemalloc-3.6.0/test/include/test/SFMT-params4253.h +++ /dev/null @@ -1,81 +0,0 @@ -/* - * This file derives from SFMT 1.3.3 - * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was - * released under the terms of the following license: - * - * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima - * University. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * * Neither the name of the Hiroshima University nor the names of - * its contributors may be used to endorse or promote products - * derived from this software without specific prior written - * permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ -#ifndef SFMT_PARAMS4253_H -#define SFMT_PARAMS4253_H - -#define POS1 17 -#define SL1 20 -#define SL2 1 -#define SR1 7 -#define SR2 1 -#define MSK1 0x9f7bffffU -#define MSK2 0x9fffff5fU -#define MSK3 0x3efffffbU -#define MSK4 0xfffff7bbU -#define PARITY1 0xa8000001U -#define PARITY2 0xaf5390a3U -#define PARITY3 0xb740b3f8U -#define PARITY4 0x6c11486dU - - -/* PARAMETERS FOR ALTIVEC */ -#if defined(__APPLE__) /* For OSX */ - #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) - #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) - #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) - #define ALTI_MSK64 \ - (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) - #define ALTI_SL2_PERM \ - (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8) - #define ALTI_SL2_PERM64 \ - (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0) - #define ALTI_SR2_PERM \ - (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) - #define ALTI_SR2_PERM64 \ - (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) -#else /* For OTHER OSs(Linux?) */ - #define ALTI_SL1 {SL1, SL1, SL1, SL1} - #define ALTI_SR1 {SR1, SR1, SR1, SR1} - #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} - #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} - #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8} - #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0} - #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} - #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} -#endif /* For OSX */ -#define IDSTR "SFMT-4253:17-20-1-7-1:9f7bffff-9fffff5f-3efffffb-fffff7bb" - -#endif /* SFMT_PARAMS4253_H */ diff --git a/lib/jemalloc-3.6.0/test/include/test/SFMT-params44497.h b/lib/jemalloc-3.6.0/test/include/test/SFMT-params44497.h deleted file mode 100644 index 85598fe..0000000 --- a/lib/jemalloc-3.6.0/test/include/test/SFMT-params44497.h +++ /dev/null @@ -1,81 +0,0 @@ -/* - * This file derives from SFMT 1.3.3 - * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was - * released under the terms of the following license: - * - * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima - * University. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * * Neither the name of the Hiroshima University nor the names of - * its contributors may be used to endorse or promote products - * derived from this software without specific prior written - * permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -#ifndef SFMT_PARAMS44497_H -#define SFMT_PARAMS44497_H - -#define POS1 330 -#define SL1 5 -#define SL2 3 -#define SR1 9 -#define SR2 3 -#define MSK1 0xeffffffbU -#define MSK2 0xdfbebfffU -#define MSK3 0xbfbf7befU -#define MSK4 0x9ffd7bffU -#define PARITY1 0x00000001U -#define PARITY2 0x00000000U -#define PARITY3 0xa3ac4000U -#define PARITY4 0xecc1327aU - - -/* PARAMETERS FOR ALTIVEC */ -#if defined(__APPLE__) /* For OSX */ - #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) - #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) - #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) - #define ALTI_MSK64 \ - (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) - #define ALTI_SL2_PERM \ - (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) - #define ALTI_SL2_PERM64 \ - (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) - #define ALTI_SR2_PERM \ - (vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12) - #define ALTI_SR2_PERM64 \ - (vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12) -#else /* For OTHER OSs(Linux?) */ - #define ALTI_SL1 {SL1, SL1, SL1, SL1} - #define ALTI_SR1 {SR1, SR1, SR1, SR1} - #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} - #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} - #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} - #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} - #define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12} - #define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12} -#endif /* For OSX */ -#define IDSTR "SFMT-44497:330-5-3-9-3:effffffb-dfbebfff-bfbf7bef-9ffd7bff" - -#endif /* SFMT_PARAMS44497_H */ diff --git a/lib/jemalloc-3.6.0/test/include/test/SFMT-params607.h b/lib/jemalloc-3.6.0/test/include/test/SFMT-params607.h deleted file mode 100644 index bc76485..0000000 --- a/lib/jemalloc-3.6.0/test/include/test/SFMT-params607.h +++ /dev/null @@ -1,81 +0,0 @@ -/* - * This file derives from SFMT 1.3.3 - * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was - * released under the terms of the following license: - * - * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima - * University. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * * Neither the name of the Hiroshima University nor the names of - * its contributors may be used to endorse or promote products - * derived from this software without specific prior written - * permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -#ifndef SFMT_PARAMS607_H -#define SFMT_PARAMS607_H - -#define POS1 2 -#define SL1 15 -#define SL2 3 -#define SR1 13 -#define SR2 3 -#define MSK1 0xfdff37ffU -#define MSK2 0xef7f3f7dU -#define MSK3 0xff777b7dU -#define MSK4 0x7ff7fb2fU -#define PARITY1 0x00000001U -#define PARITY2 0x00000000U -#define PARITY3 0x00000000U -#define PARITY4 0x5986f054U - - -/* PARAMETERS FOR ALTIVEC */ -#if defined(__APPLE__) /* For OSX */ - #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) - #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) - #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) - #define ALTI_MSK64 \ - (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) - #define ALTI_SL2_PERM \ - (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) - #define ALTI_SL2_PERM64 \ - (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) - #define ALTI_SR2_PERM \ - (vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12) - #define ALTI_SR2_PERM64 \ - (vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12) -#else /* For OTHER OSs(Linux?) */ - #define ALTI_SL1 {SL1, SL1, SL1, SL1} - #define ALTI_SR1 {SR1, SR1, SR1, SR1} - #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} - #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} - #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} - #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} - #define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12} - #define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12} -#endif /* For OSX */ -#define IDSTR "SFMT-607:2-15-3-13-3:fdff37ff-ef7f3f7d-ff777b7d-7ff7fb2f" - -#endif /* SFMT_PARAMS607_H */ diff --git a/lib/jemalloc-3.6.0/test/include/test/SFMT-params86243.h b/lib/jemalloc-3.6.0/test/include/test/SFMT-params86243.h deleted file mode 100644 index 5e4d783..0000000 --- a/lib/jemalloc-3.6.0/test/include/test/SFMT-params86243.h +++ /dev/null @@ -1,81 +0,0 @@ -/* - * This file derives from SFMT 1.3.3 - * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was - * released under the terms of the following license: - * - * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima - * University. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. 
- * * Neither the name of the Hiroshima University nor the names of - * its contributors may be used to endorse or promote products - * derived from this software without specific prior written - * permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -#ifndef SFMT_PARAMS86243_H -#define SFMT_PARAMS86243_H - -#define POS1 366 -#define SL1 6 -#define SL2 7 -#define SR1 19 -#define SR2 1 -#define MSK1 0xfdbffbffU -#define MSK2 0xbff7ff3fU -#define MSK3 0xfd77efffU -#define MSK4 0xbf9ff3ffU -#define PARITY1 0x00000001U -#define PARITY2 0x00000000U -#define PARITY3 0x00000000U -#define PARITY4 0xe9528d85U - - -/* PARAMETERS FOR ALTIVEC */ -#if defined(__APPLE__) /* For OSX */ - #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) - #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) - #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) - #define ALTI_MSK64 \ - (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) - #define ALTI_SL2_PERM \ - (vector unsigned char)(25,25,25,25,3,25,25,25,7,0,1,2,11,4,5,6) - #define ALTI_SL2_PERM64 \ - (vector unsigned char)(7,25,25,25,25,25,25,25,15,0,1,2,3,4,5,6) - #define ALTI_SR2_PERM \ - (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) - #define ALTI_SR2_PERM64 \ - (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) -#else /* For OTHER OSs(Linux?) */ - #define ALTI_SL1 {SL1, SL1, SL1, SL1} - #define ALTI_SR1 {SR1, SR1, SR1, SR1} - #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} - #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} - #define ALTI_SL2_PERM {25,25,25,25,3,25,25,25,7,0,1,2,11,4,5,6} - #define ALTI_SL2_PERM64 {7,25,25,25,25,25,25,25,15,0,1,2,3,4,5,6} - #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} - #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} -#endif /* For OSX */ -#define IDSTR "SFMT-86243:366-6-7-19-1:fdbffbff-bff7ff3f-fd77efff-bf9ff3ff" - -#endif /* SFMT_PARAMS86243_H */ diff --git a/lib/jemalloc-3.6.0/test/include/test/SFMT-sse2.h b/lib/jemalloc-3.6.0/test/include/test/SFMT-sse2.h deleted file mode 100644 index 0314a16..0000000 --- a/lib/jemalloc-3.6.0/test/include/test/SFMT-sse2.h +++ /dev/null @@ -1,157 +0,0 @@ -/* - * This file derives from SFMT 1.3.3 - * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was - * released under the terms of the following license: - * - * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima - * University. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * * Neither the name of the Hiroshima University nor the names of - * its contributors may be used to endorse or promote products - * derived from this software without specific prior written - * permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -/** - * @file SFMT-sse2.h - * @brief SIMD oriented Fast Mersenne Twister(SFMT) for Intel SSE2 - * - * @author Mutsuo Saito (Hiroshima University) - * @author Makoto Matsumoto (Hiroshima University) - * - * @note We assume LITTLE ENDIAN in this file - * - * Copyright (C) 2006, 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima - * University. All rights reserved. - * - * The new BSD License is applied to this software, see LICENSE.txt - */ - -#ifndef SFMT_SSE2_H -#define SFMT_SSE2_H - -/** - * This function represents the recursion formula. - * @param a a 128-bit part of the interal state array - * @param b a 128-bit part of the interal state array - * @param c a 128-bit part of the interal state array - * @param d a 128-bit part of the interal state array - * @param mask 128-bit mask - * @return output - */ -JEMALLOC_ALWAYS_INLINE __m128i mm_recursion(__m128i *a, __m128i *b, - __m128i c, __m128i d, __m128i mask) { - __m128i v, x, y, z; - - x = _mm_load_si128(a); - y = _mm_srli_epi32(*b, SR1); - z = _mm_srli_si128(c, SR2); - v = _mm_slli_epi32(d, SL1); - z = _mm_xor_si128(z, x); - z = _mm_xor_si128(z, v); - x = _mm_slli_si128(x, SL2); - y = _mm_and_si128(y, mask); - z = _mm_xor_si128(z, x); - z = _mm_xor_si128(z, y); - return z; -} - -/** - * This function fills the internal state array with pseudorandom - * integers. - */ -JEMALLOC_INLINE void gen_rand_all(sfmt_t *ctx) { - int i; - __m128i r, r1, r2, mask; - mask = _mm_set_epi32(MSK4, MSK3, MSK2, MSK1); - - r1 = _mm_load_si128(&ctx->sfmt[N - 2].si); - r2 = _mm_load_si128(&ctx->sfmt[N - 1].si); - for (i = 0; i < N - POS1; i++) { - r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1].si, r1, r2, - mask); - _mm_store_si128(&ctx->sfmt[i].si, r); - r1 = r2; - r2 = r; - } - for (; i < N; i++) { - r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1 - N].si, r1, r2, - mask); - _mm_store_si128(&ctx->sfmt[i].si, r); - r1 = r2; - r2 = r; - } -} - -/** - * This function fills the user-specified array with pseudorandom - * integers. - * - * @param array an 128-bit array to be filled by pseudorandom numbers. - * @param size number of 128-bit pesudorandom numbers to be generated. 
- */ -JEMALLOC_INLINE void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { - int i, j; - __m128i r, r1, r2, mask; - mask = _mm_set_epi32(MSK4, MSK3, MSK2, MSK1); - - r1 = _mm_load_si128(&ctx->sfmt[N - 2].si); - r2 = _mm_load_si128(&ctx->sfmt[N - 1].si); - for (i = 0; i < N - POS1; i++) { - r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1].si, r1, r2, - mask); - _mm_store_si128(&array[i].si, r); - r1 = r2; - r2 = r; - } - for (; i < N; i++) { - r = mm_recursion(&ctx->sfmt[i].si, &array[i + POS1 - N].si, r1, r2, - mask); - _mm_store_si128(&array[i].si, r); - r1 = r2; - r2 = r; - } - /* main loop */ - for (; i < size - N; i++) { - r = mm_recursion(&array[i - N].si, &array[i + POS1 - N].si, r1, r2, - mask); - _mm_store_si128(&array[i].si, r); - r1 = r2; - r2 = r; - } - for (j = 0; j < 2 * N - size; j++) { - r = _mm_load_si128(&array[j + size - N].si); - _mm_store_si128(&ctx->sfmt[j].si, r); - } - for (; i < size; i++) { - r = mm_recursion(&array[i - N].si, &array[i + POS1 - N].si, r1, r2, - mask); - _mm_store_si128(&array[i].si, r); - _mm_store_si128(&ctx->sfmt[j++].si, r); - r1 = r2; - r2 = r; - } -} - -#endif diff --git a/lib/jemalloc-3.6.0/test/include/test/SFMT.h b/lib/jemalloc-3.6.0/test/include/test/SFMT.h deleted file mode 100644 index 09c1607..0000000 --- a/lib/jemalloc-3.6.0/test/include/test/SFMT.h +++ /dev/null @@ -1,171 +0,0 @@ -/* - * This file derives from SFMT 1.3.3 - * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was - * released under the terms of the following license: - * - * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima - * University. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * * Neither the name of the Hiroshima University nor the names of - * its contributors may be used to endorse or promote products - * derived from this software without specific prior written - * permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -/** - * @file SFMT.h - * - * @brief SIMD oriented Fast Mersenne Twister(SFMT) pseudorandom - * number generator - * - * @author Mutsuo Saito (Hiroshima University) - * @author Makoto Matsumoto (Hiroshima University) - * - * Copyright (C) 2006, 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima - * University. All rights reserved. 
- * - * The new BSD License is applied to this software. - * see LICENSE.txt - * - * @note We assume that your system has inttypes.h. If your system - * doesn't have inttypes.h, you have to typedef uint32_t and uint64_t, - * and you have to define PRIu64 and PRIx64 in this file as follows: - * @verbatim - typedef unsigned int uint32_t - typedef unsigned long long uint64_t - #define PRIu64 "llu" - #define PRIx64 "llx" -@endverbatim - * uint32_t must be exactly 32-bit unsigned integer type (no more, no - * less), and uint64_t must be exactly 64-bit unsigned integer type. - * PRIu64 and PRIx64 are used for printf function to print 64-bit - * unsigned int and 64-bit unsigned int in hexadecimal format. - */ - -#ifndef SFMT_H -#define SFMT_H - -typedef struct sfmt_s sfmt_t; - -uint32_t gen_rand32(sfmt_t *ctx); -uint32_t gen_rand32_range(sfmt_t *ctx, uint32_t limit); -uint64_t gen_rand64(sfmt_t *ctx); -uint64_t gen_rand64_range(sfmt_t *ctx, uint64_t limit); -void fill_array32(sfmt_t *ctx, uint32_t *array, int size); -void fill_array64(sfmt_t *ctx, uint64_t *array, int size); -sfmt_t *init_gen_rand(uint32_t seed); -sfmt_t *init_by_array(uint32_t *init_key, int key_length); -void fini_gen_rand(sfmt_t *ctx); -const char *get_idstring(void); -int get_min_array_size32(void); -int get_min_array_size64(void); - -#ifndef JEMALLOC_ENABLE_INLINE -double to_real1(uint32_t v); -double genrand_real1(sfmt_t *ctx); -double to_real2(uint32_t v); -double genrand_real2(sfmt_t *ctx); -double to_real3(uint32_t v); -double genrand_real3(sfmt_t *ctx); -double to_res53(uint64_t v); -double to_res53_mix(uint32_t x, uint32_t y); -double genrand_res53(sfmt_t *ctx); -double genrand_res53_mix(sfmt_t *ctx); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(SFMT_C_)) -/* These real versions are due to Isaku Wada */ -/** generates a random number on [0,1]-real-interval */ -JEMALLOC_INLINE double to_real1(uint32_t v) -{ - return v * (1.0/4294967295.0); - /* divided by 2^32-1 */ -} - -/** generates a random number on [0,1]-real-interval */ -JEMALLOC_INLINE double genrand_real1(sfmt_t *ctx) -{ - return to_real1(gen_rand32(ctx)); -} - -/** generates a random number on [0,1)-real-interval */ -JEMALLOC_INLINE double to_real2(uint32_t v) -{ - return v * (1.0/4294967296.0); - /* divided by 2^32 */ -} - -/** generates a random number on [0,1)-real-interval */ -JEMALLOC_INLINE double genrand_real2(sfmt_t *ctx) -{ - return to_real2(gen_rand32(ctx)); -} - -/** generates a random number on (0,1)-real-interval */ -JEMALLOC_INLINE double to_real3(uint32_t v) -{ - return (((double)v) + 0.5)*(1.0/4294967296.0); - /* divided by 2^32 */ -} - -/** generates a random number on (0,1)-real-interval */ -JEMALLOC_INLINE double genrand_real3(sfmt_t *ctx) -{ - return to_real3(gen_rand32(ctx)); -} -/** These real versions are due to Isaku Wada */ - -/** generates a random number on [0,1) with 53-bit resolution*/ -JEMALLOC_INLINE double to_res53(uint64_t v) -{ - return v * (1.0/18446744073709551616.0L); -} - -/** generates a random number on [0,1) with 53-bit resolution from two - * 32 bit integers */ -JEMALLOC_INLINE double to_res53_mix(uint32_t x, uint32_t y) -{ - return to_res53(x | ((uint64_t)y << 32)); -} - -/** generates a random number on [0,1) with 53-bit resolution - */ -JEMALLOC_INLINE double genrand_res53(sfmt_t *ctx) -{ - return to_res53(gen_rand64(ctx)); -} - -/** generates a random number on [0,1) with 53-bit resolution - using 32bit integer. 
- */ -JEMALLOC_INLINE double genrand_res53_mix(sfmt_t *ctx) -{ - uint32_t x, y; - - x = gen_rand32(ctx); - y = gen_rand32(ctx); - return to_res53_mix(x, y); -} -#endif -#endif diff --git a/lib/jemalloc-3.6.0/test/include/test/jemalloc_test.h.in b/lib/jemalloc-3.6.0/test/include/test/jemalloc_test.h.in deleted file mode 100644 index 730a55d..0000000 --- a/lib/jemalloc-3.6.0/test/include/test/jemalloc_test.h.in +++ /dev/null @@ -1,141 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include - -#ifdef _WIN32 -# include -#else -# include -#endif - -/******************************************************************************/ -/* - * Define always-enabled assertion macros, so that test assertions execute even - * if assertions are disabled in the library code. These definitions must - * exist prior to including "jemalloc/internal/util.h". - */ -#define assert(e) do { \ - if (!(e)) { \ - malloc_printf( \ - ": %s:%d: Failed assertion: \"%s\"\n", \ - __FILE__, __LINE__, #e); \ - abort(); \ - } \ -} while (0) - -#define not_reached() do { \ - malloc_printf( \ - ": %s:%d: Unreachable code reached\n", \ - __FILE__, __LINE__); \ - abort(); \ -} while (0) - -#define not_implemented() do { \ - malloc_printf(": %s:%d: Not implemented\n", \ - __FILE__, __LINE__); \ - abort(); \ -} while (0) - -#define assert_not_implemented(e) do { \ - if (!(e)) \ - not_implemented(); \ -} while (0) - -#include "test/jemalloc_test_defs.h" - -#ifdef JEMALLOC_OSSPIN -# include -#endif - -#if defined(HAVE_ALTIVEC) && !defined(__APPLE__) -# include -#endif -#ifdef HAVE_SSE2 -# include -#endif - -/******************************************************************************/ -/* - * For unit tests, expose all public and private interfaces. - */ -#ifdef JEMALLOC_UNIT_TEST -# define JEMALLOC_JET -# define JEMALLOC_MANGLE -# include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ -/* - * For integration tests, expose the public jemalloc interfaces, but only - * expose the minimum necessary internal utility code (to avoid re-implementing - * essentially identical code within the test infrastructure). - */ -#elif defined(JEMALLOC_INTEGRATION_TEST) -# define JEMALLOC_MANGLE -# include "jemalloc/jemalloc@install_suffix@.h" -# include "jemalloc/internal/jemalloc_internal_defs.h" -# include "jemalloc/internal/jemalloc_internal_macros.h" - -# define JEMALLOC_N(n) @private_namespace@##n -# include "jemalloc/internal/private_namespace.h" - -# define JEMALLOC_H_TYPES -# define JEMALLOC_H_STRUCTS -# define JEMALLOC_H_EXTERNS -# define JEMALLOC_H_INLINES -# include "jemalloc/internal/util.h" -# include "jemalloc/internal/qr.h" -# include "jemalloc/internal/ql.h" -# undef JEMALLOC_H_TYPES -# undef JEMALLOC_H_STRUCTS -# undef JEMALLOC_H_EXTERNS -# undef JEMALLOC_H_INLINES - -/******************************************************************************/ -/* - * For stress tests, expose the public jemalloc interfaces with name mangling - * so that they can be tested as e.g. malloc() and free(). Also expose the - * public jemalloc interfaces with jet_ prefixes, so that stress tests can use - * a separate allocator for their internal data structures. 
- */ -#elif defined(JEMALLOC_STRESS_TEST) -# include "jemalloc/jemalloc@install_suffix@.h" - -# include "jemalloc/jemalloc_protos_jet.h" - -# define JEMALLOC_JET -# include "jemalloc/internal/jemalloc_internal.h" -# include "jemalloc/internal/public_unnamespace.h" -# undef JEMALLOC_JET - -# include "jemalloc/jemalloc_rename.h" -# define JEMALLOC_MANGLE -# ifdef JEMALLOC_STRESS_TESTLIB -# include "jemalloc/jemalloc_mangle_jet.h" -# else -# include "jemalloc/jemalloc_mangle.h" -# endif - -/******************************************************************************/ -/* - * This header does dangerous things, the effects of which only test code - * should be subject to. - */ -#else -# error "This header cannot be included outside a testing context" -#endif - -/******************************************************************************/ -/* - * Common test utilities. - */ -#include "test/math.h" -#include "test/mtx.h" -#include "test/mq.h" -#include "test/test.h" -#include "test/thd.h" -#define MEXP 19937 -#include "test/SFMT.h" diff --git a/lib/jemalloc-3.6.0/test/include/test/jemalloc_test_defs.h.in b/lib/jemalloc-3.6.0/test/include/test/jemalloc_test_defs.h.in deleted file mode 100644 index 18a9773..0000000 --- a/lib/jemalloc-3.6.0/test/include/test/jemalloc_test_defs.h.in +++ /dev/null @@ -1,5 +0,0 @@ -#include "jemalloc/internal/jemalloc_internal_defs.h" - -/* For use by SFMT. */ -#undef HAVE_SSE2 -#undef HAVE_ALTIVEC diff --git a/lib/jemalloc-3.6.0/test/include/test/math.h b/lib/jemalloc-3.6.0/test/include/test/math.h deleted file mode 100644 index a862ed7..0000000 --- a/lib/jemalloc-3.6.0/test/include/test/math.h +++ /dev/null @@ -1,311 +0,0 @@ -#ifndef JEMALLOC_ENABLE_INLINE -double ln_gamma(double x); -double i_gamma(double x, double p, double ln_gamma_p); -double pt_norm(double p); -double pt_chi2(double p, double df, double ln_gamma_df_2); -double pt_gamma(double p, double shape, double scale, double ln_gamma_shape); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(MATH_C_)) -/* - * Compute the natural log of Gamma(x), accurate to 10 decimal places. - * - * This implementation is based on: - * - * Pike, M.C., I.D. Hill (1966) Algorithm 291: Logarithm of Gamma function - * [S14]. Communications of the ACM 9(9):684. - */ -JEMALLOC_INLINE double -ln_gamma(double x) -{ - double f, z; - - assert(x > 0.0); - - if (x < 7.0) { - f = 1.0; - z = x; - while (z < 7.0) { - f *= z; - z += 1.0; - } - x = z; - f = -log(f); - } else - f = 0.0; - - z = 1.0 / (x * x); - - return (f + (x-0.5) * log(x) - x + 0.918938533204673 + - (((-0.000595238095238 * z + 0.000793650793651) * z - - 0.002777777777778) * z + 0.083333333333333) / x); -} - -/* - * Compute the incomplete Gamma ratio for [0..x], where p is the shape - * parameter, and ln_gamma_p is ln_gamma(p). - * - * This implementation is based on: - * - * Bhattacharjee, G.P. (1970) Algorithm AS 32: The incomplete Gamma integral. - * Applied Statistics 19:285-287. - */ -JEMALLOC_INLINE double -i_gamma(double x, double p, double ln_gamma_p) -{ - double acu, factor, oflo, gin, term, rn, a, b, an, dif; - double pn[6]; - unsigned i; - - assert(p > 0.0); - assert(x >= 0.0); - - if (x == 0.0) - return (0.0); - - acu = 1.0e-10; - oflo = 1.0e30; - gin = 0.0; - factor = exp(p * log(x) - x - ln_gamma_p); - - if (x <= 1.0 || x < p) { - /* Calculation by series expansion. 
*/ - gin = 1.0; - term = 1.0; - rn = p; - - while (true) { - rn += 1.0; - term *= x / rn; - gin += term; - if (term <= acu) { - gin *= factor / p; - return (gin); - } - } - } else { - /* Calculation by continued fraction. */ - a = 1.0 - p; - b = a + x + 1.0; - term = 0.0; - pn[0] = 1.0; - pn[1] = x; - pn[2] = x + 1.0; - pn[3] = x * b; - gin = pn[2] / pn[3]; - - while (true) { - a += 1.0; - b += 2.0; - term += 1.0; - an = a * term; - for (i = 0; i < 2; i++) - pn[i+4] = b * pn[i+2] - an * pn[i]; - if (pn[5] != 0.0) { - rn = pn[4] / pn[5]; - dif = fabs(gin - rn); - if (dif <= acu && dif <= acu * rn) { - gin = 1.0 - factor * gin; - return (gin); - } - gin = rn; - } - for (i = 0; i < 4; i++) - pn[i] = pn[i+2]; - - if (fabs(pn[4]) >= oflo) { - for (i = 0; i < 4; i++) - pn[i] /= oflo; - } - } - } -} - -/* - * Given a value p in [0..1] of the lower tail area of the normal distribution, - * compute the limit on the definite integral from [-inf..z] that satisfies p, - * accurate to 16 decimal places. - * - * This implementation is based on: - * - * Wichura, M.J. (1988) Algorithm AS 241: The percentage points of the normal - * distribution. Applied Statistics 37(3):477-484. - */ -JEMALLOC_INLINE double -pt_norm(double p) -{ - double q, r, ret; - - assert(p > 0.0 && p < 1.0); - - q = p - 0.5; - if (fabs(q) <= 0.425) { - /* p close to 1/2. */ - r = 0.180625 - q * q; - return (q * (((((((2.5090809287301226727e3 * r + - 3.3430575583588128105e4) * r + 6.7265770927008700853e4) * r - + 4.5921953931549871457e4) * r + 1.3731693765509461125e4) * - r + 1.9715909503065514427e3) * r + 1.3314166789178437745e2) - * r + 3.3871328727963666080e0) / - (((((((5.2264952788528545610e3 * r + - 2.8729085735721942674e4) * r + 3.9307895800092710610e4) * r - + 2.1213794301586595867e4) * r + 5.3941960214247511077e3) * - r + 6.8718700749205790830e2) * r + 4.2313330701600911252e1) - * r + 1.0)); - } else { - if (q < 0.0) - r = p; - else - r = 1.0 - p; - assert(r > 0.0); - - r = sqrt(-log(r)); - if (r <= 5.0) { - /* p neither close to 1/2 nor 0 or 1. */ - r -= 1.6; - ret = ((((((((7.74545014278341407640e-4 * r + - 2.27238449892691845833e-2) * r + - 2.41780725177450611770e-1) * r + - 1.27045825245236838258e0) * r + - 3.64784832476320460504e0) * r + - 5.76949722146069140550e0) * r + - 4.63033784615654529590e0) * r + - 1.42343711074968357734e0) / - (((((((1.05075007164441684324e-9 * r + - 5.47593808499534494600e-4) * r + - 1.51986665636164571966e-2) - * r + 1.48103976427480074590e-1) * r + - 6.89767334985100004550e-1) * r + - 1.67638483018380384940e0) * r + - 2.05319162663775882187e0) * r + 1.0)); - } else { - /* p near 0 or 1. */ - r -= 5.0; - ret = ((((((((2.01033439929228813265e-7 * r + - 2.71155556874348757815e-5) * r + - 1.24266094738807843860e-3) * r + - 2.65321895265761230930e-2) * r + - 2.96560571828504891230e-1) * r + - 1.78482653991729133580e0) * r + - 5.46378491116411436990e0) * r + - 6.65790464350110377720e0) / - (((((((2.04426310338993978564e-15 * r + - 1.42151175831644588870e-7) * r + - 1.84631831751005468180e-5) * r + - 7.86869131145613259100e-4) * r + - 1.48753612908506148525e-2) * r + - 1.36929880922735805310e-1) * r + - 5.99832206555887937690e-1) - * r + 1.0)); - } - if (q < 0.0) - ret = -ret; - return (ret); - } -} - -/* - * Given a value p in [0..1] of the lower tail area of the Chi^2 distribution - * with df degrees of freedom, where ln_gamma_df_2 is ln_gamma(df/2.0), compute - * the upper limit on the definite integral from [0..z] that satisfies p, - * accurate to 12 decimal places. 
- * - * This implementation is based on: - * - * Best, D.J., D.E. Roberts (1975) Algorithm AS 91: The percentage points of - * the Chi^2 distribution. Applied Statistics 24(3):385-388. - * - * Shea, B.L. (1991) Algorithm AS R85: A remark on AS 91: The percentage - * points of the Chi^2 distribution. Applied Statistics 40(1):233-235. - */ -JEMALLOC_INLINE double -pt_chi2(double p, double df, double ln_gamma_df_2) -{ - double e, aa, xx, c, ch, a, q, p1, p2, t, x, b, s1, s2, s3, s4, s5, s6; - unsigned i; - - assert(p >= 0.0 && p < 1.0); - assert(df > 0.0); - - e = 5.0e-7; - aa = 0.6931471805; - - xx = 0.5 * df; - c = xx - 1.0; - - if (df < -1.24 * log(p)) { - /* Starting approximation for small Chi^2. */ - ch = pow(p * xx * exp(ln_gamma_df_2 + xx * aa), 1.0 / xx); - if (ch - e < 0.0) - return (ch); - } else { - if (df > 0.32) { - x = pt_norm(p); - /* - * Starting approximation using Wilson and Hilferty - * estimate. - */ - p1 = 0.222222 / df; - ch = df * pow(x * sqrt(p1) + 1.0 - p1, 3.0); - /* Starting approximation for p tending to 1. */ - if (ch > 2.2 * df + 6.0) { - ch = -2.0 * (log(1.0 - p) - c * log(0.5 * ch) + - ln_gamma_df_2); - } - } else { - ch = 0.4; - a = log(1.0 - p); - while (true) { - q = ch; - p1 = 1.0 + ch * (4.67 + ch); - p2 = ch * (6.73 + ch * (6.66 + ch)); - t = -0.5 + (4.67 + 2.0 * ch) / p1 - (6.73 + ch - * (13.32 + 3.0 * ch)) / p2; - ch -= (1.0 - exp(a + ln_gamma_df_2 + 0.5 * ch + - c * aa) * p2 / p1) / t; - if (fabs(q / ch - 1.0) - 0.01 <= 0.0) - break; - } - } - } - - for (i = 0; i < 20; i++) { - /* Calculation of seven-term Taylor series. */ - q = ch; - p1 = 0.5 * ch; - if (p1 < 0.0) - return (-1.0); - p2 = p - i_gamma(p1, xx, ln_gamma_df_2); - t = p2 * exp(xx * aa + ln_gamma_df_2 + p1 - c * log(ch)); - b = t / ch; - a = 0.5 * t - b * c; - s1 = (210.0 + a * (140.0 + a * (105.0 + a * (84.0 + a * (70.0 + - 60.0 * a))))) / 420.0; - s2 = (420.0 + a * (735.0 + a * (966.0 + a * (1141.0 + 1278.0 * - a)))) / 2520.0; - s3 = (210.0 + a * (462.0 + a * (707.0 + 932.0 * a))) / 2520.0; - s4 = (252.0 + a * (672.0 + 1182.0 * a) + c * (294.0 + a * - (889.0 + 1740.0 * a))) / 5040.0; - s5 = (84.0 + 264.0 * a + c * (175.0 + 606.0 * a)) / 2520.0; - s6 = (120.0 + c * (346.0 + 127.0 * c)) / 5040.0; - ch += t * (1.0 + 0.5 * t * s1 - b * c * (s1 - b * (s2 - b * (s3 - - b * (s4 - b * (s5 - b * s6)))))); - if (fabs(q / ch - 1.0) <= e) - break; - } - - return (ch); -} - -/* - * Given a value p in [0..1] and Gamma distribution shape and scale parameters, - * compute the upper limit on the definite integeral from [0..z] that satisfies - * p. - */ -JEMALLOC_INLINE double -pt_gamma(double p, double shape, double scale, double ln_gamma_shape) -{ - - return (pt_chi2(p, shape * 2.0, ln_gamma_shape) * 0.5 * scale); -} -#endif diff --git a/lib/jemalloc-3.6.0/test/include/test/mq.h b/lib/jemalloc-3.6.0/test/include/test/mq.h deleted file mode 100644 index 1118865..0000000 --- a/lib/jemalloc-3.6.0/test/include/test/mq.h +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Simple templated message queue implementation that relies on only mutexes for - * synchronization (which reduces portability issues). 
Given the following - * setup: - * - * typedef struct mq_msg_s mq_msg_t; - * struct mq_msg_s { - * mq_msg(mq_msg_t) link; - * [message data] - * }; - * mq_gen(, mq_, mq_t, mq_msg_t, link) - * - * The API is as follows: - * - * bool mq_init(mq_t *mq); - * void mq_fini(mq_t *mq); - * unsigned mq_count(mq_t *mq); - * mq_msg_t *mq_tryget(mq_t *mq); - * mq_msg_t *mq_get(mq_t *mq); - * void mq_put(mq_t *mq, mq_msg_t *msg); - * - * The message queue linkage embedded in each message is to be treated as - * externally opaque (no need to initialize or clean up externally). mq_fini() - * does not perform any cleanup of messages, since it knows nothing of their - * payloads. - */ -#define mq_msg(a_mq_msg_type) ql_elm(a_mq_msg_type) - -#define mq_gen(a_attr, a_prefix, a_mq_type, a_mq_msg_type, a_field) \ -typedef struct { \ - mtx_t lock; \ - ql_head(a_mq_msg_type) msgs; \ - unsigned count; \ -} a_mq_type; \ -a_attr bool \ -a_prefix##init(a_mq_type *mq) { \ - \ - if (mtx_init(&mq->lock)) \ - return (true); \ - ql_new(&mq->msgs); \ - mq->count = 0; \ - return (false); \ -} \ -a_attr void \ -a_prefix##fini(a_mq_type *mq) \ -{ \ - \ - mtx_fini(&mq->lock); \ -} \ -a_attr unsigned \ -a_prefix##count(a_mq_type *mq) \ -{ \ - unsigned count; \ - \ - mtx_lock(&mq->lock); \ - count = mq->count; \ - mtx_unlock(&mq->lock); \ - return (count); \ -} \ -a_attr a_mq_msg_type * \ -a_prefix##tryget(a_mq_type *mq) \ -{ \ - a_mq_msg_type *msg; \ - \ - mtx_lock(&mq->lock); \ - msg = ql_first(&mq->msgs); \ - if (msg != NULL) { \ - ql_head_remove(&mq->msgs, a_mq_msg_type, a_field); \ - mq->count--; \ - } \ - mtx_unlock(&mq->lock); \ - return (msg); \ -} \ -a_attr a_mq_msg_type * \ -a_prefix##get(a_mq_type *mq) \ -{ \ - a_mq_msg_type *msg; \ - struct timespec timeout; \ - \ - msg = a_prefix##tryget(mq); \ - if (msg != NULL) \ - return (msg); \ - \ - timeout.tv_sec = 0; \ - timeout.tv_nsec = 1; \ - while (true) { \ - nanosleep(&timeout, NULL); \ - msg = a_prefix##tryget(mq); \ - if (msg != NULL) \ - return (msg); \ - if (timeout.tv_sec == 0) { \ - /* Double sleep time, up to max 1 second. */ \ - timeout.tv_nsec <<= 1; \ - if (timeout.tv_nsec >= 1000*1000*1000) { \ - timeout.tv_sec = 1; \ - timeout.tv_nsec = 0; \ - } \ - } \ - } \ -} \ -a_attr void \ -a_prefix##put(a_mq_type *mq, a_mq_msg_type *msg) \ -{ \ - \ - mtx_lock(&mq->lock); \ - ql_elm_new(msg, a_field); \ - ql_tail_insert(&mq->msgs, msg, a_field); \ - mq->count++; \ - mtx_unlock(&mq->lock); \ -} diff --git a/lib/jemalloc-3.6.0/test/include/test/mtx.h b/lib/jemalloc-3.6.0/test/include/test/mtx.h deleted file mode 100644 index bbe822f..0000000 --- a/lib/jemalloc-3.6.0/test/include/test/mtx.h +++ /dev/null @@ -1,21 +0,0 @@ -/* - * mtx is a slightly simplified version of malloc_mutex. This code duplication - * is unfortunate, but there are allocator bootstrapping considerations that - * would leak into the test infrastructure if malloc_mutex were used directly - * in tests. - */ - -typedef struct { -#ifdef _WIN32 - CRITICAL_SECTION lock; -#elif (defined(JEMALLOC_OSSPIN)) - OSSpinLock lock; -#else - pthread_mutex_t lock; -#endif -} mtx_t; - -bool mtx_init(mtx_t *mtx); -void mtx_fini(mtx_t *mtx); -void mtx_lock(mtx_t *mtx); -void mtx_unlock(mtx_t *mtx); diff --git a/lib/jemalloc-3.6.0/test/include/test/test.h b/lib/jemalloc-3.6.0/test/include/test/test.h deleted file mode 100644 index a32ec07..0000000 --- a/lib/jemalloc-3.6.0/test/include/test/test.h +++ /dev/null @@ -1,329 +0,0 @@ -#define ASSERT_BUFSIZE 256 - -#define assert_cmp(t, a, b, cmp, neg_cmp, pri, fmt...) 
do { \ - t a_ = (a); \ - t b_ = (b); \ - if (!(a_ cmp b_)) { \ - char prefix[ASSERT_BUFSIZE]; \ - char message[ASSERT_BUFSIZE]; \ - malloc_snprintf(prefix, sizeof(prefix), \ - "%s:%s:%d: Failed assertion: " \ - "(%s) "#cmp" (%s) --> " \ - "%"pri" "#neg_cmp" %"pri": ", \ - __func__, __FILE__, __LINE__, \ - #a, #b, a_, b_); \ - malloc_snprintf(message, sizeof(message), fmt); \ - p_test_fail(prefix, message); \ - } \ -} while (0) - -#define assert_ptr_eq(a, b, fmt...) assert_cmp(void *, a, b, ==, \ - !=, "p", fmt) -#define assert_ptr_ne(a, b, fmt...) assert_cmp(void *, a, b, !=, \ - ==, "p", fmt) -#define assert_ptr_null(a, fmt...) assert_cmp(void *, a, NULL, ==, \ - !=, "p", fmt) -#define assert_ptr_not_null(a, fmt...) assert_cmp(void *, a, NULL, !=, \ - ==, "p", fmt) - -#define assert_c_eq(a, b, fmt...) assert_cmp(char, a, b, ==, !=, "c", fmt) -#define assert_c_ne(a, b, fmt...) assert_cmp(char, a, b, !=, ==, "c", fmt) -#define assert_c_lt(a, b, fmt...) assert_cmp(char, a, b, <, >=, "c", fmt) -#define assert_c_le(a, b, fmt...) assert_cmp(char, a, b, <=, >, "c", fmt) -#define assert_c_ge(a, b, fmt...) assert_cmp(char, a, b, >=, <, "c", fmt) -#define assert_c_gt(a, b, fmt...) assert_cmp(char, a, b, >, <=, "c", fmt) - -#define assert_x_eq(a, b, fmt...) assert_cmp(int, a, b, ==, !=, "#x", fmt) -#define assert_x_ne(a, b, fmt...) assert_cmp(int, a, b, !=, ==, "#x", fmt) -#define assert_x_lt(a, b, fmt...) assert_cmp(int, a, b, <, >=, "#x", fmt) -#define assert_x_le(a, b, fmt...) assert_cmp(int, a, b, <=, >, "#x", fmt) -#define assert_x_ge(a, b, fmt...) assert_cmp(int, a, b, >=, <, "#x", fmt) -#define assert_x_gt(a, b, fmt...) assert_cmp(int, a, b, >, <=, "#x", fmt) - -#define assert_d_eq(a, b, fmt...) assert_cmp(int, a, b, ==, !=, "d", fmt) -#define assert_d_ne(a, b, fmt...) assert_cmp(int, a, b, !=, ==, "d", fmt) -#define assert_d_lt(a, b, fmt...) assert_cmp(int, a, b, <, >=, "d", fmt) -#define assert_d_le(a, b, fmt...) assert_cmp(int, a, b, <=, >, "d", fmt) -#define assert_d_ge(a, b, fmt...) assert_cmp(int, a, b, >=, <, "d", fmt) -#define assert_d_gt(a, b, fmt...) assert_cmp(int, a, b, >, <=, "d", fmt) - -#define assert_u_eq(a, b, fmt...) assert_cmp(int, a, b, ==, !=, "u", fmt) -#define assert_u_ne(a, b, fmt...) assert_cmp(int, a, b, !=, ==, "u", fmt) -#define assert_u_lt(a, b, fmt...) assert_cmp(int, a, b, <, >=, "u", fmt) -#define assert_u_le(a, b, fmt...) assert_cmp(int, a, b, <=, >, "u", fmt) -#define assert_u_ge(a, b, fmt...) assert_cmp(int, a, b, >=, <, "u", fmt) -#define assert_u_gt(a, b, fmt...) assert_cmp(int, a, b, >, <=, "u", fmt) - -#define assert_ld_eq(a, b, fmt...) assert_cmp(long, a, b, ==, \ - !=, "ld", fmt) -#define assert_ld_ne(a, b, fmt...) assert_cmp(long, a, b, !=, \ - ==, "ld", fmt) -#define assert_ld_lt(a, b, fmt...) assert_cmp(long, a, b, <, \ - >=, "ld", fmt) -#define assert_ld_le(a, b, fmt...) assert_cmp(long, a, b, <=, \ - >, "ld", fmt) -#define assert_ld_ge(a, b, fmt...) assert_cmp(long, a, b, >=, \ - <, "ld", fmt) -#define assert_ld_gt(a, b, fmt...) assert_cmp(long, a, b, >, \ - <=, "ld", fmt) - -#define assert_lu_eq(a, b, fmt...) assert_cmp(unsigned long, \ - a, b, ==, !=, "lu", fmt) -#define assert_lu_ne(a, b, fmt...) assert_cmp(unsigned long, \ - a, b, !=, ==, "lu", fmt) -#define assert_lu_lt(a, b, fmt...) assert_cmp(unsigned long, \ - a, b, <, >=, "lu", fmt) -#define assert_lu_le(a, b, fmt...) assert_cmp(unsigned long, \ - a, b, <=, >, "lu", fmt) -#define assert_lu_ge(a, b, fmt...) 
assert_cmp(unsigned long, \ - a, b, >=, <, "lu", fmt) -#define assert_lu_gt(a, b, fmt...) assert_cmp(unsigned long, \ - a, b, >, <=, "lu", fmt) - -#define assert_qd_eq(a, b, fmt...) assert_cmp(long long, a, b, ==, \ - !=, "qd", fmt) -#define assert_qd_ne(a, b, fmt...) assert_cmp(long long, a, b, !=, \ - ==, "qd", fmt) -#define assert_qd_lt(a, b, fmt...) assert_cmp(long long, a, b, <, \ - >=, "qd", fmt) -#define assert_qd_le(a, b, fmt...) assert_cmp(long long, a, b, <=, \ - >, "qd", fmt) -#define assert_qd_ge(a, b, fmt...) assert_cmp(long long, a, b, >=, \ - <, "qd", fmt) -#define assert_qd_gt(a, b, fmt...) assert_cmp(long long, a, b, >, \ - <=, "qd", fmt) - -#define assert_qu_eq(a, b, fmt...) assert_cmp(unsigned long long, \ - a, b, ==, !=, "qu", fmt) -#define assert_qu_ne(a, b, fmt...) assert_cmp(unsigned long long, \ - a, b, !=, ==, "qu", fmt) -#define assert_qu_lt(a, b, fmt...) assert_cmp(unsigned long long, \ - a, b, <, >=, "qu", fmt) -#define assert_qu_le(a, b, fmt...) assert_cmp(unsigned long long, \ - a, b, <=, >, "qu", fmt) -#define assert_qu_ge(a, b, fmt...) assert_cmp(unsigned long long, \ - a, b, >=, <, "qu", fmt) -#define assert_qu_gt(a, b, fmt...) assert_cmp(unsigned long long, \ - a, b, >, <=, "qu", fmt) - -#define assert_jd_eq(a, b, fmt...) assert_cmp(intmax_t, a, b, ==, \ - !=, "jd", fmt) -#define assert_jd_ne(a, b, fmt...) assert_cmp(intmax_t, a, b, !=, \ - ==, "jd", fmt) -#define assert_jd_lt(a, b, fmt...) assert_cmp(intmax_t, a, b, <, \ - >=, "jd", fmt) -#define assert_jd_le(a, b, fmt...) assert_cmp(intmax_t, a, b, <=, \ - >, "jd", fmt) -#define assert_jd_ge(a, b, fmt...) assert_cmp(intmax_t, a, b, >=, \ - <, "jd", fmt) -#define assert_jd_gt(a, b, fmt...) assert_cmp(intmax_t, a, b, >, \ - <=, "jd", fmt) - -#define assert_ju_eq(a, b, fmt...) assert_cmp(uintmax_t, a, b, ==, \ - !=, "ju", fmt) -#define assert_ju_ne(a, b, fmt...) assert_cmp(uintmax_t, a, b, !=, \ - ==, "ju", fmt) -#define assert_ju_lt(a, b, fmt...) assert_cmp(uintmax_t, a, b, <, \ - >=, "ju", fmt) -#define assert_ju_le(a, b, fmt...) assert_cmp(uintmax_t, a, b, <=, \ - >, "ju", fmt) -#define assert_ju_ge(a, b, fmt...) assert_cmp(uintmax_t, a, b, >=, \ - <, "ju", fmt) -#define assert_ju_gt(a, b, fmt...) assert_cmp(uintmax_t, a, b, >, \ - <=, "ju", fmt) - -#define assert_zd_eq(a, b, fmt...) assert_cmp(ssize_t, a, b, ==, \ - !=, "zd", fmt) -#define assert_zd_ne(a, b, fmt...) assert_cmp(ssize_t, a, b, !=, \ - ==, "zd", fmt) -#define assert_zd_lt(a, b, fmt...) assert_cmp(ssize_t, a, b, <, \ - >=, "zd", fmt) -#define assert_zd_le(a, b, fmt...) assert_cmp(ssize_t, a, b, <=, \ - >, "zd", fmt) -#define assert_zd_ge(a, b, fmt...) assert_cmp(ssize_t, a, b, >=, \ - <, "zd", fmt) -#define assert_zd_gt(a, b, fmt...) assert_cmp(ssize_t, a, b, >, \ - <=, "zd", fmt) - -#define assert_zu_eq(a, b, fmt...) assert_cmp(size_t, a, b, ==, \ - !=, "zu", fmt) -#define assert_zu_ne(a, b, fmt...) assert_cmp(size_t, a, b, !=, \ - ==, "zu", fmt) -#define assert_zu_lt(a, b, fmt...) assert_cmp(size_t, a, b, <, \ - >=, "zu", fmt) -#define assert_zu_le(a, b, fmt...) assert_cmp(size_t, a, b, <=, \ - >, "zu", fmt) -#define assert_zu_ge(a, b, fmt...) assert_cmp(size_t, a, b, >=, \ - <, "zu", fmt) -#define assert_zu_gt(a, b, fmt...) assert_cmp(size_t, a, b, >, \ - <=, "zu", fmt) - -#define assert_d32_eq(a, b, fmt...) assert_cmp(int32_t, a, b, ==, \ - !=, PRId32, fmt) -#define assert_d32_ne(a, b, fmt...) assert_cmp(int32_t, a, b, !=, \ - ==, PRId32, fmt) -#define assert_d32_lt(a, b, fmt...) 
assert_cmp(int32_t, a, b, <, \ - >=, PRId32, fmt) -#define assert_d32_le(a, b, fmt...) assert_cmp(int32_t, a, b, <=, \ - >, PRId32, fmt) -#define assert_d32_ge(a, b, fmt...) assert_cmp(int32_t, a, b, >=, \ - <, PRId32, fmt) -#define assert_d32_gt(a, b, fmt...) assert_cmp(int32_t, a, b, >, \ - <=, PRId32, fmt) - -#define assert_u32_eq(a, b, fmt...) assert_cmp(uint32_t, a, b, ==, \ - !=, PRIu32, fmt) -#define assert_u32_ne(a, b, fmt...) assert_cmp(uint32_t, a, b, !=, \ - ==, PRIu32, fmt) -#define assert_u32_lt(a, b, fmt...) assert_cmp(uint32_t, a, b, <, \ - >=, PRIu32, fmt) -#define assert_u32_le(a, b, fmt...) assert_cmp(uint32_t, a, b, <=, \ - >, PRIu32, fmt) -#define assert_u32_ge(a, b, fmt...) assert_cmp(uint32_t, a, b, >=, \ - <, PRIu32, fmt) -#define assert_u32_gt(a, b, fmt...) assert_cmp(uint32_t, a, b, >, \ - <=, PRIu32, fmt) - -#define assert_d64_eq(a, b, fmt...) assert_cmp(int64_t, a, b, ==, \ - !=, PRId64, fmt) -#define assert_d64_ne(a, b, fmt...) assert_cmp(int64_t, a, b, !=, \ - ==, PRId64, fmt) -#define assert_d64_lt(a, b, fmt...) assert_cmp(int64_t, a, b, <, \ - >=, PRId64, fmt) -#define assert_d64_le(a, b, fmt...) assert_cmp(int64_t, a, b, <=, \ - >, PRId64, fmt) -#define assert_d64_ge(a, b, fmt...) assert_cmp(int64_t, a, b, >=, \ - <, PRId64, fmt) -#define assert_d64_gt(a, b, fmt...) assert_cmp(int64_t, a, b, >, \ - <=, PRId64, fmt) - -#define assert_u64_eq(a, b, fmt...) assert_cmp(uint64_t, a, b, ==, \ - !=, PRIu64, fmt) -#define assert_u64_ne(a, b, fmt...) assert_cmp(uint64_t, a, b, !=, \ - ==, PRIu64, fmt) -#define assert_u64_lt(a, b, fmt...) assert_cmp(uint64_t, a, b, <, \ - >=, PRIu64, fmt) -#define assert_u64_le(a, b, fmt...) assert_cmp(uint64_t, a, b, <=, \ - >, PRIu64, fmt) -#define assert_u64_ge(a, b, fmt...) assert_cmp(uint64_t, a, b, >=, \ - <, PRIu64, fmt) -#define assert_u64_gt(a, b, fmt...) assert_cmp(uint64_t, a, b, >, \ - <=, PRIu64, fmt) - -#define assert_b_eq(a, b, fmt...) do { \ - bool a_ = (a); \ - bool b_ = (b); \ - if (!(a_ == b_)) { \ - char prefix[ASSERT_BUFSIZE]; \ - char message[ASSERT_BUFSIZE]; \ - malloc_snprintf(prefix, sizeof(prefix), \ - "%s:%s:%d: Failed assertion: " \ - "(%s) == (%s) --> %s != %s: ", \ - __func__, __FILE__, __LINE__, \ - #a, #b, a_ ? "true" : "false", \ - b_ ? "true" : "false"); \ - malloc_snprintf(message, sizeof(message), fmt); \ - p_test_fail(prefix, message); \ - } \ -} while (0) -#define assert_b_ne(a, b, fmt...) do { \ - bool a_ = (a); \ - bool b_ = (b); \ - if (!(a_ != b_)) { \ - char prefix[ASSERT_BUFSIZE]; \ - char message[ASSERT_BUFSIZE]; \ - malloc_snprintf(prefix, sizeof(prefix), \ - "%s:%s:%d: Failed assertion: " \ - "(%s) != (%s) --> %s == %s: ", \ - __func__, __FILE__, __LINE__, \ - #a, #b, a_ ? "true" : "false", \ - b_ ? "true" : "false"); \ - malloc_snprintf(message, sizeof(message), fmt); \ - p_test_fail(prefix, message); \ - } \ -} while (0) -#define assert_true(a, fmt...) assert_b_eq(a, true, fmt) -#define assert_false(a, fmt...) assert_b_eq(a, false, fmt) - -#define assert_str_eq(a, b, fmt...) do { \ - if (strcmp((a), (b))) { \ - char prefix[ASSERT_BUFSIZE]; \ - char message[ASSERT_BUFSIZE]; \ - malloc_snprintf(prefix, sizeof(prefix), \ - "%s:%s:%d: Failed assertion: " \ - "(%s) same as (%s) --> " \ - "\"%s\" differs from \"%s\": ", \ - __func__, __FILE__, __LINE__, #a, #b, a, b); \ - malloc_snprintf(message, sizeof(message), fmt); \ - p_test_fail(prefix, message); \ - } \ -} while (0) -#define assert_str_ne(a, b, fmt...) 
do { \ - if (!strcmp((a), (b))) { \ - char prefix[ASSERT_BUFSIZE]; \ - char message[ASSERT_BUFSIZE]; \ - malloc_snprintf(prefix, sizeof(prefix), \ - "%s:%s:%d: Failed assertion: " \ - "(%s) differs from (%s) --> " \ - "\"%s\" same as \"%s\": ", \ - __func__, __FILE__, __LINE__, #a, #b, a, b); \ - malloc_snprintf(message, sizeof(message), fmt); \ - p_test_fail(prefix, message); \ - } \ -} while (0) - -#define assert_not_reached(fmt...) do { \ - char prefix[ASSERT_BUFSIZE]; \ - char message[ASSERT_BUFSIZE]; \ - malloc_snprintf(prefix, sizeof(prefix), \ - "%s:%s:%d: Unreachable code reached: ", \ - __func__, __FILE__, __LINE__); \ - malloc_snprintf(message, sizeof(message), fmt); \ - p_test_fail(prefix, message); \ -} while (0) - -/* - * If this enum changes, corresponding changes in test/test.sh.in are also - * necessary. - */ -typedef enum { - test_status_pass = 0, - test_status_skip = 1, - test_status_fail = 2, - - test_status_count = 3 -} test_status_t; - -typedef void (test_t)(void); - -#define TEST_BEGIN(f) \ -static void \ -f(void) \ -{ \ - p_test_init(#f); - -#define TEST_END \ - goto label_test_end; \ -label_test_end: \ - p_test_fini(); \ -} - -#define test(tests...) \ - p_test(tests, NULL) - -#define test_skip_if(e) do { \ - if (e) { \ - test_skip("%s:%s:%d: Test skipped: (%s)", \ - __func__, __FILE__, __LINE__, #e); \ - goto label_test_end; \ - } \ -} while (0) - -void test_skip(const char *format, ...) JEMALLOC_ATTR(format(printf, 1, 2)); -void test_fail(const char *format, ...) JEMALLOC_ATTR(format(printf, 1, 2)); - -/* For private use by macros. */ -test_status_t p_test(test_t* t, ...); -void p_test_init(const char *name); -void p_test_fini(void); -void p_test_fail(const char *prefix, const char *message); diff --git a/lib/jemalloc-3.6.0/test/include/test/thd.h b/lib/jemalloc-3.6.0/test/include/test/thd.h deleted file mode 100644 index f941d7a..0000000 --- a/lib/jemalloc-3.6.0/test/include/test/thd.h +++ /dev/null @@ -1,9 +0,0 @@ -/* Abstraction layer for threading in tests */ -#ifdef _WIN32 -typedef HANDLE thd_t; -#else -typedef pthread_t thd_t; -#endif - -void thd_create(thd_t *thd, void *(*proc)(void *), void *arg); -void thd_join(thd_t thd, void **ret); diff --git a/lib/jemalloc-3.6.0/test/integration/MALLOCX_ARENA.c b/lib/jemalloc-3.6.0/test/integration/MALLOCX_ARENA.c deleted file mode 100644 index 71cf6f2..0000000 --- a/lib/jemalloc-3.6.0/test/integration/MALLOCX_ARENA.c +++ /dev/null @@ -1,58 +0,0 @@ -#include "test/jemalloc_test.h" - -#define NTHREADS 10 - -void * -thd_start(void *arg) -{ - unsigned thread_ind = (unsigned)(uintptr_t)arg; - unsigned arena_ind; - void *p; - size_t sz; - - sz = sizeof(arena_ind); - assert_d_eq(mallctl("arenas.extend", &arena_ind, &sz, NULL, 0), 0, - "Error in arenas.extend"); - - if (thread_ind % 4 != 3) { - size_t mib[3]; - size_t miblen = sizeof(mib) / sizeof(size_t); - const char *dss_precs[] = {"disabled", "primary", "secondary"}; - const char *dss = dss_precs[thread_ind % - (sizeof(dss_precs)/sizeof(char*))]; - assert_d_eq(mallctlnametomib("arena.0.dss", mib, &miblen), 0, - "Error in mallctlnametomib()"); - mib[1] = arena_ind; - assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&dss, - sizeof(const char *)), 0, "Error in mallctlbymib()"); - } - - p = mallocx(1, MALLOCX_ARENA(arena_ind)); - assert_ptr_not_null(p, "Unexpected mallocx() error"); - dallocx(p, 0); - - return (NULL); -} - -TEST_BEGIN(test_ALLOCM_ARENA) -{ - thd_t thds[NTHREADS]; - unsigned i; - - for (i = 0; i < NTHREADS; i++) { - thd_create(&thds[i], thd_start, - 
(void *)(uintptr_t)i); - } - - for (i = 0; i < NTHREADS; i++) - thd_join(thds[i], NULL); -} -TEST_END - -int -main(void) -{ - - return (test( - test_ALLOCM_ARENA)); -} diff --git a/lib/jemalloc-3.6.0/test/integration/aligned_alloc.c b/lib/jemalloc-3.6.0/test/integration/aligned_alloc.c deleted file mode 100644 index 6090014..0000000 --- a/lib/jemalloc-3.6.0/test/integration/aligned_alloc.c +++ /dev/null @@ -1,125 +0,0 @@ -#include "test/jemalloc_test.h" - -#define CHUNK 0x400000 -/* #define MAXALIGN ((size_t)UINT64_C(0x80000000000)) */ -#define MAXALIGN ((size_t)0x2000000LU) -#define NITER 4 - -TEST_BEGIN(test_alignment_errors) -{ - size_t alignment; - void *p; - - alignment = 0; - set_errno(0); - p = aligned_alloc(alignment, 1); - assert_false(p != NULL || get_errno() != EINVAL, - "Expected error for invalid alignment %zu", alignment); - - for (alignment = sizeof(size_t); alignment < MAXALIGN; - alignment <<= 1) { - set_errno(0); - p = aligned_alloc(alignment + 1, 1); - assert_false(p != NULL || get_errno() != EINVAL, - "Expected error for invalid alignment %zu", - alignment + 1); - } -} -TEST_END - -TEST_BEGIN(test_oom_errors) -{ - size_t alignment, size; - void *p; - -#if LG_SIZEOF_PTR == 3 - alignment = UINT64_C(0x8000000000000000); - size = UINT64_C(0x8000000000000000); -#else - alignment = 0x80000000LU; - size = 0x80000000LU; -#endif - set_errno(0); - p = aligned_alloc(alignment, size); - assert_false(p != NULL || get_errno() != ENOMEM, - "Expected error for aligned_alloc(%zu, %zu)", - alignment, size); - -#if LG_SIZEOF_PTR == 3 - alignment = UINT64_C(0x4000000000000000); - size = UINT64_C(0xc000000000000001); -#else - alignment = 0x40000000LU; - size = 0xc0000001LU; -#endif - set_errno(0); - p = aligned_alloc(alignment, size); - assert_false(p != NULL || get_errno() != ENOMEM, - "Expected error for aligned_alloc(%zu, %zu)", - alignment, size); - - alignment = 0x10LU; -#if LG_SIZEOF_PTR == 3 - size = UINT64_C(0xfffffffffffffff0); -#else - size = 0xfffffff0LU; -#endif - set_errno(0); - p = aligned_alloc(alignment, size); - assert_false(p != NULL || get_errno() != ENOMEM, - "Expected error for aligned_alloc(&p, %zu, %zu)", - alignment, size); -} -TEST_END - -TEST_BEGIN(test_alignment_and_size) -{ - size_t alignment, size, total; - unsigned i; - void *ps[NITER]; - - for (i = 0; i < NITER; i++) - ps[i] = NULL; - - for (alignment = 8; - alignment <= MAXALIGN; - alignment <<= 1) { - total = 0; - for (size = 1; - size < 3 * alignment && size < (1U << 31); - size += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { - for (i = 0; i < NITER; i++) { - ps[i] = aligned_alloc(alignment, size); - if (ps[i] == NULL) { - char buf[BUFERROR_BUF]; - - buferror(get_errno(), buf, sizeof(buf)); - test_fail( - "Error for alignment=%zu, " - "size=%zu (%#zx): %s", - alignment, size, size, buf); - } - total += malloc_usable_size(ps[i]); - if (total >= (MAXALIGN << 1)) - break; - } - for (i = 0; i < NITER; i++) { - if (ps[i] != NULL) { - free(ps[i]); - ps[i] = NULL; - } - } - } - } -} -TEST_END - -int -main(void) -{ - - return (test( - test_alignment_errors, - test_oom_errors, - test_alignment_and_size)); -} diff --git a/lib/jemalloc-3.6.0/test/integration/allocated.c b/lib/jemalloc-3.6.0/test/integration/allocated.c deleted file mode 100644 index 3630e80..0000000 --- a/lib/jemalloc-3.6.0/test/integration/allocated.c +++ /dev/null @@ -1,125 +0,0 @@ -#include "test/jemalloc_test.h" - -static const bool config_stats = -#ifdef JEMALLOC_STATS - true -#else - false -#endif - ; - -void * -thd_start(void *arg) -{ - int err; - 
void *p; - uint64_t a0, a1, d0, d1; - uint64_t *ap0, *ap1, *dp0, *dp1; - size_t sz, usize; - - sz = sizeof(a0); - if ((err = mallctl("thread.allocated", &a0, &sz, NULL, 0))) { - if (err == ENOENT) - goto label_ENOENT; - test_fail("%s(): Error in mallctl(): %s", __func__, - strerror(err)); - } - sz = sizeof(ap0); - if ((err = mallctl("thread.allocatedp", &ap0, &sz, NULL, 0))) { - if (err == ENOENT) - goto label_ENOENT; - test_fail("%s(): Error in mallctl(): %s", __func__, - strerror(err)); - } - assert_u64_eq(*ap0, a0, - "\"thread.allocatedp\" should provide a pointer to internal " - "storage"); - - sz = sizeof(d0); - if ((err = mallctl("thread.deallocated", &d0, &sz, NULL, 0))) { - if (err == ENOENT) - goto label_ENOENT; - test_fail("%s(): Error in mallctl(): %s", __func__, - strerror(err)); - } - sz = sizeof(dp0); - if ((err = mallctl("thread.deallocatedp", &dp0, &sz, NULL, 0))) { - if (err == ENOENT) - goto label_ENOENT; - test_fail("%s(): Error in mallctl(): %s", __func__, - strerror(err)); - } - assert_u64_eq(*dp0, d0, - "\"thread.deallocatedp\" should provide a pointer to internal " - "storage"); - - p = malloc(1); - assert_ptr_not_null(p, "Unexpected malloc() error"); - - sz = sizeof(a1); - mallctl("thread.allocated", &a1, &sz, NULL, 0); - sz = sizeof(ap1); - mallctl("thread.allocatedp", &ap1, &sz, NULL, 0); - assert_u64_eq(*ap1, a1, - "Dereferenced \"thread.allocatedp\" value should equal " - "\"thread.allocated\" value"); - assert_ptr_eq(ap0, ap1, - "Pointer returned by \"thread.allocatedp\" should not change"); - - usize = malloc_usable_size(p); - assert_u64_le(a0 + usize, a1, - "Allocated memory counter should increase by at least the amount " - "explicitly allocated"); - - free(p); - - sz = sizeof(d1); - mallctl("thread.deallocated", &d1, &sz, NULL, 0); - sz = sizeof(dp1); - mallctl("thread.deallocatedp", &dp1, &sz, NULL, 0); - assert_u64_eq(*dp1, d1, - "Dereferenced \"thread.deallocatedp\" value should equal " - "\"thread.deallocated\" value"); - assert_ptr_eq(dp0, dp1, - "Pointer returned by \"thread.deallocatedp\" should not change"); - - assert_u64_le(d0 + usize, d1, - "Deallocated memory counter should increase by at least the amount " - "explicitly deallocated"); - - return (NULL); -label_ENOENT: - assert_false(config_stats, - "ENOENT should only be returned if stats are disabled"); - test_skip("\"thread.allocated\" mallctl not available"); - return (NULL); -} - -TEST_BEGIN(test_main_thread) -{ - - thd_start(NULL); -} -TEST_END - -TEST_BEGIN(test_subthread) -{ - thd_t thd; - - thd_create(&thd, thd_start, NULL); - thd_join(thd, NULL); -} -TEST_END - -int -main(void) -{ - - /* Run tests multiple times to check for bad interactions. 
*/ - return (test( - test_main_thread, - test_subthread, - test_main_thread, - test_subthread, - test_main_thread)); -} diff --git a/lib/jemalloc-3.6.0/test/integration/allocm.c b/lib/jemalloc-3.6.0/test/integration/allocm.c deleted file mode 100644 index 7b4ea0c..0000000 --- a/lib/jemalloc-3.6.0/test/integration/allocm.c +++ /dev/null @@ -1,107 +0,0 @@ -#include "test/jemalloc_test.h" - -#define CHUNK 0x400000 -#define MAXALIGN (((size_t)1) << 25) -#define NITER 4 - -TEST_BEGIN(test_basic) -{ - size_t nsz, rsz, sz; - void *p; - - sz = 42; - nsz = 0; - assert_d_eq(nallocm(&nsz, sz, 0), ALLOCM_SUCCESS, - "Unexpected nallocm() error"); - rsz = 0; - assert_d_eq(allocm(&p, &rsz, sz, 0), ALLOCM_SUCCESS, - "Unexpected allocm() error"); - assert_zu_ge(rsz, sz, "Real size smaller than expected"); - assert_zu_eq(nsz, rsz, "nallocm()/allocm() rsize mismatch"); - assert_d_eq(dallocm(p, 0), ALLOCM_SUCCESS, - "Unexpected dallocm() error"); - - assert_d_eq(allocm(&p, NULL, sz, 0), ALLOCM_SUCCESS, - "Unexpected allocm() error"); - assert_d_eq(dallocm(p, 0), ALLOCM_SUCCESS, - "Unexpected dallocm() error"); - - nsz = 0; - assert_d_eq(nallocm(&nsz, sz, ALLOCM_ZERO), ALLOCM_SUCCESS, - "Unexpected nallocm() error"); - rsz = 0; - assert_d_eq(allocm(&p, &rsz, sz, ALLOCM_ZERO), ALLOCM_SUCCESS, - "Unexpected allocm() error"); - assert_zu_eq(nsz, rsz, "nallocm()/allocm() rsize mismatch"); - assert_d_eq(dallocm(p, 0), ALLOCM_SUCCESS, - "Unexpected dallocm() error"); -} -TEST_END - -TEST_BEGIN(test_alignment_and_size) -{ - int r; - size_t nsz, rsz, sz, alignment, total; - unsigned i; - void *ps[NITER]; - - for (i = 0; i < NITER; i++) - ps[i] = NULL; - - for (alignment = 8; - alignment <= MAXALIGN; - alignment <<= 1) { - total = 0; - for (sz = 1; - sz < 3 * alignment && sz < (1U << 31); - sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { - for (i = 0; i < NITER; i++) { - nsz = 0; - r = nallocm(&nsz, sz, ALLOCM_ALIGN(alignment) | - ALLOCM_ZERO); - assert_d_eq(r, ALLOCM_SUCCESS, - "nallocm() error for alignment=%zu, " - "size=%zu (%#zx): %d", - alignment, sz, sz, r); - rsz = 0; - r = allocm(&ps[i], &rsz, sz, - ALLOCM_ALIGN(alignment) | ALLOCM_ZERO); - assert_d_eq(r, ALLOCM_SUCCESS, - "allocm() error for alignment=%zu, " - "size=%zu (%#zx): %d", - alignment, sz, sz, r); - assert_zu_ge(rsz, sz, - "Real size smaller than expected for " - "alignment=%zu, size=%zu", alignment, sz); - assert_zu_eq(nsz, rsz, - "nallocm()/allocm() rsize mismatch for " - "alignment=%zu, size=%zu", alignment, sz); - assert_ptr_null( - (void *)((uintptr_t)ps[i] & (alignment-1)), - "%p inadequately aligned for" - " alignment=%zu, size=%zu", ps[i], - alignment, sz); - sallocm(ps[i], &rsz, 0); - total += rsz; - if (total >= (MAXALIGN << 1)) - break; - } - for (i = 0; i < NITER; i++) { - if (ps[i] != NULL) { - dallocm(ps[i], 0); - ps[i] = NULL; - } - } - } - } -} -TEST_END - -int -main(void) -{ - - return (test( - test_basic, - test_alignment_and_size)); -} diff --git a/lib/jemalloc-3.6.0/test/integration/mallocx.c b/lib/jemalloc-3.6.0/test/integration/mallocx.c deleted file mode 100644 index 123e041..0000000 --- a/lib/jemalloc-3.6.0/test/integration/mallocx.c +++ /dev/null @@ -1,97 +0,0 @@ -#include "test/jemalloc_test.h" - -#define CHUNK 0x400000 -#define MAXALIGN (((size_t)1) << 25) -#define NITER 4 - -TEST_BEGIN(test_basic) -{ - size_t nsz, rsz, sz; - void *p; - - sz = 42; - nsz = nallocx(sz, 0); - assert_zu_ne(nsz, 0, "Unexpected nallocx() error"); - p = mallocx(sz, 0); - assert_ptr_not_null(p, "Unexpected mallocx() error"); - rsz = sallocx(p, 0); - 
assert_zu_ge(rsz, sz, "Real size smaller than expected"); - assert_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch"); - dallocx(p, 0); - - p = mallocx(sz, 0); - assert_ptr_not_null(p, "Unexpected mallocx() error"); - dallocx(p, 0); - - nsz = nallocx(sz, MALLOCX_ZERO); - assert_zu_ne(nsz, 0, "Unexpected nallocx() error"); - p = mallocx(sz, MALLOCX_ZERO); - assert_ptr_not_null(p, "Unexpected mallocx() error"); - rsz = sallocx(p, 0); - assert_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch"); - dallocx(p, 0); -} -TEST_END - -TEST_BEGIN(test_alignment_and_size) -{ - size_t nsz, rsz, sz, alignment, total; - unsigned i; - void *ps[NITER]; - - for (i = 0; i < NITER; i++) - ps[i] = NULL; - - for (alignment = 8; - alignment <= MAXALIGN; - alignment <<= 1) { - total = 0; - for (sz = 1; - sz < 3 * alignment && sz < (1U << 31); - sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { - for (i = 0; i < NITER; i++) { - nsz = nallocx(sz, MALLOCX_ALIGN(alignment) | - MALLOCX_ZERO); - assert_zu_ne(nsz, 0, - "nallocx() error for alignment=%zu, " - "size=%zu (%#zx)", alignment, sz, sz); - ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) | - MALLOCX_ZERO); - assert_ptr_not_null(ps[i], - "mallocx() error for alignment=%zu, " - "size=%zu (%#zx)", alignment, sz, sz); - rsz = sallocx(ps[i], 0); - assert_zu_ge(rsz, sz, - "Real size smaller than expected for " - "alignment=%zu, size=%zu", alignment, sz); - assert_zu_eq(nsz, rsz, - "nallocx()/sallocx() size mismatch for " - "alignment=%zu, size=%zu", alignment, sz); - assert_ptr_null( - (void *)((uintptr_t)ps[i] & (alignment-1)), - "%p inadequately aligned for" - " alignment=%zu, size=%zu", ps[i], - alignment, sz); - total += rsz; - if (total >= (MAXALIGN << 1)) - break; - } - for (i = 0; i < NITER; i++) { - if (ps[i] != NULL) { - dallocx(ps[i], 0); - ps[i] = NULL; - } - } - } - } -} -TEST_END - -int -main(void) -{ - - return (test( - test_basic, - test_alignment_and_size)); -} diff --git a/lib/jemalloc-3.6.0/test/integration/mremap.c b/lib/jemalloc-3.6.0/test/integration/mremap.c deleted file mode 100644 index a7fb7ef..0000000 --- a/lib/jemalloc-3.6.0/test/integration/mremap.c +++ /dev/null @@ -1,45 +0,0 @@ -#include "test/jemalloc_test.h" - -TEST_BEGIN(test_mremap) -{ - int err; - size_t sz, lg_chunk, chunksize, i; - char *p, *q; - - sz = sizeof(lg_chunk); - err = mallctl("opt.lg_chunk", &lg_chunk, &sz, NULL, 0); - assert_d_eq(err, 0, "Error in mallctl(): %s", strerror(err)); - chunksize = ((size_t)1U) << lg_chunk; - - p = (char *)malloc(chunksize); - assert_ptr_not_null(p, "malloc(%zu) --> %p", chunksize, p); - memset(p, 'a', chunksize); - - q = (char *)realloc(p, chunksize * 2); - assert_ptr_not_null(q, "realloc(%p, %zu) --> %p", p, chunksize * 2, - q); - for (i = 0; i < chunksize; i++) { - assert_c_eq(q[i], 'a', - "realloc() should preserve existing bytes across copies"); - } - - p = q; - - q = (char *)realloc(p, chunksize); - assert_ptr_not_null(q, "realloc(%p, %zu) --> %p", p, chunksize, q); - for (i = 0; i < chunksize; i++) { - assert_c_eq(q[i], 'a', - "realloc() should preserve existing bytes across copies"); - } - - free(q); -} -TEST_END - -int -main(void) -{ - - return (test( - test_mremap)); -} diff --git a/lib/jemalloc-3.6.0/test/integration/posix_memalign.c b/lib/jemalloc-3.6.0/test/integration/posix_memalign.c deleted file mode 100644 index 19741c6..0000000 --- a/lib/jemalloc-3.6.0/test/integration/posix_memalign.c +++ /dev/null @@ -1,119 +0,0 @@ -#include "test/jemalloc_test.h" - -#define CHUNK 0x400000 -/* #define MAXALIGN 
((size_t)UINT64_C(0x80000000000)) */ -#define MAXALIGN ((size_t)0x2000000LU) -#define NITER 4 - -TEST_BEGIN(test_alignment_errors) -{ - size_t alignment; - void *p; - - for (alignment = 0; alignment < sizeof(void *); alignment++) { - assert_d_eq(posix_memalign(&p, alignment, 1), EINVAL, - "Expected error for invalid alignment %zu", - alignment); - } - - for (alignment = sizeof(size_t); alignment < MAXALIGN; - alignment <<= 1) { - assert_d_ne(posix_memalign(&p, alignment + 1, 1), 0, - "Expected error for invalid alignment %zu", - alignment + 1); - } -} -TEST_END - -TEST_BEGIN(test_oom_errors) -{ - size_t alignment, size; - void *p; - -#if LG_SIZEOF_PTR == 3 - alignment = UINT64_C(0x8000000000000000); - size = UINT64_C(0x8000000000000000); -#else - alignment = 0x80000000LU; - size = 0x80000000LU; -#endif - assert_d_ne(posix_memalign(&p, alignment, size), 0, - "Expected error for posix_memalign(&p, %zu, %zu)", - alignment, size); - -#if LG_SIZEOF_PTR == 3 - alignment = UINT64_C(0x4000000000000000); - size = UINT64_C(0xc000000000000001); -#else - alignment = 0x40000000LU; - size = 0xc0000001LU; -#endif - assert_d_ne(posix_memalign(&p, alignment, size), 0, - "Expected error for posix_memalign(&p, %zu, %zu)", - alignment, size); - - alignment = 0x10LU; -#if LG_SIZEOF_PTR == 3 - size = UINT64_C(0xfffffffffffffff0); -#else - size = 0xfffffff0LU; -#endif - assert_d_ne(posix_memalign(&p, alignment, size), 0, - "Expected error for posix_memalign(&p, %zu, %zu)", - alignment, size); -} -TEST_END - -TEST_BEGIN(test_alignment_and_size) -{ - size_t alignment, size, total; - unsigned i; - int err; - void *ps[NITER]; - - for (i = 0; i < NITER; i++) - ps[i] = NULL; - - for (alignment = 8; - alignment <= MAXALIGN; - alignment <<= 1) { - total = 0; - for (size = 1; - size < 3 * alignment && size < (1U << 31); - size += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { - for (i = 0; i < NITER; i++) { - err = posix_memalign(&ps[i], - alignment, size); - if (err) { - char buf[BUFERROR_BUF]; - - buferror(get_errno(), buf, sizeof(buf)); - test_fail( - "Error for alignment=%zu, " - "size=%zu (%#zx): %s", - alignment, size, size, buf); - } - total += malloc_usable_size(ps[i]); - if (total >= (MAXALIGN << 1)) - break; - } - for (i = 0; i < NITER; i++) { - if (ps[i] != NULL) { - free(ps[i]); - ps[i] = NULL; - } - } - } - } -} -TEST_END - -int -main(void) -{ - - return (test( - test_alignment_errors, - test_oom_errors, - test_alignment_and_size)); -} diff --git a/lib/jemalloc-3.6.0/test/integration/rallocm.c b/lib/jemalloc-3.6.0/test/integration/rallocm.c deleted file mode 100644 index 33c11bb..0000000 --- a/lib/jemalloc-3.6.0/test/integration/rallocm.c +++ /dev/null @@ -1,111 +0,0 @@ -#include "test/jemalloc_test.h" - -TEST_BEGIN(test_same_size) -{ - void *p, *q; - size_t sz, tsz; - - assert_d_eq(allocm(&p, &sz, 42, 0), ALLOCM_SUCCESS, - "Unexpected allocm() error"); - - q = p; - assert_d_eq(rallocm(&q, &tsz, sz, 0, ALLOCM_NO_MOVE), ALLOCM_SUCCESS, - "Unexpected rallocm() error"); - assert_ptr_eq(q, p, "Unexpected object move"); - assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz); - - assert_d_eq(dallocm(p, 0), ALLOCM_SUCCESS, - "Unexpected dallocm() error"); -} -TEST_END - -TEST_BEGIN(test_extra_no_move) -{ - void *p, *q; - size_t sz, tsz; - - assert_d_eq(allocm(&p, &sz, 42, 0), ALLOCM_SUCCESS, - "Unexpected allocm() error"); - - q = p; - assert_d_eq(rallocm(&q, &tsz, sz, sz-42, ALLOCM_NO_MOVE), - ALLOCM_SUCCESS, "Unexpected rallocm() error"); - assert_ptr_eq(q, p, "Unexpected object move"); - 
assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz); - - assert_d_eq(dallocm(p, 0), ALLOCM_SUCCESS, - "Unexpected dallocm() error"); -} -TEST_END - -TEST_BEGIN(test_no_move_fail) -{ - void *p, *q; - size_t sz, tsz; - - assert_d_eq(allocm(&p, &sz, 42, 0), ALLOCM_SUCCESS, - "Unexpected allocm() error"); - - q = p; - assert_d_eq(rallocm(&q, &tsz, sz + 5, 0, ALLOCM_NO_MOVE), - ALLOCM_ERR_NOT_MOVED, "Unexpected rallocm() result"); - assert_ptr_eq(q, p, "Unexpected object move"); - assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz); - - assert_d_eq(dallocm(p, 0), ALLOCM_SUCCESS, - "Unexpected dallocm() error"); -} -TEST_END - -TEST_BEGIN(test_grow_and_shrink) -{ - void *p, *q; - size_t tsz; -#define NCYCLES 3 - unsigned i, j; -#define NSZS 2500 - size_t szs[NSZS]; -#define MAXSZ ZU(12 * 1024 * 1024) - - assert_d_eq(allocm(&p, &szs[0], 1, 0), ALLOCM_SUCCESS, - "Unexpected allocm() error"); - - for (i = 0; i < NCYCLES; i++) { - for (j = 1; j < NSZS && szs[j-1] < MAXSZ; j++) { - q = p; - assert_d_eq(rallocm(&q, &szs[j], szs[j-1]+1, 0, 0), - ALLOCM_SUCCESS, - "Unexpected rallocm() error for size=%zu-->%zu", - szs[j-1], szs[j-1]+1); - assert_zu_ne(szs[j], szs[j-1]+1, - "Expected size to at least: %zu", szs[j-1]+1); - p = q; - } - - for (j--; j > 0; j--) { - q = p; - assert_d_eq(rallocm(&q, &tsz, szs[j-1], 0, 0), - ALLOCM_SUCCESS, - "Unexpected rallocm() error for size=%zu-->%zu", - szs[j], szs[j-1]); - assert_zu_eq(tsz, szs[j-1], - "Expected size=%zu, got size=%zu", szs[j-1], tsz); - p = q; - } - } - - assert_d_eq(dallocm(p, 0), ALLOCM_SUCCESS, - "Unexpected dallocm() error"); -} -TEST_END - -int -main(void) -{ - - return (test( - test_same_size, - test_extra_no_move, - test_no_move_fail, - test_grow_and_shrink)); -} diff --git a/lib/jemalloc-3.6.0/test/integration/rallocx.c b/lib/jemalloc-3.6.0/test/integration/rallocx.c deleted file mode 100644 index ee21aed..0000000 --- a/lib/jemalloc-3.6.0/test/integration/rallocx.c +++ /dev/null @@ -1,182 +0,0 @@ -#include "test/jemalloc_test.h" - -TEST_BEGIN(test_grow_and_shrink) -{ - void *p, *q; - size_t tsz; -#define NCYCLES 3 - unsigned i, j; -#define NSZS 2500 - size_t szs[NSZS]; -#define MAXSZ ZU(12 * 1024 * 1024) - - p = mallocx(1, 0); - assert_ptr_not_null(p, "Unexpected mallocx() error"); - szs[0] = sallocx(p, 0); - - for (i = 0; i < NCYCLES; i++) { - for (j = 1; j < NSZS && szs[j-1] < MAXSZ; j++) { - q = rallocx(p, szs[j-1]+1, 0); - assert_ptr_not_null(q, - "Unexpected rallocx() error for size=%zu-->%zu", - szs[j-1], szs[j-1]+1); - szs[j] = sallocx(q, 0); - assert_zu_ne(szs[j], szs[j-1]+1, - "Expected size to at least: %zu", szs[j-1]+1); - p = q; - } - - for (j--; j > 0; j--) { - q = rallocx(p, szs[j-1], 0); - assert_ptr_not_null(q, - "Unexpected rallocx() error for size=%zu-->%zu", - szs[j], szs[j-1]); - tsz = sallocx(q, 0); - assert_zu_eq(tsz, szs[j-1], - "Expected size=%zu, got size=%zu", szs[j-1], tsz); - p = q; - } - } - - dallocx(p, 0); -#undef MAXSZ -#undef NSZS -#undef NCYCLES -} -TEST_END - -static bool -validate_fill(const void *p, uint8_t c, size_t offset, size_t len) -{ - bool ret = false; - const uint8_t *buf = (const uint8_t *)p; - size_t i; - - for (i = 0; i < len; i++) { - uint8_t b = buf[offset+i]; - if (b != c) { - test_fail("Allocation at %p contains %#x rather than " - "%#x at offset %zu", p, b, c, offset+i); - ret = true; - } - } - - return (ret); -} - -TEST_BEGIN(test_zero) -{ - void *p, *q; - size_t psz, qsz, i, j; - size_t start_sizes[] = {1, 3*1024, 63*1024, 4095*1024}; -#define FILL_BYTE 
0xaaU -#define RANGE 2048 - - for (i = 0; i < sizeof(start_sizes)/sizeof(size_t); i++) { - size_t start_size = start_sizes[i]; - p = mallocx(start_size, MALLOCX_ZERO); - assert_ptr_not_null(p, "Unexpected mallocx() error"); - psz = sallocx(p, 0); - - assert_false(validate_fill(p, 0, 0, psz), - "Expected zeroed memory"); - memset(p, FILL_BYTE, psz); - assert_false(validate_fill(p, FILL_BYTE, 0, psz), - "Expected filled memory"); - - for (j = 1; j < RANGE; j++) { - q = rallocx(p, start_size+j, MALLOCX_ZERO); - assert_ptr_not_null(q, "Unexpected rallocx() error"); - qsz = sallocx(q, 0); - if (q != p || qsz != psz) { - assert_false(validate_fill(q, FILL_BYTE, 0, - psz), "Expected filled memory"); - assert_false(validate_fill(q, 0, psz, qsz-psz), - "Expected zeroed memory"); - } - if (psz != qsz) { - memset(q+psz, FILL_BYTE, qsz-psz); - psz = qsz; - } - p = q; - } - assert_false(validate_fill(p, FILL_BYTE, 0, psz), - "Expected filled memory"); - dallocx(p, 0); - } -#undef FILL_BYTE -} -TEST_END - -TEST_BEGIN(test_align) -{ - void *p, *q; - size_t align; -#define MAX_ALIGN (ZU(1) << 25) - - align = ZU(1); - p = mallocx(1, MALLOCX_ALIGN(align)); - assert_ptr_not_null(p, "Unexpected mallocx() error"); - - for (align <<= 1; align <= MAX_ALIGN; align <<= 1) { - q = rallocx(p, 1, MALLOCX_ALIGN(align)); - assert_ptr_not_null(q, - "Unexpected rallocx() error for align=%zu", align); - assert_ptr_null( - (void *)((uintptr_t)q & (align-1)), - "%p inadequately aligned for align=%zu", - q, align); - p = q; - } - dallocx(p, 0); -#undef MAX_ALIGN -} -TEST_END - -TEST_BEGIN(test_lg_align_and_zero) -{ - void *p, *q; - size_t lg_align, sz; -#define MAX_LG_ALIGN 25 -#define MAX_VALIDATE (ZU(1) << 22) - - lg_align = ZU(0); - p = mallocx(1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO); - assert_ptr_not_null(p, "Unexpected mallocx() error"); - - for (lg_align++; lg_align <= MAX_LG_ALIGN; lg_align++) { - q = rallocx(p, 1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO); - assert_ptr_not_null(q, - "Unexpected rallocx() error for lg_align=%zu", lg_align); - assert_ptr_null( - (void *)((uintptr_t)q & ((ZU(1) << lg_align)-1)), - "%p inadequately aligned for lg_align=%zu", - q, lg_align); - sz = sallocx(q, 0); - if ((sz << 1) <= MAX_VALIDATE) { - assert_false(validate_fill(q, 0, 0, sz), - "Expected zeroed memory"); - } else { - assert_false(validate_fill(q, 0, 0, MAX_VALIDATE), - "Expected zeroed memory"); - assert_false(validate_fill(q+sz-MAX_VALIDATE, 0, 0, - MAX_VALIDATE), "Expected zeroed memory"); - } - p = q; - } - dallocx(p, 0); -#undef MAX_VALIDATE -#undef MAX_LG_ALIGN -} -TEST_END - -int -main(void) -{ - - return (test( - test_grow_and_shrink, - test_zero, - test_align, - test_lg_align_and_zero)); -} diff --git a/lib/jemalloc-3.6.0/test/integration/thread_arena.c b/lib/jemalloc-3.6.0/test/integration/thread_arena.c deleted file mode 100644 index 67be535..0000000 --- a/lib/jemalloc-3.6.0/test/integration/thread_arena.c +++ /dev/null @@ -1,79 +0,0 @@ -#include "test/jemalloc_test.h" - -#define NTHREADS 10 - -void * -thd_start(void *arg) -{ - unsigned main_arena_ind = *(unsigned *)arg; - void *p; - unsigned arena_ind; - size_t size; - int err; - - p = malloc(1); - assert_ptr_not_null(p, "Error in malloc()"); - free(p); - - size = sizeof(arena_ind); - if ((err = mallctl("thread.arena", &arena_ind, &size, &main_arena_ind, - sizeof(main_arena_ind)))) { - char buf[BUFERROR_BUF]; - - buferror(err, buf, sizeof(buf)); - test_fail("Error in mallctl(): %s", buf); - } - - size = sizeof(arena_ind); - if ((err = mallctl("thread.arena", 
&arena_ind, &size, NULL, 0))) { - char buf[BUFERROR_BUF]; - - buferror(err, buf, sizeof(buf)); - test_fail("Error in mallctl(): %s", buf); - } - assert_u_eq(arena_ind, main_arena_ind, - "Arena index should be same as for main thread"); - - return (NULL); -} - -TEST_BEGIN(test_thread_arena) -{ - void *p; - unsigned arena_ind; - size_t size; - int err; - thd_t thds[NTHREADS]; - unsigned i; - - p = malloc(1); - assert_ptr_not_null(p, "Error in malloc()"); - - size = sizeof(arena_ind); - if ((err = mallctl("thread.arena", &arena_ind, &size, NULL, 0))) { - char buf[BUFERROR_BUF]; - - buferror(err, buf, sizeof(buf)); - test_fail("Error in mallctl(): %s", buf); - } - - for (i = 0; i < NTHREADS; i++) { - thd_create(&thds[i], thd_start, - (void *)&arena_ind); - } - - for (i = 0; i < NTHREADS; i++) { - intptr_t join_ret; - thd_join(thds[i], (void *)&join_ret); - assert_zd_eq(join_ret, 0, "Unexpected thread join error"); - } -} -TEST_END - -int -main(void) -{ - - return (test( - test_thread_arena)); -} diff --git a/lib/jemalloc-3.6.0/test/integration/thread_tcache_enabled.c b/lib/jemalloc-3.6.0/test/integration/thread_tcache_enabled.c deleted file mode 100644 index f4e89c6..0000000 --- a/lib/jemalloc-3.6.0/test/integration/thread_tcache_enabled.c +++ /dev/null @@ -1,113 +0,0 @@ -#include "test/jemalloc_test.h" - -static const bool config_tcache = -#ifdef JEMALLOC_TCACHE - true -#else - false -#endif - ; - -void * -thd_start(void *arg) -{ - int err; - size_t sz; - bool e0, e1; - - sz = sizeof(bool); - if ((err = mallctl("thread.tcache.enabled", &e0, &sz, NULL, 0))) { - if (err == ENOENT) { - assert_false(config_tcache, - "ENOENT should only be returned if tcache is " - "disabled"); - } - goto label_ENOENT; - } - - if (e0) { - e1 = false; - assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), - 0, "Unexpected mallctl() error"); - assert_true(e0, "tcache should be enabled"); - } - - e1 = true; - assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, - "Unexpected mallctl() error"); - assert_false(e0, "tcache should be disabled"); - - e1 = true; - assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, - "Unexpected mallctl() error"); - assert_true(e0, "tcache should be enabled"); - - e1 = false; - assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, - "Unexpected mallctl() error"); - assert_true(e0, "tcache should be enabled"); - - e1 = false; - assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, - "Unexpected mallctl() error"); - assert_false(e0, "tcache should be disabled"); - - free(malloc(1)); - e1 = true; - assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, - "Unexpected mallctl() error"); - assert_false(e0, "tcache should be disabled"); - - free(malloc(1)); - e1 = true; - assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, - "Unexpected mallctl() error"); - assert_true(e0, "tcache should be enabled"); - - free(malloc(1)); - e1 = false; - assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, - "Unexpected mallctl() error"); - assert_true(e0, "tcache should be enabled"); - - free(malloc(1)); - e1 = false; - assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, - "Unexpected mallctl() error"); - assert_false(e0, "tcache should be disabled"); - - free(malloc(1)); - return (NULL); -label_ENOENT: - test_skip("\"thread.tcache.enabled\" mallctl not available"); - return (NULL); -} - -TEST_BEGIN(test_main_thread) -{ - - thd_start(NULL); -} -TEST_END - -TEST_BEGIN(test_subthread) 
-{ - thd_t thd; - - thd_create(&thd, thd_start, NULL); - thd_join(thd, NULL); -} -TEST_END - -int -main(void) -{ - - /* Run tests multiple times to check for bad interactions. */ - return (test( - test_main_thread, - test_subthread, - test_main_thread, - test_subthread, - test_main_thread)); -} diff --git a/lib/jemalloc-3.6.0/test/integration/xallocx.c b/lib/jemalloc-3.6.0/test/integration/xallocx.c deleted file mode 100644 index ab4cf94..0000000 --- a/lib/jemalloc-3.6.0/test/integration/xallocx.c +++ /dev/null @@ -1,59 +0,0 @@ -#include "test/jemalloc_test.h" - -TEST_BEGIN(test_same_size) -{ - void *p; - size_t sz, tsz; - - p = mallocx(42, 0); - assert_ptr_not_null(p, "Unexpected mallocx() error"); - sz = sallocx(p, 0); - - tsz = xallocx(p, sz, 0, 0); - assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz); - - dallocx(p, 0); -} -TEST_END - -TEST_BEGIN(test_extra_no_move) -{ - void *p; - size_t sz, tsz; - - p = mallocx(42, 0); - assert_ptr_not_null(p, "Unexpected mallocx() error"); - sz = sallocx(p, 0); - - tsz = xallocx(p, sz, sz-42, 0); - assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz); - - dallocx(p, 0); -} -TEST_END - -TEST_BEGIN(test_no_move_fail) -{ - void *p; - size_t sz, tsz; - - p = mallocx(42, 0); - assert_ptr_not_null(p, "Unexpected mallocx() error"); - sz = sallocx(p, 0); - - tsz = xallocx(p, sz + 5, 0, 0); - assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz); - - dallocx(p, 0); -} -TEST_END - -int -main(void) -{ - - return (test( - test_same_size, - test_extra_no_move, - test_no_move_fail)); -} diff --git a/lib/jemalloc-3.6.0/test/src/SFMT.c b/lib/jemalloc-3.6.0/test/src/SFMT.c deleted file mode 100644 index e6f8dee..0000000 --- a/lib/jemalloc-3.6.0/test/src/SFMT.c +++ /dev/null @@ -1,719 +0,0 @@ -/* - * This file derives from SFMT 1.3.3 - * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was - * released under the terms of the following license: - * - * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima - * University. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * * Neither the name of the Hiroshima University nor the names of - * its contributors may be used to endorse or promote products - * derived from this software without specific prior written - * permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -/** - * @file SFMT.c - * @brief SIMD oriented Fast Mersenne Twister(SFMT) - * - * @author Mutsuo Saito (Hiroshima University) - * @author Makoto Matsumoto (Hiroshima University) - * - * Copyright (C) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima - * University. All rights reserved. - * - * The new BSD License is applied to this software, see LICENSE.txt - */ -#define SFMT_C_ -#include "test/jemalloc_test.h" -#include "test/SFMT-params.h" - -#if defined(JEMALLOC_BIG_ENDIAN) && !defined(BIG_ENDIAN64) -#define BIG_ENDIAN64 1 -#endif -#if defined(__BIG_ENDIAN__) && !defined(__amd64) && !defined(BIG_ENDIAN64) -#define BIG_ENDIAN64 1 -#endif -#if defined(HAVE_ALTIVEC) && !defined(BIG_ENDIAN64) -#define BIG_ENDIAN64 1 -#endif -#if defined(ONLY64) && !defined(BIG_ENDIAN64) - #if defined(__GNUC__) - #error "-DONLY64 must be specified with -DBIG_ENDIAN64" - #endif -#undef ONLY64 -#endif -/*------------------------------------------------------ - 128-bit SIMD data type for Altivec, SSE2 or standard C - ------------------------------------------------------*/ -#if defined(HAVE_ALTIVEC) -/** 128-bit data structure */ -union W128_T { - vector unsigned int s; - uint32_t u[4]; -}; -/** 128-bit data type */ -typedef union W128_T w128_t; - -#elif defined(HAVE_SSE2) -/** 128-bit data structure */ -union W128_T { - __m128i si; - uint32_t u[4]; -}; -/** 128-bit data type */ -typedef union W128_T w128_t; - -#else - -/** 128-bit data structure */ -struct W128_T { - uint32_t u[4]; -}; -/** 128-bit data type */ -typedef struct W128_T w128_t; - -#endif - -struct sfmt_s { - /** the 128-bit internal state array */ - w128_t sfmt[N]; - /** index counter to the 32-bit internal state array */ - int idx; - /** a flag: it is 0 if and only if the internal state is not yet - * initialized. 
*/ - int initialized; -}; - -/*-------------------------------------- - FILE GLOBAL VARIABLES - internal state, index counter and flag - --------------------------------------*/ - -/** a parity check vector which certificate the period of 2^{MEXP} */ -static uint32_t parity[4] = {PARITY1, PARITY2, PARITY3, PARITY4}; - -/*---------------- - STATIC FUNCTIONS - ----------------*/ -JEMALLOC_INLINE_C int idxof(int i); -#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2)) -JEMALLOC_INLINE_C void rshift128(w128_t *out, w128_t const *in, int shift); -JEMALLOC_INLINE_C void lshift128(w128_t *out, w128_t const *in, int shift); -#endif -JEMALLOC_INLINE_C void gen_rand_all(sfmt_t *ctx); -JEMALLOC_INLINE_C void gen_rand_array(sfmt_t *ctx, w128_t *array, int size); -JEMALLOC_INLINE_C uint32_t func1(uint32_t x); -JEMALLOC_INLINE_C uint32_t func2(uint32_t x); -static void period_certification(sfmt_t *ctx); -#if defined(BIG_ENDIAN64) && !defined(ONLY64) -JEMALLOC_INLINE_C void swap(w128_t *array, int size); -#endif - -#if defined(HAVE_ALTIVEC) - #include "test/SFMT-alti.h" -#elif defined(HAVE_SSE2) - #include "test/SFMT-sse2.h" -#endif - -/** - * This function simulate a 64-bit index of LITTLE ENDIAN - * in BIG ENDIAN machine. - */ -#ifdef ONLY64 -JEMALLOC_INLINE_C int idxof(int i) { - return i ^ 1; -} -#else -JEMALLOC_INLINE_C int idxof(int i) { - return i; -} -#endif -/** - * This function simulates SIMD 128-bit right shift by the standard C. - * The 128-bit integer given in in is shifted by (shift * 8) bits. - * This function simulates the LITTLE ENDIAN SIMD. - * @param out the output of this function - * @param in the 128-bit data to be shifted - * @param shift the shift value - */ -#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2)) -#ifdef ONLY64 -JEMALLOC_INLINE_C void rshift128(w128_t *out, w128_t const *in, int shift) { - uint64_t th, tl, oh, ol; - - th = ((uint64_t)in->u[2] << 32) | ((uint64_t)in->u[3]); - tl = ((uint64_t)in->u[0] << 32) | ((uint64_t)in->u[1]); - - oh = th >> (shift * 8); - ol = tl >> (shift * 8); - ol |= th << (64 - shift * 8); - out->u[0] = (uint32_t)(ol >> 32); - out->u[1] = (uint32_t)ol; - out->u[2] = (uint32_t)(oh >> 32); - out->u[3] = (uint32_t)oh; -} -#else -JEMALLOC_INLINE_C void rshift128(w128_t *out, w128_t const *in, int shift) { - uint64_t th, tl, oh, ol; - - th = ((uint64_t)in->u[3] << 32) | ((uint64_t)in->u[2]); - tl = ((uint64_t)in->u[1] << 32) | ((uint64_t)in->u[0]); - - oh = th >> (shift * 8); - ol = tl >> (shift * 8); - ol |= th << (64 - shift * 8); - out->u[1] = (uint32_t)(ol >> 32); - out->u[0] = (uint32_t)ol; - out->u[3] = (uint32_t)(oh >> 32); - out->u[2] = (uint32_t)oh; -} -#endif -/** - * This function simulates SIMD 128-bit left shift by the standard C. - * The 128-bit integer given in in is shifted by (shift * 8) bits. - * This function simulates the LITTLE ENDIAN SIMD. 
- * @param out the output of this function - * @param in the 128-bit data to be shifted - * @param shift the shift value - */ -#ifdef ONLY64 -JEMALLOC_INLINE_C void lshift128(w128_t *out, w128_t const *in, int shift) { - uint64_t th, tl, oh, ol; - - th = ((uint64_t)in->u[2] << 32) | ((uint64_t)in->u[3]); - tl = ((uint64_t)in->u[0] << 32) | ((uint64_t)in->u[1]); - - oh = th << (shift * 8); - ol = tl << (shift * 8); - oh |= tl >> (64 - shift * 8); - out->u[0] = (uint32_t)(ol >> 32); - out->u[1] = (uint32_t)ol; - out->u[2] = (uint32_t)(oh >> 32); - out->u[3] = (uint32_t)oh; -} -#else -JEMALLOC_INLINE_C void lshift128(w128_t *out, w128_t const *in, int shift) { - uint64_t th, tl, oh, ol; - - th = ((uint64_t)in->u[3] << 32) | ((uint64_t)in->u[2]); - tl = ((uint64_t)in->u[1] << 32) | ((uint64_t)in->u[0]); - - oh = th << (shift * 8); - ol = tl << (shift * 8); - oh |= tl >> (64 - shift * 8); - out->u[1] = (uint32_t)(ol >> 32); - out->u[0] = (uint32_t)ol; - out->u[3] = (uint32_t)(oh >> 32); - out->u[2] = (uint32_t)oh; -} -#endif -#endif - -/** - * This function represents the recursion formula. - * @param r output - * @param a a 128-bit part of the internal state array - * @param b a 128-bit part of the internal state array - * @param c a 128-bit part of the internal state array - * @param d a 128-bit part of the internal state array - */ -#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2)) -#ifdef ONLY64 -JEMALLOC_INLINE_C void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c, - w128_t *d) { - w128_t x; - w128_t y; - - lshift128(&x, a, SL2); - rshift128(&y, c, SR2); - r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK2) ^ y.u[0] - ^ (d->u[0] << SL1); - r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK1) ^ y.u[1] - ^ (d->u[1] << SL1); - r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK4) ^ y.u[2] - ^ (d->u[2] << SL1); - r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK3) ^ y.u[3] - ^ (d->u[3] << SL1); -} -#else -JEMALLOC_INLINE_C void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c, - w128_t *d) { - w128_t x; - w128_t y; - - lshift128(&x, a, SL2); - rshift128(&y, c, SR2); - r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK1) ^ y.u[0] - ^ (d->u[0] << SL1); - r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK2) ^ y.u[1] - ^ (d->u[1] << SL1); - r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK3) ^ y.u[2] - ^ (d->u[2] << SL1); - r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK4) ^ y.u[3] - ^ (d->u[3] << SL1); -} -#endif -#endif - -#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2)) -/** - * This function fills the internal state array with pseudorandom - * integers. - */ -JEMALLOC_INLINE_C void gen_rand_all(sfmt_t *ctx) { - int i; - w128_t *r1, *r2; - - r1 = &ctx->sfmt[N - 2]; - r2 = &ctx->sfmt[N - 1]; - for (i = 0; i < N - POS1; i++) { - do_recursion(&ctx->sfmt[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1], r1, - r2); - r1 = r2; - r2 = &ctx->sfmt[i]; - } - for (; i < N; i++) { - do_recursion(&ctx->sfmt[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1 - N], r1, - r2); - r1 = r2; - r2 = &ctx->sfmt[i]; - } -} - -/** - * This function fills the user-specified array with pseudorandom - * integers. - * - * @param array an 128-bit array to be filled by pseudorandom numbers. - * @param size number of 128-bit pseudorandom numbers to be generated. 
- */ -JEMALLOC_INLINE_C void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { - int i, j; - w128_t *r1, *r2; - - r1 = &ctx->sfmt[N - 2]; - r2 = &ctx->sfmt[N - 1]; - for (i = 0; i < N - POS1; i++) { - do_recursion(&array[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1], r1, r2); - r1 = r2; - r2 = &array[i]; - } - for (; i < N; i++) { - do_recursion(&array[i], &ctx->sfmt[i], &array[i + POS1 - N], r1, r2); - r1 = r2; - r2 = &array[i]; - } - for (; i < size - N; i++) { - do_recursion(&array[i], &array[i - N], &array[i + POS1 - N], r1, r2); - r1 = r2; - r2 = &array[i]; - } - for (j = 0; j < 2 * N - size; j++) { - ctx->sfmt[j] = array[j + size - N]; - } - for (; i < size; i++, j++) { - do_recursion(&array[i], &array[i - N], &array[i + POS1 - N], r1, r2); - r1 = r2; - r2 = &array[i]; - ctx->sfmt[j] = array[i]; - } -} -#endif - -#if defined(BIG_ENDIAN64) && !defined(ONLY64) && !defined(HAVE_ALTIVEC) -JEMALLOC_INLINE_C void swap(w128_t *array, int size) { - int i; - uint32_t x, y; - - for (i = 0; i < size; i++) { - x = array[i].u[0]; - y = array[i].u[2]; - array[i].u[0] = array[i].u[1]; - array[i].u[2] = array[i].u[3]; - array[i].u[1] = x; - array[i].u[3] = y; - } -} -#endif -/** - * This function represents a function used in the initialization - * by init_by_array - * @param x 32-bit integer - * @return 32-bit integer - */ -static uint32_t func1(uint32_t x) { - return (x ^ (x >> 27)) * (uint32_t)1664525UL; -} - -/** - * This function represents a function used in the initialization - * by init_by_array - * @param x 32-bit integer - * @return 32-bit integer - */ -static uint32_t func2(uint32_t x) { - return (x ^ (x >> 27)) * (uint32_t)1566083941UL; -} - -/** - * This function certificate the period of 2^{MEXP} - */ -static void period_certification(sfmt_t *ctx) { - int inner = 0; - int i, j; - uint32_t work; - uint32_t *psfmt32 = &ctx->sfmt[0].u[0]; - - for (i = 0; i < 4; i++) - inner ^= psfmt32[idxof(i)] & parity[i]; - for (i = 16; i > 0; i >>= 1) - inner ^= inner >> i; - inner &= 1; - /* check OK */ - if (inner == 1) { - return; - } - /* check NG, and modification */ - for (i = 0; i < 4; i++) { - work = 1; - for (j = 0; j < 32; j++) { - if ((work & parity[i]) != 0) { - psfmt32[idxof(i)] ^= work; - return; - } - work = work << 1; - } - } -} - -/*---------------- - PUBLIC FUNCTIONS - ----------------*/ -/** - * This function returns the identification string. - * The string shows the word size, the Mersenne exponent, - * and all parameters of this generator. - */ -const char *get_idstring(void) { - return IDSTR; -} - -/** - * This function returns the minimum size of array used for \b - * fill_array32() function. - * @return minimum size of array used for fill_array32() function. - */ -int get_min_array_size32(void) { - return N32; -} - -/** - * This function returns the minimum size of array used for \b - * fill_array64() function. - * @return minimum size of array used for fill_array64() function. - */ -int get_min_array_size64(void) { - return N64; -} - -#ifndef ONLY64 -/** - * This function generates and returns 32-bit pseudorandom number. - * init_gen_rand or init_by_array must be called before this function. - * @return 32-bit pseudorandom number - */ -uint32_t gen_rand32(sfmt_t *ctx) { - uint32_t r; - uint32_t *psfmt32 = &ctx->sfmt[0].u[0]; - - assert(ctx->initialized); - if (ctx->idx >= N32) { - gen_rand_all(ctx); - ctx->idx = 0; - } - r = psfmt32[ctx->idx++]; - return r; -} - -/* Generate a random integer in [0..limit). 
*/ -uint32_t gen_rand32_range(sfmt_t *ctx, uint32_t limit) { - uint32_t ret, above; - - above = 0xffffffffU - (0xffffffffU % limit); - while (1) { - ret = gen_rand32(ctx); - if (ret < above) { - ret %= limit; - break; - } - } - return ret; -} -#endif -/** - * This function generates and returns 64-bit pseudorandom number. - * init_gen_rand or init_by_array must be called before this function. - * The function gen_rand64 should not be called after gen_rand32, - * unless an initialization is again executed. - * @return 64-bit pseudorandom number - */ -uint64_t gen_rand64(sfmt_t *ctx) { -#if defined(BIG_ENDIAN64) && !defined(ONLY64) - uint32_t r1, r2; - uint32_t *psfmt32 = &ctx->sfmt[0].u[0]; -#else - uint64_t r; - uint64_t *psfmt64 = (uint64_t *)&ctx->sfmt[0].u[0]; -#endif - - assert(ctx->initialized); - assert(ctx->idx % 2 == 0); - - if (ctx->idx >= N32) { - gen_rand_all(ctx); - ctx->idx = 0; - } -#if defined(BIG_ENDIAN64) && !defined(ONLY64) - r1 = psfmt32[ctx->idx]; - r2 = psfmt32[ctx->idx + 1]; - ctx->idx += 2; - return ((uint64_t)r2 << 32) | r1; -#else - r = psfmt64[ctx->idx / 2]; - ctx->idx += 2; - return r; -#endif -} - -/* Generate a random integer in [0..limit). */ -uint64_t gen_rand64_range(sfmt_t *ctx, uint64_t limit) { - uint64_t ret, above; - - above = 0xffffffffffffffffLLU - (0xffffffffffffffffLLU % limit); - while (1) { - ret = gen_rand64(ctx); - if (ret < above) { - ret %= limit; - break; - } - } - return ret; -} - -#ifndef ONLY64 -/** - * This function generates pseudorandom 32-bit integers in the - * specified array[] by one call. The number of pseudorandom integers - * is specified by the argument size, which must be at least 624 and a - * multiple of four. The generation by this function is much faster - * than the following gen_rand function. - * - * For initialization, init_gen_rand or init_by_array must be called - * before the first call of this function. This function can not be - * used after calling gen_rand function, without initialization. - * - * @param array an array where pseudorandom 32-bit integers are filled - * by this function. The pointer to the array must be \b "aligned" - * (namely, must be a multiple of 16) in the SIMD version, since it - * refers to the address of a 128-bit integer. In the standard C - * version, the pointer is arbitrary. - * - * @param size the number of 32-bit pseudorandom integers to be - * generated. size must be a multiple of 4, and greater than or equal - * to (MEXP / 128 + 1) * 4. - * - * @note \b memalign or \b posix_memalign is available to get aligned - * memory. Mac OSX doesn't have these functions, but \b malloc of OSX - * returns the pointer to the aligned memory block. - */ -void fill_array32(sfmt_t *ctx, uint32_t *array, int size) { - assert(ctx->initialized); - assert(ctx->idx == N32); - assert(size % 4 == 0); - assert(size >= N32); - - gen_rand_array(ctx, (w128_t *)array, size / 4); - ctx->idx = N32; -} -#endif - -/** - * This function generates pseudorandom 64-bit integers in the - * specified array[] by one call. The number of pseudorandom integers - * is specified by the argument size, which must be at least 312 and a - * multiple of two. The generation by this function is much faster - * than the following gen_rand function. - * - * For initialization, init_gen_rand or init_by_array must be called - * before the first call of this function. This function can not be - * used after calling gen_rand function, without initialization. 
- * - * @param array an array where pseudorandom 64-bit integers are filled - * by this function. The pointer to the array must be "aligned" - * (namely, must be a multiple of 16) in the SIMD version, since it - * refers to the address of a 128-bit integer. In the standard C - * version, the pointer is arbitrary. - * - * @param size the number of 64-bit pseudorandom integers to be - * generated. size must be a multiple of 2, and greater than or equal - * to (MEXP / 128 + 1) * 2 - * - * @note \b memalign or \b posix_memalign is available to get aligned - * memory. Mac OSX doesn't have these functions, but \b malloc of OSX - * returns the pointer to the aligned memory block. - */ -void fill_array64(sfmt_t *ctx, uint64_t *array, int size) { - assert(ctx->initialized); - assert(ctx->idx == N32); - assert(size % 2 == 0); - assert(size >= N64); - - gen_rand_array(ctx, (w128_t *)array, size / 2); - ctx->idx = N32; - -#if defined(BIG_ENDIAN64) && !defined(ONLY64) - swap((w128_t *)array, size /2); -#endif -} - -/** - * This function initializes the internal state array with a 32-bit - * integer seed. - * - * @param seed a 32-bit integer used as the seed. - */ -sfmt_t *init_gen_rand(uint32_t seed) { - void *p; - sfmt_t *ctx; - int i; - uint32_t *psfmt32; - - if (posix_memalign(&p, sizeof(w128_t), sizeof(sfmt_t)) != 0) { - return NULL; - } - ctx = (sfmt_t *)p; - psfmt32 = &ctx->sfmt[0].u[0]; - - psfmt32[idxof(0)] = seed; - for (i = 1; i < N32; i++) { - psfmt32[idxof(i)] = 1812433253UL * (psfmt32[idxof(i - 1)] - ^ (psfmt32[idxof(i - 1)] >> 30)) - + i; - } - ctx->idx = N32; - period_certification(ctx); - ctx->initialized = 1; - - return ctx; -} - -/** - * This function initializes the internal state array, - * with an array of 32-bit integers used as the seeds - * @param init_key the array of 32-bit integers, used as a seed. - * @param key_length the length of init_key. 
- */ -sfmt_t *init_by_array(uint32_t *init_key, int key_length) { - void *p; - sfmt_t *ctx; - int i, j, count; - uint32_t r; - int lag; - int mid; - int size = N * 4; - uint32_t *psfmt32; - - if (posix_memalign(&p, sizeof(w128_t), sizeof(sfmt_t)) != 0) { - return NULL; - } - ctx = (sfmt_t *)p; - psfmt32 = &ctx->sfmt[0].u[0]; - - if (size >= 623) { - lag = 11; - } else if (size >= 68) { - lag = 7; - } else if (size >= 39) { - lag = 5; - } else { - lag = 3; - } - mid = (size - lag) / 2; - - memset(ctx->sfmt, 0x8b, sizeof(ctx->sfmt)); - if (key_length + 1 > N32) { - count = key_length + 1; - } else { - count = N32; - } - r = func1(psfmt32[idxof(0)] ^ psfmt32[idxof(mid)] - ^ psfmt32[idxof(N32 - 1)]); - psfmt32[idxof(mid)] += r; - r += key_length; - psfmt32[idxof(mid + lag)] += r; - psfmt32[idxof(0)] = r; - - count--; - for (i = 1, j = 0; (j < count) && (j < key_length); j++) { - r = func1(psfmt32[idxof(i)] ^ psfmt32[idxof((i + mid) % N32)] - ^ psfmt32[idxof((i + N32 - 1) % N32)]); - psfmt32[idxof((i + mid) % N32)] += r; - r += init_key[j] + i; - psfmt32[idxof((i + mid + lag) % N32)] += r; - psfmt32[idxof(i)] = r; - i = (i + 1) % N32; - } - for (; j < count; j++) { - r = func1(psfmt32[idxof(i)] ^ psfmt32[idxof((i + mid) % N32)] - ^ psfmt32[idxof((i + N32 - 1) % N32)]); - psfmt32[idxof((i + mid) % N32)] += r; - r += i; - psfmt32[idxof((i + mid + lag) % N32)] += r; - psfmt32[idxof(i)] = r; - i = (i + 1) % N32; - } - for (j = 0; j < N32; j++) { - r = func2(psfmt32[idxof(i)] + psfmt32[idxof((i + mid) % N32)] - + psfmt32[idxof((i + N32 - 1) % N32)]); - psfmt32[idxof((i + mid) % N32)] ^= r; - r -= i; - psfmt32[idxof((i + mid + lag) % N32)] ^= r; - psfmt32[idxof(i)] = r; - i = (i + 1) % N32; - } - - ctx->idx = N32; - period_certification(ctx); - ctx->initialized = 1; - - return ctx; -} - -void fini_gen_rand(sfmt_t *ctx) { - assert(ctx != NULL); - - ctx->initialized = 0; - free(ctx); -} diff --git a/lib/jemalloc-3.6.0/test/src/math.c b/lib/jemalloc-3.6.0/test/src/math.c deleted file mode 100644 index 887a363..0000000 --- a/lib/jemalloc-3.6.0/test/src/math.c +++ /dev/null @@ -1,2 +0,0 @@ -#define MATH_C_ -#include "test/jemalloc_test.h" diff --git a/lib/jemalloc-3.6.0/test/src/mtx.c b/lib/jemalloc-3.6.0/test/src/mtx.c deleted file mode 100644 index 41b95d5..0000000 --- a/lib/jemalloc-3.6.0/test/src/mtx.c +++ /dev/null @@ -1,62 +0,0 @@ -#include "test/jemalloc_test.h" - -bool -mtx_init(mtx_t *mtx) -{ - -#ifdef _WIN32 - if (!InitializeCriticalSectionAndSpinCount(&mtx->lock, _CRT_SPINCOUNT)) - return (true); -#elif (defined(JEMALLOC_OSSPIN)) - mtx->lock = 0; -#else - pthread_mutexattr_t attr; - - if (pthread_mutexattr_init(&attr) != 0) - return (true); - pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_DEFAULT); - if (pthread_mutex_init(&mtx->lock, &attr) != 0) { - pthread_mutexattr_destroy(&attr); - return (true); - } - pthread_mutexattr_destroy(&attr); -#endif - return (false); -} - -void -mtx_fini(mtx_t *mtx) -{ - -#ifdef _WIN32 -#elif (defined(JEMALLOC_OSSPIN)) -#else - pthread_mutex_destroy(&mtx->lock); -#endif -} - -void -mtx_lock(mtx_t *mtx) -{ - -#ifdef _WIN32 - EnterCriticalSection(&mtx->lock); -#elif (defined(JEMALLOC_OSSPIN)) - OSSpinLockLock(&mtx->lock); -#else - pthread_mutex_lock(&mtx->lock); -#endif -} - -void -mtx_unlock(mtx_t *mtx) -{ - -#ifdef _WIN32 - LeaveCriticalSection(&mtx->lock); -#elif (defined(JEMALLOC_OSSPIN)) - OSSpinLockUnlock(&mtx->lock); -#else - pthread_mutex_unlock(&mtx->lock); -#endif -} diff --git a/lib/jemalloc-3.6.0/test/src/test.c b/lib/jemalloc-3.6.0/test/src/test.c 
deleted file mode 100644 index 528d858..0000000 --- a/lib/jemalloc-3.6.0/test/src/test.c +++ /dev/null @@ -1,94 +0,0 @@ -#include "test/jemalloc_test.h" - -static unsigned test_count = 0; -static test_status_t test_counts[test_status_count] = {0, 0, 0}; -static test_status_t test_status = test_status_pass; -static const char * test_name = ""; - -JEMALLOC_ATTR(format(printf, 1, 2)) -void -test_skip(const char *format, ...) -{ - va_list ap; - - va_start(ap, format); - malloc_vcprintf(NULL, NULL, format, ap); - va_end(ap); - malloc_printf("\n"); - test_status = test_status_skip; -} - -JEMALLOC_ATTR(format(printf, 1, 2)) -void -test_fail(const char *format, ...) -{ - va_list ap; - - va_start(ap, format); - malloc_vcprintf(NULL, NULL, format, ap); - va_end(ap); - malloc_printf("\n"); - test_status = test_status_fail; -} - -static const char * -test_status_string(test_status_t test_status) -{ - - switch (test_status) { - case test_status_pass: return "pass"; - case test_status_skip: return "skip"; - case test_status_fail: return "fail"; - default: not_reached(); - } -} - -void -p_test_init(const char *name) -{ - - test_count++; - test_status = test_status_pass; - test_name = name; -} - -void -p_test_fini(void) -{ - - test_counts[test_status]++; - malloc_printf("%s: %s\n", test_name, test_status_string(test_status)); -} - -test_status_t -p_test(test_t* t, ...) -{ - test_status_t ret = test_status_pass; - va_list ap; - - va_start(ap, t); - for (; t != NULL; t = va_arg(ap, test_t*)) { - t(); - if (test_status > ret) - ret = test_status; - } - va_end(ap); - - malloc_printf("--- %s: %u/%u, %s: %u/%u, %s: %u/%u ---\n", - test_status_string(test_status_pass), - test_counts[test_status_pass], test_count, - test_status_string(test_status_skip), - test_counts[test_status_skip], test_count, - test_status_string(test_status_fail), - test_counts[test_status_fail], test_count); - - return (ret); -} - -void -p_test_fail(const char *prefix, const char *message) -{ - - malloc_cprintf(NULL, NULL, "%s%s\n", prefix, message); - test_status = test_status_fail; -} diff --git a/lib/jemalloc-3.6.0/test/src/thd.c b/lib/jemalloc-3.6.0/test/src/thd.c deleted file mode 100644 index 233242a..0000000 --- a/lib/jemalloc-3.6.0/test/src/thd.c +++ /dev/null @@ -1,35 +0,0 @@ -#include "test/jemalloc_test.h" - -#ifdef _WIN32 -void -thd_create(thd_t *thd, void *(*proc)(void *), void *arg) -{ - LPTHREAD_START_ROUTINE routine = (LPTHREAD_START_ROUTINE)proc; - *thd = CreateThread(NULL, 0, routine, arg, 0, NULL); - if (*thd == NULL) - test_fail("Error in CreateThread()\n"); -} - -void -thd_join(thd_t thd, void **ret) -{ - - WaitForSingleObject(thd, INFINITE); -} - -#else -void -thd_create(thd_t *thd, void *(*proc)(void *), void *arg) -{ - - if (pthread_create(thd, NULL, proc, arg) != 0) - test_fail("Error in pthread_create()\n"); -} - -void -thd_join(thd_t thd, void **ret) -{ - - pthread_join(thd, ret); -} -#endif diff --git a/lib/jemalloc-3.6.0/test/test.sh.in b/lib/jemalloc-3.6.0/test/test.sh.in deleted file mode 100644 index a39f99f..0000000 --- a/lib/jemalloc-3.6.0/test/test.sh.in +++ /dev/null @@ -1,53 +0,0 @@ -#!/bin/sh - -case @abi@ in - macho) - export DYLD_FALLBACK_LIBRARY_PATH="@objroot@lib" - ;; - pecoff) - export PATH="${PATH}:@objroot@lib" - ;; - *) - ;; -esac - -# Corresponds to test_status_t. 
-pass_code=0 -skip_code=1 -fail_code=2 - -pass_count=0 -skip_count=0 -fail_count=0 -for t in $@; do - if [ $pass_count -ne 0 -o $skip_count -ne 0 -o $fail_count != 0 ] ; then - echo - fi - echo "=== ${t} ===" - ${t}@exe@ @abs_srcroot@ @abs_objroot@ - result_code=$? - case ${result_code} in - ${pass_code}) - pass_count=$((pass_count+1)) - ;; - ${skip_code}) - skip_count=$((skip_count+1)) - ;; - ${fail_code}) - fail_count=$((fail_count+1)) - ;; - *) - echo "Test harness error" 1>&2 - exit 1 - esac -done - -total_count=`expr ${pass_count} + ${skip_count} + ${fail_count}` -echo -echo "Test suite summary: pass: ${pass_count}/${total_count}, skip: ${skip_count}/${total_count}, fail: ${fail_count}/${total_count}" - -if [ ${fail_count} -eq 0 ] ; then - exit 0 -else - exit 1 -fi diff --git a/lib/jemalloc-3.6.0/test/unit/SFMT.c b/lib/jemalloc-3.6.0/test/unit/SFMT.c deleted file mode 100644 index c57bd68..0000000 --- a/lib/jemalloc-3.6.0/test/unit/SFMT.c +++ /dev/null @@ -1,1605 +0,0 @@ -/* - * This file derives from SFMT 1.3.3 - * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was - * released under the terms of the following license: - * - * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima - * University. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * * Neither the name of the Hiroshima University nor the names of - * its contributors may be used to endorse or promote products - * derived from this software without specific prior written - * permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ -#include "test/jemalloc_test.h" - -#define BLOCK_SIZE 10000 -#define BLOCK_SIZE64 (BLOCK_SIZE / 2) -#define COUNT_1 1000 -#define COUNT_2 700 - -static const uint32_t init_gen_rand_32_expected[] = { - 3440181298U, 1564997079U, 1510669302U, 2930277156U, 1452439940U, - 3796268453U, 423124208U, 2143818589U, 3827219408U, 2987036003U, - 2674978610U, 1536842514U, 2027035537U, 2534897563U, 1686527725U, - 545368292U, 1489013321U, 1370534252U, 4231012796U, 3994803019U, - 1764869045U, 824597505U, 862581900U, 2469764249U, 812862514U, - 359318673U, 116957936U, 3367389672U, 2327178354U, 1898245200U, - 3206507879U, 2378925033U, 1040214787U, 2524778605U, 3088428700U, - 1417665896U, 964324147U, 2282797708U, 2456269299U, 313400376U, - 2245093271U, 1015729427U, 2694465011U, 3246975184U, 1992793635U, - 463679346U, 3721104591U, 3475064196U, 856141236U, 1499559719U, - 3522818941U, 3721533109U, 1954826617U, 1282044024U, 1543279136U, - 1301863085U, 2669145051U, 4221477354U, 3896016841U, 3392740262U, - 462466863U, 1037679449U, 1228140306U, 922298197U, 1205109853U, - 1872938061U, 3102547608U, 2742766808U, 1888626088U, 4028039414U, - 157593879U, 1136901695U, 4038377686U, 3572517236U, 4231706728U, - 2997311961U, 1189931652U, 3981543765U, 2826166703U, 87159245U, - 1721379072U, 3897926942U, 1790395498U, 2569178939U, 1047368729U, - 2340259131U, 3144212906U, 2301169789U, 2442885464U, 3034046771U, - 3667880593U, 3935928400U, 2372805237U, 1666397115U, 2460584504U, - 513866770U, 3810869743U, 2147400037U, 2792078025U, 2941761810U, - 3212265810U, 984692259U, 346590253U, 1804179199U, 3298543443U, - 750108141U, 2880257022U, 243310542U, 1869036465U, 1588062513U, - 2983949551U, 1931450364U, 4034505847U, 2735030199U, 1628461061U, - 2539522841U, 127965585U, 3992448871U, 913388237U, 559130076U, - 1202933193U, 4087643167U, 2590021067U, 2256240196U, 1746697293U, - 1013913783U, 1155864921U, 2715773730U, 915061862U, 1948766573U, - 2322882854U, 3761119102U, 1343405684U, 3078711943U, 3067431651U, - 3245156316U, 3588354584U, 3484623306U, 3899621563U, 4156689741U, - 3237090058U, 3880063844U, 862416318U, 4039923869U, 2303788317U, - 3073590536U, 701653667U, 2131530884U, 3169309950U, 2028486980U, - 747196777U, 3620218225U, 432016035U, 1449580595U, 2772266392U, - 444224948U, 1662832057U, 3184055582U, 3028331792U, 1861686254U, - 1104864179U, 342430307U, 1350510923U, 3024656237U, 1028417492U, - 2870772950U, 290847558U, 3675663500U, 508431529U, 4264340390U, - 2263569913U, 1669302976U, 519511383U, 2706411211U, 3764615828U, - 3883162495U, 4051445305U, 2412729798U, 3299405164U, 3991911166U, - 2348767304U, 2664054906U, 3763609282U, 593943581U, 3757090046U, - 2075338894U, 2020550814U, 4287452920U, 4290140003U, 1422957317U, - 2512716667U, 2003485045U, 2307520103U, 2288472169U, 3940751663U, - 4204638664U, 2892583423U, 1710068300U, 3904755993U, 2363243951U, - 3038334120U, 547099465U, 771105860U, 3199983734U, 4282046461U, - 2298388363U, 934810218U, 2837827901U, 3952500708U, 2095130248U, - 3083335297U, 26885281U, 3932155283U, 1531751116U, 1425227133U, - 495654159U, 3279634176U, 3855562207U, 3957195338U, 4159985527U, - 893375062U, 1875515536U, 1327247422U, 3754140693U, 1028923197U, - 1729880440U, 805571298U, 448971099U, 2726757106U, 2749436461U, - 2485987104U, 175337042U, 3235477922U, 3882114302U, 2020970972U, - 943926109U, 2762587195U, 1904195558U, 3452650564U, 108432281U, - 3893463573U, 3977583081U, 2636504348U, 1110673525U, 3548479841U, - 4258854744U, 980047703U, 4057175418U, 3890008292U, 145653646U, - 3141868989U, 3293216228U, 1194331837U, 
1254570642U, 3049934521U, - 2868313360U, 2886032750U, 1110873820U, 279553524U, 3007258565U, - 1104807822U, 3186961098U, 315764646U, 2163680838U, 3574508994U, - 3099755655U, 191957684U, 3642656737U, 3317946149U, 3522087636U, - 444526410U, 779157624U, 1088229627U, 1092460223U, 1856013765U, - 3659877367U, 368270451U, 503570716U, 3000984671U, 2742789647U, - 928097709U, 2914109539U, 308843566U, 2816161253U, 3667192079U, - 2762679057U, 3395240989U, 2928925038U, 1491465914U, 3458702834U, - 3787782576U, 2894104823U, 1296880455U, 1253636503U, 989959407U, - 2291560361U, 2776790436U, 1913178042U, 1584677829U, 689637520U, - 1898406878U, 688391508U, 3385234998U, 845493284U, 1943591856U, - 2720472050U, 222695101U, 1653320868U, 2904632120U, 4084936008U, - 1080720688U, 3938032556U, 387896427U, 2650839632U, 99042991U, - 1720913794U, 1047186003U, 1877048040U, 2090457659U, 517087501U, - 4172014665U, 2129713163U, 2413533132U, 2760285054U, 4129272496U, - 1317737175U, 2309566414U, 2228873332U, 3889671280U, 1110864630U, - 3576797776U, 2074552772U, 832002644U, 3097122623U, 2464859298U, - 2679603822U, 1667489885U, 3237652716U, 1478413938U, 1719340335U, - 2306631119U, 639727358U, 3369698270U, 226902796U, 2099920751U, - 1892289957U, 2201594097U, 3508197013U, 3495811856U, 3900381493U, - 841660320U, 3974501451U, 3360949056U, 1676829340U, 728899254U, - 2047809627U, 2390948962U, 670165943U, 3412951831U, 4189320049U, - 1911595255U, 2055363086U, 507170575U, 418219594U, 4141495280U, - 2692088692U, 4203630654U, 3540093932U, 791986533U, 2237921051U, - 2526864324U, 2956616642U, 1394958700U, 1983768223U, 1893373266U, - 591653646U, 228432437U, 1611046598U, 3007736357U, 1040040725U, - 2726180733U, 2789804360U, 4263568405U, 829098158U, 3847722805U, - 1123578029U, 1804276347U, 997971319U, 4203797076U, 4185199713U, - 2811733626U, 2343642194U, 2985262313U, 1417930827U, 3759587724U, - 1967077982U, 1585223204U, 1097475516U, 1903944948U, 740382444U, - 1114142065U, 1541796065U, 1718384172U, 1544076191U, 1134682254U, - 3519754455U, 2866243923U, 341865437U, 645498576U, 2690735853U, - 1046963033U, 2493178460U, 1187604696U, 1619577821U, 488503634U, - 3255768161U, 2306666149U, 1630514044U, 2377698367U, 2751503746U, - 3794467088U, 1796415981U, 3657173746U, 409136296U, 1387122342U, - 1297726519U, 219544855U, 4270285558U, 437578827U, 1444698679U, - 2258519491U, 963109892U, 3982244073U, 3351535275U, 385328496U, - 1804784013U, 698059346U, 3920535147U, 708331212U, 784338163U, - 785678147U, 1238376158U, 1557298846U, 2037809321U, 271576218U, - 4145155269U, 1913481602U, 2763691931U, 588981080U, 1201098051U, - 3717640232U, 1509206239U, 662536967U, 3180523616U, 1133105435U, - 2963500837U, 2253971215U, 3153642623U, 1066925709U, 2582781958U, - 3034720222U, 1090798544U, 2942170004U, 4036187520U, 686972531U, - 2610990302U, 2641437026U, 1837562420U, 722096247U, 1315333033U, - 2102231203U, 3402389208U, 3403698140U, 1312402831U, 2898426558U, - 814384596U, 385649582U, 1916643285U, 1924625106U, 2512905582U, - 2501170304U, 4275223366U, 2841225246U, 1467663688U, 3563567847U, - 2969208552U, 884750901U, 102992576U, 227844301U, 3681442994U, - 3502881894U, 4034693299U, 1166727018U, 1697460687U, 1737778332U, - 1787161139U, 1053003655U, 1215024478U, 2791616766U, 2525841204U, - 1629323443U, 3233815U, 2003823032U, 3083834263U, 2379264872U, - 3752392312U, 1287475550U, 3770904171U, 3004244617U, 1502117784U, - 918698423U, 2419857538U, 3864502062U, 1751322107U, 2188775056U, - 4018728324U, 983712955U, 440071928U, 3710838677U, 2001027698U, - 3994702151U, 22493119U, 
3584400918U, 3446253670U, 4254789085U, - 1405447860U, 1240245579U, 1800644159U, 1661363424U, 3278326132U, - 3403623451U, 67092802U, 2609352193U, 3914150340U, 1814842761U, - 3610830847U, 591531412U, 3880232807U, 1673505890U, 2585326991U, - 1678544474U, 3148435887U, 3457217359U, 1193226330U, 2816576908U, - 154025329U, 121678860U, 1164915738U, 973873761U, 269116100U, - 52087970U, 744015362U, 498556057U, 94298882U, 1563271621U, - 2383059628U, 4197367290U, 3958472990U, 2592083636U, 2906408439U, - 1097742433U, 3924840517U, 264557272U, 2292287003U, 3203307984U, - 4047038857U, 3820609705U, 2333416067U, 1839206046U, 3600944252U, - 3412254904U, 583538222U, 2390557166U, 4140459427U, 2810357445U, - 226777499U, 2496151295U, 2207301712U, 3283683112U, 611630281U, - 1933218215U, 3315610954U, 3889441987U, 3719454256U, 3957190521U, - 1313998161U, 2365383016U, 3146941060U, 1801206260U, 796124080U, - 2076248581U, 1747472464U, 3254365145U, 595543130U, 3573909503U, - 3758250204U, 2020768540U, 2439254210U, 93368951U, 3155792250U, - 2600232980U, 3709198295U, 3894900440U, 2971850836U, 1578909644U, - 1443493395U, 2581621665U, 3086506297U, 2443465861U, 558107211U, - 1519367835U, 249149686U, 908102264U, 2588765675U, 1232743965U, - 1001330373U, 3561331654U, 2259301289U, 1564977624U, 3835077093U, - 727244906U, 4255738067U, 1214133513U, 2570786021U, 3899704621U, - 1633861986U, 1636979509U, 1438500431U, 58463278U, 2823485629U, - 2297430187U, 2926781924U, 3371352948U, 1864009023U, 2722267973U, - 1444292075U, 437703973U, 1060414512U, 189705863U, 910018135U, - 4077357964U, 884213423U, 2644986052U, 3973488374U, 1187906116U, - 2331207875U, 780463700U, 3713351662U, 3854611290U, 412805574U, - 2978462572U, 2176222820U, 829424696U, 2790788332U, 2750819108U, - 1594611657U, 3899878394U, 3032870364U, 1702887682U, 1948167778U, - 14130042U, 192292500U, 947227076U, 90719497U, 3854230320U, - 784028434U, 2142399787U, 1563449646U, 2844400217U, 819143172U, - 2883302356U, 2328055304U, 1328532246U, 2603885363U, 3375188924U, - 933941291U, 3627039714U, 2129697284U, 2167253953U, 2506905438U, - 1412424497U, 2981395985U, 1418359660U, 2925902456U, 52752784U, - 3713667988U, 3924669405U, 648975707U, 1145520213U, 4018650664U, - 3805915440U, 2380542088U, 2013260958U, 3262572197U, 2465078101U, - 1114540067U, 3728768081U, 2396958768U, 590672271U, 904818725U, - 4263660715U, 700754408U, 1042601829U, 4094111823U, 4274838909U, - 2512692617U, 2774300207U, 2057306915U, 3470942453U, 99333088U, - 1142661026U, 2889931380U, 14316674U, 2201179167U, 415289459U, - 448265759U, 3515142743U, 3254903683U, 246633281U, 1184307224U, - 2418347830U, 2092967314U, 2682072314U, 2558750234U, 2000352263U, - 1544150531U, 399010405U, 1513946097U, 499682937U, 461167460U, - 3045570638U, 1633669705U, 851492362U, 4052801922U, 2055266765U, - 635556996U, 368266356U, 2385737383U, 3218202352U, 2603772408U, - 349178792U, 226482567U, 3102426060U, 3575998268U, 2103001871U, - 3243137071U, 225500688U, 1634718593U, 4283311431U, 4292122923U, - 3842802787U, 811735523U, 105712518U, 663434053U, 1855889273U, - 2847972595U, 1196355421U, 2552150115U, 4254510614U, 3752181265U, - 3430721819U, 3828705396U, 3436287905U, 3441964937U, 4123670631U, - 353001539U, 459496439U, 3799690868U, 1293777660U, 2761079737U, - 498096339U, 3398433374U, 4080378380U, 2304691596U, 2995729055U, - 4134660419U, 3903444024U, 3576494993U, 203682175U, 3321164857U, - 2747963611U, 79749085U, 2992890370U, 1240278549U, 1772175713U, - 2111331972U, 2655023449U, 1683896345U, 2836027212U, 3482868021U, - 2489884874U, 756853961U, 
2298874501U, 4013448667U, 4143996022U, - 2948306858U, 4132920035U, 1283299272U, 995592228U, 3450508595U, - 1027845759U, 1766942720U, 3861411826U, 1446861231U, 95974993U, - 3502263554U, 1487532194U, 601502472U, 4129619129U, 250131773U, - 2050079547U, 3198903947U, 3105589778U, 4066481316U, 3026383978U, - 2276901713U, 365637751U, 2260718426U, 1394775634U, 1791172338U, - 2690503163U, 2952737846U, 1568710462U, 732623190U, 2980358000U, - 1053631832U, 1432426951U, 3229149635U, 1854113985U, 3719733532U, - 3204031934U, 735775531U, 107468620U, 3734611984U, 631009402U, - 3083622457U, 4109580626U, 159373458U, 1301970201U, 4132389302U, - 1293255004U, 847182752U, 4170022737U, 96712900U, 2641406755U, - 1381727755U, 405608287U, 4287919625U, 1703554290U, 3589580244U, - 2911403488U, 2166565U, 2647306451U, 2330535117U, 1200815358U, - 1165916754U, 245060911U, 4040679071U, 3684908771U, 2452834126U, - 2486872773U, 2318678365U, 2940627908U, 1837837240U, 3447897409U, - 4270484676U, 1495388728U, 3754288477U, 4204167884U, 1386977705U, - 2692224733U, 3076249689U, 4109568048U, 4170955115U, 4167531356U, - 4020189950U, 4261855038U, 3036907575U, 3410399885U, 3076395737U, - 1046178638U, 144496770U, 230725846U, 3349637149U, 17065717U, - 2809932048U, 2054581785U, 3608424964U, 3259628808U, 134897388U, - 3743067463U, 257685904U, 3795656590U, 1562468719U, 3589103904U, - 3120404710U, 254684547U, 2653661580U, 3663904795U, 2631942758U, - 1063234347U, 2609732900U, 2332080715U, 3521125233U, 1180599599U, - 1935868586U, 4110970440U, 296706371U, 2128666368U, 1319875791U, - 1570900197U, 3096025483U, 1799882517U, 1928302007U, 1163707758U, - 1244491489U, 3533770203U, 567496053U, 2757924305U, 2781639343U, - 2818420107U, 560404889U, 2619609724U, 4176035430U, 2511289753U, - 2521842019U, 3910553502U, 2926149387U, 3302078172U, 4237118867U, - 330725126U, 367400677U, 888239854U, 545570454U, 4259590525U, - 134343617U, 1102169784U, 1647463719U, 3260979784U, 1518840883U, - 3631537963U, 3342671457U, 1301549147U, 2083739356U, 146593792U, - 3217959080U, 652755743U, 2032187193U, 3898758414U, 1021358093U, - 4037409230U, 2176407931U, 3427391950U, 2883553603U, 985613827U, - 3105265092U, 3423168427U, 3387507672U, 467170288U, 2141266163U, - 3723870208U, 916410914U, 1293987799U, 2652584950U, 769160137U, - 3205292896U, 1561287359U, 1684510084U, 3136055621U, 3765171391U, - 639683232U, 2639569327U, 1218546948U, 4263586685U, 3058215773U, - 2352279820U, 401870217U, 2625822463U, 1529125296U, 2981801895U, - 1191285226U, 4027725437U, 3432700217U, 4098835661U, 971182783U, - 2443861173U, 3881457123U, 3874386651U, 457276199U, 2638294160U, - 4002809368U, 421169044U, 1112642589U, 3076213779U, 3387033971U, - 2499610950U, 3057240914U, 1662679783U, 461224431U, 1168395933U -}; -static const uint32_t init_by_array_32_expected[] = { - 2920711183U, 3885745737U, 3501893680U, 856470934U, 1421864068U, - 277361036U, 1518638004U, 2328404353U, 3355513634U, 64329189U, - 1624587673U, 3508467182U, 2481792141U, 3706480799U, 1925859037U, - 2913275699U, 882658412U, 384641219U, 422202002U, 1873384891U, - 2006084383U, 3924929912U, 1636718106U, 3108838742U, 1245465724U, - 4195470535U, 779207191U, 1577721373U, 1390469554U, 2928648150U, - 121399709U, 3170839019U, 4044347501U, 953953814U, 3821710850U, - 3085591323U, 3666535579U, 3577837737U, 2012008410U, 3565417471U, - 4044408017U, 433600965U, 1637785608U, 1798509764U, 860770589U, - 3081466273U, 3982393409U, 2451928325U, 3437124742U, 4093828739U, - 3357389386U, 2154596123U, 496568176U, 2650035164U, 2472361850U, - 3438299U, 2150366101U, 
1577256676U, 3802546413U, 1787774626U, - 4078331588U, 3706103141U, 170391138U, 3806085154U, 1680970100U, - 1961637521U, 3316029766U, 890610272U, 1453751581U, 1430283664U, - 3051057411U, 3597003186U, 542563954U, 3796490244U, 1690016688U, - 3448752238U, 440702173U, 347290497U, 1121336647U, 2540588620U, - 280881896U, 2495136428U, 213707396U, 15104824U, 2946180358U, - 659000016U, 566379385U, 2614030979U, 2855760170U, 334526548U, - 2315569495U, 2729518615U, 564745877U, 1263517638U, 3157185798U, - 1604852056U, 1011639885U, 2950579535U, 2524219188U, 312951012U, - 1528896652U, 1327861054U, 2846910138U, 3966855905U, 2536721582U, - 855353911U, 1685434729U, 3303978929U, 1624872055U, 4020329649U, - 3164802143U, 1642802700U, 1957727869U, 1792352426U, 3334618929U, - 2631577923U, 3027156164U, 842334259U, 3353446843U, 1226432104U, - 1742801369U, 3552852535U, 3471698828U, 1653910186U, 3380330939U, - 2313782701U, 3351007196U, 2129839995U, 1800682418U, 4085884420U, - 1625156629U, 3669701987U, 615211810U, 3294791649U, 4131143784U, - 2590843588U, 3207422808U, 3275066464U, 561592872U, 3957205738U, - 3396578098U, 48410678U, 3505556445U, 1005764855U, 3920606528U, - 2936980473U, 2378918600U, 2404449845U, 1649515163U, 701203563U, - 3705256349U, 83714199U, 3586854132U, 922978446U, 2863406304U, - 3523398907U, 2606864832U, 2385399361U, 3171757816U, 4262841009U, - 3645837721U, 1169579486U, 3666433897U, 3174689479U, 1457866976U, - 3803895110U, 3346639145U, 1907224409U, 1978473712U, 1036712794U, - 980754888U, 1302782359U, 1765252468U, 459245755U, 3728923860U, - 1512894209U, 2046491914U, 207860527U, 514188684U, 2288713615U, - 1597354672U, 3349636117U, 2357291114U, 3995796221U, 945364213U, - 1893326518U, 3770814016U, 1691552714U, 2397527410U, 967486361U, - 776416472U, 4197661421U, 951150819U, 1852770983U, 4044624181U, - 1399439738U, 4194455275U, 2284037669U, 1550734958U, 3321078108U, - 1865235926U, 2912129961U, 2664980877U, 1357572033U, 2600196436U, - 2486728200U, 2372668724U, 1567316966U, 2374111491U, 1839843570U, - 20815612U, 3727008608U, 3871996229U, 824061249U, 1932503978U, - 3404541726U, 758428924U, 2609331364U, 1223966026U, 1299179808U, - 648499352U, 2180134401U, 880821170U, 3781130950U, 113491270U, - 1032413764U, 4185884695U, 2490396037U, 1201932817U, 4060951446U, - 4165586898U, 1629813212U, 2887821158U, 415045333U, 628926856U, - 2193466079U, 3391843445U, 2227540681U, 1907099846U, 2848448395U, - 1717828221U, 1372704537U, 1707549841U, 2294058813U, 2101214437U, - 2052479531U, 1695809164U, 3176587306U, 2632770465U, 81634404U, - 1603220563U, 644238487U, 302857763U, 897352968U, 2613146653U, - 1391730149U, 4245717312U, 4191828749U, 1948492526U, 2618174230U, - 3992984522U, 2178852787U, 3596044509U, 3445573503U, 2026614616U, - 915763564U, 3415689334U, 2532153403U, 3879661562U, 2215027417U, - 3111154986U, 2929478371U, 668346391U, 1152241381U, 2632029711U, - 3004150659U, 2135025926U, 948690501U, 2799119116U, 4228829406U, - 1981197489U, 4209064138U, 684318751U, 3459397845U, 201790843U, - 4022541136U, 3043635877U, 492509624U, 3263466772U, 1509148086U, - 921459029U, 3198857146U, 705479721U, 3835966910U, 3603356465U, - 576159741U, 1742849431U, 594214882U, 2055294343U, 3634861861U, - 449571793U, 3246390646U, 3868232151U, 1479156585U, 2900125656U, - 2464815318U, 3960178104U, 1784261920U, 18311476U, 3627135050U, - 644609697U, 424968996U, 919890700U, 2986824110U, 816423214U, - 4003562844U, 1392714305U, 1757384428U, 2569030598U, 995949559U, - 3875659880U, 2933807823U, 2752536860U, 2993858466U, 4030558899U, - 2770783427U, 2775406005U, 
2777781742U, 1931292655U, 472147933U, - 3865853827U, 2726470545U, 2668412860U, 2887008249U, 408979190U, - 3578063323U, 3242082049U, 1778193530U, 27981909U, 2362826515U, - 389875677U, 1043878156U, 581653903U, 3830568952U, 389535942U, - 3713523185U, 2768373359U, 2526101582U, 1998618197U, 1160859704U, - 3951172488U, 1098005003U, 906275699U, 3446228002U, 2220677963U, - 2059306445U, 132199571U, 476838790U, 1868039399U, 3097344807U, - 857300945U, 396345050U, 2835919916U, 1782168828U, 1419519470U, - 4288137521U, 819087232U, 596301494U, 872823172U, 1526888217U, - 805161465U, 1116186205U, 2829002754U, 2352620120U, 620121516U, - 354159268U, 3601949785U, 209568138U, 1352371732U, 2145977349U, - 4236871834U, 1539414078U, 3558126206U, 3224857093U, 4164166682U, - 3817553440U, 3301780278U, 2682696837U, 3734994768U, 1370950260U, - 1477421202U, 2521315749U, 1330148125U, 1261554731U, 2769143688U, - 3554756293U, 4235882678U, 3254686059U, 3530579953U, 1215452615U, - 3574970923U, 4057131421U, 589224178U, 1000098193U, 171190718U, - 2521852045U, 2351447494U, 2284441580U, 2646685513U, 3486933563U, - 3789864960U, 1190528160U, 1702536782U, 1534105589U, 4262946827U, - 2726686826U, 3584544841U, 2348270128U, 2145092281U, 2502718509U, - 1027832411U, 3571171153U, 1287361161U, 4011474411U, 3241215351U, - 2419700818U, 971242709U, 1361975763U, 1096842482U, 3271045537U, - 81165449U, 612438025U, 3912966678U, 1356929810U, 733545735U, - 537003843U, 1282953084U, 884458241U, 588930090U, 3930269801U, - 2961472450U, 1219535534U, 3632251943U, 268183903U, 1441240533U, - 3653903360U, 3854473319U, 2259087390U, 2548293048U, 2022641195U, - 2105543911U, 1764085217U, 3246183186U, 482438805U, 888317895U, - 2628314765U, 2466219854U, 717546004U, 2322237039U, 416725234U, - 1544049923U, 1797944973U, 3398652364U, 3111909456U, 485742908U, - 2277491072U, 1056355088U, 3181001278U, 129695079U, 2693624550U, - 1764438564U, 3797785470U, 195503713U, 3266519725U, 2053389444U, - 1961527818U, 3400226523U, 3777903038U, 2597274307U, 4235851091U, - 4094406648U, 2171410785U, 1781151386U, 1378577117U, 654643266U, - 3424024173U, 3385813322U, 679385799U, 479380913U, 681715441U, - 3096225905U, 276813409U, 3854398070U, 2721105350U, 831263315U, - 3276280337U, 2628301522U, 3984868494U, 1466099834U, 2104922114U, - 1412672743U, 820330404U, 3491501010U, 942735832U, 710652807U, - 3972652090U, 679881088U, 40577009U, 3705286397U, 2815423480U, - 3566262429U, 663396513U, 3777887429U, 4016670678U, 404539370U, - 1142712925U, 1140173408U, 2913248352U, 2872321286U, 263751841U, - 3175196073U, 3162557581U, 2878996619U, 75498548U, 3836833140U, - 3284664959U, 1157523805U, 112847376U, 207855609U, 1337979698U, - 1222578451U, 157107174U, 901174378U, 3883717063U, 1618632639U, - 1767889440U, 4264698824U, 1582999313U, 884471997U, 2508825098U, - 3756370771U, 2457213553U, 3565776881U, 3709583214U, 915609601U, - 460833524U, 1091049576U, 85522880U, 2553251U, 132102809U, - 2429882442U, 2562084610U, 1386507633U, 4112471229U, 21965213U, - 1981516006U, 2418435617U, 3054872091U, 4251511224U, 2025783543U, - 1916911512U, 2454491136U, 3938440891U, 3825869115U, 1121698605U, - 3463052265U, 802340101U, 1912886800U, 4031997367U, 3550640406U, - 1596096923U, 610150600U, 431464457U, 2541325046U, 486478003U, - 739704936U, 2862696430U, 3037903166U, 1129749694U, 2611481261U, - 1228993498U, 510075548U, 3424962587U, 2458689681U, 818934833U, - 4233309125U, 1608196251U, 3419476016U, 1858543939U, 2682166524U, - 3317854285U, 631986188U, 3008214764U, 613826412U, 3567358221U, - 3512343882U, 1552467474U, 3316162670U, 
1275841024U, 4142173454U, - 565267881U, 768644821U, 198310105U, 2396688616U, 1837659011U, - 203429334U, 854539004U, 4235811518U, 3338304926U, 3730418692U, - 3852254981U, 3032046452U, 2329811860U, 2303590566U, 2696092212U, - 3894665932U, 145835667U, 249563655U, 1932210840U, 2431696407U, - 3312636759U, 214962629U, 2092026914U, 3020145527U, 4073039873U, - 2739105705U, 1308336752U, 855104522U, 2391715321U, 67448785U, - 547989482U, 854411802U, 3608633740U, 431731530U, 537375589U, - 3888005760U, 696099141U, 397343236U, 1864511780U, 44029739U, - 1729526891U, 1993398655U, 2010173426U, 2591546756U, 275223291U, - 1503900299U, 4217765081U, 2185635252U, 1122436015U, 3550155364U, - 681707194U, 3260479338U, 933579397U, 2983029282U, 2505504587U, - 2667410393U, 2962684490U, 4139721708U, 2658172284U, 2452602383U, - 2607631612U, 1344296217U, 3075398709U, 2949785295U, 1049956168U, - 3917185129U, 2155660174U, 3280524475U, 1503827867U, 674380765U, - 1918468193U, 3843983676U, 634358221U, 2538335643U, 1873351298U, - 3368723763U, 2129144130U, 3203528633U, 3087174986U, 2691698871U, - 2516284287U, 24437745U, 1118381474U, 2816314867U, 2448576035U, - 4281989654U, 217287825U, 165872888U, 2628995722U, 3533525116U, - 2721669106U, 872340568U, 3429930655U, 3309047304U, 3916704967U, - 3270160355U, 1348884255U, 1634797670U, 881214967U, 4259633554U, - 174613027U, 1103974314U, 1625224232U, 2678368291U, 1133866707U, - 3853082619U, 4073196549U, 1189620777U, 637238656U, 930241537U, - 4042750792U, 3842136042U, 2417007212U, 2524907510U, 1243036827U, - 1282059441U, 3764588774U, 1394459615U, 2323620015U, 1166152231U, - 3307479609U, 3849322257U, 3507445699U, 4247696636U, 758393720U, - 967665141U, 1095244571U, 1319812152U, 407678762U, 2640605208U, - 2170766134U, 3663594275U, 4039329364U, 2512175520U, 725523154U, - 2249807004U, 3312617979U, 2414634172U, 1278482215U, 349206484U, - 1573063308U, 1196429124U, 3873264116U, 2400067801U, 268795167U, - 226175489U, 2961367263U, 1968719665U, 42656370U, 1010790699U, - 561600615U, 2422453992U, 3082197735U, 1636700484U, 3977715296U, - 3125350482U, 3478021514U, 2227819446U, 1540868045U, 3061908980U, - 1087362407U, 3625200291U, 361937537U, 580441897U, 1520043666U, - 2270875402U, 1009161260U, 2502355842U, 4278769785U, 473902412U, - 1057239083U, 1905829039U, 1483781177U, 2080011417U, 1207494246U, - 1806991954U, 2194674403U, 3455972205U, 807207678U, 3655655687U, - 674112918U, 195425752U, 3917890095U, 1874364234U, 1837892715U, - 3663478166U, 1548892014U, 2570748714U, 2049929836U, 2167029704U, - 697543767U, 3499545023U, 3342496315U, 1725251190U, 3561387469U, - 2905606616U, 1580182447U, 3934525927U, 4103172792U, 1365672522U, - 1534795737U, 3308667416U, 2841911405U, 3943182730U, 4072020313U, - 3494770452U, 3332626671U, 55327267U, 478030603U, 411080625U, - 3419529010U, 1604767823U, 3513468014U, 570668510U, 913790824U, - 2283967995U, 695159462U, 3825542932U, 4150698144U, 1829758699U, - 202895590U, 1609122645U, 1267651008U, 2910315509U, 2511475445U, - 2477423819U, 3932081579U, 900879979U, 2145588390U, 2670007504U, - 580819444U, 1864996828U, 2526325979U, 1019124258U, 815508628U, - 2765933989U, 1277301341U, 3006021786U, 855540956U, 288025710U, - 1919594237U, 2331223864U, 177452412U, 2475870369U, 2689291749U, - 865194284U, 253432152U, 2628531804U, 2861208555U, 2361597573U, - 1653952120U, 1039661024U, 2159959078U, 3709040440U, 3564718533U, - 2596878672U, 2041442161U, 31164696U, 2662962485U, 3665637339U, - 1678115244U, 2699839832U, 3651968520U, 3521595541U, 458433303U, - 2423096824U, 21831741U, 380011703U, 
2498168716U, 861806087U, - 1673574843U, 4188794405U, 2520563651U, 2632279153U, 2170465525U, - 4171949898U, 3886039621U, 1661344005U, 3424285243U, 992588372U, - 2500984144U, 2993248497U, 3590193895U, 1535327365U, 515645636U, - 131633450U, 3729760261U, 1613045101U, 3254194278U, 15889678U, - 1493590689U, 244148718U, 2991472662U, 1401629333U, 777349878U, - 2501401703U, 4285518317U, 3794656178U, 955526526U, 3442142820U, - 3970298374U, 736025417U, 2737370764U, 1271509744U, 440570731U, - 136141826U, 1596189518U, 923399175U, 257541519U, 3505774281U, - 2194358432U, 2518162991U, 1379893637U, 2667767062U, 3748146247U, - 1821712620U, 3923161384U, 1947811444U, 2392527197U, 4127419685U, - 1423694998U, 4156576871U, 1382885582U, 3420127279U, 3617499534U, - 2994377493U, 4038063986U, 1918458672U, 2983166794U, 4200449033U, - 353294540U, 1609232588U, 243926648U, 2332803291U, 507996832U, - 2392838793U, 4075145196U, 2060984340U, 4287475136U, 88232602U, - 2491531140U, 4159725633U, 2272075455U, 759298618U, 201384554U, - 838356250U, 1416268324U, 674476934U, 90795364U, 141672229U, - 3660399588U, 4196417251U, 3249270244U, 3774530247U, 59587265U, - 3683164208U, 19392575U, 1463123697U, 1882205379U, 293780489U, - 2553160622U, 2933904694U, 675638239U, 2851336944U, 1435238743U, - 2448730183U, 804436302U, 2119845972U, 322560608U, 4097732704U, - 2987802540U, 641492617U, 2575442710U, 4217822703U, 3271835300U, - 2836418300U, 3739921620U, 2138378768U, 2879771855U, 4294903423U, - 3121097946U, 2603440486U, 2560820391U, 1012930944U, 2313499967U, - 584489368U, 3431165766U, 897384869U, 2062537737U, 2847889234U, - 3742362450U, 2951174585U, 4204621084U, 1109373893U, 3668075775U, - 2750138839U, 3518055702U, 733072558U, 4169325400U, 788493625U -}; -static const uint64_t init_gen_rand_64_expected[] = { - QU(16924766246869039260LLU), QU( 8201438687333352714LLU), - QU( 2265290287015001750LLU), QU(18397264611805473832LLU), - QU( 3375255223302384358LLU), QU( 6345559975416828796LLU), - QU(18229739242790328073LLU), QU( 7596792742098800905LLU), - QU( 255338647169685981LLU), QU( 2052747240048610300LLU), - QU(18328151576097299343LLU), QU(12472905421133796567LLU), - QU(11315245349717600863LLU), QU(16594110197775871209LLU), - QU(15708751964632456450LLU), QU(10452031272054632535LLU), - QU(11097646720811454386LLU), QU( 4556090668445745441LLU), - QU(17116187693090663106LLU), QU(14931526836144510645LLU), - QU( 9190752218020552591LLU), QU( 9625800285771901401LLU), - QU(13995141077659972832LLU), QU( 5194209094927829625LLU), - QU( 4156788379151063303LLU), QU( 8523452593770139494LLU), - QU(14082382103049296727LLU), QU( 2462601863986088483LLU), - QU( 3030583461592840678LLU), QU( 5221622077872827681LLU), - QU( 3084210671228981236LLU), QU(13956758381389953823LLU), - QU(13503889856213423831LLU), QU(15696904024189836170LLU), - QU( 4612584152877036206LLU), QU( 6231135538447867881LLU), - QU(10172457294158869468LLU), QU( 6452258628466708150LLU), - QU(14044432824917330221LLU), QU( 370168364480044279LLU), - QU(10102144686427193359LLU), QU( 667870489994776076LLU), - QU( 2732271956925885858LLU), QU(18027788905977284151LLU), - QU(15009842788582923859LLU), QU( 7136357960180199542LLU), - QU(15901736243475578127LLU), QU(16951293785352615701LLU), - QU(10551492125243691632LLU), QU(17668869969146434804LLU), - QU(13646002971174390445LLU), QU( 9804471050759613248LLU), - QU( 5511670439655935493LLU), QU(18103342091070400926LLU), - QU(17224512747665137533LLU), QU(15534627482992618168LLU), - QU( 1423813266186582647LLU), QU(15821176807932930024LLU), - QU( 30323369733607156LLU), 
QU(11599382494723479403LLU), - QU( 653856076586810062LLU), QU( 3176437395144899659LLU), - QU(14028076268147963917LLU), QU(16156398271809666195LLU), - QU( 3166955484848201676LLU), QU( 5746805620136919390LLU), - QU(17297845208891256593LLU), QU(11691653183226428483LLU), - QU(17900026146506981577LLU), QU(15387382115755971042LLU), - QU(16923567681040845943LLU), QU( 8039057517199388606LLU), - QU(11748409241468629263LLU), QU( 794358245539076095LLU), - QU(13438501964693401242LLU), QU(14036803236515618962LLU), - QU( 5252311215205424721LLU), QU(17806589612915509081LLU), - QU( 6802767092397596006LLU), QU(14212120431184557140LLU), - QU( 1072951366761385712LLU), QU(13098491780722836296LLU), - QU( 9466676828710797353LLU), QU(12673056849042830081LLU), - QU(12763726623645357580LLU), QU(16468961652999309493LLU), - QU(15305979875636438926LLU), QU(17444713151223449734LLU), - QU( 5692214267627883674LLU), QU(13049589139196151505LLU), - QU( 880115207831670745LLU), QU( 1776529075789695498LLU), - QU(16695225897801466485LLU), QU(10666901778795346845LLU), - QU( 6164389346722833869LLU), QU( 2863817793264300475LLU), - QU( 9464049921886304754LLU), QU( 3993566636740015468LLU), - QU( 9983749692528514136LLU), QU(16375286075057755211LLU), - QU(16042643417005440820LLU), QU(11445419662923489877LLU), - QU( 7999038846885158836LLU), QU( 6721913661721511535LLU), - QU( 5363052654139357320LLU), QU( 1817788761173584205LLU), - QU(13290974386445856444LLU), QU( 4650350818937984680LLU), - QU( 8219183528102484836LLU), QU( 1569862923500819899LLU), - QU( 4189359732136641860LLU), QU(14202822961683148583LLU), - QU( 4457498315309429058LLU), QU(13089067387019074834LLU), - QU(11075517153328927293LLU), QU(10277016248336668389LLU), - QU( 7070509725324401122LLU), QU(17808892017780289380LLU), - QU(13143367339909287349LLU), QU( 1377743745360085151LLU), - QU( 5749341807421286485LLU), QU(14832814616770931325LLU), - QU( 7688820635324359492LLU), QU(10960474011539770045LLU), - QU( 81970066653179790LLU), QU(12619476072607878022LLU), - QU( 4419566616271201744LLU), QU(15147917311750568503LLU), - QU( 5549739182852706345LLU), QU( 7308198397975204770LLU), - QU(13580425496671289278LLU), QU(17070764785210130301LLU), - QU( 8202832846285604405LLU), QU( 6873046287640887249LLU), - QU( 6927424434308206114LLU), QU( 6139014645937224874LLU), - QU(10290373645978487639LLU), QU(15904261291701523804LLU), - QU( 9628743442057826883LLU), QU(18383429096255546714LLU), - QU( 4977413265753686967LLU), QU( 7714317492425012869LLU), - QU( 9025232586309926193LLU), QU(14627338359776709107LLU), - QU(14759849896467790763LLU), QU(10931129435864423252LLU), - QU( 4588456988775014359LLU), QU(10699388531797056724LLU), - QU( 468652268869238792LLU), QU( 5755943035328078086LLU), - QU( 2102437379988580216LLU), QU( 9986312786506674028LLU), - QU( 2654207180040945604LLU), QU( 8726634790559960062LLU), - QU( 100497234871808137LLU), QU( 2800137176951425819LLU), - QU( 6076627612918553487LLU), QU( 5780186919186152796LLU), - QU( 8179183595769929098LLU), QU( 6009426283716221169LLU), - QU( 2796662551397449358LLU), QU( 1756961367041986764LLU), - QU( 6972897917355606205LLU), QU(14524774345368968243LLU), - QU( 2773529684745706940LLU), QU( 4853632376213075959LLU), - QU( 4198177923731358102LLU), QU( 8271224913084139776LLU), - QU( 2741753121611092226LLU), QU(16782366145996731181LLU), - QU(15426125238972640790LLU), QU(13595497100671260342LLU), - QU( 3173531022836259898LLU), QU( 6573264560319511662LLU), - QU(18041111951511157441LLU), QU( 2351433581833135952LLU), - QU( 3113255578908173487LLU), QU( 
1739371330877858784LLU), - QU(16046126562789165480LLU), QU( 8072101652214192925LLU), - QU(15267091584090664910LLU), QU( 9309579200403648940LLU), - QU( 5218892439752408722LLU), QU(14492477246004337115LLU), - QU(17431037586679770619LLU), QU( 7385248135963250480LLU), - QU( 9580144956565560660LLU), QU( 4919546228040008720LLU), - QU(15261542469145035584LLU), QU(18233297270822253102LLU), - QU( 5453248417992302857LLU), QU( 9309519155931460285LLU), - QU(10342813012345291756LLU), QU(15676085186784762381LLU), - QU(15912092950691300645LLU), QU( 9371053121499003195LLU), - QU( 9897186478226866746LLU), QU(14061858287188196327LLU), - QU( 122575971620788119LLU), QU(12146750969116317754LLU), - QU( 4438317272813245201LLU), QU( 8332576791009527119LLU), - QU(13907785691786542057LLU), QU(10374194887283287467LLU), - QU( 2098798755649059566LLU), QU( 3416235197748288894LLU), - QU( 8688269957320773484LLU), QU( 7503964602397371571LLU), - QU(16724977015147478236LLU), QU( 9461512855439858184LLU), - QU(13259049744534534727LLU), QU( 3583094952542899294LLU), - QU( 8764245731305528292LLU), QU(13240823595462088985LLU), - QU(13716141617617910448LLU), QU(18114969519935960955LLU), - QU( 2297553615798302206LLU), QU( 4585521442944663362LLU), - QU(17776858680630198686LLU), QU( 4685873229192163363LLU), - QU( 152558080671135627LLU), QU(15424900540842670088LLU), - QU(13229630297130024108LLU), QU(17530268788245718717LLU), - QU(16675633913065714144LLU), QU( 3158912717897568068LLU), - QU(15399132185380087288LLU), QU( 7401418744515677872LLU), - QU(13135412922344398535LLU), QU( 6385314346100509511LLU), - QU(13962867001134161139LLU), QU(10272780155442671999LLU), - QU(12894856086597769142LLU), QU(13340877795287554994LLU), - QU(12913630602094607396LLU), QU(12543167911119793857LLU), - QU(17343570372251873096LLU), QU(10959487764494150545LLU), - QU( 6966737953093821128LLU), QU(13780699135496988601LLU), - QU( 4405070719380142046LLU), QU(14923788365607284982LLU), - QU( 2869487678905148380LLU), QU( 6416272754197188403LLU), - QU(15017380475943612591LLU), QU( 1995636220918429487LLU), - QU( 3402016804620122716LLU), QU(15800188663407057080LLU), - QU(11362369990390932882LLU), QU(15262183501637986147LLU), - QU(10239175385387371494LLU), QU( 9352042420365748334LLU), - QU( 1682457034285119875LLU), QU( 1724710651376289644LLU), - QU( 2038157098893817966LLU), QU( 9897825558324608773LLU), - QU( 1477666236519164736LLU), QU(16835397314511233640LLU), - QU(10370866327005346508LLU), QU(10157504370660621982LLU), - QU(12113904045335882069LLU), QU(13326444439742783008LLU), - QU(11302769043000765804LLU), QU(13594979923955228484LLU), - QU(11779351762613475968LLU), QU( 3786101619539298383LLU), - QU( 8021122969180846063LLU), QU(15745904401162500495LLU), - QU(10762168465993897267LLU), QU(13552058957896319026LLU), - QU(11200228655252462013LLU), QU( 5035370357337441226LLU), - QU( 7593918984545500013LLU), QU( 5418554918361528700LLU), - QU( 4858270799405446371LLU), QU( 9974659566876282544LLU), - QU(18227595922273957859LLU), QU( 2772778443635656220LLU), - QU(14285143053182085385LLU), QU( 9939700992429600469LLU), - QU(12756185904545598068LLU), QU( 2020783375367345262LLU), - QU( 57026775058331227LLU), QU( 950827867930065454LLU), - QU( 6602279670145371217LLU), QU( 2291171535443566929LLU), - QU( 5832380724425010313LLU), QU( 1220343904715982285LLU), - QU(17045542598598037633LLU), QU(15460481779702820971LLU), - QU(13948388779949365130LLU), QU(13975040175430829518LLU), - QU(17477538238425541763LLU), QU(11104663041851745725LLU), - QU(15860992957141157587LLU), 
QU(14529434633012950138LLU), - QU( 2504838019075394203LLU), QU( 7512113882611121886LLU), - QU( 4859973559980886617LLU), QU( 1258601555703250219LLU), - QU(15594548157514316394LLU), QU( 4516730171963773048LLU), - QU(11380103193905031983LLU), QU( 6809282239982353344LLU), - QU(18045256930420065002LLU), QU( 2453702683108791859LLU), - QU( 977214582986981460LLU), QU( 2006410402232713466LLU), - QU( 6192236267216378358LLU), QU( 3429468402195675253LLU), - QU(18146933153017348921LLU), QU(17369978576367231139LLU), - QU( 1246940717230386603LLU), QU(11335758870083327110LLU), - QU(14166488801730353682LLU), QU( 9008573127269635732LLU), - QU(10776025389820643815LLU), QU(15087605441903942962LLU), - QU( 1359542462712147922LLU), QU(13898874411226454206LLU), - QU(17911176066536804411LLU), QU( 9435590428600085274LLU), - QU( 294488509967864007LLU), QU( 8890111397567922046LLU), - QU( 7987823476034328778LLU), QU(13263827582440967651LLU), - QU( 7503774813106751573LLU), QU(14974747296185646837LLU), - QU( 8504765037032103375LLU), QU(17340303357444536213LLU), - QU( 7704610912964485743LLU), QU( 8107533670327205061LLU), - QU( 9062969835083315985LLU), QU(16968963142126734184LLU), - QU(12958041214190810180LLU), QU( 2720170147759570200LLU), - QU( 2986358963942189566LLU), QU(14884226322219356580LLU), - QU( 286224325144368520LLU), QU(11313800433154279797LLU), - QU(18366849528439673248LLU), QU(17899725929482368789LLU), - QU( 3730004284609106799LLU), QU( 1654474302052767205LLU), - QU( 5006698007047077032LLU), QU( 8196893913601182838LLU), - QU(15214541774425211640LLU), QU(17391346045606626073LLU), - QU( 8369003584076969089LLU), QU( 3939046733368550293LLU), - QU(10178639720308707785LLU), QU( 2180248669304388697LLU), - QU( 62894391300126322LLU), QU( 9205708961736223191LLU), - QU( 6837431058165360438LLU), QU( 3150743890848308214LLU), - QU(17849330658111464583LLU), QU(12214815643135450865LLU), - QU(13410713840519603402LLU), QU( 3200778126692046802LLU), - QU(13354780043041779313LLU), QU( 800850022756886036LLU), - QU(15660052933953067433LLU), QU( 6572823544154375676LLU), - QU(11030281857015819266LLU), QU(12682241941471433835LLU), - QU(11654136407300274693LLU), QU( 4517795492388641109LLU), - QU( 9757017371504524244LLU), QU(17833043400781889277LLU), - QU(12685085201747792227LLU), QU(10408057728835019573LLU), - QU( 98370418513455221LLU), QU( 6732663555696848598LLU), - QU(13248530959948529780LLU), QU( 3530441401230622826LLU), - QU(18188251992895660615LLU), QU( 1847918354186383756LLU), - QU( 1127392190402660921LLU), QU(11293734643143819463LLU), - QU( 3015506344578682982LLU), QU(13852645444071153329LLU), - QU( 2121359659091349142LLU), QU( 1294604376116677694LLU), - QU( 5616576231286352318LLU), QU( 7112502442954235625LLU), - QU(11676228199551561689LLU), QU(12925182803007305359LLU), - QU( 7852375518160493082LLU), QU( 1136513130539296154LLU), - QU( 5636923900916593195LLU), QU( 3221077517612607747LLU), - QU(17784790465798152513LLU), QU( 3554210049056995938LLU), - QU(17476839685878225874LLU), QU( 3206836372585575732LLU), - QU( 2765333945644823430LLU), QU(10080070903718799528LLU), - QU( 5412370818878286353LLU), QU( 9689685887726257728LLU), - QU( 8236117509123533998LLU), QU( 1951139137165040214LLU), - QU( 4492205209227980349LLU), QU(16541291230861602967LLU), - QU( 1424371548301437940LLU), QU( 9117562079669206794LLU), - QU(14374681563251691625LLU), QU(13873164030199921303LLU), - QU( 6680317946770936731LLU), QU(15586334026918276214LLU), - QU(10896213950976109802LLU), QU( 9506261949596413689LLU), - QU( 9903949574308040616LLU), QU( 
6038397344557204470LLU), - QU( 174601465422373648LLU), QU(15946141191338238030LLU), - QU(17142225620992044937LLU), QU( 7552030283784477064LLU), - QU( 2947372384532947997LLU), QU( 510797021688197711LLU), - QU( 4962499439249363461LLU), QU( 23770320158385357LLU), - QU( 959774499105138124LLU), QU( 1468396011518788276LLU), - QU( 2015698006852312308LLU), QU( 4149400718489980136LLU), - QU( 5992916099522371188LLU), QU(10819182935265531076LLU), - QU(16189787999192351131LLU), QU( 342833961790261950LLU), - QU(12470830319550495336LLU), QU(18128495041912812501LLU), - QU( 1193600899723524337LLU), QU( 9056793666590079770LLU), - QU( 2154021227041669041LLU), QU( 4963570213951235735LLU), - QU( 4865075960209211409LLU), QU( 2097724599039942963LLU), - QU( 2024080278583179845LLU), QU(11527054549196576736LLU), - QU(10650256084182390252LLU), QU( 4808408648695766755LLU), - QU( 1642839215013788844LLU), QU(10607187948250398390LLU), - QU( 7076868166085913508LLU), QU( 730522571106887032LLU), - QU(12500579240208524895LLU), QU( 4484390097311355324LLU), - QU(15145801330700623870LLU), QU( 8055827661392944028LLU), - QU( 5865092976832712268LLU), QU(15159212508053625143LLU), - QU( 3560964582876483341LLU), QU( 4070052741344438280LLU), - QU( 6032585709886855634LLU), QU(15643262320904604873LLU), - QU( 2565119772293371111LLU), QU( 318314293065348260LLU), - QU(15047458749141511872LLU), QU( 7772788389811528730LLU), - QU( 7081187494343801976LLU), QU( 6465136009467253947LLU), - QU(10425940692543362069LLU), QU( 554608190318339115LLU), - QU(14796699860302125214LLU), QU( 1638153134431111443LLU), - QU(10336967447052276248LLU), QU( 8412308070396592958LLU), - QU( 4004557277152051226LLU), QU( 8143598997278774834LLU), - QU(16413323996508783221LLU), QU(13139418758033994949LLU), - QU( 9772709138335006667LLU), QU( 2818167159287157659LLU), - QU(17091740573832523669LLU), QU(14629199013130751608LLU), - QU(18268322711500338185LLU), QU( 8290963415675493063LLU), - QU( 8830864907452542588LLU), QU( 1614839084637494849LLU), - QU(14855358500870422231LLU), QU( 3472996748392519937LLU), - QU(15317151166268877716LLU), QU( 5825895018698400362LLU), - QU(16730208429367544129LLU), QU(10481156578141202800LLU), - QU( 4746166512382823750LLU), QU(12720876014472464998LLU), - QU( 8825177124486735972LLU), QU(13733447296837467838LLU), - QU( 6412293741681359625LLU), QU( 8313213138756135033LLU), - QU(11421481194803712517LLU), QU( 7997007691544174032LLU), - QU( 6812963847917605930LLU), QU( 9683091901227558641LLU), - QU(14703594165860324713LLU), QU( 1775476144519618309LLU), - QU( 2724283288516469519LLU), QU( 717642555185856868LLU), - QU( 8736402192215092346LLU), QU(11878800336431381021LLU), - QU( 4348816066017061293LLU), QU( 6115112756583631307LLU), - QU( 9176597239667142976LLU), QU(12615622714894259204LLU), - QU(10283406711301385987LLU), QU( 5111762509485379420LLU), - QU( 3118290051198688449LLU), QU( 7345123071632232145LLU), - QU( 9176423451688682359LLU), QU( 4843865456157868971LLU), - QU(12008036363752566088LLU), QU(12058837181919397720LLU), - QU( 2145073958457347366LLU), QU( 1526504881672818067LLU), - QU( 3488830105567134848LLU), QU(13208362960674805143LLU), - QU( 4077549672899572192LLU), QU( 7770995684693818365LLU), - QU( 1398532341546313593LLU), QU(12711859908703927840LLU), - QU( 1417561172594446813LLU), QU(17045191024194170604LLU), - QU( 4101933177604931713LLU), QU(14708428834203480320LLU), - QU(17447509264469407724LLU), QU(14314821973983434255LLU), - QU(17990472271061617265LLU), QU( 5087756685841673942LLU), - QU(12797820586893859939LLU), QU( 1778128952671092879LLU), 
- QU( 3535918530508665898LLU), QU( 9035729701042481301LLU), - QU(14808661568277079962LLU), QU(14587345077537747914LLU), - QU(11920080002323122708LLU), QU( 6426515805197278753LLU), - QU( 3295612216725984831LLU), QU(11040722532100876120LLU), - QU(12305952936387598754LLU), QU(16097391899742004253LLU), - QU( 4908537335606182208LLU), QU(12446674552196795504LLU), - QU(16010497855816895177LLU), QU( 9194378874788615551LLU), - QU( 3382957529567613384LLU), QU( 5154647600754974077LLU), - QU( 9801822865328396141LLU), QU( 9023662173919288143LLU), - QU(17623115353825147868LLU), QU( 8238115767443015816LLU), - QU(15811444159859002560LLU), QU( 9085612528904059661LLU), - QU( 6888601089398614254LLU), QU( 258252992894160189LLU), - QU( 6704363880792428622LLU), QU( 6114966032147235763LLU), - QU(11075393882690261875LLU), QU( 8797664238933620407LLU), - QU( 5901892006476726920LLU), QU( 5309780159285518958LLU), - QU(14940808387240817367LLU), QU(14642032021449656698LLU), - QU( 9808256672068504139LLU), QU( 3670135111380607658LLU), - QU(11211211097845960152LLU), QU( 1474304506716695808LLU), - QU(15843166204506876239LLU), QU( 7661051252471780561LLU), - QU(10170905502249418476LLU), QU( 7801416045582028589LLU), - QU( 2763981484737053050LLU), QU( 9491377905499253054LLU), - QU(16201395896336915095LLU), QU( 9256513756442782198LLU), - QU( 5411283157972456034LLU), QU( 5059433122288321676LLU), - QU( 4327408006721123357LLU), QU( 9278544078834433377LLU), - QU( 7601527110882281612LLU), QU(11848295896975505251LLU), - QU(12096998801094735560LLU), QU(14773480339823506413LLU), - QU(15586227433895802149LLU), QU(12786541257830242872LLU), - QU( 6904692985140503067LLU), QU( 5309011515263103959LLU), - QU(12105257191179371066LLU), QU(14654380212442225037LLU), - QU( 2556774974190695009LLU), QU( 4461297399927600261LLU), - QU(14888225660915118646LLU), QU(14915459341148291824LLU), - QU( 2738802166252327631LLU), QU( 6047155789239131512LLU), - QU(12920545353217010338LLU), QU(10697617257007840205LLU), - QU( 2751585253158203504LLU), QU(13252729159780047496LLU), - QU(14700326134672815469LLU), QU(14082527904374600529LLU), - QU(16852962273496542070LLU), QU(17446675504235853907LLU), - QU(15019600398527572311LLU), QU(12312781346344081551LLU), - QU(14524667935039810450LLU), QU( 5634005663377195738LLU), - QU(11375574739525000569LLU), QU( 2423665396433260040LLU), - QU( 5222836914796015410LLU), QU( 4397666386492647387LLU), - QU( 4619294441691707638LLU), QU( 665088602354770716LLU), - QU(13246495665281593610LLU), QU( 6564144270549729409LLU), - QU(10223216188145661688LLU), QU( 3961556907299230585LLU), - QU(11543262515492439914LLU), QU(16118031437285993790LLU), - QU( 7143417964520166465LLU), QU(13295053515909486772LLU), - QU( 40434666004899675LLU), QU(17127804194038347164LLU), - QU( 8599165966560586269LLU), QU( 8214016749011284903LLU), - QU(13725130352140465239LLU), QU( 5467254474431726291LLU), - QU( 7748584297438219877LLU), QU(16933551114829772472LLU), - QU( 2169618439506799400LLU), QU( 2169787627665113463LLU), - QU(17314493571267943764LLU), QU(18053575102911354912LLU), - QU(11928303275378476973LLU), QU(11593850925061715550LLU), - QU(17782269923473589362LLU), QU( 3280235307704747039LLU), - QU( 6145343578598685149LLU), QU(17080117031114086090LLU), - QU(18066839902983594755LLU), QU( 6517508430331020706LLU), - QU( 8092908893950411541LLU), QU(12558378233386153732LLU), - QU( 4476532167973132976LLU), QU(16081642430367025016LLU), - QU( 4233154094369139361LLU), QU( 8693630486693161027LLU), - QU(11244959343027742285LLU), QU(12273503967768513508LLU), - 
QU(14108978636385284876LLU), QU( 7242414665378826984LLU), - QU( 6561316938846562432LLU), QU( 8601038474994665795LLU), - QU(17532942353612365904LLU), QU(17940076637020912186LLU), - QU( 7340260368823171304LLU), QU( 7061807613916067905LLU), - QU(10561734935039519326LLU), QU(17990796503724650862LLU), - QU( 6208732943911827159LLU), QU( 359077562804090617LLU), - QU(14177751537784403113LLU), QU(10659599444915362902LLU), - QU(15081727220615085833LLU), QU(13417573895659757486LLU), - QU(15513842342017811524LLU), QU(11814141516204288231LLU), - QU( 1827312513875101814LLU), QU( 2804611699894603103LLU), - QU(17116500469975602763LLU), QU(12270191815211952087LLU), - QU(12256358467786024988LLU), QU(18435021722453971267LLU), - QU( 671330264390865618LLU), QU( 476504300460286050LLU), - QU(16465470901027093441LLU), QU( 4047724406247136402LLU), - QU( 1322305451411883346LLU), QU( 1388308688834322280LLU), - QU( 7303989085269758176LLU), QU( 9323792664765233642LLU), - QU( 4542762575316368936LLU), QU(17342696132794337618LLU), - QU( 4588025054768498379LLU), QU(13415475057390330804LLU), - QU(17880279491733405570LLU), QU(10610553400618620353LLU), - QU( 3180842072658960139LLU), QU(13002966655454270120LLU), - QU( 1665301181064982826LLU), QU( 7083673946791258979LLU), - QU( 190522247122496820LLU), QU(17388280237250677740LLU), - QU( 8430770379923642945LLU), QU(12987180971921668584LLU), - QU( 2311086108365390642LLU), QU( 2870984383579822345LLU), - QU(14014682609164653318LLU), QU(14467187293062251484LLU), - QU( 192186361147413298LLU), QU(15171951713531796524LLU), - QU( 9900305495015948728LLU), QU(17958004775615466344LLU), - QU(14346380954498606514LLU), QU(18040047357617407096LLU), - QU( 5035237584833424532LLU), QU(15089555460613972287LLU), - QU( 4131411873749729831LLU), QU( 1329013581168250330LLU), - QU(10095353333051193949LLU), QU(10749518561022462716LLU), - QU( 9050611429810755847LLU), QU(15022028840236655649LLU), - QU( 8775554279239748298LLU), QU(13105754025489230502LLU), - QU(15471300118574167585LLU), QU( 89864764002355628LLU), - QU( 8776416323420466637LLU), QU( 5280258630612040891LLU), - QU( 2719174488591862912LLU), QU( 7599309137399661994LLU), - QU(15012887256778039979LLU), QU(14062981725630928925LLU), - QU(12038536286991689603LLU), QU( 7089756544681775245LLU), - QU(10376661532744718039LLU), QU( 1265198725901533130LLU), - QU(13807996727081142408LLU), QU( 2935019626765036403LLU), - QU( 7651672460680700141LLU), QU( 3644093016200370795LLU), - QU( 2840982578090080674LLU), QU(17956262740157449201LLU), - QU(18267979450492880548LLU), QU(11799503659796848070LLU), - QU( 9942537025669672388LLU), QU(11886606816406990297LLU), - QU( 5488594946437447576LLU), QU( 7226714353282744302LLU), - QU( 3784851653123877043LLU), QU( 878018453244803041LLU), - QU(12110022586268616085LLU), QU( 734072179404675123LLU), - QU(11869573627998248542LLU), QU( 469150421297783998LLU), - QU( 260151124912803804LLU), QU(11639179410120968649LLU), - QU( 9318165193840846253LLU), QU(12795671722734758075LLU), - QU(15318410297267253933LLU), QU( 691524703570062620LLU), - QU( 5837129010576994601LLU), QU(15045963859726941052LLU), - QU( 5850056944932238169LLU), QU(12017434144750943807LLU), - QU( 7447139064928956574LLU), QU( 3101711812658245019LLU), - QU(16052940704474982954LLU), QU(18195745945986994042LLU), - QU( 8932252132785575659LLU), QU(13390817488106794834LLU), - QU(11582771836502517453LLU), QU( 4964411326683611686LLU), - QU( 2195093981702694011LLU), QU(14145229538389675669LLU), - QU(16459605532062271798LLU), QU( 866316924816482864LLU), - QU( 
4593041209937286377LLU), QU( 8415491391910972138LLU), - QU( 4171236715600528969LLU), QU(16637569303336782889LLU), - QU( 2002011073439212680LLU), QU(17695124661097601411LLU), - QU( 4627687053598611702LLU), QU( 7895831936020190403LLU), - QU( 8455951300917267802LLU), QU( 2923861649108534854LLU), - QU( 8344557563927786255LLU), QU( 6408671940373352556LLU), - QU(12210227354536675772LLU), QU(14294804157294222295LLU), - QU(10103022425071085127LLU), QU(10092959489504123771LLU), - QU( 6554774405376736268LLU), QU(12629917718410641774LLU), - QU( 6260933257596067126LLU), QU( 2460827021439369673LLU), - QU( 2541962996717103668LLU), QU( 597377203127351475LLU), - QU( 5316984203117315309LLU), QU( 4811211393563241961LLU), - QU(13119698597255811641LLU), QU( 8048691512862388981LLU), - QU(10216818971194073842LLU), QU( 4612229970165291764LLU), - QU(10000980798419974770LLU), QU( 6877640812402540687LLU), - QU( 1488727563290436992LLU), QU( 2227774069895697318LLU), - QU(11237754507523316593LLU), QU(13478948605382290972LLU), - QU( 1963583846976858124LLU), QU( 5512309205269276457LLU), - QU( 3972770164717652347LLU), QU( 3841751276198975037LLU), - QU(10283343042181903117LLU), QU( 8564001259792872199LLU), - QU(16472187244722489221LLU), QU( 8953493499268945921LLU), - QU( 3518747340357279580LLU), QU( 4003157546223963073LLU), - QU( 3270305958289814590LLU), QU( 3966704458129482496LLU), - QU( 8122141865926661939LLU), QU(14627734748099506653LLU), - QU(13064426990862560568LLU), QU( 2414079187889870829LLU), - QU( 5378461209354225306LLU), QU(10841985740128255566LLU), - QU( 538582442885401738LLU), QU( 7535089183482905946LLU), - QU(16117559957598879095LLU), QU( 8477890721414539741LLU), - QU( 1459127491209533386LLU), QU(17035126360733620462LLU), - QU( 8517668552872379126LLU), QU(10292151468337355014LLU), - QU(17081267732745344157LLU), QU(13751455337946087178LLU), - QU(14026945459523832966LLU), QU( 6653278775061723516LLU), - QU(10619085543856390441LLU), QU( 2196343631481122885LLU), - QU(10045966074702826136LLU), QU(10082317330452718282LLU), - QU( 5920859259504831242LLU), QU( 9951879073426540617LLU), - QU( 7074696649151414158LLU), QU(15808193543879464318LLU), - QU( 7385247772746953374LLU), QU( 3192003544283864292LLU), - QU(18153684490917593847LLU), QU(12423498260668568905LLU), - QU(10957758099756378169LLU), QU(11488762179911016040LLU), - QU( 2099931186465333782LLU), QU(11180979581250294432LLU), - QU( 8098916250668367933LLU), QU( 3529200436790763465LLU), - QU(12988418908674681745LLU), QU( 6147567275954808580LLU), - QU( 3207503344604030989LLU), QU(10761592604898615360LLU), - QU( 229854861031893504LLU), QU( 8809853962667144291LLU), - QU(13957364469005693860LLU), QU( 7634287665224495886LLU), - QU(12353487366976556874LLU), QU( 1134423796317152034LLU), - QU( 2088992471334107068LLU), QU( 7393372127190799698LLU), - QU( 1845367839871058391LLU), QU( 207922563987322884LLU), - QU(11960870813159944976LLU), QU(12182120053317317363LLU), - QU(17307358132571709283LLU), QU(13871081155552824936LLU), - QU(18304446751741566262LLU), QU( 7178705220184302849LLU), - QU(10929605677758824425LLU), QU(16446976977835806844LLU), - QU(13723874412159769044LLU), QU( 6942854352100915216LLU), - QU( 1726308474365729390LLU), QU( 2150078766445323155LLU), - QU(15345558947919656626LLU), QU(12145453828874527201LLU), - QU( 2054448620739726849LLU), QU( 2740102003352628137LLU), - QU(11294462163577610655LLU), QU( 756164283387413743LLU), - QU(17841144758438810880LLU), QU(10802406021185415861LLU), - QU( 8716455530476737846LLU), QU( 6321788834517649606LLU), - 
QU(14681322910577468426LLU), QU(17330043563884336387LLU), - QU(12701802180050071614LLU), QU(14695105111079727151LLU), - QU( 5112098511654172830LLU), QU( 4957505496794139973LLU), - QU( 8270979451952045982LLU), QU(12307685939199120969LLU), - QU(12425799408953443032LLU), QU( 8376410143634796588LLU), - QU(16621778679680060464LLU), QU( 3580497854566660073LLU), - QU( 1122515747803382416LLU), QU( 857664980960597599LLU), - QU( 6343640119895925918LLU), QU(12878473260854462891LLU), - QU(10036813920765722626LLU), QU(14451335468363173812LLU), - QU( 5476809692401102807LLU), QU(16442255173514366342LLU), - QU(13060203194757167104LLU), QU(14354124071243177715LLU), - QU(15961249405696125227LLU), QU(13703893649690872584LLU), - QU( 363907326340340064LLU), QU( 6247455540491754842LLU), - QU(12242249332757832361LLU), QU( 156065475679796717LLU), - QU( 9351116235749732355LLU), QU( 4590350628677701405LLU), - QU( 1671195940982350389LLU), QU(13501398458898451905LLU), - QU( 6526341991225002255LLU), QU( 1689782913778157592LLU), - QU( 7439222350869010334LLU), QU(13975150263226478308LLU), - QU(11411961169932682710LLU), QU(17204271834833847277LLU), - QU( 541534742544435367LLU), QU( 6591191931218949684LLU), - QU( 2645454775478232486LLU), QU( 4322857481256485321LLU), - QU( 8477416487553065110LLU), QU(12902505428548435048LLU), - QU( 971445777981341415LLU), QU(14995104682744976712LLU), - QU( 4243341648807158063LLU), QU( 8695061252721927661LLU), - QU( 5028202003270177222LLU), QU( 2289257340915567840LLU), - QU(13870416345121866007LLU), QU(13994481698072092233LLU), - QU( 6912785400753196481LLU), QU( 2278309315841980139LLU), - QU( 4329765449648304839LLU), QU( 5963108095785485298LLU), - QU( 4880024847478722478LLU), QU(16015608779890240947LLU), - QU( 1866679034261393544LLU), QU( 914821179919731519LLU), - QU( 9643404035648760131LLU), QU( 2418114953615593915LLU), - QU( 944756836073702374LLU), QU(15186388048737296834LLU), - QU( 7723355336128442206LLU), QU( 7500747479679599691LLU), - QU(18013961306453293634LLU), QU( 2315274808095756456LLU), - QU(13655308255424029566LLU), QU(17203800273561677098LLU), - QU( 1382158694422087756LLU), QU( 5090390250309588976LLU), - QU( 517170818384213989LLU), QU( 1612709252627729621LLU), - QU( 1330118955572449606LLU), QU( 300922478056709885LLU), - QU(18115693291289091987LLU), QU(13491407109725238321LLU), - QU(15293714633593827320LLU), QU( 5151539373053314504LLU), - QU( 5951523243743139207LLU), QU(14459112015249527975LLU), - QU( 5456113959000700739LLU), QU( 3877918438464873016LLU), - QU(12534071654260163555LLU), QU(15871678376893555041LLU), - QU(11005484805712025549LLU), QU(16353066973143374252LLU), - QU( 4358331472063256685LLU), QU( 8268349332210859288LLU), - QU(12485161590939658075LLU), QU(13955993592854471343LLU), - QU( 5911446886848367039LLU), QU(14925834086813706974LLU), - QU( 6590362597857994805LLU), QU( 1280544923533661875LLU), - QU( 1637756018947988164LLU), QU( 4734090064512686329LLU), - QU(16693705263131485912LLU), QU( 6834882340494360958LLU), - QU( 8120732176159658505LLU), QU( 2244371958905329346LLU), - QU(10447499707729734021LLU), QU( 7318742361446942194LLU), - QU( 8032857516355555296LLU), QU(14023605983059313116LLU), - QU( 1032336061815461376LLU), QU( 9840995337876562612LLU), - QU( 9869256223029203587LLU), QU(12227975697177267636LLU), - QU(12728115115844186033LLU), QU( 7752058479783205470LLU), - QU( 729733219713393087LLU), QU(12954017801239007622LLU) -}; -static const uint64_t init_by_array_64_expected[] = { - QU( 2100341266307895239LLU), QU( 8344256300489757943LLU), - 
QU(15687933285484243894LLU), QU( 8268620370277076319LLU), - QU(12371852309826545459LLU), QU( 8800491541730110238LLU), - QU(18113268950100835773LLU), QU( 2886823658884438119LLU), - QU( 3293667307248180724LLU), QU( 9307928143300172731LLU), - QU( 7688082017574293629LLU), QU( 900986224735166665LLU), - QU( 9977972710722265039LLU), QU( 6008205004994830552LLU), - QU( 546909104521689292LLU), QU( 7428471521869107594LLU), - QU(14777563419314721179LLU), QU(16116143076567350053LLU), - QU( 5322685342003142329LLU), QU( 4200427048445863473LLU), - QU( 4693092150132559146LLU), QU(13671425863759338582LLU), - QU( 6747117460737639916LLU), QU( 4732666080236551150LLU), - QU( 5912839950611941263LLU), QU( 3903717554504704909LLU), - QU( 2615667650256786818LLU), QU(10844129913887006352LLU), - QU(13786467861810997820LLU), QU(14267853002994021570LLU), - QU(13767807302847237439LLU), QU(16407963253707224617LLU), - QU( 4802498363698583497LLU), QU( 2523802839317209764LLU), - QU( 3822579397797475589LLU), QU( 8950320572212130610LLU), - QU( 3745623504978342534LLU), QU(16092609066068482806LLU), - QU( 9817016950274642398LLU), QU(10591660660323829098LLU), - QU(11751606650792815920LLU), QU( 5122873818577122211LLU), - QU(17209553764913936624LLU), QU( 6249057709284380343LLU), - QU(15088791264695071830LLU), QU(15344673071709851930LLU), - QU( 4345751415293646084LLU), QU( 2542865750703067928LLU), - QU(13520525127852368784LLU), QU(18294188662880997241LLU), - QU( 3871781938044881523LLU), QU( 2873487268122812184LLU), - QU(15099676759482679005LLU), QU(15442599127239350490LLU), - QU( 6311893274367710888LLU), QU( 3286118760484672933LLU), - QU( 4146067961333542189LLU), QU(13303942567897208770LLU), - QU( 8196013722255630418LLU), QU( 4437815439340979989LLU), - QU(15433791533450605135LLU), QU( 4254828956815687049LLU), - QU( 1310903207708286015LLU), QU(10529182764462398549LLU), - QU(14900231311660638810LLU), QU( 9727017277104609793LLU), - QU( 1821308310948199033LLU), QU(11628861435066772084LLU), - QU( 9469019138491546924LLU), QU( 3145812670532604988LLU), - QU( 9938468915045491919LLU), QU( 1562447430672662142LLU), - QU(13963995266697989134LLU), QU( 3356884357625028695LLU), - QU( 4499850304584309747LLU), QU( 8456825817023658122LLU), - QU(10859039922814285279LLU), QU( 8099512337972526555LLU), - QU( 348006375109672149LLU), QU(11919893998241688603LLU), - QU( 1104199577402948826LLU), QU(16689191854356060289LLU), - QU(10992552041730168078LLU), QU( 7243733172705465836LLU), - QU( 5668075606180319560LLU), QU(18182847037333286970LLU), - QU( 4290215357664631322LLU), QU( 4061414220791828613LLU), - QU(13006291061652989604LLU), QU( 7140491178917128798LLU), - QU(12703446217663283481LLU), QU( 5500220597564558267LLU), - QU(10330551509971296358LLU), QU(15958554768648714492LLU), - QU( 5174555954515360045LLU), QU( 1731318837687577735LLU), - QU( 3557700801048354857LLU), QU(13764012341928616198LLU), - QU(13115166194379119043LLU), QU( 7989321021560255519LLU), - QU( 2103584280905877040LLU), QU( 9230788662155228488LLU), - QU(16396629323325547654LLU), QU( 657926409811318051LLU), - QU(15046700264391400727LLU), QU( 5120132858771880830LLU), - QU( 7934160097989028561LLU), QU( 6963121488531976245LLU), - QU(17412329602621742089LLU), QU(15144843053931774092LLU), - QU(17204176651763054532LLU), QU(13166595387554065870LLU), - QU( 8590377810513960213LLU), QU( 5834365135373991938LLU), - QU( 7640913007182226243LLU), QU( 3479394703859418425LLU), - QU(16402784452644521040LLU), QU( 4993979809687083980LLU), - QU(13254522168097688865LLU), QU(15643659095244365219LLU), - QU( 
5881437660538424982LLU), QU(11174892200618987379LLU), - QU( 254409966159711077LLU), QU(17158413043140549909LLU), - QU( 3638048789290376272LLU), QU( 1376816930299489190LLU), - QU( 4622462095217761923LLU), QU(15086407973010263515LLU), - QU(13253971772784692238LLU), QU( 5270549043541649236LLU), - QU(11182714186805411604LLU), QU(12283846437495577140LLU), - QU( 5297647149908953219LLU), QU(10047451738316836654LLU), - QU( 4938228100367874746LLU), QU(12328523025304077923LLU), - QU( 3601049438595312361LLU), QU( 9313624118352733770LLU), - QU(13322966086117661798LLU), QU(16660005705644029394LLU), - QU(11337677526988872373LLU), QU(13869299102574417795LLU), - QU(15642043183045645437LLU), QU( 3021755569085880019LLU), - QU( 4979741767761188161LLU), QU(13679979092079279587LLU), - QU( 3344685842861071743LLU), QU(13947960059899588104LLU), - QU( 305806934293368007LLU), QU( 5749173929201650029LLU), - QU(11123724852118844098LLU), QU(15128987688788879802LLU), - QU(15251651211024665009LLU), QU( 7689925933816577776LLU), - QU(16732804392695859449LLU), QU(17087345401014078468LLU), - QU(14315108589159048871LLU), QU( 4820700266619778917LLU), - QU(16709637539357958441LLU), QU( 4936227875177351374LLU), - QU( 2137907697912987247LLU), QU(11628565601408395420LLU), - QU( 2333250549241556786LLU), QU( 5711200379577778637LLU), - QU( 5170680131529031729LLU), QU(12620392043061335164LLU), - QU( 95363390101096078LLU), QU( 5487981914081709462LLU), - QU( 1763109823981838620LLU), QU( 3395861271473224396LLU), - QU( 1300496844282213595LLU), QU( 6894316212820232902LLU), - QU(10673859651135576674LLU), QU( 5911839658857903252LLU), - QU(17407110743387299102LLU), QU( 8257427154623140385LLU), - QU(11389003026741800267LLU), QU( 4070043211095013717LLU), - QU(11663806997145259025LLU), QU(15265598950648798210LLU), - QU( 630585789434030934LLU), QU( 3524446529213587334LLU), - QU( 7186424168495184211LLU), QU(10806585451386379021LLU), - QU(11120017753500499273LLU), QU( 1586837651387701301LLU), - QU(17530454400954415544LLU), QU( 9991670045077880430LLU), - QU( 7550997268990730180LLU), QU( 8640249196597379304LLU), - QU( 3522203892786893823LLU), QU(10401116549878854788LLU), - QU(13690285544733124852LLU), QU( 8295785675455774586LLU), - QU(15535716172155117603LLU), QU( 3112108583723722511LLU), - QU(17633179955339271113LLU), QU(18154208056063759375LLU), - QU( 1866409236285815666LLU), QU(13326075895396412882LLU), - QU( 8756261842948020025LLU), QU( 6281852999868439131LLU), - QU(15087653361275292858LLU), QU(10333923911152949397LLU), - QU( 5265567645757408500LLU), QU(12728041843210352184LLU), - QU( 6347959327507828759LLU), QU( 154112802625564758LLU), - QU(18235228308679780218LLU), QU( 3253805274673352418LLU), - QU( 4849171610689031197LLU), QU(17948529398340432518LLU), - QU(13803510475637409167LLU), QU(13506570190409883095LLU), - QU(15870801273282960805LLU), QU( 8451286481299170773LLU), - QU( 9562190620034457541LLU), QU( 8518905387449138364LLU), - QU(12681306401363385655LLU), QU( 3788073690559762558LLU), - QU( 5256820289573487769LLU), QU( 2752021372314875467LLU), - QU( 6354035166862520716LLU), QU( 4328956378309739069LLU), - QU( 449087441228269600LLU), QU( 5533508742653090868LLU), - QU( 1260389420404746988LLU), QU(18175394473289055097LLU), - QU( 1535467109660399420LLU), QU( 8818894282874061442LLU), - QU(12140873243824811213LLU), QU(15031386653823014946LLU), - QU( 1286028221456149232LLU), QU( 6329608889367858784LLU), - QU( 9419654354945132725LLU), QU( 6094576547061672379LLU), - QU(17706217251847450255LLU), QU( 1733495073065878126LLU), - 
QU(16918923754607552663LLU), QU( 8881949849954945044LLU), - QU(12938977706896313891LLU), QU(14043628638299793407LLU), - QU(18393874581723718233LLU), QU( 6886318534846892044LLU), - QU(14577870878038334081LLU), QU(13541558383439414119LLU), - QU(13570472158807588273LLU), QU(18300760537910283361LLU), - QU( 818368572800609205LLU), QU( 1417000585112573219LLU), - QU(12337533143867683655LLU), QU(12433180994702314480LLU), - QU( 778190005829189083LLU), QU(13667356216206524711LLU), - QU( 9866149895295225230LLU), QU(11043240490417111999LLU), - QU( 1123933826541378598LLU), QU( 6469631933605123610LLU), - QU(14508554074431980040LLU), QU(13918931242962026714LLU), - QU( 2870785929342348285LLU), QU(14786362626740736974LLU), - QU(13176680060902695786LLU), QU( 9591778613541679456LLU), - QU( 9097662885117436706LLU), QU( 749262234240924947LLU), - QU( 1944844067793307093LLU), QU( 4339214904577487742LLU), - QU( 8009584152961946551LLU), QU(16073159501225501777LLU), - QU( 3335870590499306217LLU), QU(17088312653151202847LLU), - QU( 3108893142681931848LLU), QU(16636841767202792021LLU), - QU(10423316431118400637LLU), QU( 8008357368674443506LLU), - QU(11340015231914677875LLU), QU(17687896501594936090LLU), - QU(15173627921763199958LLU), QU( 542569482243721959LLU), - QU(15071714982769812975LLU), QU( 4466624872151386956LLU), - QU( 1901780715602332461LLU), QU( 9822227742154351098LLU), - QU( 1479332892928648780LLU), QU( 6981611948382474400LLU), - QU( 7620824924456077376LLU), QU(14095973329429406782LLU), - QU( 7902744005696185404LLU), QU(15830577219375036920LLU), - QU(10287076667317764416LLU), QU(12334872764071724025LLU), - QU( 4419302088133544331LLU), QU(14455842851266090520LLU), - QU(12488077416504654222LLU), QU( 7953892017701886766LLU), - QU( 6331484925529519007LLU), QU( 4902145853785030022LLU), - QU(17010159216096443073LLU), QU(11945354668653886087LLU), - QU(15112022728645230829LLU), QU(17363484484522986742LLU), - QU( 4423497825896692887LLU), QU( 8155489510809067471LLU), - QU( 258966605622576285LLU), QU( 5462958075742020534LLU), - QU( 6763710214913276228LLU), QU( 2368935183451109054LLU), - QU(14209506165246453811LLU), QU( 2646257040978514881LLU), - QU( 3776001911922207672LLU), QU( 1419304601390147631LLU), - QU(14987366598022458284LLU), QU( 3977770701065815721LLU), - QU( 730820417451838898LLU), QU( 3982991703612885327LLU), - QU( 2803544519671388477LLU), QU(17067667221114424649LLU), - QU( 2922555119737867166LLU), QU( 1989477584121460932LLU), - QU(15020387605892337354LLU), QU( 9293277796427533547LLU), - QU(10722181424063557247LLU), QU(16704542332047511651LLU), - QU( 5008286236142089514LLU), QU(16174732308747382540LLU), - QU(17597019485798338402LLU), QU(13081745199110622093LLU), - QU( 8850305883842258115LLU), QU(12723629125624589005LLU), - QU( 8140566453402805978LLU), QU(15356684607680935061LLU), - QU(14222190387342648650LLU), QU(11134610460665975178LLU), - QU( 1259799058620984266LLU), QU(13281656268025610041LLU), - QU( 298262561068153992LLU), QU(12277871700239212922LLU), - QU(13911297774719779438LLU), QU(16556727962761474934LLU), - QU(17903010316654728010LLU), QU( 9682617699648434744LLU), - QU(14757681836838592850LLU), QU( 1327242446558524473LLU), - QU(11126645098780572792LLU), QU( 1883602329313221774LLU), - QU( 2543897783922776873LLU), QU(15029168513767772842LLU), - QU(12710270651039129878LLU), QU(16118202956069604504LLU), - QU(15010759372168680524LLU), QU( 2296827082251923948LLU), - QU(10793729742623518101LLU), QU(13829764151845413046LLU), - QU(17769301223184451213LLU), QU( 3118268169210783372LLU), - 
QU(17626204544105123127LLU), QU( 7416718488974352644LLU), - QU(10450751996212925994LLU), QU( 9352529519128770586LLU), - QU( 259347569641110140LLU), QU( 8048588892269692697LLU), - QU( 1774414152306494058LLU), QU(10669548347214355622LLU), - QU(13061992253816795081LLU), QU(18432677803063861659LLU), - QU( 8879191055593984333LLU), QU(12433753195199268041LLU), - QU(14919392415439730602LLU), QU( 6612848378595332963LLU), - QU( 6320986812036143628LLU), QU(10465592420226092859LLU), - QU( 4196009278962570808LLU), QU( 3747816564473572224LLU), - QU(17941203486133732898LLU), QU( 2350310037040505198LLU), - QU( 5811779859134370113LLU), QU(10492109599506195126LLU), - QU( 7699650690179541274LLU), QU( 1954338494306022961LLU), - QU(14095816969027231152LLU), QU( 5841346919964852061LLU), - QU(14945969510148214735LLU), QU( 3680200305887550992LLU), - QU( 6218047466131695792LLU), QU( 8242165745175775096LLU), - QU(11021371934053307357LLU), QU( 1265099502753169797LLU), - QU( 4644347436111321718LLU), QU( 3609296916782832859LLU), - QU( 8109807992218521571LLU), QU(18387884215648662020LLU), - QU(14656324896296392902LLU), QU(17386819091238216751LLU), - QU(17788300878582317152LLU), QU( 7919446259742399591LLU), - QU( 4466613134576358004LLU), QU(12928181023667938509LLU), - QU(13147446154454932030LLU), QU(16552129038252734620LLU), - QU( 8395299403738822450LLU), QU(11313817655275361164LLU), - QU( 434258809499511718LLU), QU( 2074882104954788676LLU), - QU( 7929892178759395518LLU), QU( 9006461629105745388LLU), - QU( 5176475650000323086LLU), QU(11128357033468341069LLU), - QU(12026158851559118955LLU), QU(14699716249471156500LLU), - QU( 448982497120206757LLU), QU( 4156475356685519900LLU), - QU( 6063816103417215727LLU), QU(10073289387954971479LLU), - QU( 8174466846138590962LLU), QU( 2675777452363449006LLU), - QU( 9090685420572474281LLU), QU( 6659652652765562060LLU), - QU(12923120304018106621LLU), QU(11117480560334526775LLU), - QU( 937910473424587511LLU), QU( 1838692113502346645LLU), - QU(11133914074648726180LLU), QU( 7922600945143884053LLU), - QU(13435287702700959550LLU), QU( 5287964921251123332LLU), - QU(11354875374575318947LLU), QU(17955724760748238133LLU), - QU(13728617396297106512LLU), QU( 4107449660118101255LLU), - QU( 1210269794886589623LLU), QU(11408687205733456282LLU), - QU( 4538354710392677887LLU), QU(13566803319341319267LLU), - QU(17870798107734050771LLU), QU( 3354318982568089135LLU), - QU( 9034450839405133651LLU), QU(13087431795753424314LLU), - QU( 950333102820688239LLU), QU( 1968360654535604116LLU), - QU(16840551645563314995LLU), QU( 8867501803892924995LLU), - QU(11395388644490626845LLU), QU( 1529815836300732204LLU), - QU(13330848522996608842LLU), QU( 1813432878817504265LLU), - QU( 2336867432693429560LLU), QU(15192805445973385902LLU), - QU( 2528593071076407877LLU), QU( 128459777936689248LLU), - QU( 9976345382867214866LLU), QU( 6208885766767996043LLU), - QU(14982349522273141706LLU), QU( 3099654362410737822LLU), - QU(13776700761947297661LLU), QU( 8806185470684925550LLU), - QU( 8151717890410585321LLU), QU( 640860591588072925LLU), - QU(14592096303937307465LLU), QU( 9056472419613564846LLU), - QU(14861544647742266352LLU), QU(12703771500398470216LLU), - QU( 3142372800384138465LLU), QU( 6201105606917248196LLU), - QU(18337516409359270184LLU), QU(15042268695665115339LLU), - QU(15188246541383283846LLU), QU(12800028693090114519LLU), - QU( 5992859621101493472LLU), QU(18278043971816803521LLU), - QU( 9002773075219424560LLU), QU( 7325707116943598353LLU), - QU( 7930571931248040822LLU), QU( 5645275869617023448LLU), - QU( 
7266107455295958487LLU), QU( 4363664528273524411LLU), - QU(14313875763787479809LLU), QU(17059695613553486802LLU), - QU( 9247761425889940932LLU), QU(13704726459237593128LLU), - QU( 2701312427328909832LLU), QU(17235532008287243115LLU), - QU(14093147761491729538LLU), QU( 6247352273768386516LLU), - QU( 8268710048153268415LLU), QU( 7985295214477182083LLU), - QU(15624495190888896807LLU), QU( 3772753430045262788LLU), - QU( 9133991620474991698LLU), QU( 5665791943316256028LLU), - QU( 7551996832462193473LLU), QU(13163729206798953877LLU), - QU( 9263532074153846374LLU), QU( 1015460703698618353LLU), - QU(17929874696989519390LLU), QU(18257884721466153847LLU), - QU(16271867543011222991LLU), QU( 3905971519021791941LLU), - QU(16814488397137052085LLU), QU( 1321197685504621613LLU), - QU( 2870359191894002181LLU), QU(14317282970323395450LLU), - QU(13663920845511074366LLU), QU( 2052463995796539594LLU), - QU(14126345686431444337LLU), QU( 1727572121947022534LLU), - QU(17793552254485594241LLU), QU( 6738857418849205750LLU), - QU( 1282987123157442952LLU), QU(16655480021581159251LLU), - QU( 6784587032080183866LLU), QU(14726758805359965162LLU), - QU( 7577995933961987349LLU), QU(12539609320311114036LLU), - QU(10789773033385439494LLU), QU( 8517001497411158227LLU), - QU(10075543932136339710LLU), QU(14838152340938811081LLU), - QU( 9560840631794044194LLU), QU(17445736541454117475LLU), - QU(10633026464336393186LLU), QU(15705729708242246293LLU), - QU( 1117517596891411098LLU), QU( 4305657943415886942LLU), - QU( 4948856840533979263LLU), QU(16071681989041789593LLU), - QU(13723031429272486527LLU), QU( 7639567622306509462LLU), - QU(12670424537483090390LLU), QU( 9715223453097197134LLU), - QU( 5457173389992686394LLU), QU( 289857129276135145LLU), - QU(17048610270521972512LLU), QU( 692768013309835485LLU), - QU(14823232360546632057LLU), QU(18218002361317895936LLU), - QU( 3281724260212650204LLU), QU(16453957266549513795LLU), - QU( 8592711109774511881LLU), QU( 929825123473369579LLU), - QU(15966784769764367791LLU), QU( 9627344291450607588LLU), - QU(10849555504977813287LLU), QU( 9234566913936339275LLU), - QU( 6413807690366911210LLU), QU(10862389016184219267LLU), - QU(13842504799335374048LLU), QU( 1531994113376881174LLU), - QU( 2081314867544364459LLU), QU(16430628791616959932LLU), - QU( 8314714038654394368LLU), QU( 9155473892098431813LLU), - QU(12577843786670475704LLU), QU( 4399161106452401017LLU), - QU( 1668083091682623186LLU), QU( 1741383777203714216LLU), - QU( 2162597285417794374LLU), QU(15841980159165218736LLU), - QU( 1971354603551467079LLU), QU( 1206714764913205968LLU), - QU( 4790860439591272330LLU), QU(14699375615594055799LLU), - QU( 8374423871657449988LLU), QU(10950685736472937738LLU), - QU( 697344331343267176LLU), QU(10084998763118059810LLU), - QU(12897369539795983124LLU), QU(12351260292144383605LLU), - QU( 1268810970176811234LLU), QU( 7406287800414582768LLU), - QU( 516169557043807831LLU), QU( 5077568278710520380LLU), - QU( 3828791738309039304LLU), QU( 7721974069946943610LLU), - QU( 3534670260981096460LLU), QU( 4865792189600584891LLU), - QU(16892578493734337298LLU), QU( 9161499464278042590LLU), - QU(11976149624067055931LLU), QU(13219479887277343990LLU), - QU(14161556738111500680LLU), QU(14670715255011223056LLU), - QU( 4671205678403576558LLU), QU(12633022931454259781LLU), - QU(14821376219869187646LLU), QU( 751181776484317028LLU), - QU( 2192211308839047070LLU), QU(11787306362361245189LLU), - QU(10672375120744095707LLU), QU( 4601972328345244467LLU), - QU(15457217788831125879LLU), QU( 8464345256775460809LLU), - 
QU(10191938789487159478LLU), QU( 6184348739615197613LLU), - QU(11425436778806882100LLU), QU( 2739227089124319793LLU), - QU( 461464518456000551LLU), QU( 4689850170029177442LLU), - QU( 6120307814374078625LLU), QU(11153579230681708671LLU), - QU( 7891721473905347926LLU), QU(10281646937824872400LLU), - QU( 3026099648191332248LLU), QU( 8666750296953273818LLU), - QU(14978499698844363232LLU), QU(13303395102890132065LLU), - QU( 8182358205292864080LLU), QU(10560547713972971291LLU), - QU(11981635489418959093LLU), QU( 3134621354935288409LLU), - QU(11580681977404383968LLU), QU(14205530317404088650LLU), - QU( 5997789011854923157LLU), QU(13659151593432238041LLU), - QU(11664332114338865086LLU), QU( 7490351383220929386LLU), - QU( 7189290499881530378LLU), QU(15039262734271020220LLU), - QU( 2057217285976980055LLU), QU( 555570804905355739LLU), - QU(11235311968348555110LLU), QU(13824557146269603217LLU), - QU(16906788840653099693LLU), QU( 7222878245455661677LLU), - QU( 5245139444332423756LLU), QU( 4723748462805674292LLU), - QU(12216509815698568612LLU), QU(17402362976648951187LLU), - QU(17389614836810366768LLU), QU( 4880936484146667711LLU), - QU( 9085007839292639880LLU), QU(13837353458498535449LLU), - QU(11914419854360366677LLU), QU(16595890135313864103LLU), - QU( 6313969847197627222LLU), QU(18296909792163910431LLU), - QU(10041780113382084042LLU), QU( 2499478551172884794LLU), - QU(11057894246241189489LLU), QU( 9742243032389068555LLU), - QU(12838934582673196228LLU), QU(13437023235248490367LLU), - QU(13372420669446163240LLU), QU( 6752564244716909224LLU), - QU( 7157333073400313737LLU), QU(12230281516370654308LLU), - QU( 1182884552219419117LLU), QU( 2955125381312499218LLU), - QU(10308827097079443249LLU), QU( 1337648572986534958LLU), - QU(16378788590020343939LLU), QU( 108619126514420935LLU), - QU( 3990981009621629188LLU), QU( 5460953070230946410LLU), - QU( 9703328329366531883LLU), QU(13166631489188077236LLU), - QU( 1104768831213675170LLU), QU( 3447930458553877908LLU), - QU( 8067172487769945676LLU), QU( 5445802098190775347LLU), - QU( 3244840981648973873LLU), QU(17314668322981950060LLU), - QU( 5006812527827763807LLU), QU(18158695070225526260LLU), - QU( 2824536478852417853LLU), QU(13974775809127519886LLU), - QU( 9814362769074067392LLU), QU(17276205156374862128LLU), - QU(11361680725379306967LLU), QU( 3422581970382012542LLU), - QU(11003189603753241266LLU), QU(11194292945277862261LLU), - QU( 6839623313908521348LLU), QU(11935326462707324634LLU), - QU( 1611456788685878444LLU), QU(13112620989475558907LLU), - QU( 517659108904450427LLU), QU(13558114318574407624LLU), - QU(15699089742731633077LLU), QU( 4988979278862685458LLU), - QU( 8111373583056521297LLU), QU( 3891258746615399627LLU), - QU( 8137298251469718086LLU), QU(12748663295624701649LLU), - QU( 4389835683495292062LLU), QU( 5775217872128831729LLU), - QU( 9462091896405534927LLU), QU( 8498124108820263989LLU), - QU( 8059131278842839525LLU), QU(10503167994254090892LLU), - QU(11613153541070396656LLU), QU(18069248738504647790LLU), - QU( 570657419109768508LLU), QU( 3950574167771159665LLU), - QU( 5514655599604313077LLU), QU( 2908460854428484165LLU), - QU(10777722615935663114LLU), QU(12007363304839279486LLU), - QU( 9800646187569484767LLU), QU( 8795423564889864287LLU), - QU(14257396680131028419LLU), QU( 6405465117315096498LLU), - QU( 7939411072208774878LLU), QU(17577572378528990006LLU), - QU(14785873806715994850LLU), QU(16770572680854747390LLU), - QU(18127549474419396481LLU), QU(11637013449455757750LLU), - QU(14371851933996761086LLU), QU( 3601181063650110280LLU), - QU( 
4126442845019316144LLU), QU(10198287239244320669LLU), - QU(18000169628555379659LLU), QU(18392482400739978269LLU), - QU( 6219919037686919957LLU), QU( 3610085377719446052LLU), - QU( 2513925039981776336LLU), QU(16679413537926716955LLU), - QU(12903302131714909434LLU), QU( 5581145789762985009LLU), - QU(12325955044293303233LLU), QU(17216111180742141204LLU), - QU( 6321919595276545740LLU), QU( 3507521147216174501LLU), - QU( 9659194593319481840LLU), QU(11473976005975358326LLU), - QU(14742730101435987026LLU), QU( 492845897709954780LLU), - QU(16976371186162599676LLU), QU(17712703422837648655LLU), - QU( 9881254778587061697LLU), QU( 8413223156302299551LLU), - QU( 1563841828254089168LLU), QU( 9996032758786671975LLU), - QU( 138877700583772667LLU), QU(13003043368574995989LLU), - QU( 4390573668650456587LLU), QU( 8610287390568126755LLU), - QU(15126904974266642199LLU), QU( 6703637238986057662LLU), - QU( 2873075592956810157LLU), QU( 6035080933946049418LLU), - QU(13382846581202353014LLU), QU( 7303971031814642463LLU), - QU(18418024405307444267LLU), QU( 5847096731675404647LLU), - QU( 4035880699639842500LLU), QU(11525348625112218478LLU), - QU( 3041162365459574102LLU), QU( 2604734487727986558LLU), - QU(15526341771636983145LLU), QU(14556052310697370254LLU), - QU(12997787077930808155LLU), QU( 9601806501755554499LLU), - QU(11349677952521423389LLU), QU(14956777807644899350LLU), - QU(16559736957742852721LLU), QU(12360828274778140726LLU), - QU( 6685373272009662513LLU), QU(16932258748055324130LLU), - QU(15918051131954158508LLU), QU( 1692312913140790144LLU), - QU( 546653826801637367LLU), QU( 5341587076045986652LLU), - QU(14975057236342585662LLU), QU(12374976357340622412LLU), - QU(10328833995181940552LLU), QU(12831807101710443149LLU), - QU(10548514914382545716LLU), QU( 2217806727199715993LLU), - QU(12627067369242845138LLU), QU( 4598965364035438158LLU), - QU( 150923352751318171LLU), QU(14274109544442257283LLU), - QU( 4696661475093863031LLU), QU( 1505764114384654516LLU), - QU(10699185831891495147LLU), QU( 2392353847713620519LLU), - QU( 3652870166711788383LLU), QU( 8640653276221911108LLU), - QU( 3894077592275889704LLU), QU( 4918592872135964845LLU), - QU(16379121273281400789LLU), QU(12058465483591683656LLU), - QU(11250106829302924945LLU), QU( 1147537556296983005LLU), - QU( 6376342756004613268LLU), QU(14967128191709280506LLU), - QU(18007449949790627628LLU), QU( 9497178279316537841LLU), - QU( 7920174844809394893LLU), QU(10037752595255719907LLU), - QU(15875342784985217697LLU), QU(15311615921712850696LLU), - QU( 9552902652110992950LLU), QU(14054979450099721140LLU), - QU( 5998709773566417349LLU), QU(18027910339276320187LLU), - QU( 8223099053868585554LLU), QU( 7842270354824999767LLU), - QU( 4896315688770080292LLU), QU(12969320296569787895LLU), - QU( 2674321489185759961LLU), QU( 4053615936864718439LLU), - QU(11349775270588617578LLU), QU( 4743019256284553975LLU), - QU( 5602100217469723769LLU), QU(14398995691411527813LLU), - QU( 7412170493796825470LLU), QU( 836262406131744846LLU), - QU( 8231086633845153022LLU), QU( 5161377920438552287LLU), - QU( 8828731196169924949LLU), QU(16211142246465502680LLU), - QU( 3307990879253687818LLU), QU( 5193405406899782022LLU), - QU( 8510842117467566693LLU), QU( 6070955181022405365LLU), - QU(14482950231361409799LLU), QU(12585159371331138077LLU), - QU( 3511537678933588148LLU), QU( 2041849474531116417LLU), - QU(10944936685095345792LLU), QU(18303116923079107729LLU), - QU( 2720566371239725320LLU), QU( 4958672473562397622LLU), - QU( 3032326668253243412LLU), QU(13689418691726908338LLU), - QU( 
1895205511728843996LLU), QU( 8146303515271990527LLU), - QU(16507343500056113480LLU), QU( 473996939105902919LLU), - QU( 9897686885246881481LLU), QU(14606433762712790575LLU), - QU( 6732796251605566368LLU), QU( 1399778120855368916LLU), - QU( 935023885182833777LLU), QU(16066282816186753477LLU), - QU( 7291270991820612055LLU), QU(17530230393129853844LLU), - QU(10223493623477451366LLU), QU(15841725630495676683LLU), - QU(17379567246435515824LLU), QU( 8588251429375561971LLU), - QU(18339511210887206423LLU), QU(17349587430725976100LLU), - QU(12244876521394838088LLU), QU( 6382187714147161259LLU), - QU(12335807181848950831LLU), QU(16948885622305460665LLU), - QU(13755097796371520506LLU), QU(14806740373324947801LLU), - QU( 4828699633859287703LLU), QU( 8209879281452301604LLU), - QU(12435716669553736437LLU), QU(13970976859588452131LLU), - QU( 6233960842566773148LLU), QU(12507096267900505759LLU), - QU( 1198713114381279421LLU), QU(14989862731124149015LLU), - QU(15932189508707978949LLU), QU( 2526406641432708722LLU), - QU( 29187427817271982LLU), QU( 1499802773054556353LLU), - QU(10816638187021897173LLU), QU( 5436139270839738132LLU), - QU( 6659882287036010082LLU), QU( 2154048955317173697LLU), - QU(10887317019333757642LLU), QU(16281091802634424955LLU), - QU(10754549879915384901LLU), QU(10760611745769249815LLU), - QU( 2161505946972504002LLU), QU( 5243132808986265107LLU), - QU(10129852179873415416LLU), QU( 710339480008649081LLU), - QU( 7802129453068808528LLU), QU(17967213567178907213LLU), - QU(15730859124668605599LLU), QU(13058356168962376502LLU), - QU( 3701224985413645909LLU), QU(14464065869149109264LLU), - QU( 9959272418844311646LLU), QU(10157426099515958752LLU), - QU(14013736814538268528LLU), QU(17797456992065653951LLU), - QU(17418878140257344806LLU), QU(15457429073540561521LLU), - QU( 2184426881360949378LLU), QU( 2062193041154712416LLU), - QU( 8553463347406931661LLU), QU( 4913057625202871854LLU), - QU( 2668943682126618425LLU), QU(17064444737891172288LLU), - QU( 4997115903913298637LLU), QU(12019402608892327416LLU), - QU(17603584559765897352LLU), QU(11367529582073647975LLU), - QU( 8211476043518436050LLU), QU( 8676849804070323674LLU), - QU(18431829230394475730LLU), QU(10490177861361247904LLU), - QU( 9508720602025651349LLU), QU( 7409627448555722700LLU), - QU( 5804047018862729008LLU), QU(11943858176893142594LLU), - QU(11908095418933847092LLU), QU( 5415449345715887652LLU), - QU( 1554022699166156407LLU), QU( 9073322106406017161LLU), - QU( 7080630967969047082LLU), QU(18049736940860732943LLU), - QU(12748714242594196794LLU), QU( 1226992415735156741LLU), - QU(17900981019609531193LLU), QU(11720739744008710999LLU), - QU( 3006400683394775434LLU), QU(11347974011751996028LLU), - QU( 3316999628257954608LLU), QU( 8384484563557639101LLU), - QU(18117794685961729767LLU), QU( 1900145025596618194LLU), - QU(17459527840632892676LLU), QU( 5634784101865710994LLU), - QU( 7918619300292897158LLU), QU( 3146577625026301350LLU), - QU( 9955212856499068767LLU), QU( 1873995843681746975LLU), - QU( 1561487759967972194LLU), QU( 8322718804375878474LLU), - QU(11300284215327028366LLU), QU( 4667391032508998982LLU), - QU( 9820104494306625580LLU), QU(17922397968599970610LLU), - QU( 1784690461886786712LLU), QU(14940365084341346821LLU), - QU( 5348719575594186181LLU), QU(10720419084507855261LLU), - QU(14210394354145143274LLU), QU( 2426468692164000131LLU), - QU(16271062114607059202LLU), QU(14851904092357070247LLU), - QU( 6524493015693121897LLU), QU( 9825473835127138531LLU), - QU(14222500616268569578LLU), QU(15521484052007487468LLU), - 
QU(14462579404124614699LLU), QU(11012375590820665520LLU), - QU(11625327350536084927LLU), QU(14452017765243785417LLU), - QU( 9989342263518766305LLU), QU( 3640105471101803790LLU), - QU( 4749866455897513242LLU), QU(13963064946736312044LLU), - QU(10007416591973223791LLU), QU(18314132234717431115LLU), - QU( 3286596588617483450LLU), QU( 7726163455370818765LLU), - QU( 7575454721115379328LLU), QU( 5308331576437663422LLU), - QU(18288821894903530934LLU), QU( 8028405805410554106LLU), - QU(15744019832103296628LLU), QU( 149765559630932100LLU), - QU( 6137705557200071977LLU), QU(14513416315434803615LLU), - QU(11665702820128984473LLU), QU( 218926670505601386LLU), - QU( 6868675028717769519LLU), QU(15282016569441512302LLU), - QU( 5707000497782960236LLU), QU( 6671120586555079567LLU), - QU( 2194098052618985448LLU), QU(16849577895477330978LLU), - QU(12957148471017466283LLU), QU( 1997805535404859393LLU), - QU( 1180721060263860490LLU), QU(13206391310193756958LLU), - QU(12980208674461861797LLU), QU( 3825967775058875366LLU), - QU(17543433670782042631LLU), QU( 1518339070120322730LLU), - QU(16344584340890991669LLU), QU( 2611327165318529819LLU), - QU(11265022723283422529LLU), QU( 4001552800373196817LLU), - QU(14509595890079346161LLU), QU( 3528717165416234562LLU), - QU(18153222571501914072LLU), QU( 9387182977209744425LLU), - QU(10064342315985580021LLU), QU(11373678413215253977LLU), - QU( 2308457853228798099LLU), QU( 9729042942839545302LLU), - QU( 7833785471140127746LLU), QU( 6351049900319844436LLU), - QU(14454610627133496067LLU), QU(12533175683634819111LLU), - QU(15570163926716513029LLU), QU(13356980519185762498LLU) -}; - -TEST_BEGIN(test_gen_rand_32) -{ - uint32_t array32[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16)); - uint32_t array32_2[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16)); - int i; - uint32_t r32; - sfmt_t *ctx; - - assert_d_le(get_min_array_size32(), BLOCK_SIZE, - "Array size too small"); - ctx = init_gen_rand(1234); - fill_array32(ctx, array32, BLOCK_SIZE); - fill_array32(ctx, array32_2, BLOCK_SIZE); - fini_gen_rand(ctx); - - ctx = init_gen_rand(1234); - for (i = 0; i < BLOCK_SIZE; i++) { - if (i < COUNT_1) { - assert_u32_eq(array32[i], init_gen_rand_32_expected[i], - "Output mismatch for i=%d", i); - } - r32 = gen_rand32(ctx); - assert_u32_eq(r32, array32[i], - "Mismatch at array32[%d]=%x, gen=%x", i, array32[i], r32); - } - for (i = 0; i < COUNT_2; i++) { - r32 = gen_rand32(ctx); - assert_u32_eq(r32, array32_2[i], - "Mismatch at array32_2[%d]=%x, gen=%x", i, array32_2[i], - r32); - } - fini_gen_rand(ctx); -} -TEST_END - -TEST_BEGIN(test_by_array_32) -{ - uint32_t array32[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16)); - uint32_t array32_2[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16)); - int i; - uint32_t ini[4] = {0x1234, 0x5678, 0x9abc, 0xdef0}; - uint32_t r32; - sfmt_t *ctx; - - assert_d_le(get_min_array_size32(), BLOCK_SIZE, - "Array size too small"); - ctx = init_by_array(ini, 4); - fill_array32(ctx, array32, BLOCK_SIZE); - fill_array32(ctx, array32_2, BLOCK_SIZE); - fini_gen_rand(ctx); - - ctx = init_by_array(ini, 4); - for (i = 0; i < BLOCK_SIZE; i++) { - if (i < COUNT_1) { - assert_u32_eq(array32[i], init_by_array_32_expected[i], - "Output mismatch for i=%d", i); - } - r32 = gen_rand32(ctx); - assert_u32_eq(r32, array32[i], - "Mismatch at array32[%d]=%x, gen=%x", i, array32[i], r32); - } - for (i = 0; i < COUNT_2; i++) { - r32 = gen_rand32(ctx); - assert_u32_eq(r32, array32_2[i], - "Mismatch at array32_2[%d]=%x, gen=%x", i, array32_2[i], - r32); - } - fini_gen_rand(ctx); -} -TEST_END - -TEST_BEGIN(test_gen_rand_64) -{ - 
uint64_t array64[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16)); - uint64_t array64_2[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16)); - int i; - uint64_t r; - sfmt_t *ctx; - - assert_d_le(get_min_array_size64(), BLOCK_SIZE64, - "Array size too small"); - ctx = init_gen_rand(4321); - fill_array64(ctx, array64, BLOCK_SIZE64); - fill_array64(ctx, array64_2, BLOCK_SIZE64); - fini_gen_rand(ctx); - - ctx = init_gen_rand(4321); - for (i = 0; i < BLOCK_SIZE64; i++) { - if (i < COUNT_1) { - assert_u64_eq(array64[i], init_gen_rand_64_expected[i], - "Output mismatch for i=%d", i); - } - r = gen_rand64(ctx); - assert_u64_eq(r, array64[i], - "Mismatch at array64[%d]=%"PRIx64", gen=%"PRIx64, i, - array64[i], r); - } - for (i = 0; i < COUNT_2; i++) { - r = gen_rand64(ctx); - assert_u64_eq(r, array64_2[i], - "Mismatch at array64_2[%d]=%"PRIx64" gen=%"PRIx64"", i, - array64_2[i], r); - } - fini_gen_rand(ctx); -} -TEST_END - -TEST_BEGIN(test_by_array_64) -{ - uint64_t array64[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16)); - uint64_t array64_2[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16)); - int i; - uint64_t r; - uint32_t ini[] = {5, 4, 3, 2, 1}; - sfmt_t *ctx; - - assert_d_le(get_min_array_size64(), BLOCK_SIZE64, - "Array size too small"); - ctx = init_by_array(ini, 5); - fill_array64(ctx, array64, BLOCK_SIZE64); - fill_array64(ctx, array64_2, BLOCK_SIZE64); - fini_gen_rand(ctx); - - ctx = init_by_array(ini, 5); - for (i = 0; i < BLOCK_SIZE64; i++) { - if (i < COUNT_1) { - assert_u64_eq(array64[i], init_by_array_64_expected[i], - "Output mismatch for i=%d", i); - } - r = gen_rand64(ctx); - assert_u64_eq(r, array64[i], - "Mismatch at array64[%d]=%"PRIx64" gen=%"PRIx64, i, - array64[i], r); - } - for (i = 0; i < COUNT_2; i++) { - r = gen_rand64(ctx); - assert_u64_eq(r, array64_2[i], - "Mismatch at array64_2[%d]=%"PRIx64" gen=%"PRIx64, i, - array64_2[i], r); - } - fini_gen_rand(ctx); -} -TEST_END - -int -main(void) -{ - - return (test( - test_gen_rand_32, - test_by_array_32, - test_gen_rand_64, - test_by_array_64)); -} diff --git a/lib/jemalloc-3.6.0/test/unit/bitmap.c b/lib/jemalloc-3.6.0/test/unit/bitmap.c deleted file mode 100644 index 8086b88..0000000 --- a/lib/jemalloc-3.6.0/test/unit/bitmap.c +++ /dev/null @@ -1,165 +0,0 @@ -#include "test/jemalloc_test.h" - -#if (LG_BITMAP_MAXBITS > 12) -# define MAXBITS 4500 -#else -# define MAXBITS (1U << LG_BITMAP_MAXBITS) -#endif - -TEST_BEGIN(test_bitmap_size) -{ - size_t i, prev_size; - - prev_size = 0; - for (i = 1; i <= MAXBITS; i++) { - size_t size = bitmap_size(i); - assert_true(size >= prev_size, - "Bitmap size is smaller than expected"); - prev_size = size; - } -} -TEST_END - -TEST_BEGIN(test_bitmap_init) -{ - size_t i; - - for (i = 1; i <= MAXBITS; i++) { - bitmap_info_t binfo; - bitmap_info_init(&binfo, i); - { - size_t j; - bitmap_t *bitmap = malloc(sizeof(bitmap_t) * - bitmap_info_ngroups(&binfo)); - bitmap_init(bitmap, &binfo); - - for (j = 0; j < i; j++) { - assert_false(bitmap_get(bitmap, &binfo, j), - "Bit should be unset"); - } - free(bitmap); - } - } -} -TEST_END - -TEST_BEGIN(test_bitmap_set) -{ - size_t i; - - for (i = 1; i <= MAXBITS; i++) { - bitmap_info_t binfo; - bitmap_info_init(&binfo, i); - { - size_t j; - bitmap_t *bitmap = malloc(sizeof(bitmap_t) * - bitmap_info_ngroups(&binfo)); - bitmap_init(bitmap, &binfo); - - for (j = 0; j < i; j++) - bitmap_set(bitmap, &binfo, j); - assert_true(bitmap_full(bitmap, &binfo), - "All bits should be set"); - free(bitmap); - } - } -} -TEST_END - -TEST_BEGIN(test_bitmap_unset) -{ - size_t i; - - for (i = 1; i <= MAXBITS; i++) 
{ - bitmap_info_t binfo; - bitmap_info_init(&binfo, i); - { - size_t j; - bitmap_t *bitmap = malloc(sizeof(bitmap_t) * - bitmap_info_ngroups(&binfo)); - bitmap_init(bitmap, &binfo); - - for (j = 0; j < i; j++) - bitmap_set(bitmap, &binfo, j); - assert_true(bitmap_full(bitmap, &binfo), - "All bits should be set"); - for (j = 0; j < i; j++) - bitmap_unset(bitmap, &binfo, j); - for (j = 0; j < i; j++) - bitmap_set(bitmap, &binfo, j); - assert_true(bitmap_full(bitmap, &binfo), - "All bits should be set"); - free(bitmap); - } - } -} -TEST_END - -TEST_BEGIN(test_bitmap_sfu) -{ - size_t i; - - for (i = 1; i <= MAXBITS; i++) { - bitmap_info_t binfo; - bitmap_info_init(&binfo, i); - { - ssize_t j; - bitmap_t *bitmap = malloc(sizeof(bitmap_t) * - bitmap_info_ngroups(&binfo)); - bitmap_init(bitmap, &binfo); - - /* Iteratively set bits starting at the beginning. */ - for (j = 0; j < i; j++) { - assert_zd_eq(bitmap_sfu(bitmap, &binfo), j, - "First unset bit should be just after " - "previous first unset bit"); - } - assert_true(bitmap_full(bitmap, &binfo), - "All bits should be set"); - - /* - * Iteratively unset bits starting at the end, and - * verify that bitmap_sfu() reaches the unset bits. - */ - for (j = i - 1; j >= 0; j--) { - bitmap_unset(bitmap, &binfo, j); - assert_zd_eq(bitmap_sfu(bitmap, &binfo), j, - "First unset bit should the bit previously " - "unset"); - bitmap_unset(bitmap, &binfo, j); - } - assert_false(bitmap_get(bitmap, &binfo, 0), - "Bit should be unset"); - - /* - * Iteratively set bits starting at the beginning, and - * verify that bitmap_sfu() looks past them. - */ - for (j = 1; j < i; j++) { - bitmap_set(bitmap, &binfo, j - 1); - assert_zd_eq(bitmap_sfu(bitmap, &binfo), j, - "First unset bit should be just after the " - "bit previously set"); - bitmap_unset(bitmap, &binfo, j); - } - assert_zd_eq(bitmap_sfu(bitmap, &binfo), i - 1, - "First unset bit should be the last bit"); - assert_true(bitmap_full(bitmap, &binfo), - "All bits should be set"); - free(bitmap); - } - } -} -TEST_END - -int -main(void) -{ - - return (test( - test_bitmap_size, - test_bitmap_init, - test_bitmap_set, - test_bitmap_unset, - test_bitmap_sfu)); -} diff --git a/lib/jemalloc-3.6.0/test/unit/ckh.c b/lib/jemalloc-3.6.0/test/unit/ckh.c deleted file mode 100644 index b214c27..0000000 --- a/lib/jemalloc-3.6.0/test/unit/ckh.c +++ /dev/null @@ -1,206 +0,0 @@ -#include "test/jemalloc_test.h" - -TEST_BEGIN(test_new_delete) -{ - ckh_t ckh; - - assert_false(ckh_new(&ckh, 2, ckh_string_hash, ckh_string_keycomp), - "Unexpected ckh_new() error"); - ckh_delete(&ckh); - - assert_false(ckh_new(&ckh, 3, ckh_pointer_hash, ckh_pointer_keycomp), - "Unexpected ckh_new() error"); - ckh_delete(&ckh); -} -TEST_END - -TEST_BEGIN(test_count_insert_search_remove) -{ - ckh_t ckh; - const char *strs[] = { - "a string", - "A string", - "a string.", - "A string." - }; - const char *missing = "A string not in the hash table."; - size_t i; - - assert_false(ckh_new(&ckh, 2, ckh_string_hash, ckh_string_keycomp), - "Unexpected ckh_new() error"); - assert_zu_eq(ckh_count(&ckh), 0, - "ckh_count() should return %zu, but it returned %zu", ZU(0), - ckh_count(&ckh)); - - /* Insert. */ - for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) { - ckh_insert(&ckh, strs[i], strs[i]); - assert_zu_eq(ckh_count(&ckh), i+1, - "ckh_count() should return %zu, but it returned %zu", i+1, - ckh_count(&ckh)); - } - - /* Search. 
*/ - for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) { - union { - void *p; - const char *s; - } k, v; - void **kp, **vp; - const char *ks, *vs; - - kp = (i & 1) ? &k.p : NULL; - vp = (i & 2) ? &v.p : NULL; - k.p = NULL; - v.p = NULL; - assert_false(ckh_search(&ckh, strs[i], kp, vp), - "Unexpected ckh_search() error"); - - ks = (i & 1) ? strs[i] : (const char *)NULL; - vs = (i & 2) ? strs[i] : (const char *)NULL; - assert_ptr_eq((void *)ks, (void *)k.s, - "Key mismatch, i=%zu", i); - assert_ptr_eq((void *)vs, (void *)v.s, - "Value mismatch, i=%zu", i); - } - assert_true(ckh_search(&ckh, missing, NULL, NULL), - "Unexpected ckh_search() success"); - - /* Remove. */ - for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) { - union { - void *p; - const char *s; - } k, v; - void **kp, **vp; - const char *ks, *vs; - - kp = (i & 1) ? &k.p : NULL; - vp = (i & 2) ? &v.p : NULL; - k.p = NULL; - v.p = NULL; - assert_false(ckh_remove(&ckh, strs[i], kp, vp), - "Unexpected ckh_remove() error"); - - ks = (i & 1) ? strs[i] : (const char *)NULL; - vs = (i & 2) ? strs[i] : (const char *)NULL; - assert_ptr_eq((void *)ks, (void *)k.s, - "Key mismatch, i=%zu", i); - assert_ptr_eq((void *)vs, (void *)v.s, - "Value mismatch, i=%zu", i); - assert_zu_eq(ckh_count(&ckh), - sizeof(strs)/sizeof(const char *) - i - 1, - "ckh_count() should return %zu, but it returned %zu", - sizeof(strs)/sizeof(const char *) - i - 1, - ckh_count(&ckh)); - } - - ckh_delete(&ckh); -} -TEST_END - -TEST_BEGIN(test_insert_iter_remove) -{ -#define NITEMS ZU(1000) - ckh_t ckh; - void **p[NITEMS]; - void *q, *r; - size_t i; - - assert_false(ckh_new(&ckh, 2, ckh_pointer_hash, ckh_pointer_keycomp), - "Unexpected ckh_new() error"); - - for (i = 0; i < NITEMS; i++) { - p[i] = mallocx(i+1, 0); - assert_ptr_not_null(p[i], "Unexpected mallocx() failure"); - } - - for (i = 0; i < NITEMS; i++) { - size_t j; - - for (j = i; j < NITEMS; j++) { - assert_false(ckh_insert(&ckh, p[j], p[j]), - "Unexpected ckh_insert() failure"); - assert_false(ckh_search(&ckh, p[j], &q, &r), - "Unexpected ckh_search() failure"); - assert_ptr_eq(p[j], q, "Key pointer mismatch"); - assert_ptr_eq(p[j], r, "Value pointer mismatch"); - } - - assert_zu_eq(ckh_count(&ckh), NITEMS, - "ckh_count() should return %zu, but it returned %zu", - NITEMS, ckh_count(&ckh)); - - for (j = i + 1; j < NITEMS; j++) { - assert_false(ckh_search(&ckh, p[j], NULL, NULL), - "Unexpected ckh_search() failure"); - assert_false(ckh_remove(&ckh, p[j], &q, &r), - "Unexpected ckh_remove() failure"); - assert_ptr_eq(p[j], q, "Key pointer mismatch"); - assert_ptr_eq(p[j], r, "Value pointer mismatch"); - assert_true(ckh_search(&ckh, p[j], NULL, NULL), - "Unexpected ckh_search() success"); - assert_true(ckh_remove(&ckh, p[j], &q, &r), - "Unexpected ckh_remove() success"); - } - - { - bool seen[NITEMS]; - size_t tabind; - - memset(seen, 0, sizeof(seen)); - - for (tabind = 0; ckh_iter(&ckh, &tabind, &q, &r) == - false;) { - size_t k; - - assert_ptr_eq(q, r, "Key and val not equal"); - - for (k = 0; k < NITEMS; k++) { - if (p[k] == q) { - assert_false(seen[k], - "Item %zu already seen", k); - seen[k] = true; - break; - } - } - } - - for (j = 0; j < i + 1; j++) - assert_true(seen[j], "Item %zu not seen", j); - for (; j < NITEMS; j++) - assert_false(seen[j], "Item %zu seen", j); - } - } - - for (i = 0; i < NITEMS; i++) { - assert_false(ckh_search(&ckh, p[i], NULL, NULL), - "Unexpected ckh_search() failure"); - assert_false(ckh_remove(&ckh, p[i], &q, &r), - "Unexpected ckh_remove() failure"); - 
assert_ptr_eq(p[i], q, "Key pointer mismatch"); - assert_ptr_eq(p[i], r, "Value pointer mismatch"); - assert_true(ckh_search(&ckh, p[i], NULL, NULL), - "Unexpected ckh_search() success"); - assert_true(ckh_remove(&ckh, p[i], &q, &r), - "Unexpected ckh_remove() success"); - dallocx(p[i], 0); - } - - assert_zu_eq(ckh_count(&ckh), 0, - "ckh_count() should return %zu, but it returned %zu", ZU(0), - ckh_count(&ckh)); - ckh_delete(&ckh); -#undef NITEMS -} -TEST_END - -int -main(void) -{ - - return (test( - test_new_delete, - test_count_insert_search_remove, - test_insert_iter_remove)); -} diff --git a/lib/jemalloc-3.6.0/test/unit/hash.c b/lib/jemalloc-3.6.0/test/unit/hash.c deleted file mode 100644 index abb394a..0000000 --- a/lib/jemalloc-3.6.0/test/unit/hash.c +++ /dev/null @@ -1,171 +0,0 @@ -/* - * This file is based on code that is part of SMHasher - * (https://code.google.com/p/smhasher/), and is subject to the MIT license - * (http://www.opensource.org/licenses/mit-license.php). Both email addresses - * associated with the source code's revision history belong to Austin Appleby, - * and the revision history ranges from 2010 to 2012. Therefore the copyright - * and license are here taken to be: - * - * Copyright (c) 2010-2012 Austin Appleby - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ - -#include "test/jemalloc_test.h" - -typedef enum { - hash_variant_x86_32, - hash_variant_x86_128, - hash_variant_x64_128 -} hash_variant_t; - -static size_t -hash_variant_bits(hash_variant_t variant) -{ - - switch (variant) { - case hash_variant_x86_32: return (32); - case hash_variant_x86_128: return (128); - case hash_variant_x64_128: return (128); - default: not_reached(); - } -} - -static const char * -hash_variant_string(hash_variant_t variant) -{ - - switch (variant) { - case hash_variant_x86_32: return ("hash_x86_32"); - case hash_variant_x86_128: return ("hash_x86_128"); - case hash_variant_x64_128: return ("hash_x64_128"); - default: not_reached(); - } -} - -static void -hash_variant_verify(hash_variant_t variant) -{ - const size_t hashbytes = hash_variant_bits(variant) / 8; - uint8_t key[256]; - uint8_t hashes[hashbytes * 256]; - uint8_t final[hashbytes]; - unsigned i; - uint32_t computed, expected; - - memset(key, 0, sizeof(key)); - memset(hashes, 0, sizeof(hashes)); - memset(final, 0, sizeof(final)); - - /* - * Hash keys of the form {0}, {0,1}, {0,1,2}, ..., {0,1,...,255} as the - * seed. 
- */ - for (i = 0; i < 256; i++) { - key[i] = (uint8_t)i; - switch (variant) { - case hash_variant_x86_32: { - uint32_t out; - out = hash_x86_32(key, i, 256-i); - memcpy(&hashes[i*hashbytes], &out, hashbytes); - break; - } case hash_variant_x86_128: { - uint64_t out[2]; - hash_x86_128(key, i, 256-i, out); - memcpy(&hashes[i*hashbytes], out, hashbytes); - break; - } case hash_variant_x64_128: { - uint64_t out[2]; - hash_x64_128(key, i, 256-i, out); - memcpy(&hashes[i*hashbytes], out, hashbytes); - break; - } default: not_reached(); - } - } - - /* Hash the result array. */ - switch (variant) { - case hash_variant_x86_32: { - uint32_t out = hash_x86_32(hashes, hashbytes*256, 0); - memcpy(final, &out, sizeof(out)); - break; - } case hash_variant_x86_128: { - uint64_t out[2]; - hash_x86_128(hashes, hashbytes*256, 0, out); - memcpy(final, out, sizeof(out)); - break; - } case hash_variant_x64_128: { - uint64_t out[2]; - hash_x64_128(hashes, hashbytes*256, 0, out); - memcpy(final, out, sizeof(out)); - break; - } default: not_reached(); - } - - computed = (final[0] << 0) | (final[1] << 8) | (final[2] << 16) | - (final[3] << 24); - - switch (variant) { -#ifdef JEMALLOC_BIG_ENDIAN - case hash_variant_x86_32: expected = 0x6213303eU; break; - case hash_variant_x86_128: expected = 0x266820caU; break; - case hash_variant_x64_128: expected = 0xcc622b6fU; break; -#else - case hash_variant_x86_32: expected = 0xb0f57ee3U; break; - case hash_variant_x86_128: expected = 0xb3ece62aU; break; - case hash_variant_x64_128: expected = 0x6384ba69U; break; -#endif - default: not_reached(); - } - - assert_u32_eq(computed, expected, - "Hash mismatch for %s(): expected %#x but got %#x", - hash_variant_string(variant), expected, computed); -} - -TEST_BEGIN(test_hash_x86_32) -{ - - hash_variant_verify(hash_variant_x86_32); -} -TEST_END - -TEST_BEGIN(test_hash_x86_128) -{ - - hash_variant_verify(hash_variant_x86_128); -} -TEST_END - -TEST_BEGIN(test_hash_x64_128) -{ - - hash_variant_verify(hash_variant_x64_128); -} -TEST_END - -int -main(void) -{ - - return (test( - test_hash_x86_32, - test_hash_x86_128, - test_hash_x64_128)); -} diff --git a/lib/jemalloc-3.6.0/test/unit/junk.c b/lib/jemalloc-3.6.0/test/unit/junk.c deleted file mode 100644 index 85bbf9e..0000000 --- a/lib/jemalloc-3.6.0/test/unit/junk.c +++ /dev/null @@ -1,222 +0,0 @@ -#include "test/jemalloc_test.h" - -#ifdef JEMALLOC_FILL -const char *malloc_conf = - "abort:false,junk:true,zero:false,redzone:true,quarantine:0"; -#endif - -static arena_dalloc_junk_small_t *arena_dalloc_junk_small_orig; -static arena_dalloc_junk_large_t *arena_dalloc_junk_large_orig; -static huge_dalloc_junk_t *huge_dalloc_junk_orig; -static void *most_recently_junked; - -static void -arena_dalloc_junk_small_intercept(void *ptr, arena_bin_info_t *bin_info) -{ - size_t i; - - arena_dalloc_junk_small_orig(ptr, bin_info); - for (i = 0; i < bin_info->reg_size; i++) { - assert_c_eq(((char *)ptr)[i], 0x5a, - "Missing junk fill for byte %zu/%zu of deallocated region", - i, bin_info->reg_size); - } - most_recently_junked = ptr; -} - -static void -arena_dalloc_junk_large_intercept(void *ptr, size_t usize) -{ - size_t i; - - arena_dalloc_junk_large_orig(ptr, usize); - for (i = 0; i < usize; i++) { - assert_c_eq(((char *)ptr)[i], 0x5a, - "Missing junk fill for byte %zu/%zu of deallocated region", - i, usize); - } - most_recently_junked = ptr; -} - -static void -huge_dalloc_junk_intercept(void *ptr, size_t usize) -{ - - huge_dalloc_junk_orig(ptr, usize); - /* - * The conditions under which junk filling 
actually occurs are nuanced - * enough that it doesn't make sense to duplicate the decision logic in - * test code, so don't actually check that the region is junk-filled. - */ - most_recently_junked = ptr; -} - -static void -test_junk(size_t sz_min, size_t sz_max) -{ - char *s; - size_t sz_prev, sz, i; - - arena_dalloc_junk_small_orig = arena_dalloc_junk_small; - arena_dalloc_junk_small = arena_dalloc_junk_small_intercept; - arena_dalloc_junk_large_orig = arena_dalloc_junk_large; - arena_dalloc_junk_large = arena_dalloc_junk_large_intercept; - huge_dalloc_junk_orig = huge_dalloc_junk; - huge_dalloc_junk = huge_dalloc_junk_intercept; - - sz_prev = 0; - s = (char *)mallocx(sz_min, 0); - assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); - - for (sz = sallocx(s, 0); sz <= sz_max; - sz_prev = sz, sz = sallocx(s, 0)) { - if (sz_prev > 0) { - assert_c_eq(s[0], 'a', - "Previously allocated byte %zu/%zu is corrupted", - ZU(0), sz_prev); - assert_c_eq(s[sz_prev-1], 'a', - "Previously allocated byte %zu/%zu is corrupted", - sz_prev-1, sz_prev); - } - - for (i = sz_prev; i < sz; i++) { - assert_c_eq(s[i], 0xa5, - "Newly allocated byte %zu/%zu isn't junk-filled", - i, sz); - s[i] = 'a'; - } - - if (xallocx(s, sz+1, 0, 0) == sz) { - void *junked = (void *)s; - - s = (char *)rallocx(s, sz+1, 0); - assert_ptr_not_null((void *)s, - "Unexpected rallocx() failure"); - if (!config_mremap || sz+1 <= arena_maxclass) { - assert_ptr_eq(most_recently_junked, junked, - "Expected region of size %zu to be " - "junk-filled", - sz); - } - } - } - - dallocx(s, 0); - assert_ptr_eq(most_recently_junked, (void *)s, - "Expected region of size %zu to be junk-filled", sz); - - arena_dalloc_junk_small = arena_dalloc_junk_small_orig; - arena_dalloc_junk_large = arena_dalloc_junk_large_orig; - huge_dalloc_junk = huge_dalloc_junk_orig; -} - -TEST_BEGIN(test_junk_small) -{ - - test_skip_if(!config_fill); - test_junk(1, SMALL_MAXCLASS-1); -} -TEST_END - -TEST_BEGIN(test_junk_large) -{ - - test_skip_if(!config_fill); - test_junk(SMALL_MAXCLASS+1, arena_maxclass); -} -TEST_END - -TEST_BEGIN(test_junk_huge) -{ - - test_skip_if(!config_fill); - test_junk(arena_maxclass+1, chunksize*2); -} -TEST_END - -arena_ralloc_junk_large_t *arena_ralloc_junk_large_orig; -static void *most_recently_trimmed; - -static void -arena_ralloc_junk_large_intercept(void *ptr, size_t old_usize, size_t usize) -{ - - arena_ralloc_junk_large_orig(ptr, old_usize, usize); - assert_zu_eq(old_usize, arena_maxclass, "Unexpected old_usize"); - assert_zu_eq(usize, arena_maxclass-PAGE, "Unexpected usize"); - most_recently_trimmed = ptr; -} - -TEST_BEGIN(test_junk_large_ralloc_shrink) -{ - void *p1, *p2; - - p1 = mallocx(arena_maxclass, 0); - assert_ptr_not_null(p1, "Unexpected mallocx() failure"); - - arena_ralloc_junk_large_orig = arena_ralloc_junk_large; - arena_ralloc_junk_large = arena_ralloc_junk_large_intercept; - - p2 = rallocx(p1, arena_maxclass-PAGE, 0); - assert_ptr_eq(p1, p2, "Unexpected move during shrink"); - - arena_ralloc_junk_large = arena_ralloc_junk_large_orig; - - assert_ptr_eq(most_recently_trimmed, p1, - "Expected trimmed portion of region to be junk-filled"); -} -TEST_END - -static bool detected_redzone_corruption; - -static void -arena_redzone_corruption_replacement(void *ptr, size_t usize, bool after, - size_t offset, uint8_t byte) -{ - - detected_redzone_corruption = true; -} - -TEST_BEGIN(test_junk_redzone) -{ - char *s; - arena_redzone_corruption_t *arena_redzone_corruption_orig; - - test_skip_if(!config_fill); - - 
arena_redzone_corruption_orig = arena_redzone_corruption; - arena_redzone_corruption = arena_redzone_corruption_replacement; - - /* Test underflow. */ - detected_redzone_corruption = false; - s = (char *)mallocx(1, 0); - assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); - s[-1] = 0xbb; - dallocx(s, 0); - assert_true(detected_redzone_corruption, - "Did not detect redzone corruption"); - - /* Test overflow. */ - detected_redzone_corruption = false; - s = (char *)mallocx(1, 0); - assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); - s[sallocx(s, 0)] = 0xbb; - dallocx(s, 0); - assert_true(detected_redzone_corruption, - "Did not detect redzone corruption"); - - arena_redzone_corruption = arena_redzone_corruption_orig; -} -TEST_END - -int -main(void) -{ - - return (test( - test_junk_small, - test_junk_large, - test_junk_huge, - test_junk_large_ralloc_shrink, - test_junk_redzone)); -} diff --git a/lib/jemalloc-3.6.0/test/unit/mallctl.c b/lib/jemalloc-3.6.0/test/unit/mallctl.c deleted file mode 100644 index 31fb810..0000000 --- a/lib/jemalloc-3.6.0/test/unit/mallctl.c +++ /dev/null @@ -1,415 +0,0 @@ -#include "test/jemalloc_test.h" - -TEST_BEGIN(test_mallctl_errors) -{ - uint64_t epoch; - size_t sz; - - assert_d_eq(mallctl("no_such_name", NULL, NULL, NULL, 0), ENOENT, - "mallctl() should return ENOENT for non-existent names"); - - assert_d_eq(mallctl("version", NULL, NULL, "0.0.0", strlen("0.0.0")), - EPERM, "mallctl() should return EPERM on attempt to write " - "read-only value"); - - assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)-1), - EINVAL, "mallctl() should return EINVAL for input size mismatch"); - assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)+1), - EINVAL, "mallctl() should return EINVAL for input size mismatch"); - - sz = sizeof(epoch)-1; - assert_d_eq(mallctl("epoch", &epoch, &sz, NULL, 0), EINVAL, - "mallctl() should return EINVAL for output size mismatch"); - sz = sizeof(epoch)+1; - assert_d_eq(mallctl("epoch", &epoch, &sz, NULL, 0), EINVAL, - "mallctl() should return EINVAL for output size mismatch"); -} -TEST_END - -TEST_BEGIN(test_mallctlnametomib_errors) -{ - size_t mib[1]; - size_t miblen; - - miblen = sizeof(mib)/sizeof(size_t); - assert_d_eq(mallctlnametomib("no_such_name", mib, &miblen), ENOENT, - "mallctlnametomib() should return ENOENT for non-existent names"); -} -TEST_END - -TEST_BEGIN(test_mallctlbymib_errors) -{ - uint64_t epoch; - size_t sz; - size_t mib[1]; - size_t miblen; - - miblen = sizeof(mib)/sizeof(size_t); - assert_d_eq(mallctlnametomib("version", mib, &miblen), 0, - "Unexpected mallctlnametomib() failure"); - - assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, "0.0.0", - strlen("0.0.0")), EPERM, "mallctl() should return EPERM on " - "attempt to write read-only value"); - - miblen = sizeof(mib)/sizeof(size_t); - assert_d_eq(mallctlnametomib("epoch", mib, &miblen), 0, - "Unexpected mallctlnametomib() failure"); - - assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &epoch, - sizeof(epoch)-1), EINVAL, - "mallctlbymib() should return EINVAL for input size mismatch"); - assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &epoch, - sizeof(epoch)+1), EINVAL, - "mallctlbymib() should return EINVAL for input size mismatch"); - - sz = sizeof(epoch)-1; - assert_d_eq(mallctlbymib(mib, miblen, &epoch, &sz, NULL, 0), EINVAL, - "mallctlbymib() should return EINVAL for output size mismatch"); - sz = sizeof(epoch)+1; - assert_d_eq(mallctlbymib(mib, miblen, &epoch, &sz, NULL, 0), EINVAL, - "mallctlbymib() should return 
EINVAL for output size mismatch"); -} -TEST_END - -TEST_BEGIN(test_mallctl_read_write) -{ - uint64_t old_epoch, new_epoch; - size_t sz = sizeof(old_epoch); - - /* Blind. */ - assert_d_eq(mallctl("epoch", NULL, NULL, NULL, 0), 0, - "Unexpected mallctl() failure"); - assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); - - /* Read. */ - assert_d_eq(mallctl("epoch", &old_epoch, &sz, NULL, 0), 0, - "Unexpected mallctl() failure"); - assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); - - /* Write. */ - assert_d_eq(mallctl("epoch", NULL, NULL, &new_epoch, sizeof(new_epoch)), - 0, "Unexpected mallctl() failure"); - assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); - - /* Read+write. */ - assert_d_eq(mallctl("epoch", &old_epoch, &sz, &new_epoch, - sizeof(new_epoch)), 0, "Unexpected mallctl() failure"); - assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); -} -TEST_END - -TEST_BEGIN(test_mallctlnametomib_short_mib) -{ - size_t mib[4]; - size_t miblen; - - miblen = 3; - mib[3] = 42; - assert_d_eq(mallctlnametomib("arenas.bin.0.nregs", mib, &miblen), 0, - "Unexpected mallctlnametomib() failure"); - assert_zu_eq(miblen, 3, "Unexpected mib output length"); - assert_zu_eq(mib[3], 42, - "mallctlnametomib() wrote past the end of the input mib"); -} -TEST_END - -TEST_BEGIN(test_mallctl_config) -{ - -#define TEST_MALLCTL_CONFIG(config) do { \ - bool oldval; \ - size_t sz = sizeof(oldval); \ - assert_d_eq(mallctl("config."#config, &oldval, &sz, NULL, 0), \ - 0, "Unexpected mallctl() failure"); \ - assert_b_eq(oldval, config_##config, "Incorrect config value"); \ - assert_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \ -} while (0) - - TEST_MALLCTL_CONFIG(debug); - TEST_MALLCTL_CONFIG(dss); - TEST_MALLCTL_CONFIG(fill); - TEST_MALLCTL_CONFIG(lazy_lock); - TEST_MALLCTL_CONFIG(mremap); - TEST_MALLCTL_CONFIG(munmap); - TEST_MALLCTL_CONFIG(prof); - TEST_MALLCTL_CONFIG(prof_libgcc); - TEST_MALLCTL_CONFIG(prof_libunwind); - TEST_MALLCTL_CONFIG(stats); - TEST_MALLCTL_CONFIG(tcache); - TEST_MALLCTL_CONFIG(tls); - TEST_MALLCTL_CONFIG(utrace); - TEST_MALLCTL_CONFIG(valgrind); - TEST_MALLCTL_CONFIG(xmalloc); - -#undef TEST_MALLCTL_CONFIG -} -TEST_END - -TEST_BEGIN(test_mallctl_opt) -{ - bool config_always = true; - -#define TEST_MALLCTL_OPT(t, opt, config) do { \ - t oldval; \ - size_t sz = sizeof(oldval); \ - int expected = config_##config ? 
0 : ENOENT; \ - int result = mallctl("opt."#opt, &oldval, &sz, NULL, 0); \ - assert_d_eq(result, expected, \ - "Unexpected mallctl() result for opt."#opt); \ - assert_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \ -} while (0) - - TEST_MALLCTL_OPT(bool, abort, always); - TEST_MALLCTL_OPT(size_t, lg_chunk, always); - TEST_MALLCTL_OPT(const char *, dss, always); - TEST_MALLCTL_OPT(size_t, narenas, always); - TEST_MALLCTL_OPT(ssize_t, lg_dirty_mult, always); - TEST_MALLCTL_OPT(bool, stats_print, always); - TEST_MALLCTL_OPT(bool, junk, fill); - TEST_MALLCTL_OPT(size_t, quarantine, fill); - TEST_MALLCTL_OPT(bool, redzone, fill); - TEST_MALLCTL_OPT(bool, zero, fill); - TEST_MALLCTL_OPT(bool, utrace, utrace); - TEST_MALLCTL_OPT(bool, valgrind, valgrind); - TEST_MALLCTL_OPT(bool, xmalloc, xmalloc); - TEST_MALLCTL_OPT(bool, tcache, tcache); - TEST_MALLCTL_OPT(size_t, lg_tcache_max, tcache); - TEST_MALLCTL_OPT(bool, prof, prof); - TEST_MALLCTL_OPT(const char *, prof_prefix, prof); - TEST_MALLCTL_OPT(bool, prof_active, prof); - TEST_MALLCTL_OPT(ssize_t, lg_prof_sample, prof); - TEST_MALLCTL_OPT(bool, prof_accum, prof); - TEST_MALLCTL_OPT(ssize_t, lg_prof_interval, prof); - TEST_MALLCTL_OPT(bool, prof_gdump, prof); - TEST_MALLCTL_OPT(bool, prof_final, prof); - TEST_MALLCTL_OPT(bool, prof_leak, prof); - -#undef TEST_MALLCTL_OPT -} -TEST_END - -TEST_BEGIN(test_manpage_example) -{ - unsigned nbins, i; - size_t mib[4]; - size_t len, miblen; - - len = sizeof(nbins); - assert_d_eq(mallctl("arenas.nbins", &nbins, &len, NULL, 0), 0, - "Unexpected mallctl() failure"); - - miblen = 4; - assert_d_eq(mallctlnametomib("arenas.bin.0.size", mib, &miblen), 0, - "Unexpected mallctlnametomib() failure"); - for (i = 0; i < nbins; i++) { - size_t bin_size; - - mib[2] = i; - len = sizeof(bin_size); - assert_d_eq(mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0), - 0, "Unexpected mallctlbymib() failure"); - /* Do something with bin_size... 
*/ - } -} -TEST_END - -TEST_BEGIN(test_thread_arena) -{ - unsigned arena_old, arena_new, narenas; - size_t sz = sizeof(unsigned); - - assert_d_eq(mallctl("arenas.narenas", &narenas, &sz, NULL, 0), 0, - "Unexpected mallctl() failure"); - assert_u_eq(narenas, opt_narenas, "Number of arenas incorrect"); - arena_new = narenas - 1; - assert_d_eq(mallctl("thread.arena", &arena_old, &sz, &arena_new, - sizeof(unsigned)), 0, "Unexpected mallctl() failure"); - arena_new = 0; - assert_d_eq(mallctl("thread.arena", &arena_old, &sz, &arena_new, - sizeof(unsigned)), 0, "Unexpected mallctl() failure"); -} -TEST_END - -TEST_BEGIN(test_arena_i_purge) -{ - unsigned narenas; - size_t sz = sizeof(unsigned); - size_t mib[3]; - size_t miblen = 3; - - assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, - "Unexpected mallctl() failure"); - - assert_d_eq(mallctl("arenas.narenas", &narenas, &sz, NULL, 0), 0, - "Unexpected mallctl() failure"); - assert_d_eq(mallctlnametomib("arena.0.purge", mib, &miblen), 0, - "Unexpected mallctlnametomib() failure"); - mib[1] = narenas; - assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, - "Unexpected mallctlbymib() failure"); -} -TEST_END - -TEST_BEGIN(test_arena_i_dss) -{ - const char *dss_prec_old, *dss_prec_new; - size_t sz = sizeof(dss_prec_old); - - dss_prec_new = "primary"; - assert_d_eq(mallctl("arena.0.dss", &dss_prec_old, &sz, &dss_prec_new, - sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure"); - assert_str_ne(dss_prec_old, "primary", - "Unexpected default for dss precedence"); - - assert_d_eq(mallctl("arena.0.dss", &dss_prec_new, &sz, &dss_prec_old, - sizeof(dss_prec_old)), 0, "Unexpected mallctl() failure"); -} -TEST_END - -TEST_BEGIN(test_arenas_purge) -{ - unsigned arena = 0; - - assert_d_eq(mallctl("arenas.purge", NULL, NULL, &arena, sizeof(arena)), - 0, "Unexpected mallctl() failure"); - - assert_d_eq(mallctl("arenas.purge", NULL, NULL, NULL, 0), 0, - "Unexpected mallctl() failure"); -} -TEST_END - -TEST_BEGIN(test_arenas_initialized) -{ - unsigned narenas; - size_t sz = sizeof(narenas); - - assert_d_eq(mallctl("arenas.narenas", &narenas, &sz, NULL, 0), 0, - "Unexpected mallctl() failure"); - { - bool initialized[narenas]; - - sz = narenas * sizeof(bool); - assert_d_eq(mallctl("arenas.initialized", initialized, &sz, - NULL, 0), 0, "Unexpected mallctl() failure"); - } -} -TEST_END - -TEST_BEGIN(test_arenas_constants) -{ - -#define TEST_ARENAS_CONSTANT(t, name, expected) do { \ - t name; \ - size_t sz = sizeof(t); \ - assert_d_eq(mallctl("arenas."#name, &name, &sz, NULL, 0), 0, \ - "Unexpected mallctl() failure"); \ - assert_zu_eq(name, expected, "Incorrect "#name" size"); \ -} while (0) - - TEST_ARENAS_CONSTANT(size_t, quantum, QUANTUM); - TEST_ARENAS_CONSTANT(size_t, page, PAGE); - TEST_ARENAS_CONSTANT(unsigned, nbins, NBINS); - TEST_ARENAS_CONSTANT(size_t, nlruns, nlclasses); - -#undef TEST_ARENAS_CONSTANT -} -TEST_END - -TEST_BEGIN(test_arenas_bin_constants) -{ - -#define TEST_ARENAS_BIN_CONSTANT(t, name, expected) do { \ - t name; \ - size_t sz = sizeof(t); \ - assert_d_eq(mallctl("arenas.bin.0."#name, &name, &sz, NULL, 0), \ - 0, "Unexpected mallctl() failure"); \ - assert_zu_eq(name, expected, "Incorrect "#name" size"); \ -} while (0) - - TEST_ARENAS_BIN_CONSTANT(size_t, size, arena_bin_info[0].reg_size); - TEST_ARENAS_BIN_CONSTANT(uint32_t, nregs, arena_bin_info[0].nregs); - TEST_ARENAS_BIN_CONSTANT(size_t, run_size, arena_bin_info[0].run_size); - -#undef TEST_ARENAS_BIN_CONSTANT -} -TEST_END - 
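[Editorial illustration, hedged: the test/unit/mallctl.c hunks being deleted above exercise jemalloc's mallctl() control interface. The short sketch below shows the same read-only query pattern those tests assert on; it is not part of the original tree, and it assumes a program linked against jemalloc with <jemalloc/jemalloc.h> on the include path (depending on how jemalloc was configured, the symbol may instead be exported with a je_ prefix, e.g. je_mallctl).]

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	unsigned narenas;
	size_t sz = sizeof(narenas);

	/*
	 * Read-only mallctl() query: oldp/oldlenp receive the value,
	 * newp/newlen are NULL/0. "arenas.narenas" is one of the names
	 * the deleted unit tests read.
	 */
	if (mallctl("arenas.narenas", &narenas, &sz, NULL, 0) != 0) {
		fprintf(stderr, "mallctl(\"arenas.narenas\") failed\n");
		return (1);
	}
	printf("number of arenas: %u\n", narenas);
	return (0);
}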
-TEST_BEGIN(test_arenas_lrun_constants) -{ - -#define TEST_ARENAS_LRUN_CONSTANT(t, name, expected) do { \ - t name; \ - size_t sz = sizeof(t); \ - assert_d_eq(mallctl("arenas.lrun.0."#name, &name, &sz, NULL, \ - 0), 0, "Unexpected mallctl() failure"); \ - assert_zu_eq(name, expected, "Incorrect "#name" size"); \ -} while (0) - - TEST_ARENAS_LRUN_CONSTANT(size_t, size, (1 << LG_PAGE)); - -#undef TEST_ARENAS_LRUN_CONSTANT -} -TEST_END - -TEST_BEGIN(test_arenas_extend) -{ - unsigned narenas_before, arena, narenas_after; - size_t sz = sizeof(unsigned); - - assert_d_eq(mallctl("arenas.narenas", &narenas_before, &sz, NULL, 0), 0, - "Unexpected mallctl() failure"); - assert_d_eq(mallctl("arenas.extend", &arena, &sz, NULL, 0), 0, - "Unexpected mallctl() failure"); - assert_d_eq(mallctl("arenas.narenas", &narenas_after, &sz, NULL, 0), 0, - "Unexpected mallctl() failure"); - - assert_u_eq(narenas_before+1, narenas_after, - "Unexpected number of arenas before versus after extension"); - assert_u_eq(arena, narenas_after-1, "Unexpected arena index"); -} -TEST_END - -TEST_BEGIN(test_stats_arenas) -{ - -#define TEST_STATS_ARENAS(t, name) do { \ - t name; \ - size_t sz = sizeof(t); \ - assert_d_eq(mallctl("stats.arenas.0."#name, &name, &sz, NULL, \ - 0), 0, "Unexpected mallctl() failure"); \ -} while (0) - - TEST_STATS_ARENAS(const char *, dss); - TEST_STATS_ARENAS(unsigned, nthreads); - TEST_STATS_ARENAS(size_t, pactive); - TEST_STATS_ARENAS(size_t, pdirty); - -#undef TEST_STATS_ARENAS -} -TEST_END - -int -main(void) -{ - - return (test( - test_mallctl_errors, - test_mallctlnametomib_errors, - test_mallctlbymib_errors, - test_mallctl_read_write, - test_mallctlnametomib_short_mib, - test_mallctl_config, - test_mallctl_opt, - test_manpage_example, - test_thread_arena, - test_arena_i_purge, - test_arena_i_dss, - test_arenas_purge, - test_arenas_initialized, - test_arenas_constants, - test_arenas_bin_constants, - test_arenas_lrun_constants, - test_arenas_extend, - test_stats_arenas)); -} diff --git a/lib/jemalloc-3.6.0/test/unit/math.c b/lib/jemalloc-3.6.0/test/unit/math.c deleted file mode 100644 index a1b288e..0000000 --- a/lib/jemalloc-3.6.0/test/unit/math.c +++ /dev/null @@ -1,388 +0,0 @@ -#include "test/jemalloc_test.h" - -#define MAX_REL_ERR 1.0e-9 -#define MAX_ABS_ERR 1.0e-9 - -static bool -double_eq_rel(double a, double b, double max_rel_err, double max_abs_err) -{ - double rel_err; - - if (fabs(a - b) < max_abs_err) - return (true); - rel_err = (fabs(b) > fabs(a)) ? fabs((a-b)/b) : fabs((a-b)/a); - return (rel_err < max_rel_err); -} - -static uint64_t -factorial(unsigned x) -{ - uint64_t ret = 1; - unsigned i; - - for (i = 2; i <= x; i++) - ret *= (uint64_t)i; - - return (ret); -} - -TEST_BEGIN(test_ln_gamma_factorial) -{ - unsigned x; - - /* exp(ln_gamma(x)) == (x-1)! for integer x. */ - for (x = 1; x <= 21; x++) { - assert_true(double_eq_rel(exp(ln_gamma(x)), - (double)factorial(x-1), MAX_REL_ERR, MAX_ABS_ERR), - "Incorrect factorial result for x=%u", x); - } -} -TEST_END - -/* Expected ln_gamma([0.0..100.0] increment=0.25). 
*/ -static const double ln_gamma_misc_expected[] = { - INFINITY, - 1.28802252469807743, 0.57236494292470008, 0.20328095143129538, - 0.00000000000000000, -0.09827183642181320, -0.12078223763524518, - -0.08440112102048555, 0.00000000000000000, 0.12487171489239651, - 0.28468287047291918, 0.47521466691493719, 0.69314718055994529, - 0.93580193110872523, 1.20097360234707429, 1.48681557859341718, - 1.79175946922805496, 2.11445692745037128, 2.45373657084244234, - 2.80857141857573644, 3.17805383034794575, 3.56137591038669710, - 3.95781396761871651, 4.36671603662228680, 4.78749174278204581, - 5.21960398699022932, 5.66256205985714178, 6.11591589143154568, - 6.57925121201010121, 7.05218545073853953, 7.53436423675873268, - 8.02545839631598312, 8.52516136106541467, 9.03318691960512332, - 9.54926725730099690, 10.07315123968123949, 10.60460290274525086, - 11.14340011995171231, 11.68933342079726856, 12.24220494005076176, - 12.80182748008146909, 13.36802367147604720, 13.94062521940376342, - 14.51947222506051816, 15.10441257307551943, 15.69530137706046524, - 16.29200047656724237, 16.89437797963419285, 17.50230784587389010, - 18.11566950571089407, 18.73434751193644843, 19.35823122022435427, - 19.98721449566188468, 20.62119544270163018, 21.26007615624470048, - 21.90376249182879320, 22.55216385312342098, 23.20519299513386002, - 23.86276584168908954, 24.52480131594137802, 25.19122118273868338, - 25.86194990184851861, 26.53691449111561340, 27.21604439872720604, - 27.89927138384089389, 28.58652940490193828, 29.27775451504081516, - 29.97288476399884871, 30.67186010608067548, 31.37462231367769050, - 32.08111489594735843, 32.79128302226991565, 33.50507345013689076, - 34.22243445715505317, 34.94331577687681545, 35.66766853819134298, - 36.39544520803305261, 37.12659953718355865, 37.86108650896109395, - 38.59886229060776230, 39.33988418719949465, 40.08411059791735198, - 40.83150097453079752, 41.58201578195490100, 42.33561646075348506, - 43.09226539146988699, 43.85192586067515208, 44.61456202863158893, - 45.38013889847690052, 46.14862228684032885, 46.91997879580877395, - 47.69417578616628361, 48.47118135183522014, 49.25096429545256882, - 50.03349410501914463, 50.81874093156324790, 51.60667556776436982, - 52.39726942748592364, 53.19049452616926743, 53.98632346204390586, - 54.78472939811231157, 55.58568604486942633, 56.38916764371992940, - 57.19514895105859864, 58.00360522298051080, 58.81451220059079787, - 59.62784609588432261, 60.44358357816834371, 61.26170176100199427, - 62.08217818962842927, 62.90499082887649962, 63.73011805151035958, - 64.55753862700632340, 65.38723171073768015, 66.21917683354901385, - 67.05335389170279825, 67.88974313718154008, 68.72832516833013017, - 69.56908092082363737, 70.41199165894616385, 71.25703896716800045, - 72.10420474200799390, 72.95347118416940191, 73.80482079093779646, - 74.65823634883015814, 75.51370092648485866, 76.37119786778275454, - 77.23071078519033961, 78.09222355331530707, 78.95572030266725960, - 79.82118541361435859, 80.68860351052903468, 81.55795945611502873, - 82.42923834590904164, 83.30242550295004378, 84.17750647261028973, - 85.05446701758152983, 85.93329311301090456, 86.81397094178107920, - 87.69648688992882057, 88.58082754219766741, 89.46697967771913795, - 90.35493026581838194, 91.24466646193963015, 92.13617560368709292, - 93.02944520697742803, 93.92446296229978486, 94.82121673107967297, - 95.71969454214321615, 96.61988458827809723, 97.52177522288820910, - 98.42535495673848800, 99.33061245478741341, 100.23753653310367895, - 101.14611615586458981, 102.05634043243354370, 
102.96819861451382394, - 103.88168009337621811, 104.79677439715833032, 105.71347118823287303, - 106.63176026064346047, 107.55163153760463501, 108.47307506906540198, - 109.39608102933323153, 110.32063971475740516, 111.24674154146920557, - 112.17437704317786995, 113.10353686902013237, 114.03421178146170689, - 114.96639265424990128, 115.90007047041454769, 116.83523632031698014, - 117.77188139974506953, 118.70999700805310795, 119.64957454634490830, - 120.59060551569974962, 121.53308151543865279, 122.47699424143097247, - 123.42233548443955726, 124.36909712850338394, 125.31727114935689826, - 126.26684961288492559, 127.21782467361175861, 128.17018857322420899, - 129.12393363912724453, 130.07905228303084755, 131.03553699956862033, - 131.99338036494577864, 132.95257503561629164, 133.91311374698926784, - 134.87498931216194364, 135.83819462068046846, 136.80272263732638294, - 137.76856640092901785, 138.73571902320256299, 139.70417368760718091, - 140.67392364823425055, 141.64496222871400732, 142.61728282114600574, - 143.59087888505104047, 144.56574394634486680, 145.54187159633210058, - 146.51925549072063859, 147.49788934865566148, 148.47776695177302031, - 149.45888214327129617, 150.44122882700193600, 151.42480096657754984, - 152.40959258449737490, 153.39559776128982094, 154.38281063467164245, - 155.37122539872302696, 156.36083630307879844, 157.35163765213474107, - 158.34362380426921391, 159.33678917107920370, 160.33112821663092973, - 161.32663545672428995, 162.32330545817117695, 163.32113283808695314, - 164.32011226319519892, 165.32023844914485267, 166.32150615984036790, - 167.32391020678358018, 168.32744544842768164, 169.33210678954270634, - 170.33788918059275375, 171.34478761712384198, 172.35279713916281707, - 173.36191283062726143, 174.37212981874515094, 175.38344327348534080, - 176.39584840699734514, 177.40934047306160437, 178.42391476654847793, - 179.43956662288721304, 180.45629141754378111, 181.47408456550741107, - 182.49294152078630304, 183.51285777591152737, 184.53382886144947861, - 185.55585034552262869, 186.57891783333786861, 187.60302696672312095, - 188.62817342367162610, 189.65435291789341932, 190.68156119837468054, - 191.70979404894376330, 192.73904728784492590, 193.76931676731820176, - 194.80059837318714244, 195.83288802445184729, 196.86618167288995096, - 197.90047530266301123, 198.93576492992946214, 199.97204660246373464, - 201.00931639928148797, 202.04757043027063901, 203.08680483582807597, - 204.12701578650228385, 205.16819948264117102, 206.21035215404597807, - 207.25347005962987623, 208.29754948708190909, 209.34258675253678916, - 210.38857820024875878, 211.43552020227099320, 212.48340915813977858, - 213.53224149456323744, 214.58201366511514152, 215.63272214993284592, - 216.68436345542014010, 217.73693411395422004, 218.79043068359703739, - 219.84484974781133815, 220.90018791517996988, 221.95644181913033322, - 223.01360811766215875, 224.07168349307951871, 225.13066465172661879, - 226.19054832372759734, 227.25133126272962159, 228.31301024565024704, - 229.37558207242807384, 230.43904356577689896, 231.50339157094342113, - 232.56862295546847008, 233.63473460895144740, 234.70172344281823484, - 235.76958639009222907, 236.83832040516844586, 237.90792246359117712, - 238.97838956183431947, 240.04971871708477238, 241.12190696702904802, - 242.19495136964280846, 243.26884900298270509, 244.34359696498191283, - 245.41919237324782443, 246.49563236486270057, 247.57291409618682110, - 248.65103474266476269, 249.72999149863338175, 250.80978157713354904, - 251.89040220972316320, 252.97185064629374551, 
254.05412415488834199, - 255.13722002152300661, 256.22113555000953511, 257.30586806178126835, - 258.39141489572085675, 259.47777340799029844, 260.56494097186322279, - 261.65291497755913497, 262.74169283208021852, 263.83127195904967266, - 264.92164979855277807, 266.01282380697938379, 267.10479145686849733, - 268.19755023675537586, 269.29109765101975427, 270.38543121973674488, - 271.48054847852881721, 272.57644697842033565, 273.67312428569374561, - 274.77057798174683967, 275.86880566295326389, 276.96780494052313770, - 278.06757344036617496, 279.16810880295668085, 280.26940868320008349, - 281.37147075030043197, 282.47429268763045229, 283.57787219260217171, - 284.68220697654078322, 285.78729476455760050, 286.89313329542699194, - 287.99972032146268930, 289.10705360839756395, 290.21513093526289140, - 291.32395009427028754, 292.43350889069523646, 293.54380514276073200, - 294.65483668152336350, 295.76660135076059532, 296.87909700685889902, - 297.99232151870342022, 299.10627276756946458, 300.22094864701409733, - 301.33634706277030091, 302.45246593264130297, 303.56930318639643929, - 304.68685676566872189, 305.80512462385280514, 306.92410472600477078, - 308.04379504874236773, 309.16419358014690033, 310.28529831966631036, - 311.40710727801865687, 312.52961847709792664, 313.65282994987899201, - 314.77673974032603610, 315.90134590329950015, 317.02664650446632777, - 318.15263962020929966, 319.27932333753892635, 320.40669575400545455, - 321.53475497761127144, 322.66349912672620803, 323.79292633000159185, - 324.92303472628691452, 326.05382246454587403, 327.18528770377525916, - 328.31742861292224234, 329.45024337080525356, 330.58373016603343331, - 331.71788719692847280, 332.85271267144611329, 333.98820480709991898, - 335.12436183088397001, 336.26118197919845443, 337.39866349777429377, - 338.53680464159958774, 339.67560367484657036, 340.81505887079896411, - 341.95516851178109619, 343.09593088908627578, 344.23734430290727460, - 345.37940706226686416, 346.52211748494903532, 347.66547389743118401, - 348.80947463481720661, 349.95411804077025408, 351.09940246744753267, - 352.24532627543504759, 353.39188783368263103, 354.53908551944078908, - 355.68691771819692349, 356.83538282361303118, 357.98447923746385868, - 359.13420536957539753 -}; - -TEST_BEGIN(test_ln_gamma_misc) -{ - unsigned i; - - for (i = 1; i < sizeof(ln_gamma_misc_expected)/sizeof(double); i++) { - double x = (double)i * 0.25; - assert_true(double_eq_rel(ln_gamma(x), - ln_gamma_misc_expected[i], MAX_REL_ERR, MAX_ABS_ERR), - "Incorrect ln_gamma result for i=%u", i); - } -} -TEST_END - -/* Expected pt_norm([0.01..0.99] increment=0.01). 
*/ -static const double pt_norm_expected[] = { - -INFINITY, - -2.32634787404084076, -2.05374891063182252, -1.88079360815125085, - -1.75068607125216946, -1.64485362695147264, -1.55477359459685305, - -1.47579102817917063, -1.40507156030963221, -1.34075503369021654, - -1.28155156554460081, -1.22652812003661049, -1.17498679206608991, - -1.12639112903880045, -1.08031934081495606, -1.03643338949378938, - -0.99445788320975281, -0.95416525314619416, -0.91536508784281390, - -0.87789629505122846, -0.84162123357291418, -0.80642124701824025, - -0.77219321418868492, -0.73884684918521371, -0.70630256284008752, - -0.67448975019608171, -0.64334540539291685, -0.61281299101662701, - -0.58284150727121620, -0.55338471955567281, -0.52440051270804067, - -0.49585034734745320, -0.46769879911450812, -0.43991316567323380, - -0.41246312944140462, -0.38532046640756751, -0.35845879325119373, - -0.33185334643681652, -0.30548078809939738, -0.27931903444745404, - -0.25334710313579978, -0.22754497664114931, -0.20189347914185077, - -0.17637416478086135, -0.15096921549677725, -0.12566134685507399, - -0.10043372051146975, -0.07526986209982976, -0.05015358346473352, - -0.02506890825871106, 0.00000000000000000, 0.02506890825871106, - 0.05015358346473366, 0.07526986209982990, 0.10043372051146990, - 0.12566134685507413, 0.15096921549677739, 0.17637416478086146, - 0.20189347914185105, 0.22754497664114931, 0.25334710313579978, - 0.27931903444745404, 0.30548078809939738, 0.33185334643681652, - 0.35845879325119373, 0.38532046640756762, 0.41246312944140484, - 0.43991316567323391, 0.46769879911450835, 0.49585034734745348, - 0.52440051270804111, 0.55338471955567303, 0.58284150727121620, - 0.61281299101662701, 0.64334540539291685, 0.67448975019608171, - 0.70630256284008752, 0.73884684918521371, 0.77219321418868492, - 0.80642124701824036, 0.84162123357291441, 0.87789629505122879, - 0.91536508784281423, 0.95416525314619460, 0.99445788320975348, - 1.03643338949378938, 1.08031934081495606, 1.12639112903880045, - 1.17498679206608991, 1.22652812003661049, 1.28155156554460081, - 1.34075503369021654, 1.40507156030963265, 1.47579102817917085, - 1.55477359459685394, 1.64485362695147308, 1.75068607125217102, - 1.88079360815125041, 2.05374891063182208, 2.32634787404084076 -}; - -TEST_BEGIN(test_pt_norm) -{ - unsigned i; - - for (i = 1; i < sizeof(pt_norm_expected)/sizeof(double); i++) { - double p = (double)i * 0.01; - assert_true(double_eq_rel(pt_norm(p), pt_norm_expected[i], - MAX_REL_ERR, MAX_ABS_ERR), - "Incorrect pt_norm result for i=%u", i); - } -} -TEST_END - -/* - * Expected pt_chi2(p=[0.01..0.99] increment=0.07, - * df={0.1, 1.1, 10.1, 100.1, 1000.1}). 
- */ -static const double pt_chi2_df[] = {0.1, 1.1, 10.1, 100.1, 1000.1}; -static const double pt_chi2_expected[] = { - 1.168926411457320e-40, 1.347680397072034e-22, 3.886980416666260e-17, - 8.245951724356564e-14, 2.068936347497604e-11, 1.562561743309233e-09, - 5.459543043426564e-08, 1.114775688149252e-06, 1.532101202364371e-05, - 1.553884683726585e-04, 1.239396954915939e-03, 8.153872320255721e-03, - 4.631183739647523e-02, 2.473187311701327e-01, 2.175254800183617e+00, - - 0.0003729887888876379, 0.0164409238228929513, 0.0521523015190650113, - 0.1064701372271216612, 0.1800913735793082115, 0.2748704281195626931, - 0.3939246282787986497, 0.5420727552260817816, 0.7267265822221973259, - 0.9596554296000253670, 1.2607440376386165326, 1.6671185084541604304, - 2.2604828984738705167, 3.2868613342148607082, 6.9298574921692139839, - - 2.606673548632508, 4.602913725294877, 5.646152813924212, - 6.488971315540869, 7.249823275816285, 7.977314231410841, - 8.700354939944047, 9.441728024225892, 10.224338321374127, - 11.076435368801061, 12.039320937038386, 13.183878752697167, - 14.657791935084575, 16.885728216339373, 23.361991680031817, - - 70.14844087392152, 80.92379498849355, 85.53325420085891, - 88.94433120715347, 91.83732712857017, 94.46719943606301, - 96.96896479994635, 99.43412843510363, 101.94074719829733, - 104.57228644307247, 107.43900093448734, 110.71844673417287, - 114.76616819871325, 120.57422505959563, 135.92318818757556, - - 899.0072447849649, 937.9271278858220, 953.8117189560207, - 965.3079371501154, 974.8974061207954, 983.4936235182347, - 991.5691170518946, 999.4334123954690, 1007.3391826856553, - 1015.5445154999951, 1024.3777075619569, 1034.3538789836223, - 1046.4872561869577, 1063.5717461999654, 1107.0741966053859 -}; - -TEST_BEGIN(test_pt_chi2) -{ - unsigned i, j; - unsigned e = 0; - - for (i = 0; i < sizeof(pt_chi2_df)/sizeof(double); i++) { - double df = pt_chi2_df[i]; - double ln_gamma_df = ln_gamma(df * 0.5); - for (j = 1; j < 100; j += 7) { - double p = (double)j * 0.01; - assert_true(double_eq_rel(pt_chi2(p, df, ln_gamma_df), - pt_chi2_expected[e], MAX_REL_ERR, MAX_ABS_ERR), - "Incorrect pt_chi2 result for i=%u, j=%u", i, j); - e++; - } - } -} -TEST_END - -/* - * Expected pt_gamma(p=[0.1..0.99] increment=0.07, - * shape=[0.5..3.0] increment=0.5). 
- */ -static const double pt_gamma_shape[] = {0.5, 1.0, 1.5, 2.0, 2.5, 3.0}; -static const double pt_gamma_expected[] = { - 7.854392895485103e-05, 5.043466107888016e-03, 1.788288957794883e-02, - 3.900956150232906e-02, 6.913847560638034e-02, 1.093710833465766e-01, - 1.613412523825817e-01, 2.274682115597864e-01, 3.114117323127083e-01, - 4.189466220207417e-01, 5.598106789059246e-01, 7.521856146202706e-01, - 1.036125427911119e+00, 1.532450860038180e+00, 3.317448300510606e+00, - - 0.01005033585350144, 0.08338160893905107, 0.16251892949777497, - 0.24846135929849966, 0.34249030894677596, 0.44628710262841947, - 0.56211891815354142, 0.69314718055994529, 0.84397007029452920, - 1.02165124753198167, 1.23787435600161766, 1.51412773262977574, - 1.89711998488588196, 2.52572864430825783, 4.60517018598809091, - - 0.05741590094955853, 0.24747378084860744, 0.39888572212236084, - 0.54394139997444901, 0.69048812513915159, 0.84311389861296104, - 1.00580622221479898, 1.18298694218766931, 1.38038096305861213, - 1.60627736383027453, 1.87396970522337947, 2.20749220408081070, - 2.65852391865854942, 3.37934630984842244, 5.67243336507218476, - - 0.1485547402532659, 0.4657458011640391, 0.6832386130709406, - 0.8794297834672100, 1.0700752852474524, 1.2629614217350744, - 1.4638400448580779, 1.6783469900166610, 1.9132338090606940, - 2.1778589228618777, 2.4868823970010991, 2.8664695666264195, - 3.3724415436062114, 4.1682658512758071, 6.6383520679938108, - - 0.2771490383641385, 0.7195001279643727, 0.9969081732265243, - 1.2383497880608061, 1.4675206597269927, 1.6953064251816552, - 1.9291243435606809, 2.1757300955477641, 2.4428032131216391, - 2.7406534569230616, 3.0851445039665513, 3.5043101122033367, - 4.0575997065264637, 4.9182956424675286, 7.5431362346944937, - - 0.4360451650782932, 0.9983600902486267, 1.3306365880734528, - 1.6129750834753802, 1.8767241606994294, 2.1357032436097660, - 2.3988853336865565, 2.6740603137235603, 2.9697561737517959, - 3.2971457713883265, 3.6731795898504660, 4.1275751617770631, - 4.7230515633946677, 5.6417477865306020, 8.4059469148854635 -}; - -TEST_BEGIN(test_pt_gamma_shape) -{ - unsigned i, j; - unsigned e = 0; - - for (i = 0; i < sizeof(pt_gamma_shape)/sizeof(double); i++) { - double shape = pt_gamma_shape[i]; - double ln_gamma_shape = ln_gamma(shape); - for (j = 1; j < 100; j += 7) { - double p = (double)j * 0.01; - assert_true(double_eq_rel(pt_gamma(p, shape, 1.0, - ln_gamma_shape), pt_gamma_expected[e], MAX_REL_ERR, - MAX_ABS_ERR), - "Incorrect pt_gamma result for i=%u, j=%u", i, j); - e++; - } - } -} -TEST_END - -TEST_BEGIN(test_pt_gamma_scale) -{ - double shape = 1.0; - double ln_gamma_shape = ln_gamma(shape); - - assert_true(double_eq_rel( - pt_gamma(0.5, shape, 1.0, ln_gamma_shape) * 10.0, - pt_gamma(0.5, shape, 10.0, ln_gamma_shape), MAX_REL_ERR, - MAX_ABS_ERR), - "Scale should be trivially equivalent to external multiplication"); -} -TEST_END - -int -main(void) -{ - - return (test( - test_ln_gamma_factorial, - test_ln_gamma_misc, - test_pt_norm, - test_pt_chi2, - test_pt_gamma_shape, - test_pt_gamma_scale)); -} diff --git a/lib/jemalloc-3.6.0/test/unit/mq.c b/lib/jemalloc-3.6.0/test/unit/mq.c deleted file mode 100644 index f57e96a..0000000 --- a/lib/jemalloc-3.6.0/test/unit/mq.c +++ /dev/null @@ -1,92 +0,0 @@ -#include "test/jemalloc_test.h" - -#define NSENDERS 3 -#define NMSGS 100000 - -typedef struct mq_msg_s mq_msg_t; -struct mq_msg_s { - mq_msg(mq_msg_t) link; -}; -mq_gen(static, mq_, mq_t, mq_msg_t, link) - -TEST_BEGIN(test_mq_basic) -{ - mq_t mq; - mq_msg_t msg; - - 
assert_false(mq_init(&mq), "Unexpected mq_init() failure"); - assert_u_eq(mq_count(&mq), 0, "mq should be empty"); - assert_ptr_null(mq_tryget(&mq), - "mq_tryget() should fail when the queue is empty"); - - mq_put(&mq, &msg); - assert_u_eq(mq_count(&mq), 1, "mq should contain one message"); - assert_ptr_eq(mq_tryget(&mq), &msg, "mq_tryget() should return msg"); - - mq_put(&mq, &msg); - assert_ptr_eq(mq_get(&mq), &msg, "mq_get() should return msg"); - - mq_fini(&mq); -} -TEST_END - -static void * -thd_receiver_start(void *arg) -{ - mq_t *mq = (mq_t *)arg; - unsigned i; - - for (i = 0; i < (NSENDERS * NMSGS); i++) { - mq_msg_t *msg = mq_get(mq); - assert_ptr_not_null(msg, "mq_get() should never return NULL"); - dallocx(msg, 0); - } - return (NULL); -} - -static void * -thd_sender_start(void *arg) -{ - mq_t *mq = (mq_t *)arg; - unsigned i; - - for (i = 0; i < NMSGS; i++) { - mq_msg_t *msg; - void *p; - p = mallocx(sizeof(mq_msg_t), 0); - assert_ptr_not_null(p, "Unexpected allocm() failure"); - msg = (mq_msg_t *)p; - mq_put(mq, msg); - } - return (NULL); -} - -TEST_BEGIN(test_mq_threaded) -{ - mq_t mq; - thd_t receiver; - thd_t senders[NSENDERS]; - unsigned i; - - assert_false(mq_init(&mq), "Unexpected mq_init() failure"); - - thd_create(&receiver, thd_receiver_start, (void *)&mq); - for (i = 0; i < NSENDERS; i++) - thd_create(&senders[i], thd_sender_start, (void *)&mq); - - thd_join(receiver, NULL); - for (i = 0; i < NSENDERS; i++) - thd_join(senders[i], NULL); - - mq_fini(&mq); -} -TEST_END - -int -main(void) -{ - return (test( - test_mq_basic, - test_mq_threaded)); -} - diff --git a/lib/jemalloc-3.6.0/test/unit/mtx.c b/lib/jemalloc-3.6.0/test/unit/mtx.c deleted file mode 100644 index 96ff694..0000000 --- a/lib/jemalloc-3.6.0/test/unit/mtx.c +++ /dev/null @@ -1,60 +0,0 @@ -#include "test/jemalloc_test.h" - -#define NTHREADS 2 -#define NINCRS 2000000 - -TEST_BEGIN(test_mtx_basic) -{ - mtx_t mtx; - - assert_false(mtx_init(&mtx), "Unexpected mtx_init() failure"); - mtx_lock(&mtx); - mtx_unlock(&mtx); - mtx_fini(&mtx); -} -TEST_END - -typedef struct { - mtx_t mtx; - unsigned x; -} thd_start_arg_t; - -static void * -thd_start(void *varg) -{ - thd_start_arg_t *arg = (thd_start_arg_t *)varg; - unsigned i; - - for (i = 0; i < NINCRS; i++) { - mtx_lock(&arg->mtx); - arg->x++; - mtx_unlock(&arg->mtx); - } - return (NULL); -} - -TEST_BEGIN(test_mtx_race) -{ - thd_start_arg_t arg; - thd_t thds[NTHREADS]; - unsigned i; - - assert_false(mtx_init(&arg.mtx), "Unexpected mtx_init() failure"); - arg.x = 0; - for (i = 0; i < NTHREADS; i++) - thd_create(&thds[i], thd_start, (void *)&arg); - for (i = 0; i < NTHREADS; i++) - thd_join(thds[i], NULL); - assert_u_eq(arg.x, NTHREADS * NINCRS, - "Race-related counter corruption"); -} -TEST_END - -int -main(void) -{ - - return (test( - test_mtx_basic, - test_mtx_race)); -} diff --git a/lib/jemalloc-3.6.0/test/unit/prof_accum.c b/lib/jemalloc-3.6.0/test/unit/prof_accum.c deleted file mode 100644 index 050a8a7..0000000 --- a/lib/jemalloc-3.6.0/test/unit/prof_accum.c +++ /dev/null @@ -1,86 +0,0 @@ -#include "prof_accum.h" - -#ifdef JEMALLOC_PROF -const char *malloc_conf = - "prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0"; -#endif - -static int -prof_dump_open_intercept(bool propagate_err, const char *filename) -{ - int fd; - - fd = open("/dev/null", O_WRONLY); - assert_d_ne(fd, -1, "Unexpected open() failure"); - - return (fd); -} - -static void * -alloc_from_permuted_backtrace(unsigned thd_ind, unsigned iteration) -{ - - return 
(alloc_0(thd_ind*NALLOCS_PER_THREAD + iteration)); -} - -static void * -thd_start(void *varg) -{ - unsigned thd_ind = *(unsigned *)varg; - size_t bt_count_prev, bt_count; - unsigned i_prev, i; - - i_prev = 0; - bt_count_prev = 0; - for (i = 0; i < NALLOCS_PER_THREAD; i++) { - void *p = alloc_from_permuted_backtrace(thd_ind, i); - dallocx(p, 0); - if (i % DUMP_INTERVAL == 0) { - assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0), - 0, "Unexpected error while dumping heap profile"); - } - - if (i % BT_COUNT_CHECK_INTERVAL == 0 || - i+1 == NALLOCS_PER_THREAD) { - bt_count = prof_bt_count(); - assert_zu_le(bt_count_prev+(i-i_prev), bt_count, - "Expected larger backtrace count increase"); - i_prev = i; - bt_count_prev = bt_count; - } - } - - return (NULL); -} - -TEST_BEGIN(test_idump) -{ - bool active; - thd_t thds[NTHREADS]; - unsigned thd_args[NTHREADS]; - unsigned i; - - test_skip_if(!config_prof); - - active = true; - assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)), - 0, "Unexpected mallctl failure while activating profiling"); - - prof_dump_open = prof_dump_open_intercept; - - for (i = 0; i < NTHREADS; i++) { - thd_args[i] = i; - thd_create(&thds[i], thd_start, (void *)&thd_args[i]); - } - for (i = 0; i < NTHREADS; i++) - thd_join(thds[i], NULL); -} -TEST_END - -int -main(void) -{ - - return (test( - test_idump)); -} diff --git a/lib/jemalloc-3.6.0/test/unit/prof_accum.h b/lib/jemalloc-3.6.0/test/unit/prof_accum.h deleted file mode 100644 index 109d86b..0000000 --- a/lib/jemalloc-3.6.0/test/unit/prof_accum.h +++ /dev/null @@ -1,35 +0,0 @@ -#include "test/jemalloc_test.h" - -#define NTHREADS 4 -#define NALLOCS_PER_THREAD 50 -#define DUMP_INTERVAL 1 -#define BT_COUNT_CHECK_INTERVAL 5 - -#define alloc_n_proto(n) \ -void *alloc_##n(unsigned bits); -alloc_n_proto(0) -alloc_n_proto(1) - -#define alloc_n_gen(n) \ -void * \ -alloc_##n(unsigned bits) \ -{ \ - void *p; \ - \ - if (bits == 0) \ - p = mallocx(1, 0); \ - else { \ - switch (bits & 0x1U) { \ - case 0: \ - p = (alloc_0(bits >> 1)); \ - break; \ - case 1: \ - p = (alloc_1(bits >> 1)); \ - break; \ - default: not_reached(); \ - } \ - } \ - /* Intentionally sabotage tail call optimization. 
*/ \ - assert_ptr_not_null(p, "Unexpected mallocx() failure"); \ - return (p); \ -} diff --git a/lib/jemalloc-3.6.0/test/unit/prof_accum_a.c b/lib/jemalloc-3.6.0/test/unit/prof_accum_a.c deleted file mode 100644 index 42ad521..0000000 --- a/lib/jemalloc-3.6.0/test/unit/prof_accum_a.c +++ /dev/null @@ -1,3 +0,0 @@ -#include "prof_accum.h" - -alloc_n_gen(0) diff --git a/lib/jemalloc-3.6.0/test/unit/prof_accum_b.c b/lib/jemalloc-3.6.0/test/unit/prof_accum_b.c deleted file mode 100644 index 60d9dab..0000000 --- a/lib/jemalloc-3.6.0/test/unit/prof_accum_b.c +++ /dev/null @@ -1,3 +0,0 @@ -#include "prof_accum.h" - -alloc_n_gen(1) diff --git a/lib/jemalloc-3.6.0/test/unit/prof_gdump.c b/lib/jemalloc-3.6.0/test/unit/prof_gdump.c deleted file mode 100644 index a00b105..0000000 --- a/lib/jemalloc-3.6.0/test/unit/prof_gdump.c +++ /dev/null @@ -1,56 +0,0 @@ -#include "test/jemalloc_test.h" - -#ifdef JEMALLOC_PROF -const char *malloc_conf = "prof:true,prof_active:false,prof_gdump:true"; -#endif - -static bool did_prof_dump_open; - -static int -prof_dump_open_intercept(bool propagate_err, const char *filename) -{ - int fd; - - did_prof_dump_open = true; - - fd = open("/dev/null", O_WRONLY); - assert_d_ne(fd, -1, "Unexpected open() failure"); - - return (fd); -} - -TEST_BEGIN(test_gdump) -{ - bool active; - void *p, *q; - - test_skip_if(!config_prof); - - active = true; - assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)), - 0, "Unexpected mallctl failure while activating profiling"); - - prof_dump_open = prof_dump_open_intercept; - - did_prof_dump_open = false; - p = mallocx(chunksize, 0); - assert_ptr_not_null(p, "Unexpected mallocx() failure"); - assert_true(did_prof_dump_open, "Expected a profile dump"); - - did_prof_dump_open = false; - q = mallocx(chunksize, 0); - assert_ptr_not_null(q, "Unexpected mallocx() failure"); - assert_true(did_prof_dump_open, "Expected a profile dump"); - - dallocx(p, 0); - dallocx(q, 0); -} -TEST_END - -int -main(void) -{ - - return (test( - test_gdump)); -} diff --git a/lib/jemalloc-3.6.0/test/unit/prof_idump.c b/lib/jemalloc-3.6.0/test/unit/prof_idump.c deleted file mode 100644 index bdea53e..0000000 --- a/lib/jemalloc-3.6.0/test/unit/prof_idump.c +++ /dev/null @@ -1,51 +0,0 @@ -#include "test/jemalloc_test.h" - -#ifdef JEMALLOC_PROF -const char *malloc_conf = - "prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0," - "lg_prof_interval:0"; -#endif - -static bool did_prof_dump_open; - -static int -prof_dump_open_intercept(bool propagate_err, const char *filename) -{ - int fd; - - did_prof_dump_open = true; - - fd = open("/dev/null", O_WRONLY); - assert_d_ne(fd, -1, "Unexpected open() failure"); - - return (fd); -} - -TEST_BEGIN(test_idump) -{ - bool active; - void *p; - - test_skip_if(!config_prof); - - active = true; - assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)), - 0, "Unexpected mallctl failure while activating profiling"); - - prof_dump_open = prof_dump_open_intercept; - - did_prof_dump_open = false; - p = mallocx(1, 0); - assert_ptr_not_null(p, "Unexpected mallocx() failure"); - dallocx(p, 0); - assert_true(did_prof_dump_open, "Expected a profile dump"); -} -TEST_END - -int -main(void) -{ - - return (test( - test_idump)); -} diff --git a/lib/jemalloc-3.6.0/test/unit/ql.c b/lib/jemalloc-3.6.0/test/unit/ql.c deleted file mode 100644 index 05fad45..0000000 --- a/lib/jemalloc-3.6.0/test/unit/ql.c +++ /dev/null @@ -1,209 +0,0 @@ -#include "test/jemalloc_test.h" - -/* Number of ring entries, in [2..26]. 
*/ -#define NENTRIES 9 - -typedef struct list_s list_t; -typedef ql_head(list_t) list_head_t; - -struct list_s { - ql_elm(list_t) link; - char id; -}; - -static void -test_empty_list(list_head_t *head) -{ - list_t *t; - unsigned i; - - assert_ptr_null(ql_first(head), "Unexpected element for empty list"); - assert_ptr_null(ql_last(head, link), - "Unexpected element for empty list"); - - i = 0; - ql_foreach(t, head, link) { - i++; - } - assert_u_eq(i, 0, "Unexpected element for empty list"); - - i = 0; - ql_reverse_foreach(t, head, link) { - i++; - } - assert_u_eq(i, 0, "Unexpected element for empty list"); -} - -TEST_BEGIN(test_ql_empty) -{ - list_head_t head; - - ql_new(&head); - test_empty_list(&head); -} -TEST_END - -static void -init_entries(list_t *entries, unsigned nentries) -{ - unsigned i; - - for (i = 0; i < nentries; i++) { - entries[i].id = 'a' + i; - ql_elm_new(&entries[i], link); - } -} - -static void -test_entries_list(list_head_t *head, list_t *entries, unsigned nentries) -{ - list_t *t; - unsigned i; - - assert_c_eq(ql_first(head)->id, entries[0].id, "Element id mismatch"); - assert_c_eq(ql_last(head, link)->id, entries[nentries-1].id, - "Element id mismatch"); - - i = 0; - ql_foreach(t, head, link) { - assert_c_eq(t->id, entries[i].id, "Element id mismatch"); - i++; - } - - i = 0; - ql_reverse_foreach(t, head, link) { - assert_c_eq(t->id, entries[nentries-i-1].id, - "Element id mismatch"); - i++; - } - - for (i = 0; i < nentries-1; i++) { - t = ql_next(head, &entries[i], link); - assert_c_eq(t->id, entries[i+1].id, "Element id mismatch"); - } - assert_ptr_null(ql_next(head, &entries[nentries-1], link), - "Unexpected element"); - - assert_ptr_null(ql_prev(head, &entries[0], link), "Unexpected element"); - for (i = 1; i < nentries; i++) { - t = ql_prev(head, &entries[i], link); - assert_c_eq(t->id, entries[i-1].id, "Element id mismatch"); - } -} - -TEST_BEGIN(test_ql_tail_insert) -{ - list_head_t head; - list_t entries[NENTRIES]; - unsigned i; - - ql_new(&head); - init_entries(entries, sizeof(entries)/sizeof(list_t)); - for (i = 0; i < NENTRIES; i++) - ql_tail_insert(&head, &entries[i], link); - - test_entries_list(&head, entries, NENTRIES); -} -TEST_END - -TEST_BEGIN(test_ql_tail_remove) -{ - list_head_t head; - list_t entries[NENTRIES]; - unsigned i; - - ql_new(&head); - init_entries(entries, sizeof(entries)/sizeof(list_t)); - for (i = 0; i < NENTRIES; i++) - ql_tail_insert(&head, &entries[i], link); - - for (i = 0; i < NENTRIES; i++) { - test_entries_list(&head, entries, NENTRIES-i); - ql_tail_remove(&head, list_t, link); - } - test_empty_list(&head); -} -TEST_END - -TEST_BEGIN(test_ql_head_insert) -{ - list_head_t head; - list_t entries[NENTRIES]; - unsigned i; - - ql_new(&head); - init_entries(entries, sizeof(entries)/sizeof(list_t)); - for (i = 0; i < NENTRIES; i++) - ql_head_insert(&head, &entries[NENTRIES-i-1], link); - - test_entries_list(&head, entries, NENTRIES); -} -TEST_END - -TEST_BEGIN(test_ql_head_remove) -{ - list_head_t head; - list_t entries[NENTRIES]; - unsigned i; - - ql_new(&head); - init_entries(entries, sizeof(entries)/sizeof(list_t)); - for (i = 0; i < NENTRIES; i++) - ql_head_insert(&head, &entries[NENTRIES-i-1], link); - - for (i = 0; i < NENTRIES; i++) { - test_entries_list(&head, &entries[i], NENTRIES-i); - ql_head_remove(&head, list_t, link); - } - test_empty_list(&head); -} -TEST_END - -TEST_BEGIN(test_ql_insert) -{ - list_head_t head; - list_t entries[8]; - list_t *a, *b, *c, *d, *e, *f, *g, *h; - - ql_new(&head); - init_entries(entries, 
sizeof(entries)/sizeof(list_t)); - a = &entries[0]; - b = &entries[1]; - c = &entries[2]; - d = &entries[3]; - e = &entries[4]; - f = &entries[5]; - g = &entries[6]; - h = &entries[7]; - - /* - * ql_remove(), ql_before_insert(), and ql_after_insert() are used - * internally by other macros that are already tested, so there's no - * need to test them completely. However, insertion/deletion from the - * middle of lists is not otherwise tested; do so here. - */ - ql_tail_insert(&head, f, link); - ql_before_insert(&head, f, b, link); - ql_before_insert(&head, f, c, link); - ql_after_insert(f, h, link); - ql_after_insert(f, g, link); - ql_before_insert(&head, b, a, link); - ql_after_insert(c, d, link); - ql_before_insert(&head, f, e, link); - - test_entries_list(&head, entries, sizeof(entries)/sizeof(list_t)); -} -TEST_END - -int -main(void) -{ - - return (test( - test_ql_empty, - test_ql_tail_insert, - test_ql_tail_remove, - test_ql_head_insert, - test_ql_head_remove, - test_ql_insert)); -} diff --git a/lib/jemalloc-3.6.0/test/unit/qr.c b/lib/jemalloc-3.6.0/test/unit/qr.c deleted file mode 100644 index a2a2d90..0000000 --- a/lib/jemalloc-3.6.0/test/unit/qr.c +++ /dev/null @@ -1,248 +0,0 @@ -#include "test/jemalloc_test.h" - -/* Number of ring entries, in [2..26]. */ -#define NENTRIES 9 -/* Split index, in [1..NENTRIES). */ -#define SPLIT_INDEX 5 - -typedef struct ring_s ring_t; - -struct ring_s { - qr(ring_t) link; - char id; -}; - -static void -init_entries(ring_t *entries) -{ - unsigned i; - - for (i = 0; i < NENTRIES; i++) { - qr_new(&entries[i], link); - entries[i].id = 'a' + i; - } -} - -static void -test_independent_entries(ring_t *entries) -{ - ring_t *t; - unsigned i, j; - - for (i = 0; i < NENTRIES; i++) { - j = 0; - qr_foreach(t, &entries[i], link) { - j++; - } - assert_u_eq(j, 1, - "Iteration over single-element ring should visit precisely " - "one element"); - } - for (i = 0; i < NENTRIES; i++) { - j = 0; - qr_reverse_foreach(t, &entries[i], link) { - j++; - } - assert_u_eq(j, 1, - "Iteration over single-element ring should visit precisely " - "one element"); - } - for (i = 0; i < NENTRIES; i++) { - t = qr_next(&entries[i], link); - assert_ptr_eq(t, &entries[i], - "Next element in single-element ring should be same as " - "current element"); - } - for (i = 0; i < NENTRIES; i++) { - t = qr_prev(&entries[i], link); - assert_ptr_eq(t, &entries[i], - "Previous element in single-element ring should be same as " - "current element"); - } -} - -TEST_BEGIN(test_qr_one) -{ - ring_t entries[NENTRIES]; - - init_entries(entries); - test_independent_entries(entries); -} -TEST_END - -static void -test_entries_ring(ring_t *entries) -{ - ring_t *t; - unsigned i, j; - - for (i = 0; i < NENTRIES; i++) { - j = 0; - qr_foreach(t, &entries[i], link) { - assert_c_eq(t->id, entries[(i+j) % NENTRIES].id, - "Element id mismatch"); - j++; - } - } - for (i = 0; i < NENTRIES; i++) { - j = 0; - qr_reverse_foreach(t, &entries[i], link) { - assert_c_eq(t->id, entries[(NENTRIES+i-j-1) % - NENTRIES].id, "Element id mismatch"); - j++; - } - } - for (i = 0; i < NENTRIES; i++) { - t = qr_next(&entries[i], link); - assert_c_eq(t->id, entries[(i+1) % NENTRIES].id, - "Element id mismatch"); - } - for (i = 0; i < NENTRIES; i++) { - t = qr_prev(&entries[i], link); - assert_c_eq(t->id, entries[(NENTRIES+i-1) % NENTRIES].id, - "Element id mismatch"); - } -} - -TEST_BEGIN(test_qr_after_insert) -{ - ring_t entries[NENTRIES]; - unsigned i; - - init_entries(entries); - for (i = 1; i < NENTRIES; i++) - qr_after_insert(&entries[i 
- 1], &entries[i], link); - test_entries_ring(entries); -} -TEST_END - -TEST_BEGIN(test_qr_remove) -{ - ring_t entries[NENTRIES]; - ring_t *t; - unsigned i, j; - - init_entries(entries); - for (i = 1; i < NENTRIES; i++) - qr_after_insert(&entries[i - 1], &entries[i], link); - - for (i = 0; i < NENTRIES; i++) { - j = 0; - qr_foreach(t, &entries[i], link) { - assert_c_eq(t->id, entries[i+j].id, - "Element id mismatch"); - j++; - } - j = 0; - qr_reverse_foreach(t, &entries[i], link) { - assert_c_eq(t->id, entries[NENTRIES - 1 - j].id, - "Element id mismatch"); - j++; - } - qr_remove(&entries[i], link); - } - test_independent_entries(entries); -} -TEST_END - -TEST_BEGIN(test_qr_before_insert) -{ - ring_t entries[NENTRIES]; - ring_t *t; - unsigned i, j; - - init_entries(entries); - for (i = 1; i < NENTRIES; i++) - qr_before_insert(&entries[i - 1], &entries[i], link); - for (i = 0; i < NENTRIES; i++) { - j = 0; - qr_foreach(t, &entries[i], link) { - assert_c_eq(t->id, entries[(NENTRIES+i-j) % - NENTRIES].id, "Element id mismatch"); - j++; - } - } - for (i = 0; i < NENTRIES; i++) { - j = 0; - qr_reverse_foreach(t, &entries[i], link) { - assert_c_eq(t->id, entries[(i+j+1) % NENTRIES].id, - "Element id mismatch"); - j++; - } - } - for (i = 0; i < NENTRIES; i++) { - t = qr_next(&entries[i], link); - assert_c_eq(t->id, entries[(NENTRIES+i-1) % NENTRIES].id, - "Element id mismatch"); - } - for (i = 0; i < NENTRIES; i++) { - t = qr_prev(&entries[i], link); - assert_c_eq(t->id, entries[(i+1) % NENTRIES].id, - "Element id mismatch"); - } -} -TEST_END - -static void -test_split_entries(ring_t *entries) -{ - ring_t *t; - unsigned i, j; - - for (i = 0; i < NENTRIES; i++) { - j = 0; - qr_foreach(t, &entries[i], link) { - if (i < SPLIT_INDEX) { - assert_c_eq(t->id, - entries[(i+j) % SPLIT_INDEX].id, - "Element id mismatch"); - } else { - assert_c_eq(t->id, entries[(i+j-SPLIT_INDEX) % - (NENTRIES-SPLIT_INDEX) + SPLIT_INDEX].id, - "Element id mismatch"); - } - j++; - } - } -} - -TEST_BEGIN(test_qr_meld_split) -{ - ring_t entries[NENTRIES]; - unsigned i; - - init_entries(entries); - for (i = 1; i < NENTRIES; i++) - qr_after_insert(&entries[i - 1], &entries[i], link); - - qr_split(&entries[0], &entries[SPLIT_INDEX], link); - test_split_entries(entries); - - qr_meld(&entries[0], &entries[SPLIT_INDEX], link); - test_entries_ring(entries); - - qr_meld(&entries[0], &entries[SPLIT_INDEX], link); - test_split_entries(entries); - - qr_split(&entries[0], &entries[SPLIT_INDEX], link); - test_entries_ring(entries); - - qr_split(&entries[0], &entries[0], link); - test_entries_ring(entries); - - qr_meld(&entries[0], &entries[0], link); - test_entries_ring(entries); -} -TEST_END - -int -main(void) -{ - - return (test( - test_qr_one, - test_qr_after_insert, - test_qr_remove, - test_qr_before_insert, - test_qr_meld_split)); -} diff --git a/lib/jemalloc-3.6.0/test/unit/quarantine.c b/lib/jemalloc-3.6.0/test/unit/quarantine.c deleted file mode 100644 index bbd48a5..0000000 --- a/lib/jemalloc-3.6.0/test/unit/quarantine.c +++ /dev/null @@ -1,108 +0,0 @@ -#include "test/jemalloc_test.h" - -#define QUARANTINE_SIZE 8192 -#define STRINGIFY_HELPER(x) #x -#define STRINGIFY(x) STRINGIFY_HELPER(x) - -#ifdef JEMALLOC_FILL -const char *malloc_conf = "abort:false,junk:true,redzone:true,quarantine:" - STRINGIFY(QUARANTINE_SIZE); -#endif - -void -quarantine_clear(void) -{ - void *p; - - p = mallocx(QUARANTINE_SIZE*2, 0); - assert_ptr_not_null(p, "Unexpected mallocx() failure"); - dallocx(p, 0); -} - -TEST_BEGIN(test_quarantine) -{ -#define SZ 
ZU(256) -#define NQUARANTINED (QUARANTINE_SIZE/SZ) - void *quarantined[NQUARANTINED+1]; - size_t i, j; - - test_skip_if(!config_fill); - - assert_zu_eq(nallocx(SZ, 0), SZ, - "SZ=%zu does not precisely equal a size class", SZ); - - quarantine_clear(); - - /* - * Allocate enough regions to completely fill the quarantine, plus one - * more. The last iteration occurs with a completely full quarantine, - * but no regions should be drained from the quarantine until the last - * deallocation occurs. Therefore no region recycling should occur - * until after this loop completes. - */ - for (i = 0; i < NQUARANTINED+1; i++) { - void *p = mallocx(SZ, 0); - assert_ptr_not_null(p, "Unexpected mallocx() failure"); - quarantined[i] = p; - dallocx(p, 0); - for (j = 0; j < i; j++) { - assert_ptr_ne(p, quarantined[j], - "Quarantined region recycled too early; " - "i=%zu, j=%zu", i, j); - } - } -#undef NQUARANTINED -#undef SZ -} -TEST_END - -static bool detected_redzone_corruption; - -static void -arena_redzone_corruption_replacement(void *ptr, size_t usize, bool after, - size_t offset, uint8_t byte) -{ - - detected_redzone_corruption = true; -} - -TEST_BEGIN(test_quarantine_redzone) -{ - char *s; - arena_redzone_corruption_t *arena_redzone_corruption_orig; - - test_skip_if(!config_fill); - - arena_redzone_corruption_orig = arena_redzone_corruption; - arena_redzone_corruption = arena_redzone_corruption_replacement; - - /* Test underflow. */ - detected_redzone_corruption = false; - s = (char *)mallocx(1, 0); - assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); - s[-1] = 0xbb; - dallocx(s, 0); - assert_true(detected_redzone_corruption, - "Did not detect redzone corruption"); - - /* Test overflow. */ - detected_redzone_corruption = false; - s = (char *)mallocx(1, 0); - assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); - s[sallocx(s, 0)] = 0xbb; - dallocx(s, 0); - assert_true(detected_redzone_corruption, - "Did not detect redzone corruption"); - - arena_redzone_corruption = arena_redzone_corruption_orig; -} -TEST_END - -int -main(void) -{ - - return (test( - test_quarantine, - test_quarantine_redzone)); -} diff --git a/lib/jemalloc-3.6.0/test/unit/rb.c b/lib/jemalloc-3.6.0/test/unit/rb.c deleted file mode 100644 index b737485..0000000 --- a/lib/jemalloc-3.6.0/test/unit/rb.c +++ /dev/null @@ -1,333 +0,0 @@ -#include "test/jemalloc_test.h" - -#define rbtn_black_height(a_type, a_field, a_rbt, r_height) do { \ - a_type *rbp_bh_t; \ - for (rbp_bh_t = (a_rbt)->rbt_root, (r_height) = 0; \ - rbp_bh_t != &(a_rbt)->rbt_nil; \ - rbp_bh_t = rbtn_left_get(a_type, a_field, rbp_bh_t)) { \ - if (rbtn_red_get(a_type, a_field, rbp_bh_t) == false) { \ - (r_height)++; \ - } \ - } \ -} while (0) - -typedef struct node_s node_t; - -struct node_s { -#define NODE_MAGIC 0x9823af7e - uint32_t magic; - rb_node(node_t) link; - uint64_t key; -}; - -static int -node_cmp(node_t *a, node_t *b) { - int ret; - - assert_u32_eq(a->magic, NODE_MAGIC, "Bad magic"); - assert_u32_eq(b->magic, NODE_MAGIC, "Bad magic"); - - ret = (a->key > b->key) - (a->key < b->key); - if (ret == 0) { - /* - * Duplicates are not allowed in the tree, so force an - * arbitrary ordering for non-identical items with equal keys. 
- */ - ret = (((uintptr_t)a) > ((uintptr_t)b)) - - (((uintptr_t)a) < ((uintptr_t)b)); - } - return (ret); -} - -typedef rb_tree(node_t) tree_t; -rb_gen(static, tree_, tree_t, node_t, link, node_cmp); - -TEST_BEGIN(test_rb_empty) -{ - tree_t tree; - node_t key; - - tree_new(&tree); - - assert_ptr_null(tree_first(&tree), "Unexpected node"); - assert_ptr_null(tree_last(&tree), "Unexpected node"); - - key.key = 0; - key.magic = NODE_MAGIC; - assert_ptr_null(tree_search(&tree, &key), "Unexpected node"); - - key.key = 0; - key.magic = NODE_MAGIC; - assert_ptr_null(tree_nsearch(&tree, &key), "Unexpected node"); - - key.key = 0; - key.magic = NODE_MAGIC; - assert_ptr_null(tree_psearch(&tree, &key), "Unexpected node"); -} -TEST_END - -static unsigned -tree_recurse(node_t *node, unsigned black_height, unsigned black_depth, - node_t *nil) -{ - unsigned ret = 0; - node_t *left_node = rbtn_left_get(node_t, link, node); - node_t *right_node = rbtn_right_get(node_t, link, node); - - if (rbtn_red_get(node_t, link, node) == false) - black_depth++; - - /* Red nodes must be interleaved with black nodes. */ - if (rbtn_red_get(node_t, link, node)) { - assert_false(rbtn_red_get(node_t, link, left_node), - "Node should be black"); - assert_false(rbtn_red_get(node_t, link, right_node), - "Node should be black"); - } - - if (node == nil) - return (ret); - /* Self. */ - assert_u32_eq(node->magic, NODE_MAGIC, "Bad magic"); - - /* Left subtree. */ - if (left_node != nil) - ret += tree_recurse(left_node, black_height, black_depth, nil); - else - ret += (black_depth != black_height); - - /* Right subtree. */ - if (right_node != nil) - ret += tree_recurse(right_node, black_height, black_depth, nil); - else - ret += (black_depth != black_height); - - return (ret); -} - -static node_t * -tree_iterate_cb(tree_t *tree, node_t *node, void *data) -{ - unsigned *i = (unsigned *)data; - node_t *search_node; - - assert_u32_eq(node->magic, NODE_MAGIC, "Bad magic"); - - /* Test rb_search(). */ - search_node = tree_search(tree, node); - assert_ptr_eq(search_node, node, - "tree_search() returned unexpected node"); - - /* Test rb_nsearch(). */ - search_node = tree_nsearch(tree, node); - assert_ptr_eq(search_node, node, - "tree_nsearch() returned unexpected node"); - - /* Test rb_psearch(). */ - search_node = tree_psearch(tree, node); - assert_ptr_eq(search_node, node, - "tree_psearch() returned unexpected node"); - - (*i)++; - - return (NULL); -} - -static unsigned -tree_iterate(tree_t *tree) -{ - unsigned i; - - i = 0; - tree_iter(tree, NULL, tree_iterate_cb, (void *)&i); - - return (i); -} - -static unsigned -tree_iterate_reverse(tree_t *tree) -{ - unsigned i; - - i = 0; - tree_reverse_iter(tree, NULL, tree_iterate_cb, (void *)&i); - - return (i); -} - -static void -node_remove(tree_t *tree, node_t *node, unsigned nnodes) -{ - node_t *search_node; - unsigned black_height, imbalances; - - tree_remove(tree, node); - - /* Test rb_nsearch(). */ - search_node = tree_nsearch(tree, node); - if (search_node != NULL) { - assert_u64_ge(search_node->key, node->key, - "Key ordering error"); - } - - /* Test rb_psearch(). 
*/ - search_node = tree_psearch(tree, node); - if (search_node != NULL) { - assert_u64_le(search_node->key, node->key, - "Key ordering error"); - } - - node->magic = 0; - - rbtn_black_height(node_t, link, tree, black_height); - imbalances = tree_recurse(tree->rbt_root, black_height, 0, - &(tree->rbt_nil)); - assert_u_eq(imbalances, 0, "Tree is unbalanced"); - assert_u_eq(tree_iterate(tree), nnodes-1, - "Unexpected node iteration count"); - assert_u_eq(tree_iterate_reverse(tree), nnodes-1, - "Unexpected node iteration count"); -} - -static node_t * -remove_iterate_cb(tree_t *tree, node_t *node, void *data) -{ - unsigned *nnodes = (unsigned *)data; - node_t *ret = tree_next(tree, node); - - node_remove(tree, node, *nnodes); - - return (ret); -} - -static node_t * -remove_reverse_iterate_cb(tree_t *tree, node_t *node, void *data) -{ - unsigned *nnodes = (unsigned *)data; - node_t *ret = tree_prev(tree, node); - - node_remove(tree, node, *nnodes); - - return (ret); -} - -TEST_BEGIN(test_rb_random) -{ -#define NNODES 25 -#define NBAGS 250 -#define SEED 42 - sfmt_t *sfmt; - uint64_t bag[NNODES]; - tree_t tree; - node_t nodes[NNODES]; - unsigned i, j, k, black_height, imbalances; - - sfmt = init_gen_rand(SEED); - for (i = 0; i < NBAGS; i++) { - switch (i) { - case 0: - /* Insert in order. */ - for (j = 0; j < NNODES; j++) - bag[j] = j; - break; - case 1: - /* Insert in reverse order. */ - for (j = 0; j < NNODES; j++) - bag[j] = NNODES - j - 1; - break; - default: - for (j = 0; j < NNODES; j++) - bag[j] = gen_rand64_range(sfmt, NNODES); - } - - for (j = 1; j <= NNODES; j++) { - /* Initialize tree and nodes. */ - tree_new(&tree); - tree.rbt_nil.magic = 0; - for (k = 0; k < j; k++) { - nodes[k].magic = NODE_MAGIC; - nodes[k].key = bag[k]; - } - - /* Insert nodes. */ - for (k = 0; k < j; k++) { - tree_insert(&tree, &nodes[k]); - - rbtn_black_height(node_t, link, &tree, - black_height); - imbalances = tree_recurse(tree.rbt_root, - black_height, 0, &(tree.rbt_nil)); - assert_u_eq(imbalances, 0, - "Tree is unbalanced"); - - assert_u_eq(tree_iterate(&tree), k+1, - "Unexpected node iteration count"); - assert_u_eq(tree_iterate_reverse(&tree), k+1, - "Unexpected node iteration count"); - - assert_ptr_not_null(tree_first(&tree), - "Tree should not be empty"); - assert_ptr_not_null(tree_last(&tree), - "Tree should not be empty"); - - tree_next(&tree, &nodes[k]); - tree_prev(&tree, &nodes[k]); - } - - /* Remove nodes. 
*/ - switch (i % 4) { - case 0: - for (k = 0; k < j; k++) - node_remove(&tree, &nodes[k], j - k); - break; - case 1: - for (k = j; k > 0; k--) - node_remove(&tree, &nodes[k-1], k); - break; - case 2: { - node_t *start; - unsigned nnodes = j; - - start = NULL; - do { - start = tree_iter(&tree, start, - remove_iterate_cb, (void *)&nnodes); - nnodes--; - } while (start != NULL); - assert_u_eq(nnodes, 0, - "Removal terminated early"); - break; - } case 3: { - node_t *start; - unsigned nnodes = j; - - start = NULL; - do { - start = tree_reverse_iter(&tree, start, - remove_reverse_iterate_cb, - (void *)&nnodes); - nnodes--; - } while (start != NULL); - assert_u_eq(nnodes, 0, - "Removal terminated early"); - break; - } default: - not_reached(); - } - } - } - fini_gen_rand(sfmt); -#undef NNODES -#undef NBAGS -#undef SEED -} -TEST_END - -int -main(void) -{ - - return (test( - test_rb_empty, - test_rb_random)); -} diff --git a/lib/jemalloc-3.6.0/test/unit/rtree.c b/lib/jemalloc-3.6.0/test/unit/rtree.c deleted file mode 100644 index 5463055..0000000 --- a/lib/jemalloc-3.6.0/test/unit/rtree.c +++ /dev/null @@ -1,118 +0,0 @@ -#include "test/jemalloc_test.h" - -TEST_BEGIN(test_rtree_get_empty) -{ - unsigned i; - - for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) { - rtree_t *rtree = rtree_new(i, imalloc, idalloc); - assert_u_eq(rtree_get(rtree, 0), 0, - "rtree_get() should return NULL for empty tree"); - rtree_delete(rtree); - } -} -TEST_END - -TEST_BEGIN(test_rtree_extrema) -{ - unsigned i; - - for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) { - rtree_t *rtree = rtree_new(i, imalloc, idalloc); - - rtree_set(rtree, 0, 1); - assert_u_eq(rtree_get(rtree, 0), 1, - "rtree_get() should return previously set value"); - - rtree_set(rtree, ~((uintptr_t)0), 1); - assert_u_eq(rtree_get(rtree, ~((uintptr_t)0)), 1, - "rtree_get() should return previously set value"); - - rtree_delete(rtree); - } -} -TEST_END - -TEST_BEGIN(test_rtree_bits) -{ - unsigned i, j, k; - - for (i = 1; i < (sizeof(uintptr_t) << 3); i++) { - uintptr_t keys[] = {0, 1, - (((uintptr_t)1) << (sizeof(uintptr_t)*8-i)) - 1}; - rtree_t *rtree = rtree_new(i, imalloc, idalloc); - - for (j = 0; j < sizeof(keys)/sizeof(uintptr_t); j++) { - rtree_set(rtree, keys[j], 1); - for (k = 0; k < sizeof(keys)/sizeof(uintptr_t); k++) { - assert_u_eq(rtree_get(rtree, keys[k]), 1, - "rtree_get() should return previously set " - "value and ignore insignificant key bits; " - "i=%u, j=%u, k=%u, set key=%#"PRIxPTR", " - "get key=%#"PRIxPTR, i, j, k, keys[j], - keys[k]); - } - assert_u_eq(rtree_get(rtree, - (((uintptr_t)1) << (sizeof(uintptr_t)*8-i))), 0, - "Only leftmost rtree leaf should be set; " - "i=%u, j=%u", i, j); - rtree_set(rtree, keys[j], 0); - } - - rtree_delete(rtree); - } -} -TEST_END - -TEST_BEGIN(test_rtree_random) -{ - unsigned i; - sfmt_t *sfmt; -#define NSET 100 -#define SEED 42 - - sfmt = init_gen_rand(SEED); - for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) { - rtree_t *rtree = rtree_new(i, imalloc, idalloc); - uintptr_t keys[NSET]; - unsigned j; - - for (j = 0; j < NSET; j++) { - keys[j] = (uintptr_t)gen_rand64(sfmt); - rtree_set(rtree, keys[j], 1); - assert_u_eq(rtree_get(rtree, keys[j]), 1, - "rtree_get() should return previously set value"); - } - for (j = 0; j < NSET; j++) { - assert_u_eq(rtree_get(rtree, keys[j]), 1, - "rtree_get() should return previously set value"); - } - - for (j = 0; j < NSET; j++) { - rtree_set(rtree, keys[j], 0); - assert_u_eq(rtree_get(rtree, keys[j]), 0, - "rtree_get() should return previously set value"); - } - for (j 
= 0; j < NSET; j++) { - assert_u_eq(rtree_get(rtree, keys[j]), 0, - "rtree_get() should return previously set value"); - } - - rtree_delete(rtree); - } - fini_gen_rand(sfmt); -#undef NSET -#undef SEED -} -TEST_END - -int -main(void) -{ - - return (test( - test_rtree_get_empty, - test_rtree_extrema, - test_rtree_bits, - test_rtree_random)); -} diff --git a/lib/jemalloc-3.6.0/test/unit/stats.c b/lib/jemalloc-3.6.0/test/unit/stats.c deleted file mode 100644 index 03a55c7..0000000 --- a/lib/jemalloc-3.6.0/test/unit/stats.c +++ /dev/null @@ -1,380 +0,0 @@ -#include "test/jemalloc_test.h" - -TEST_BEGIN(test_stats_summary) -{ - size_t *cactive; - size_t sz, allocated, active, mapped; - int expected = config_stats ? 0 : ENOENT; - - sz = sizeof(cactive); - assert_d_eq(mallctl("stats.cactive", &cactive, &sz, NULL, 0), expected, - "Unexpected mallctl() result"); - - sz = sizeof(size_t); - assert_d_eq(mallctl("stats.allocated", &allocated, &sz, NULL, 0), - expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.active", &active, &sz, NULL, 0), expected, - "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.mapped", &mapped, &sz, NULL, 0), expected, - "Unexpected mallctl() result"); - - if (config_stats) { - assert_zu_le(active, *cactive, - "active should be no larger than cactive"); - assert_zu_le(allocated, active, - "allocated should be no larger than active"); - assert_zu_le(active, mapped, - "active should be no larger than mapped"); - } -} -TEST_END - -TEST_BEGIN(test_stats_chunks) -{ - size_t current, high; - uint64_t total; - size_t sz; - int expected = config_stats ? 0 : ENOENT; - - sz = sizeof(size_t); - assert_d_eq(mallctl("stats.chunks.current", ¤t, &sz, NULL, 0), - expected, "Unexpected mallctl() result"); - sz = sizeof(uint64_t); - assert_d_eq(mallctl("stats.chunks.total", &total, &sz, NULL, 0), - expected, "Unexpected mallctl() result"); - sz = sizeof(size_t); - assert_d_eq(mallctl("stats.chunks.high", &high, &sz, NULL, 0), expected, - "Unexpected mallctl() result"); - - if (config_stats) { - assert_zu_le(current, high, - "current should be no larger than high"); - assert_u64_le((uint64_t)high, total, - "high should be no larger than total"); - } -} -TEST_END - -TEST_BEGIN(test_stats_huge) -{ - void *p; - uint64_t epoch; - size_t allocated; - uint64_t nmalloc, ndalloc; - size_t sz; - int expected = config_stats ? 0 : ENOENT; - - p = mallocx(arena_maxclass+1, 0); - assert_ptr_not_null(p, "Unexpected mallocx() failure"); - - assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0, - "Unexpected mallctl() failure"); - - sz = sizeof(size_t); - assert_d_eq(mallctl("stats.huge.allocated", &allocated, &sz, NULL, 0), - expected, "Unexpected mallctl() result"); - sz = sizeof(uint64_t); - assert_d_eq(mallctl("stats.huge.nmalloc", &nmalloc, &sz, NULL, 0), - expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.huge.ndalloc", &ndalloc, &sz, NULL, 0), - expected, "Unexpected mallctl() result"); - - if (config_stats) { - assert_zu_gt(allocated, 0, - "allocated should be greater than zero"); - assert_u64_ge(nmalloc, ndalloc, - "nmalloc should be at least as large as ndalloc"); - } - - dallocx(p, 0); -} -TEST_END - -TEST_BEGIN(test_stats_arenas_summary) -{ - unsigned arena; - void *small, *large; - uint64_t epoch; - size_t sz; - int expected = config_stats ? 
0 : ENOENT; - size_t mapped; - uint64_t npurge, nmadvise, purged; - - arena = 0; - assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)), - 0, "Unexpected mallctl() failure"); - - small = mallocx(SMALL_MAXCLASS, 0); - assert_ptr_not_null(small, "Unexpected mallocx() failure"); - large = mallocx(arena_maxclass, 0); - assert_ptr_not_null(large, "Unexpected mallocx() failure"); - - assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, - "Unexpected mallctl() failure"); - - assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0, - "Unexpected mallctl() failure"); - - sz = sizeof(size_t); - assert_d_eq(mallctl("stats.arenas.0.mapped", &mapped, &sz, NULL, 0), - expected, "Unexepected mallctl() result"); - sz = sizeof(uint64_t); - assert_d_eq(mallctl("stats.arenas.0.npurge", &npurge, &sz, NULL, 0), - expected, "Unexepected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.nmadvise", &nmadvise, &sz, NULL, 0), - expected, "Unexepected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.purged", &purged, &sz, NULL, 0), - expected, "Unexepected mallctl() result"); - - if (config_stats) { - assert_u64_gt(npurge, 0, - "At least one purge should have occurred"); - assert_u64_le(nmadvise, purged, - "nmadvise should be no greater than purged"); - } - - dallocx(small, 0); - dallocx(large, 0); -} -TEST_END - -void * -thd_start(void *arg) -{ - - return (NULL); -} - -static void -no_lazy_lock(void) -{ - thd_t thd; - - thd_create(&thd, thd_start, NULL); - thd_join(thd, NULL); -} - -TEST_BEGIN(test_stats_arenas_small) -{ - unsigned arena; - void *p; - size_t sz, allocated; - uint64_t epoch, nmalloc, ndalloc, nrequests; - int expected = config_stats ? 0 : ENOENT; - - no_lazy_lock(); /* Lazy locking would dodge tcache testing. */ - - arena = 0; - assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)), - 0, "Unexpected mallctl() failure"); - - p = mallocx(SMALL_MAXCLASS, 0); - assert_ptr_not_null(p, "Unexpected mallocx() failure"); - - assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0), - config_tcache ? 0 : ENOENT, "Unexpected mallctl() result"); - - assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0, - "Unexpected mallctl() failure"); - - sz = sizeof(size_t); - assert_d_eq(mallctl("stats.arenas.0.small.allocated", &allocated, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - sz = sizeof(uint64_t); - assert_d_eq(mallctl("stats.arenas.0.small.nmalloc", &nmalloc, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.small.ndalloc", &ndalloc, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.small.nrequests", &nrequests, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - - if (config_stats) { - assert_zu_gt(allocated, 0, - "allocated should be greater than zero"); - assert_u64_gt(nmalloc, 0, - "nmalloc should be no greater than zero"); - assert_u64_ge(nmalloc, ndalloc, - "nmalloc should be at least as large as ndalloc"); - assert_u64_gt(nrequests, 0, - "nrequests should be greater than zero"); - } - - dallocx(p, 0); -} -TEST_END - -TEST_BEGIN(test_stats_arenas_large) -{ - unsigned arena; - void *p; - size_t sz, allocated; - uint64_t epoch, nmalloc, ndalloc, nrequests; - int expected = config_stats ? 
0 : ENOENT; - - arena = 0; - assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)), - 0, "Unexpected mallctl() failure"); - - p = mallocx(arena_maxclass, 0); - assert_ptr_not_null(p, "Unexpected mallocx() failure"); - - assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0, - "Unexpected mallctl() failure"); - - sz = sizeof(size_t); - assert_d_eq(mallctl("stats.arenas.0.large.allocated", &allocated, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - sz = sizeof(uint64_t); - assert_d_eq(mallctl("stats.arenas.0.large.nmalloc", &nmalloc, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.large.ndalloc", &ndalloc, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.large.nrequests", &nrequests, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - - if (config_stats) { - assert_zu_gt(allocated, 0, - "allocated should be greater than zero"); - assert_zu_gt(nmalloc, 0, - "nmalloc should be greater than zero"); - assert_zu_ge(nmalloc, ndalloc, - "nmalloc should be at least as large as ndalloc"); - assert_zu_gt(nrequests, 0, - "nrequests should be greater than zero"); - } - - dallocx(p, 0); -} -TEST_END - -TEST_BEGIN(test_stats_arenas_bins) -{ - unsigned arena; - void *p; - size_t sz, allocated, curruns; - uint64_t epoch, nmalloc, ndalloc, nrequests, nfills, nflushes; - uint64_t nruns, nreruns; - int expected = config_stats ? 0 : ENOENT; - - arena = 0; - assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)), - 0, "Unexpected mallctl() failure"); - - p = mallocx(arena_bin_info[0].reg_size, 0); - assert_ptr_not_null(p, "Unexpected mallocx() failure"); - - assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0), - config_tcache ? 0 : ENOENT, "Unexpected mallctl() result"); - - assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0, - "Unexpected mallctl() failure"); - - sz = sizeof(size_t); - assert_d_eq(mallctl("stats.arenas.0.bins.0.allocated", &allocated, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - sz = sizeof(uint64_t); - assert_d_eq(mallctl("stats.arenas.0.bins.0.nmalloc", &nmalloc, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.bins.0.ndalloc", &ndalloc, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.bins.0.nrequests", &nrequests, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - - assert_d_eq(mallctl("stats.arenas.0.bins.0.nfills", &nfills, &sz, - NULL, 0), config_tcache ? expected : ENOENT, - "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.bins.0.nflushes", &nflushes, &sz, - NULL, 0), config_tcache ? 
expected : ENOENT, - "Unexpected mallctl() result"); - - assert_d_eq(mallctl("stats.arenas.0.bins.0.nruns", &nruns, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.bins.0.nreruns", &nreruns, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - sz = sizeof(size_t); - assert_d_eq(mallctl("stats.arenas.0.bins.0.curruns", &curruns, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - - if (config_stats) { - assert_zu_gt(allocated, 0, - "allocated should be greater than zero"); - assert_u64_gt(nmalloc, 0, - "nmalloc should be greater than zero"); - assert_u64_ge(nmalloc, ndalloc, - "nmalloc should be at least as large as ndalloc"); - assert_u64_gt(nrequests, 0, - "nrequests should be greater than zero"); - if (config_tcache) { - assert_u64_gt(nfills, 0, - "At least one fill should have occurred"); - assert_u64_gt(nflushes, 0, - "At least one flush should have occurred"); - } - assert_u64_gt(nruns, 0, - "At least one run should have been allocated"); - assert_zu_gt(curruns, 0, - "At least one run should be currently allocated"); - } - - dallocx(p, 0); -} -TEST_END - -TEST_BEGIN(test_stats_arenas_lruns) -{ - unsigned arena; - void *p; - uint64_t epoch, nmalloc, ndalloc, nrequests; - size_t curruns, sz; - int expected = config_stats ? 0 : ENOENT; - - arena = 0; - assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)), - 0, "Unexpected mallctl() failure"); - - p = mallocx(SMALL_MAXCLASS+1, 0); - assert_ptr_not_null(p, "Unexpected mallocx() failure"); - - assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0, - "Unexpected mallctl() failure"); - - sz = sizeof(uint64_t); - assert_d_eq(mallctl("stats.arenas.0.lruns.0.nmalloc", &nmalloc, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.lruns.0.ndalloc", &ndalloc, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.lruns.0.nrequests", &nrequests, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - sz = sizeof(size_t); - assert_d_eq(mallctl("stats.arenas.0.lruns.0.curruns", &curruns, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - - if (config_stats) { - assert_u64_gt(nmalloc, 0, - "nmalloc should be greater than zero"); - assert_u64_ge(nmalloc, ndalloc, - "nmalloc should be at least as large as ndalloc"); - assert_u64_gt(nrequests, 0, - "nrequests should be greater than zero"); - assert_u64_gt(curruns, 0, - "At least one run should be currently allocated"); - } - - dallocx(p, 0); -} -TEST_END - -int -main(void) -{ - - return (test( - test_stats_summary, - test_stats_chunks, - test_stats_huge, - test_stats_arenas_summary, - test_stats_arenas_small, - test_stats_arenas_large, - test_stats_arenas_bins, - test_stats_arenas_lruns)); -} diff --git a/lib/jemalloc-3.6.0/test/unit/tsd.c b/lib/jemalloc-3.6.0/test/unit/tsd.c deleted file mode 100644 index f421c1a..0000000 --- a/lib/jemalloc-3.6.0/test/unit/tsd.c +++ /dev/null @@ -1,71 +0,0 @@ -#include "test/jemalloc_test.h" - -#define THREAD_DATA 0x72b65c10 - -typedef unsigned int data_t; - -static bool data_cleanup_executed; - -void -data_cleanup(void *arg) -{ - data_t *data = (data_t *)arg; - - assert_x_eq(*data, THREAD_DATA, - "Argument passed into cleanup function should match tsd value"); - data_cleanup_executed = true; -} - -malloc_tsd_protos(, data, data_t) -malloc_tsd_externs(data, data_t) -#define DATA_INIT 0x12345678 -malloc_tsd_data(, data, data_t, DATA_INIT) -malloc_tsd_funcs(, data, data_t, 
DATA_INIT, data_cleanup) - -static void * -thd_start(void *arg) -{ - data_t d = (data_t)(uintptr_t)arg; - assert_x_eq(*data_tsd_get(), DATA_INIT, - "Initial tsd get should return initialization value"); - - data_tsd_set(&d); - assert_x_eq(*data_tsd_get(), d, - "After tsd set, tsd get should return value that was set"); - - d = 0; - assert_x_eq(*data_tsd_get(), (data_t)(uintptr_t)arg, - "Resetting local data should have no effect on tsd"); - - return (NULL); -} - -TEST_BEGIN(test_tsd_main_thread) -{ - - thd_start((void *) 0xa5f3e329); -} -TEST_END - -TEST_BEGIN(test_tsd_sub_thread) -{ - thd_t thd; - - data_cleanup_executed = false; - thd_create(&thd, thd_start, (void *)THREAD_DATA); - thd_join(thd, NULL); - assert_true(data_cleanup_executed, - "Cleanup function should have executed"); -} -TEST_END - -int -main(void) -{ - - data_tsd_boot(); - - return (test( - test_tsd_main_thread, - test_tsd_sub_thread)); -} diff --git a/lib/jemalloc-3.6.0/test/unit/util.c b/lib/jemalloc-3.6.0/test/unit/util.c deleted file mode 100644 index dc3cfe8..0000000 --- a/lib/jemalloc-3.6.0/test/unit/util.c +++ /dev/null @@ -1,294 +0,0 @@ -#include "test/jemalloc_test.h" - -TEST_BEGIN(test_pow2_ceil) -{ - unsigned i, pow2; - size_t x; - - assert_zu_eq(pow2_ceil(0), 0, "Unexpected result"); - - for (i = 0; i < sizeof(size_t) * 8; i++) { - assert_zu_eq(pow2_ceil(ZU(1) << i), ZU(1) << i, - "Unexpected result"); - } - - for (i = 2; i < sizeof(size_t) * 8; i++) { - assert_zu_eq(pow2_ceil((ZU(1) << i) - 1), ZU(1) << i, - "Unexpected result"); - } - - for (i = 0; i < sizeof(size_t) * 8 - 1; i++) { - assert_zu_eq(pow2_ceil((ZU(1) << i) + 1), ZU(1) << (i+1), - "Unexpected result"); - } - - for (pow2 = 1; pow2 < 25; pow2++) { - for (x = (ZU(1) << (pow2-1)) + 1; x <= ZU(1) << pow2; x++) { - assert_zu_eq(pow2_ceil(x), ZU(1) << pow2, - "Unexpected result, x=%zu", x); - } - } -} -TEST_END - -TEST_BEGIN(test_malloc_strtoumax_no_endptr) -{ - int err; - - set_errno(0); - assert_ju_eq(malloc_strtoumax("0", NULL, 0), 0, "Unexpected result"); - err = get_errno(); - assert_d_eq(err, 0, "Unexpected failure"); -} -TEST_END - -TEST_BEGIN(test_malloc_strtoumax) -{ - struct test_s { - const char *input; - const char *expected_remainder; - int base; - int expected_errno; - const char *expected_errno_name; - uintmax_t expected_x; - }; -#define ERR(e) e, #e -#define UMAX(x) ((uintmax_t)x##ULL) - struct test_s tests[] = { - {"0", "0", -1, ERR(EINVAL), UINTMAX_MAX}, - {"0", "0", 1, ERR(EINVAL), UINTMAX_MAX}, - {"0", "0", 37, ERR(EINVAL), UINTMAX_MAX}, - - {"", "", 0, ERR(EINVAL), UINTMAX_MAX}, - {"+", "+", 0, ERR(EINVAL), UINTMAX_MAX}, - {"++3", "++3", 0, ERR(EINVAL), UINTMAX_MAX}, - {"-", "-", 0, ERR(EINVAL), UINTMAX_MAX}, - - {"42", "", 0, ERR(0), UMAX(42)}, - {"+42", "", 0, ERR(0), UMAX(42)}, - {"-42", "", 0, ERR(0), UMAX(-42)}, - {"042", "", 0, ERR(0), UMAX(042)}, - {"+042", "", 0, ERR(0), UMAX(042)}, - {"-042", "", 0, ERR(0), UMAX(-042)}, - {"0x42", "", 0, ERR(0), UMAX(0x42)}, - {"+0x42", "", 0, ERR(0), UMAX(0x42)}, - {"-0x42", "", 0, ERR(0), UMAX(-0x42)}, - - {"0", "", 0, ERR(0), UMAX(0)}, - {"1", "", 0, ERR(0), UMAX(1)}, - - {"42", "", 0, ERR(0), UMAX(42)}, - {" 42", "", 0, ERR(0), UMAX(42)}, - {"42 ", " ", 0, ERR(0), UMAX(42)}, - {"0x", "x", 0, ERR(0), UMAX(0)}, - {"42x", "x", 0, ERR(0), UMAX(42)}, - - {"07", "", 0, ERR(0), UMAX(7)}, - {"010", "", 0, ERR(0), UMAX(8)}, - {"08", "8", 0, ERR(0), UMAX(0)}, - {"0_", "_", 0, ERR(0), UMAX(0)}, - - {"0x", "x", 0, ERR(0), UMAX(0)}, - {"0X", "X", 0, ERR(0), UMAX(0)}, - {"0xg", "xg", 0, ERR(0), 
UMAX(0)}, - {"0XA", "", 0, ERR(0), UMAX(10)}, - - {"010", "", 10, ERR(0), UMAX(10)}, - {"0x3", "x3", 10, ERR(0), UMAX(0)}, - - {"12", "2", 2, ERR(0), UMAX(1)}, - {"78", "8", 8, ERR(0), UMAX(7)}, - {"9a", "a", 10, ERR(0), UMAX(9)}, - {"9A", "A", 10, ERR(0), UMAX(9)}, - {"fg", "g", 16, ERR(0), UMAX(15)}, - {"FG", "G", 16, ERR(0), UMAX(15)}, - {"0xfg", "g", 16, ERR(0), UMAX(15)}, - {"0XFG", "G", 16, ERR(0), UMAX(15)}, - {"z_", "_", 36, ERR(0), UMAX(35)}, - {"Z_", "_", 36, ERR(0), UMAX(35)} - }; -#undef ERR -#undef UMAX - unsigned i; - - for (i = 0; i < sizeof(tests)/sizeof(struct test_s); i++) { - struct test_s *test = &tests[i]; - int err; - uintmax_t result; - char *remainder; - - set_errno(0); - result = malloc_strtoumax(test->input, &remainder, test->base); - err = get_errno(); - assert_d_eq(err, test->expected_errno, - "Expected errno %s for \"%s\", base %d", - test->expected_errno_name, test->input, test->base); - assert_str_eq(remainder, test->expected_remainder, - "Unexpected remainder for \"%s\", base %d", - test->input, test->base); - if (err == 0) { - assert_ju_eq(result, test->expected_x, - "Unexpected result for \"%s\", base %d", - test->input, test->base); - } - } -} -TEST_END - -TEST_BEGIN(test_malloc_snprintf_truncated) -{ -#define BUFLEN 15 - char buf[BUFLEN]; - int result; - size_t len; -#define TEST(expected_str_untruncated, fmt...) do { \ - result = malloc_snprintf(buf, len, fmt); \ - assert_d_eq(strncmp(buf, expected_str_untruncated, len-1), 0, \ - "Unexpected string inequality (\"%s\" vs \"%s\")", \ - buf, expected_str_untruncated); \ - assert_d_eq(result, strlen(expected_str_untruncated), \ - "Unexpected result"); \ -} while (0) - - for (len = 1; len < BUFLEN; len++) { - TEST("012346789", "012346789"); - TEST("a0123b", "a%sb", "0123"); - TEST("a01234567", "a%s%s", "0123", "4567"); - TEST("a0123 ", "a%-6s", "0123"); - TEST("a 0123", "a%6s", "0123"); - TEST("a 012", "a%6.3s", "0123"); - TEST("a 012", "a%*.*s", 6, 3, "0123"); - TEST("a 123b", "a% db", 123); - TEST("a123b", "a%-db", 123); - TEST("a-123b", "a%-db", -123); - TEST("a+123b", "a%+db", 123); - } -#undef BUFLEN -#undef TEST -} -TEST_END - -TEST_BEGIN(test_malloc_snprintf) -{ -#define BUFLEN 128 - char buf[BUFLEN]; - int result; -#define TEST(expected_str, fmt...) 
do { \ - result = malloc_snprintf(buf, sizeof(buf), fmt); \ - assert_str_eq(buf, expected_str, "Unexpected output"); \ - assert_d_eq(result, strlen(expected_str), "Unexpected result"); \ -} while (0) - - TEST("hello", "hello"); - - TEST("50%, 100%", "50%%, %d%%", 100); - - TEST("a0123b", "a%sb", "0123"); - - TEST("a 0123b", "a%5sb", "0123"); - TEST("a 0123b", "a%*sb", 5, "0123"); - - TEST("a0123 b", "a%-5sb", "0123"); - TEST("a0123b", "a%*sb", -1, "0123"); - TEST("a0123 b", "a%*sb", -5, "0123"); - TEST("a0123 b", "a%-*sb", -5, "0123"); - - TEST("a012b", "a%.3sb", "0123"); - TEST("a012b", "a%.*sb", 3, "0123"); - TEST("a0123b", "a%.*sb", -3, "0123"); - - TEST("a 012b", "a%5.3sb", "0123"); - TEST("a 012b", "a%5.*sb", 3, "0123"); - TEST("a 012b", "a%*.3sb", 5, "0123"); - TEST("a 012b", "a%*.*sb", 5, 3, "0123"); - TEST("a 0123b", "a%*.*sb", 5, -3, "0123"); - - TEST("_abcd_", "_%x_", 0xabcd); - TEST("_0xabcd_", "_%#x_", 0xabcd); - TEST("_1234_", "_%o_", 01234); - TEST("_01234_", "_%#o_", 01234); - TEST("_1234_", "_%u_", 1234); - - TEST("_1234_", "_%d_", 1234); - TEST("_ 1234_", "_% d_", 1234); - TEST("_+1234_", "_%+d_", 1234); - TEST("_-1234_", "_%d_", -1234); - TEST("_-1234_", "_% d_", -1234); - TEST("_-1234_", "_%+d_", -1234); - - TEST("_-1234_", "_%d_", -1234); - TEST("_1234_", "_%d_", 1234); - TEST("_-1234_", "_%i_", -1234); - TEST("_1234_", "_%i_", 1234); - TEST("_01234_", "_%#o_", 01234); - TEST("_1234_", "_%u_", 1234); - TEST("_0x1234abc_", "_%#x_", 0x1234abc); - TEST("_0X1234ABC_", "_%#X_", 0x1234abc); - TEST("_c_", "_%c_", 'c'); - TEST("_string_", "_%s_", "string"); - TEST("_0x42_", "_%p_", ((void *)0x42)); - - TEST("_-1234_", "_%ld_", ((long)-1234)); - TEST("_1234_", "_%ld_", ((long)1234)); - TEST("_-1234_", "_%li_", ((long)-1234)); - TEST("_1234_", "_%li_", ((long)1234)); - TEST("_01234_", "_%#lo_", ((long)01234)); - TEST("_1234_", "_%lu_", ((long)1234)); - TEST("_0x1234abc_", "_%#lx_", ((long)0x1234abc)); - TEST("_0X1234ABC_", "_%#lX_", ((long)0x1234ABC)); - - TEST("_-1234_", "_%lld_", ((long long)-1234)); - TEST("_1234_", "_%lld_", ((long long)1234)); - TEST("_-1234_", "_%lli_", ((long long)-1234)); - TEST("_1234_", "_%lli_", ((long long)1234)); - TEST("_01234_", "_%#llo_", ((long long)01234)); - TEST("_1234_", "_%llu_", ((long long)1234)); - TEST("_0x1234abc_", "_%#llx_", ((long long)0x1234abc)); - TEST("_0X1234ABC_", "_%#llX_", ((long long)0x1234ABC)); - - TEST("_-1234_", "_%qd_", ((long long)-1234)); - TEST("_1234_", "_%qd_", ((long long)1234)); - TEST("_-1234_", "_%qi_", ((long long)-1234)); - TEST("_1234_", "_%qi_", ((long long)1234)); - TEST("_01234_", "_%#qo_", ((long long)01234)); - TEST("_1234_", "_%qu_", ((long long)1234)); - TEST("_0x1234abc_", "_%#qx_", ((long long)0x1234abc)); - TEST("_0X1234ABC_", "_%#qX_", ((long long)0x1234ABC)); - - TEST("_-1234_", "_%jd_", ((intmax_t)-1234)); - TEST("_1234_", "_%jd_", ((intmax_t)1234)); - TEST("_-1234_", "_%ji_", ((intmax_t)-1234)); - TEST("_1234_", "_%ji_", ((intmax_t)1234)); - TEST("_01234_", "_%#jo_", ((intmax_t)01234)); - TEST("_1234_", "_%ju_", ((intmax_t)1234)); - TEST("_0x1234abc_", "_%#jx_", ((intmax_t)0x1234abc)); - TEST("_0X1234ABC_", "_%#jX_", ((intmax_t)0x1234ABC)); - - TEST("_1234_", "_%td_", ((ptrdiff_t)1234)); - TEST("_-1234_", "_%td_", ((ptrdiff_t)-1234)); - TEST("_1234_", "_%ti_", ((ptrdiff_t)1234)); - TEST("_-1234_", "_%ti_", ((ptrdiff_t)-1234)); - - TEST("_-1234_", "_%zd_", ((ssize_t)-1234)); - TEST("_1234_", "_%zd_", ((ssize_t)1234)); - TEST("_-1234_", "_%zi_", ((ssize_t)-1234)); - TEST("_1234_", "_%zi_", 
((ssize_t)1234)); - TEST("_01234_", "_%#zo_", ((ssize_t)01234)); - TEST("_1234_", "_%zu_", ((ssize_t)1234)); - TEST("_0x1234abc_", "_%#zx_", ((ssize_t)0x1234abc)); - TEST("_0X1234ABC_", "_%#zX_", ((ssize_t)0x1234ABC)); -#undef BUFLEN -} -TEST_END - -int -main(void) -{ - - return (test( - test_pow2_ceil, - test_malloc_strtoumax_no_endptr, - test_malloc_strtoumax, - test_malloc_snprintf_truncated, - test_malloc_snprintf)); -} diff --git a/lib/jemalloc-3.6.0/test/unit/zero.c b/lib/jemalloc-3.6.0/test/unit/zero.c deleted file mode 100644 index 65a8f0c..0000000 --- a/lib/jemalloc-3.6.0/test/unit/zero.c +++ /dev/null @@ -1,78 +0,0 @@ -#include "test/jemalloc_test.h" - -#ifdef JEMALLOC_FILL -const char *malloc_conf = - "abort:false,junk:false,zero:true,redzone:false,quarantine:0"; -#endif - -static void -test_zero(size_t sz_min, size_t sz_max) -{ - char *s; - size_t sz_prev, sz, i; - - sz_prev = 0; - s = (char *)mallocx(sz_min, 0); - assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); - - for (sz = sallocx(s, 0); sz <= sz_max; - sz_prev = sz, sz = sallocx(s, 0)) { - if (sz_prev > 0) { - assert_c_eq(s[0], 'a', - "Previously allocated byte %zu/%zu is corrupted", - ZU(0), sz_prev); - assert_c_eq(s[sz_prev-1], 'a', - "Previously allocated byte %zu/%zu is corrupted", - sz_prev-1, sz_prev); - } - - for (i = sz_prev; i < sz; i++) { - assert_c_eq(s[i], 0x0, - "Newly allocated byte %zu/%zu isn't zero-filled", - i, sz); - s[i] = 'a'; - } - - if (xallocx(s, sz+1, 0, 0) == sz) { - s = (char *)rallocx(s, sz+1, 0); - assert_ptr_not_null((void *)s, - "Unexpected rallocx() failure"); - } - } - - dallocx(s, 0); -} - -TEST_BEGIN(test_zero_small) -{ - - test_skip_if(!config_fill); - test_zero(1, SMALL_MAXCLASS-1); -} -TEST_END - -TEST_BEGIN(test_zero_large) -{ - - test_skip_if(!config_fill); - test_zero(SMALL_MAXCLASS+1, arena_maxclass); -} -TEST_END - -TEST_BEGIN(test_zero_huge) -{ - - test_skip_if(!config_fill); - test_zero(arena_maxclass+1, chunksize*2); -} -TEST_END - -int -main(void) -{ - - return (test( - test_zero_small, - test_zero_large, - test_zero_huge)); -}