Author: jasone
Date: Fri May 13 04:03:20 2016
New Revision: 299587
URL: https://svnweb.freebsd.org/changeset/base/299587

Log:
  Update jemalloc to 4.2.0.

Added:
  head/contrib/jemalloc/include/jemalloc/internal/ph.h   (contents, props changed)
  head/contrib/jemalloc/include/jemalloc/internal/witness.h   (contents, props changed)
  head/contrib/jemalloc/src/witness.c   (contents, props changed)
Modified:
  head/contrib/jemalloc/ChangeLog
  head/contrib/jemalloc/FREEBSD-diffs
  head/contrib/jemalloc/VERSION
  head/contrib/jemalloc/doc/jemalloc.3
  head/contrib/jemalloc/include/jemalloc/internal/arena.h
  head/contrib/jemalloc/include/jemalloc/internal/base.h
  head/contrib/jemalloc/include/jemalloc/internal/bitmap.h
  head/contrib/jemalloc/include/jemalloc/internal/chunk.h
  head/contrib/jemalloc/include/jemalloc/internal/chunk_dss.h
  head/contrib/jemalloc/include/jemalloc/internal/ckh.h
  head/contrib/jemalloc/include/jemalloc/internal/ctl.h
  head/contrib/jemalloc/include/jemalloc/internal/extent.h
  head/contrib/jemalloc/include/jemalloc/internal/hash.h
  head/contrib/jemalloc/include/jemalloc/internal/huge.h
  head/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h
  head/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h
  head/contrib/jemalloc/include/jemalloc/internal/mb.h
  head/contrib/jemalloc/include/jemalloc/internal/mutex.h
  head/contrib/jemalloc/include/jemalloc/internal/nstime.h
  head/contrib/jemalloc/include/jemalloc/internal/pages.h
  head/contrib/jemalloc/include/jemalloc/internal/private_namespace.h
  head/contrib/jemalloc/include/jemalloc/internal/prof.h
  head/contrib/jemalloc/include/jemalloc/internal/rtree.h
  head/contrib/jemalloc/include/jemalloc/internal/stats.h
  head/contrib/jemalloc/include/jemalloc/internal/tcache.h
  head/contrib/jemalloc/include/jemalloc/internal/tsd.h
  head/contrib/jemalloc/include/jemalloc/internal/util.h
  head/contrib/jemalloc/include/jemalloc/internal/valgrind.h
  head/contrib/jemalloc/include/jemalloc/jemalloc.h
  head/contrib/jemalloc/src/arena.c
  head/contrib/jemalloc/src/base.c
  head/contrib/jemalloc/src/bitmap.c
  head/contrib/jemalloc/src/chunk.c
  head/contrib/jemalloc/src/chunk_dss.c
  head/contrib/jemalloc/src/chunk_mmap.c
  head/contrib/jemalloc/src/ckh.c
  head/contrib/jemalloc/src/ctl.c
  head/contrib/jemalloc/src/huge.c
  head/contrib/jemalloc/src/jemalloc.c
  head/contrib/jemalloc/src/mutex.c
  head/contrib/jemalloc/src/nstime.c
  head/contrib/jemalloc/src/pages.c
  head/contrib/jemalloc/src/prof.c
  head/contrib/jemalloc/src/quarantine.c
  head/contrib/jemalloc/src/rtree.c
  head/contrib/jemalloc/src/stats.c
  head/contrib/jemalloc/src/tcache.c
  head/contrib/jemalloc/src/tsd.c
  head/contrib/jemalloc/src/util.c
  head/lib/libc/stdlib/jemalloc/Makefile.inc

Modified: head/contrib/jemalloc/ChangeLog
==============================================================================
--- head/contrib/jemalloc/ChangeLog     Fri May 13 02:58:11 2016        (r299586)
+++ head/contrib/jemalloc/ChangeLog     Fri May 13 04:03:20 2016        (r299587)
@@ -4,6 +4,50 @@ brevity.  Much more detail can be found 
 
     https://github.com/jemalloc/jemalloc
 
+* 4.2.0 (May 12, 2016)
+
+  New features:
+  - Add the arena.<i>.reset mallctl, which makes it possible to discard all of
+    an arena's allocations in a single operation.  (@jasone)
+  - Add the stats.retained and stats.arenas.<i>.retained statistics.  (@jasone)
+  - Add the --with-version configure option.  (@jasone)
+  - Support --with-lg-page values larger than actual page size.  (@jasone)
+
+  Optimizations:
+  - Use pairing heaps rather than red-black trees for various hot data
+    structures.  (@djwatson, @jasone)
+  - Streamline fast paths of rtree operations.  (@jasone)
+  - Optimize the fast paths of calloc() and [m,d,sd]allocx().  (@jasone)
+  - Decommit unused virtual memory if the OS does not overcommit.  (@jasone)
+  - Specify MAP_NORESERVE on Linux if [heuristic] overcommit is active, in order
+    to avoid unfortunate interactions during fork(2).  (@jasone)
+
+  Bug fixes:
+  - Fix chunk accounting related to triggering gdump profiles.  (@jasone)
+  - Link against librt for clock_gettime(2) if glibc < 2.17.  (@jasone)
+  - Scale leak report summary according to sampling probability.  (@jasone)
+
+* 4.1.1 (May 3, 2016)
+
+  This bugfix release resolves a variety of mostly minor issues, though the
+  bitmap fix is critical for 64-bit Windows.
+
+  Bug fixes:
+  - Fix the linear scan version of bitmap_sfu() to shift by the proper amount
+    even when sizeof(long) is not the same as sizeof(void *), as on 64-bit
+    Windows.  (@jasone)
+  - Fix hashing functions to avoid unaligned memory accesses (and resulting
+    crashes).  This is relevant at least to some ARM-based platforms.
+    (@rkmisra)
+  - Fix fork()-related lock rank ordering reversals.  These reversals were
+    unlikely to cause deadlocks in practice except when heap profiling was
+    enabled and active.  (@jasone)
+  - Fix various chunk leaks in OOM code paths.  (@jasone)
+  - Fix malloc_stats_print() to print opt.narenas correctly.  (@jasone)
+  - Fix MSVC-specific build/test issues.  (@rustyx, @yuslepukhin)
+  - Fix a variety of test failures that were due to test fragility rather than
+    core bugs.  (@jasone)
+
 * 4.1.0 (February 28, 2016)
 
   This release is primarily about optimizations, but it also incorporates a lot
@@ -59,14 +103,14 @@ brevity.  Much more detail can be found 
   Bug fixes:
   - Fix stats.cactive accounting regression.  (@rustyx, @jasone)
  - Handle unaligned keys in hash().  This caused problems for some ARM systems.
-    (@jasone, Christopher Ferris)
+    (@jasone, @cferris1000)
   - Refactor arenas array.  In addition to fixing a fork-related deadlock, this
     makes arena lookups faster and simpler.  (@jasone)
   - Move retained memory allocation out of the default chunk allocation
     function, to a location that gets executed even if the application installs
     a custom chunk allocation function.  This resolves a virtual memory leak.
     (@buchgr)
-  - Fix a potential tsd cleanup leak.  (Christopher Ferris, @jasone)
+  - Fix a potential tsd cleanup leak.  (@cferris1000, @jasone)
   - Fix run quantization.  In practice this bug had no impact unless
     applications requested memory with alignment exceeding one page.
     (@jasone, @djwatson)
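
For context on the two headline 4.2.0 additions noted above (the arena.<i>.reset
mallctl and the stats.retained statistic): both are exercised through mallctl(3).
The following is a minimal sketch, not part of this commit, assuming a program
linked against this libc jemalloc (mallctl() is declared in <malloc_np.h> on
FreeBSD) and a build with statistics enabled; most error handling is omitted.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <malloc_np.h>	/* FreeBSD libc: declares mallctl(3) */

int
main(void)
{
	unsigned arena_ind;
	size_t indsz = sizeof(arena_ind);
	uint64_t epoch = 1;
	size_t epochsz = sizeof(epoch);
	size_t retained, retsz = sizeof(retained);
	char cmd[64];

	/* Create a fresh arena; only arenas obtained via "arenas.extend"
	 * may later be reset. */
	if (mallctl("arenas.extend", &arena_ind, &indsz, NULL, 0) != 0)
		return (1);

	/* Route this thread's allocations to the new arena. */
	mallctl("thread.arena", NULL, NULL, &arena_ind, sizeof(arena_ind));
	void *p = malloc(1 << 20);	/* intentionally discarded by the reset */
	(void)p;

	/* Flush the thread cache first, as arena.<i>.reset requires. */
	mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);

	/* Discard all of the arena's extant allocations in one operation. */
	snprintf(cmd, sizeof(cmd), "arena.%u.reset", arena_ind);
	mallctl(cmd, NULL, NULL, NULL, 0);

	/* Bump the stats epoch, then read the new retained-memory counter. */
	mallctl("epoch", &epoch, &epochsz, &epoch, sizeof(epoch));
	if (mallctl("stats.retained", &retained, &retsz, NULL, 0) == 0)
		printf("stats.retained: %zu bytes\n", retained);
	return (0);
}

Note that p (and anything else allocated from the arena) must not be touched once
the reset has run, and the thread-cache flush beforehand is required by the manual
page text added in this commit.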

Modified: head/contrib/jemalloc/FREEBSD-diffs
==============================================================================
--- head/contrib/jemalloc/FREEBSD-diffs Fri May 13 02:58:11 2016        (r299586)
+++ head/contrib/jemalloc/FREEBSD-diffs Fri May 13 04:03:20 2016        (r299587)
@@ -1,5 +1,5 @@
 diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in
-index bc5dbd1..ba182da 100644
+index c4a44e3..4626e9b 100644
 --- a/doc/jemalloc.xml.in
 +++ b/doc/jemalloc.xml.in
 @@ -53,11 +53,23 @@
@@ -27,7 +27,7 @@ index bc5dbd1..ba182da 100644
        <refsect2>
          <title>Standard API</title>
          <funcprototype>
-@@ -2905,4 +2917,18 @@ malloc_conf = "lg_chunk:24";]]></programlisting></para>
+@@ -2961,4 +2973,18 @@ malloc_conf = "lg_chunk:24";]]></programlisting></para>
     <para>The <function>posix_memalign<parameter/></function> function conforms
      to IEEE Std 1003.1-2001 (&ldquo;POSIX.1&rdquo;).</para>
    </refsect1>
@@ -47,7 +47,7 @@ index bc5dbd1..ba182da 100644
 +  </refsect1>
  </refentry>
 diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
-index 3f54391..d240256 100644
+index 51bf897..7de22ea 100644
 --- a/include/jemalloc/internal/jemalloc_internal.h.in
 +++ b/include/jemalloc/internal/jemalloc_internal.h.in
 @@ -8,6 +8,9 @@
@@ -90,10 +90,10 @@ index 2b8ca5d..42d97f2 100644
  #ifdef _WIN32
  #  include <windows.h>
 diff --git a/include/jemalloc/internal/mutex.h b/include/jemalloc/internal/mutex.h
-index f051f29..561378f 100644
+index 5221799..60ab041 100644
 --- a/include/jemalloc/internal/mutex.h
 +++ b/include/jemalloc/internal/mutex.h
-@@ -47,15 +47,13 @@ struct malloc_mutex_s {
+@@ -52,9 +52,6 @@ struct malloc_mutex_s {
  
  #ifdef JEMALLOC_LAZY_LOCK
  extern bool isthreaded;
@@ -102,19 +102,20 @@ index f051f29..561378f 100644
 -#  define isthreaded true
  #endif
  
- bool  malloc_mutex_init(malloc_mutex_t *mutex);
- void  malloc_mutex_prefork(malloc_mutex_t *mutex);
- void  malloc_mutex_postfork_parent(malloc_mutex_t *mutex);
- void  malloc_mutex_postfork_child(malloc_mutex_t *mutex);
+ bool  malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
+@@ -62,6 +59,7 @@ bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
+ void  malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex);
+ void  malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex);
+ void  malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex);
 +bool  malloc_mutex_first_thread(void);
- bool  mutex_boot(void);
+ bool  malloc_mutex_boot(void);
  
  #endif /* JEMALLOC_H_EXTERNS */
 diff --git a/include/jemalloc/internal/private_symbols.txt b/include/jemalloc/internal/private_symbols.txt
-index 5880996..6e94e03 100644
+index f2b6a55..69369c9 100644
 --- a/include/jemalloc/internal/private_symbols.txt
 +++ b/include/jemalloc/internal/private_symbols.txt
-@@ -296,7 +296,6 @@ iralloct_realign
+@@ -311,7 +311,6 @@ iralloct_realign
  isalloc
  isdalloct
  isqalloc
@@ -124,10 +125,10 @@ index 5880996..6e94e03 100644
  jemalloc_postfork_child
 diff --git a/include/jemalloc/jemalloc_FreeBSD.h b/include/jemalloc/jemalloc_FreeBSD.h
 new file mode 100644
-index 0000000..433dab5
+index 0000000..c58a8f3
 --- /dev/null
 +++ b/include/jemalloc/jemalloc_FreeBSD.h
-@@ -0,0 +1,160 @@
+@@ -0,0 +1,162 @@
 +/*
 + * Override settings that were generated in jemalloc_defs.h as necessary.
 + */
@@ -138,6 +139,8 @@ index 0000000..433dab5
 +#define       JEMALLOC_DEBUG
 +#endif
 +
++#undef JEMALLOC_DSS
++
 +/*
 + * The following are architecture-dependent, so conditionally define them for
 + * each supported architecture.
@@ -300,7 +303,7 @@ index f943891..47d032c 100755
 +#include "jemalloc_FreeBSD.h"
  EOF
 diff --git a/src/jemalloc.c b/src/jemalloc.c
-index 0735376..a34b85c 100644
+index 40eb2ea..666c49d 100644
 --- a/src/jemalloc.c
 +++ b/src/jemalloc.c
 @@ -4,6 +4,10 @@
@@ -314,7 +317,7 @@ index 0735376..a34b85c 100644
  /* Runtime configuration options. */
  const char    *je_malloc_conf JEMALLOC_ATTR(weak);
  bool  opt_abort =
-@@ -2611,6 +2615,107 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
+@@ -2673,6 +2677,107 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
   */
  
/******************************************************************************/
  /*
@@ -341,7 +344,7 @@ index 0735376..a34b85c 100644
 +      if (p == NULL)
 +              return (ALLOCM_ERR_OOM);
 +      if (rsize != NULL)
-+              *rsize = isalloc(p, config_prof);
++              *rsize = isalloc(tsdn_fetch(), p, config_prof);
 +      *ptr = p;
 +      return (ALLOCM_SUCCESS);
 +}
@@ -370,7 +373,7 @@ index 0735376..a34b85c 100644
 +              } else
 +                      ret = ALLOCM_ERR_OOM;
 +              if (rsize != NULL)
-+                      *rsize = isalloc(*ptr, config_prof);
++                      *rsize = isalloc(tsdn_fetch(), *ptr, config_prof);
 +      }
 +      return (ret);
 +}
@@ -422,8 +425,8 @@ index 0735376..a34b85c 100644
   * The following functions are used by threading libraries for protection of
   * malloc during fork().
   */
-@@ -2717,4 +2822,11 @@ jemalloc_postfork_child(void)
-       ctl_postfork_child();
+@@ -2814,4 +2919,11 @@ jemalloc_postfork_child(void)
+       ctl_postfork_child(tsd_tsdn(tsd));
  }
  
 +void
@@ -435,7 +438,7 @@ index 0735376..a34b85c 100644
 +
  
/******************************************************************************/
 diff --git a/src/mutex.c b/src/mutex.c
-index 2d47af9..934d5aa 100644
+index a1fac34..a24e420 100644
 --- a/src/mutex.c
 +++ b/src/mutex.c
 @@ -66,6 +66,17 @@ pthread_create(pthread_t *__restrict thread,
@@ -456,22 +459,22 @@ index 2d47af9..934d5aa 100644
  #endif
  
  bool
-@@ -137,7 +148,7 @@ malloc_mutex_postfork_child(malloc_mutex_t *mutex)
+@@ -140,7 +151,7 @@ malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex)
  }
  
  bool
--mutex_boot(void)
+-malloc_mutex_boot(void)
 +malloc_mutex_first_thread(void)
  {
  
  #ifdef JEMALLOC_MUTEX_INIT_CB
-@@ -151,3 +162,14 @@ mutex_boot(void)
+@@ -154,3 +165,14 @@ malloc_mutex_boot(void)
  #endif
        return (false);
  }
 +
 +bool
-+mutex_boot(void)
++malloc_mutex_boot(void)
 +{
 +
 +#ifndef JEMALLOC_MUTEX_INIT_CB
@@ -481,10 +484,10 @@ index 2d47af9..934d5aa 100644
 +#endif
 +}
 diff --git a/src/util.c b/src/util.c
-index 02673c7..116e981 100644
+index a1c4a2a..04f9153 100644
 --- a/src/util.c
 +++ b/src/util.c
-@@ -66,6 +66,22 @@ wrtmessage(void *cbopaque, const char *s)
+@@ -67,6 +67,22 @@ wrtmessage(void *cbopaque, const char *s)
  
  JEMALLOC_EXPORT void  (*je_malloc_message)(void *, const char *s);
  

Modified: head/contrib/jemalloc/VERSION
==============================================================================
--- head/contrib/jemalloc/VERSION       Fri May 13 02:58:11 2016        (r299586)
+++ head/contrib/jemalloc/VERSION       Fri May 13 04:03:20 2016        (r299587)
@@ -1 +1 @@
-4.1.0-1-g994da4232621dd1210fcf39bdf0d6454cefda473
+4.2.0-1-gdc7ff6306d7a15b53479e2fb8e5546404b82e6fc

Modified: head/contrib/jemalloc/doc/jemalloc.3
==============================================================================
--- head/contrib/jemalloc/doc/jemalloc.3        Fri May 13 02:58:11 2016        (r299586)
+++ head/contrib/jemalloc/doc/jemalloc.3        Fri May 13 04:03:20 2016        (r299587)
@@ -2,12 +2,12 @@
 .\"     Title: JEMALLOC
 .\"    Author: Jason Evans
 .\" Generator: DocBook XSL Stylesheets v1.76.1 <http://docbook.sf.net/>
-.\"      Date: 02/28/2016
+.\"      Date: 05/12/2016
 .\"    Manual: User Manual
-.\"    Source: jemalloc 4.1.0-1-g994da4232621dd1210fcf39bdf0d6454cefda473
+.\"    Source: jemalloc 4.2.0-1-gdc7ff6306d7a15b53479e2fb8e5546404b82e6fc
 .\"  Language: English
 .\"
-.TH "JEMALLOC" "3" "02/28/2016" "jemalloc 4.1.0-1-g994da4232621" "User Manual"
+.TH "JEMALLOC" "3" "05/12/2016" "jemalloc 4.2.0-1-gdc7ff6306d7a" "User Manual"
 .\" -----------------------------------------------------------------
 .\" * Define some portability stuff
 .\" -----------------------------------------------------------------
@@ -31,7 +31,7 @@
 jemalloc \- general purpose memory allocation functions
 .SH "LIBRARY"
 .PP
-This manual describes jemalloc 
4\&.1\&.0\-1\-g994da4232621dd1210fcf39bdf0d6454cefda473\&. More information can 
be found at the
+This manual describes jemalloc 
4\&.2\&.0\-1\-gdc7ff6306d7a15b53479e2fb8e5546404b82e6fc\&. More information can 
be found at the
 \m[blue]\fBjemalloc website\fR\m[]\&\s-2\u[1]\d\s+2\&.
 .PP
 The following configuration options are enabled in libc\*(Aqs built\-in 
jemalloc:
@@ -461,7 +461,8 @@ Memory is conceptually broken into equal
 Small objects are managed in groups by page runs\&. Each run maintains a 
bitmap to track which regions are in use\&. Allocation requests that are no 
more than half the quantum (8 or 16, depending on architecture) are rounded up 
to the nearest power of two that is at least
 sizeof(\fBdouble\fR)\&. All other object size classes are multiples of the 
quantum, spaced such that there are four size classes for each doubling in 
size, which limits internal fragmentation to approximately 20% for all but the 
smallest size classes\&. Small size classes are smaller than four times the 
page size, large size classes are smaller than the chunk size (see the
 "opt\&.lg_chunk"
-option), and huge size classes extend from the chunk size up to one size class 
less than the full address space size\&.
+option), and huge size classes extend from the chunk size up to the largest 
size class that does not exceed
+\fBPTRDIFF_MAX\fR\&.
 .PP
 Allocations are packed tightly together, which can be an issue for 
multi\-threaded applications\&. If you need to assure that allocations do not 
suffer from cacheline sharing, round your allocation requests up to the nearest 
multiple of the cacheline size, or specify cacheline alignment when 
allocating\&.
 .PP
@@ -518,6 +519,8 @@ l r l
 ^ r l
 ^ r l
 ^ r l
+^ r l
+^ r l
 ^ r l.
 T{
 Small
@@ -645,6 +648,16 @@ T}
 T}:T{
 \&.\&.\&.
 T}
+:T{
+512 PiB
+T}:T{
+[2560 PiB, 3 EiB, 3584 PiB, 4 EiB]
+T}
+:T{
+1 EiB
+T}:T{
+[5 EiB, 6 EiB, 7 EiB]
+T}
 .TE
 .sp 1
 .SH "MALLCTL NAMESPACE"
@@ -841,7 +854,7 @@ function\&. If
 is specified during configuration, this has the potential to cause deadlock 
for a multi\-threaded process that exits while one or more threads are 
executing in the memory allocation functions\&. Furthermore,
 \fBatexit\fR\fB\fR
 may allocate memory during application initialization and then deadlock 
internally when jemalloc in turn calls
-\fBatexit\fR\fB\fR, so this option is not univerally usable (though the 
application can register its own
+\fBatexit\fR\fB\fR, so this option is not universally usable (though the 
application can register its own
 \fBatexit\fR\fB\fR
 function with equivalent functionality)\&. Therefore, this option should only 
be used with care; it is primarily intended as a performance tuning aid during 
application development\&. This option is disabled by default\&.
 .RE
@@ -1007,7 +1020,7 @@ is controlled by the
 option\&. Note that
 \fBatexit\fR\fB\fR
 may allocate memory during application initialization and then deadlock 
internally when jemalloc in turn calls
-\fBatexit\fR\fB\fR, so this option is not univerally usable (though the 
application can register its own
+\fBatexit\fR\fB\fR, so this option is not universally usable (though the 
application can register its own
 \fBatexit\fR\fB\fR
 function with equivalent functionality)\&. This option is disabled by 
default\&.
 .RE
@@ -1113,6 +1126,14 @@ Trigger decay\-based purging of unused d
 for details\&.
 .RE
 .PP
+"arena\&.<i>\&.reset" (\fBvoid\fR) \-\-
+.RS 4
+Discard all of the arena\*(Aqs extant allocations\&. This interface can only 
be used with arenas created via
+"arenas\&.extend"\&. None of the arena\*(Aqs discarded/cached allocations may 
accessed afterward\&. As part of this requirement, all thread caches which were 
used to allocate/deallocate in conjunction with the arena must be flushed 
beforehand\&. This interface cannot be used if running inside Valgrind, nor if 
the
+quarantine
+size is non\-zero\&.
+.RE
+.PP
 "arena\&.<i>\&.dss" (\fBconst char *\fR) rw
 .RS 4
 Set the precedence of dss allocation as related to mmap allocation for arena 
<i>, or for all arenas if <i> equals
@@ -1503,7 +1524,7 @@ Get the current sample rate (see
 .PP
 "prof\&.interval" (\fBuint64_t\fR) r\- [\fB\-\-enable\-prof\fR]
 .RS 4
-Average number of bytes allocated between inverval\-based profile dumps\&. See 
the
+Average number of bytes allocated between interval\-based profile dumps\&. See 
the
 "opt\&.lg_prof_interval"
 option for additional information\&.
 .RE
@@ -1547,6 +1568,15 @@ Total number of bytes in active chunks m
 "stats\&.resident"\&.
 .RE
 .PP
+"stats\&.retained" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Total number of bytes in virtual memory mappings that were retained rather 
than being returned to the operating system via e\&.g\&.
+\fBmunmap\fR(2)\&. Retained virtual memory is typically untouched, 
decommitted, or purged, so it has no strongly associated physical memory (see
+chunk hooks
+for details)\&. Retained memory is excluded from mapped memory statistics, 
e\&.g\&.
+"stats\&.mapped"\&.
+.RE
+.PP
 "stats\&.arenas\&.<i>\&.dss" (\fBconst char *\fR) r\-
 .RS 4
 dss (\fBsbrk\fR(2)) allocation precedence as related to
@@ -1592,6 +1622,13 @@ or similar has not been called\&.
 Number of mapped bytes\&.
 .RE
 .PP
+"stats\&.arenas\&.<i>\&.retained" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Number of retained bytes\&. See
+"stats\&.retained"
+for details\&.
+.RE
+.PP
 "stats\&.arenas\&.<i>\&.metadata\&.mapped" (\fBsize_t\fR) r\- 
[\fB\-\-enable\-stats\fR]
 .RS 4
 Number of mapped bytes in arena chunk headers, which track the states of the 
non\-metadata pages\&.
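
The manual text above recommends rounding requests up to a cacheline multiple, or
requesting cacheline alignment outright, to avoid false sharing between tightly
packed allocations. A minimal sketch using the non-standard *allocx API, assuming
<malloc_np.h> exposes mallocx()/sallocx()/dallocx() as FreeBSD's libc does
(standalone jemalloc builds use <jemalloc/jemalloc.h>), and assuming a 64-byte
cacheline purely for illustration:

#include <stdio.h>
#include <malloc_np.h>	/* mallocx(), sallocx(), dallocx(), MALLOCX_ALIGN() */

#define CACHELINE 64	/* assumed line size, for illustration only */

int
main(void)
{
	/* Request 24 bytes but force cacheline alignment so two hot
	 * counters owned by different threads never share a line. */
	void *a = mallocx(24, MALLOCX_ALIGN(CACHELINE));
	void *b = mallocx(24, MALLOCX_ALIGN(CACHELINE));

	/* sallocx() reports the size class actually backing each object. */
	printf("usable sizes: %zu and %zu\n", sallocx(a, 0), sallocx(b, 0));

	dallocx(a, 0);
	dallocx(b, 0);
	return (0);
}

sallocx() makes it easy to confirm which size class ends up backing such an
aligned request.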

Modified: head/contrib/jemalloc/include/jemalloc/internal/arena.h
==============================================================================
--- head/contrib/jemalloc/include/jemalloc/internal/arena.h     Fri May 13 02:58:11 2016        (r299586)
+++ head/contrib/jemalloc/include/jemalloc/internal/arena.h     Fri May 13 04:03:20 2016        (r299587)
@@ -36,6 +36,7 @@ typedef enum {
 #define        DECAY_NTICKS_PER_UPDATE 1000
 
 typedef struct arena_runs_dirty_link_s arena_runs_dirty_link_t;
+typedef struct arena_avail_links_s arena_avail_links_t;
 typedef struct arena_run_s arena_run_t;
 typedef struct arena_chunk_map_bits_s arena_chunk_map_bits_t;
 typedef struct arena_chunk_map_misc_s arena_chunk_map_misc_t;
@@ -153,13 +154,13 @@ struct arena_runs_dirty_link_s {
  */
 struct arena_chunk_map_misc_s {
        /*
-        * Linkage for run trees.  There are two disjoint uses:
+        * Linkage for run heaps.  There are two disjoint uses:
         *
-        * 1) arena_t's runs_avail tree.
+        * 1) arena_t's runs_avail heaps.
         * 2) arena_run_t conceptually uses this linkage for in-use non-full
         *    runs, rather than directly embedding linkage.
         */
-       rb_node(arena_chunk_map_misc_t)         rb_link;
+       phn(arena_chunk_map_misc_t)             ph_link;
 
        union {
                /* Linkage for list of dirty runs. */
@@ -175,7 +176,7 @@ struct arena_chunk_map_misc_s {
                arena_run_t                     run;
        };
 };
-typedef rb_tree(arena_chunk_map_misc_t) arena_run_tree_t;
+typedef ph(arena_chunk_map_misc_t) arena_run_heap_t;
 #endif /* JEMALLOC_ARENA_STRUCTS_A */
 
 #ifdef JEMALLOC_ARENA_STRUCTS_B
@@ -272,13 +273,13 @@ struct arena_bin_s {
        arena_run_t             *runcur;
 
        /*
-        * Tree of non-full runs.  This tree is used when looking for an
+        * Heap of non-full runs.  This heap is used when looking for an
         * existing run when runcur is no longer usable.  We choose the
         * non-full run that is lowest in memory; this policy tends to keep
         * objects packed well, and it can also help reduce the number of
         * almost-empty chunks.
         */
-       arena_run_tree_t        runs;
+       arena_run_heap_t        runs;
 
        /* Bin statistics. */
        malloc_bin_stats_t      stats;
@@ -289,10 +290,18 @@ struct arena_s {
        unsigned                ind;
 
        /*
-        * Number of threads currently assigned to this arena.  This field is
-        * synchronized via atomic operations.
+        * Number of threads currently assigned to this arena, synchronized via
+        * atomic operations.  Each thread has two distinct assignments, one for
+        * application-serving allocation, and the other for internal metadata
+        * allocation.  Internal metadata must not be allocated from arenas
+        * created via the arenas.extend mallctl, because the arena.<i>.reset
+        * mallctl indiscriminately discards all allocations for the affected
+        * arena.
+        *
+        *   0: Application allocation.
+        *   1: Internal metadata allocation.
         */
-       unsigned                nthreads;
+       unsigned                nthreads[2];
 
        /*
         * There are three classes of arena operations from a locking
@@ -321,6 +330,10 @@ struct arena_s {
 
        dss_prec_t              dss_prec;
 
+
+       /* Extant arena chunks. */
+       ql_head(extent_node_t)  achunks;
+
        /*
         * In order to avoid rapid chunk allocation/deallocation when an arena
         * oscillates right on the cusp of needing a new chunk, cache the most
@@ -457,10 +470,10 @@ struct arena_s {
        arena_bin_t             bins[NBINS];
 
        /*
-        * Quantized address-ordered trees of this arena's available runs.  The
-        * trees are used for first-best-fit run allocation.
+        * Quantized address-ordered heaps of this arena's available runs.  The
+        * heaps are used for first-best-fit run allocation.
         */
-       arena_run_tree_t        runs_avail[1]; /* Dynamically sized. */
+       arena_run_heap_t        runs_avail[1]; /* Dynamically sized. */
 };
 
 /* Used in conjunction with tsd for fast arena-related context lookup. */
@@ -505,25 +518,28 @@ void      arena_chunk_cache_maybe_insert(aren
     bool cache);
 void   arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node,
     bool cache);
-extent_node_t  *arena_node_alloc(arena_t *arena);
-void   arena_node_dalloc(arena_t *arena, extent_node_t *node);
-void   *arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
-    bool *zero);
-void   arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize);
-void   arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk,
-    size_t oldsize, size_t usize);
-void   arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk,
-    size_t oldsize, size_t usize);
-bool   arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk,
-    size_t oldsize, size_t usize, bool *zero);
-ssize_t        arena_lg_dirty_mult_get(arena_t *arena);
-bool   arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult);
-ssize_t        arena_decay_time_get(arena_t *arena);
-bool   arena_decay_time_set(arena_t *arena, ssize_t decay_time);
-void   arena_maybe_purge(arena_t *arena);
-void   arena_purge(arena_t *arena, bool all);
-void   arena_tcache_fill_small(tsd_t *tsd, arena_t *arena, tcache_bin_t *tbin,
-    szind_t binind, uint64_t prof_accumbytes);
+extent_node_t  *arena_node_alloc(tsdn_t *tsdn, arena_t *arena);
+void   arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node);
+void   *arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
+    size_t alignment, bool *zero);
+void   arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk,
+    size_t usize);
+void   arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena,
+    void *chunk, size_t oldsize, size_t usize);
+void   arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena,
+    void *chunk, size_t oldsize, size_t usize);
+bool   arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena,
+    void *chunk, size_t oldsize, size_t usize, bool *zero);
+ssize_t        arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena);
+bool   arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena,
+    ssize_t lg_dirty_mult);
+ssize_t        arena_decay_time_get(tsdn_t *tsdn, arena_t *arena);
+bool   arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time);
+void   arena_purge(tsdn_t *tsdn, arena_t *arena, bool all);
+void   arena_maybe_purge(tsdn_t *tsdn, arena_t *arena);
+void   arena_reset(tsd_t *tsd, arena_t *arena);
+void   arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena,
+    tcache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes);
 void   arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
     bool zero);
 #ifdef JEMALLOC_JET
@@ -536,17 +552,18 @@ extern arena_dalloc_junk_small_t *arena_
 void   arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info);
 #endif
 void   arena_quarantine_junk_small(void *ptr, size_t usize);
-void   *arena_malloc_large(tsd_t *tsd, arena_t *arena, szind_t ind, bool zero);
-void   *arena_malloc_hard(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind,
-    bool zero, tcache_t *tcache);
-void   *arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize,
+void   *arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t ind,
+    bool zero);
+void   *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size,
+    szind_t ind, bool zero);
+void   *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
     size_t alignment, bool zero, tcache_t *tcache);
-void   arena_prof_promoted(const void *ptr, size_t size);
-void   arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk,
-    void *ptr, arena_chunk_map_bits_t *bitselm);
-void   arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
-    size_t pageind, arena_chunk_map_bits_t *bitselm);
-void   arena_dalloc_small(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk,
+void   arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size);
+void   arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena,
+    arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm);
+void   arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
+    void *ptr, size_t pageind, arena_chunk_map_bits_t *bitselm);
+void   arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
     void *ptr, size_t pageind);
 #ifdef JEMALLOC_JET
 typedef void (arena_dalloc_junk_large_t)(void *, size_t);
@@ -554,67 +571,80 @@ extern arena_dalloc_junk_large_t *arena_
 #else
 void   arena_dalloc_junk_large(void *ptr, size_t usize);
 #endif
-void   arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk,
-    void *ptr);
-void   arena_dalloc_large(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk,
+void   arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena,
+    arena_chunk_t *chunk, void *ptr);
+void   arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
     void *ptr);
 #ifdef JEMALLOC_JET
 typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t);
 extern arena_ralloc_junk_large_t *arena_ralloc_junk_large;
 #endif
-bool   arena_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
-    size_t extra, bool zero);
+bool   arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize,
+    size_t size, size_t extra, bool zero);
 void   *arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
     size_t size, size_t alignment, bool zero, tcache_t *tcache);
-dss_prec_t     arena_dss_prec_get(arena_t *arena);
-bool   arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
+dss_prec_t     arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena);
+bool   arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec);
 ssize_t        arena_lg_dirty_mult_default_get(void);
 bool   arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult);
 ssize_t        arena_decay_time_default_get(void);
 bool   arena_decay_time_default_set(ssize_t decay_time);
-void   arena_basic_stats_merge(arena_t *arena, unsigned *nthreads,
+void   arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena,
+    unsigned *nthreads, const char **dss, ssize_t *lg_dirty_mult,
+    ssize_t *decay_time, size_t *nactive, size_t *ndirty);
+void   arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
     const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
-    size_t *nactive, size_t *ndirty);
-void   arena_stats_merge(arena_t *arena, unsigned *nthreads, const char **dss,
-    ssize_t *lg_dirty_mult, ssize_t *decay_time, size_t *nactive,
-    size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
-    malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats);
-unsigned       arena_nthreads_get(arena_t *arena);
-void   arena_nthreads_inc(arena_t *arena);
-void   arena_nthreads_dec(arena_t *arena);
-arena_t        *arena_new(unsigned ind);
+    size_t *nactive, size_t *ndirty, arena_stats_t *astats,
+    malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
+    malloc_huge_stats_t *hstats);
+unsigned       arena_nthreads_get(arena_t *arena, bool internal);
+void   arena_nthreads_inc(arena_t *arena, bool internal);
+void   arena_nthreads_dec(arena_t *arena, bool internal);
+arena_t        *arena_new(tsdn_t *tsdn, unsigned ind);
 bool   arena_boot(void);
-void   arena_prefork(arena_t *arena);
-void   arena_postfork_parent(arena_t *arena);
-void   arena_postfork_child(arena_t *arena);
+void   arena_prefork0(tsdn_t *tsdn, arena_t *arena);
+void   arena_prefork1(tsdn_t *tsdn, arena_t *arena);
+void   arena_prefork2(tsdn_t *tsdn, arena_t *arena);
+void   arena_prefork3(tsdn_t *tsdn, arena_t *arena);
+void   arena_postfork_parent(tsdn_t *tsdn, arena_t *arena);
+void   arena_postfork_child(tsdn_t *tsdn, arena_t *arena);
 
 #endif /* JEMALLOC_H_EXTERNS */
 
/******************************************************************************/
 #ifdef JEMALLOC_H_INLINES
 
 #ifndef JEMALLOC_ENABLE_INLINE
-arena_chunk_map_bits_t *arena_bitselm_get(arena_chunk_t *chunk,
+arena_chunk_map_bits_t *arena_bitselm_get_mutable(arena_chunk_t *chunk,
     size_t pageind);
-arena_chunk_map_misc_t *arena_miscelm_get(arena_chunk_t *chunk,
+const arena_chunk_map_bits_t   *arena_bitselm_get_const(
+    const arena_chunk_t *chunk, size_t pageind);
+arena_chunk_map_misc_t *arena_miscelm_get_mutable(arena_chunk_t *chunk,
     size_t pageind);
+const arena_chunk_map_misc_t   *arena_miscelm_get_const(
+    const arena_chunk_t *chunk, size_t pageind);
 size_t arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm);
-void   *arena_miscelm_to_rpages(arena_chunk_map_misc_t *miscelm);
+void   *arena_miscelm_to_rpages(const arena_chunk_map_misc_t *miscelm);
 arena_chunk_map_misc_t *arena_rd_to_miscelm(arena_runs_dirty_link_t *rd);
 arena_chunk_map_misc_t *arena_run_to_miscelm(arena_run_t *run);
-size_t *arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbitsp_read(size_t *mapbitsp);
-size_t arena_mapbits_get(arena_chunk_t *chunk, size_t pageind);
+size_t *arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind);
+const size_t   *arena_mapbitsp_get_const(const arena_chunk_t *chunk,
+    size_t pageind);
+size_t arena_mapbitsp_read(const size_t *mapbitsp);
+size_t arena_mapbits_get(const arena_chunk_t *chunk, size_t pageind);
 size_t arena_mapbits_size_decode(size_t mapbits);
-size_t arena_mapbits_unallocated_size_get(arena_chunk_t *chunk,
+size_t arena_mapbits_unallocated_size_get(const arena_chunk_t *chunk,
     size_t pageind);
-size_t arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind);
-szind_t        arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_decommitted_get(arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_large_size_get(const arena_chunk_t *chunk,
+    size_t pageind);
+size_t arena_mapbits_small_runind_get(const arena_chunk_t *chunk,
+    size_t pageind);
+szind_t        arena_mapbits_binind_get(const arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_dirty_get(const arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_unzeroed_get(const arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_decommitted_get(const arena_chunk_t *chunk,
+    size_t pageind);
+size_t arena_mapbits_large_get(const arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_allocated_get(const arena_chunk_t *chunk, size_t pageind);
 void   arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits);
 size_t arena_mapbits_size_encode(size_t size);
 void   arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind,
@@ -634,29 +664,31 @@ void      arena_metadata_allocated_sub(arena_
 size_t arena_metadata_allocated_get(arena_t *arena);
 bool   arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes);
 bool   arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
-bool   arena_prof_accum(arena_t *arena, uint64_t accumbytes);
+bool   arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes);
 szind_t        arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
 szind_t        arena_bin_index(arena_t *arena, arena_bin_t *bin);
 size_t arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
     const void *ptr);
-prof_tctx_t    *arena_prof_tctx_get(const void *ptr);
-void   arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx);
-void   arena_prof_tctx_reset(const void *ptr, size_t usize,
+prof_tctx_t    *arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr);
+void   arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
+    prof_tctx_t *tctx);
+void   arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
     const void *old_ptr, prof_tctx_t *old_tctx);
-void   arena_decay_ticks(tsd_t *tsd, arena_t *arena, unsigned nticks);
-void   arena_decay_tick(tsd_t *tsd, arena_t *arena);
-void   *arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind,
+void   arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks);
+void   arena_decay_tick(tsdn_t *tsdn, arena_t *arena);
+void   *arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
     bool zero, tcache_t *tcache, bool slow_path);
 arena_t        *arena_aalloc(const void *ptr);
-size_t arena_salloc(const void *ptr, bool demote);
-void   arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path);
-void   arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
+size_t arena_salloc(tsdn_t *tsdn, const void *ptr, bool demote);
+void   arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path);
+void   arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
+    bool slow_path);
 #endif
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
 #  ifdef JEMALLOC_ARENA_INLINE_A
 JEMALLOC_ALWAYS_INLINE arena_chunk_map_bits_t *
-arena_bitselm_get(arena_chunk_t *chunk, size_t pageind)
+arena_bitselm_get_mutable(arena_chunk_t *chunk, size_t pageind)
 {
 
        assert(pageind >= map_bias);
@@ -665,8 +697,15 @@ arena_bitselm_get(arena_chunk_t *chunk, 
        return (&chunk->map_bits[pageind-map_bias]);
 }
 
+JEMALLOC_ALWAYS_INLINE const arena_chunk_map_bits_t *
+arena_bitselm_get_const(const arena_chunk_t *chunk, size_t pageind)
+{
+
+       return (arena_bitselm_get_mutable((arena_chunk_t *)chunk, pageind));
+}
+
 JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
-arena_miscelm_get(arena_chunk_t *chunk, size_t pageind)
+arena_miscelm_get_mutable(arena_chunk_t *chunk, size_t pageind)
 {
 
        assert(pageind >= map_bias);
@@ -676,6 +715,13 @@ arena_miscelm_get(arena_chunk_t *chunk, 
            (uintptr_t)map_misc_offset) + pageind-map_bias);
 }
 
+JEMALLOC_ALWAYS_INLINE const arena_chunk_map_misc_t *
+arena_miscelm_get_const(const arena_chunk_t *chunk, size_t pageind)
+{
+
+       return (arena_miscelm_get_mutable((arena_chunk_t *)chunk, pageind));
+}
+
 JEMALLOC_ALWAYS_INLINE size_t
 arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm)
 {
@@ -690,7 +736,7 @@ arena_miscelm_to_pageind(const arena_chu
 }
 
 JEMALLOC_ALWAYS_INLINE void *
-arena_miscelm_to_rpages(arena_chunk_map_misc_t *miscelm)
+arena_miscelm_to_rpages(const arena_chunk_map_misc_t *miscelm)
 {
        arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
        size_t pageind = arena_miscelm_to_pageind(miscelm);
@@ -723,24 +769,31 @@ arena_run_to_miscelm(arena_run_t *run)
 }
 
 JEMALLOC_ALWAYS_INLINE size_t *
-arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind)
 {
 
-       return (&arena_bitselm_get(chunk, pageind)->bits);
+       return (&arena_bitselm_get_mutable(chunk, pageind)->bits);
+}
+
+JEMALLOC_ALWAYS_INLINE const size_t *
+arena_mapbitsp_get_const(const arena_chunk_t *chunk, size_t pageind)
+{
+
+       return (arena_mapbitsp_get_mutable((arena_chunk_t *)chunk, pageind));
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbitsp_read(size_t *mapbitsp)
+arena_mapbitsp_read(const size_t *mapbitsp)
 {
 
        return (*mapbitsp);
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_get(const arena_chunk_t *chunk, size_t pageind)
 {
 
-       return (arena_mapbitsp_read(arena_mapbitsp_get(chunk, pageind)));
+       return (arena_mapbitsp_read(arena_mapbitsp_get_const(chunk, pageind)));
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
@@ -760,7 +813,7 @@ arena_mapbits_size_decode(size_t mapbits
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_unallocated_size_get(const arena_chunk_t *chunk, size_t pageind)
 {
        size_t mapbits;
 
@@ -770,7 +823,7 @@ arena_mapbits_unallocated_size_get(arena
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_large_size_get(const arena_chunk_t *chunk, size_t pageind)
 {
        size_t mapbits;
 
@@ -781,7 +834,7 @@ arena_mapbits_large_size_get(arena_chunk
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_small_runind_get(const arena_chunk_t *chunk, size_t pageind)
 {
        size_t mapbits;
 
@@ -792,7 +845,7 @@ arena_mapbits_small_runind_get(arena_chu
 }
 
 JEMALLOC_ALWAYS_INLINE szind_t
-arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_binind_get(const arena_chunk_t *chunk, size_t pageind)
 {
        size_t mapbits;
        szind_t binind;
@@ -804,7 +857,7 @@ arena_mapbits_binind_get(arena_chunk_t *
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_dirty_get(const arena_chunk_t *chunk, size_t pageind)
 {
        size_t mapbits;
 
@@ -815,7 +868,7 @@ arena_mapbits_dirty_get(arena_chunk_t *c
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_unzeroed_get(const arena_chunk_t *chunk, size_t pageind)
 {
        size_t mapbits;
 
@@ -826,7 +879,7 @@ arena_mapbits_unzeroed_get(arena_chunk_t
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_decommitted_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_decommitted_get(const arena_chunk_t *chunk, size_t pageind)
 {
        size_t mapbits;
 
@@ -837,7 +890,7 @@ arena_mapbits_decommitted_get(arena_chun
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_large_get(const arena_chunk_t *chunk, size_t pageind)
 {
        size_t mapbits;
 
@@ -846,7 +899,7 @@ arena_mapbits_large_get(arena_chunk_t *c
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_allocated_get(const arena_chunk_t *chunk, size_t pageind)
 {
        size_t mapbits;
 
@@ -882,7 +935,7 @@ JEMALLOC_ALWAYS_INLINE void
 arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
     size_t flags)
 {
-       size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
+       size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
 
        assert((size & PAGE_MASK) == 0);
        assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
@@ -896,7 +949,7 @@ JEMALLOC_ALWAYS_INLINE void
 arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
     size_t size)
 {
-       size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
+       size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
        size_t mapbits = arena_mapbitsp_read(mapbitsp);
 
        assert((size & PAGE_MASK) == 0);
@@ -908,7 +961,7 @@ arena_mapbits_unallocated_size_set(arena
 JEMALLOC_ALWAYS_INLINE void
 arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind, size_t flags)
 {
-       size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
+       size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
 
        assert((flags & CHUNK_MAP_UNZEROED) == flags);
        arena_mapbitsp_write(mapbitsp, flags);
@@ -918,7 +971,7 @@ JEMALLOC_ALWAYS_INLINE void
 arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
     size_t flags)
 {
-       size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
+       size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
 
        assert((size & PAGE_MASK) == 0);
        assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
@@ -933,7 +986,7 @@ JEMALLOC_ALWAYS_INLINE void
 arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
     szind_t binind)
 {
-       size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
+       size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
        size_t mapbits = arena_mapbitsp_read(mapbitsp);
 
        assert(binind <= BININD_INVALID);
@@ -947,7 +1000,7 @@ JEMALLOC_ALWAYS_INLINE void
 arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
     szind_t binind, size_t flags)
 {
-       size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
+       size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
 
        assert(binind < BININD_INVALID);
        assert(pageind - runind >= map_bias);
@@ -1004,7 +1057,7 @@ arena_prof_accum_locked(arena_t *arena, 
 }
 
 JEMALLOC_INLINE bool
-arena_prof_accum(arena_t *arena, uint64_t accumbytes)
+arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes)
 {
 
        cassert(config_prof);
@@ -1015,9 +1068,9 @@ arena_prof_accum(arena_t *arena, uint64_
        {
                bool ret;
 
-               malloc_mutex_lock(&arena->lock);
+               malloc_mutex_lock(tsdn, &arena->lock);
                ret = arena_prof_accum_impl(arena, accumbytes);
-               malloc_mutex_unlock(&arena->lock);
+               malloc_mutex_unlock(tsdn, &arena->lock);
                return (ret);
        }
 }
@@ -1035,12 +1088,12 @@ arena_ptr_small_binind_get(const void *p
                size_t pageind;
                size_t actual_mapbits;
                size_t rpages_ind;
-               arena_run_t *run;
+               const arena_run_t *run;
                arena_bin_t *bin;
                szind_t run_binind, actual_binind;
                arena_bin_info_t *bin_info;
-               arena_chunk_map_misc_t *miscelm;
-               void *rpages;
+               const arena_chunk_map_misc_t *miscelm;
+               const void *rpages;
 
                assert(binind != BININD_INVALID);
                assert(binind < NBINS);
@@ -1053,7 +1106,7 @@ arena_ptr_small_binind_get(const void *p
                assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
                rpages_ind = pageind - arena_mapbits_small_runind_get(chunk,
                    pageind);
-               miscelm = arena_miscelm_get(chunk, rpages_ind);
+               miscelm = arena_miscelm_get_const(chunk, rpages_ind);
                run = &miscelm->run;
                run_binind = run->binind;
                bin = &arena->bins[run_binind];
@@ -1153,7 +1206,7 @@ arena_run_regind(arena_run_t *run, arena
 }
 
 JEMALLOC_INLINE prof_tctx_t *
-arena_prof_tctx_get(const void *ptr)
+arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr)
 {
        prof_tctx_t *ret;
        arena_chunk_t *chunk;
@@ -1169,18 +1222,19 @@ arena_prof_tctx_get(const void *ptr)
                if (likely((mapbits & CHUNK_MAP_LARGE) == 0))

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
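
On the rb_node()/phn() and rb_tree()/ph() swaps visible in the arena.h hunks above:
the new ph.h implements an intrusive pairing heap, whose O(1) insert/merge makes it
cheaper than a red-black tree for the hot runs_avail and bin run lookups. The sketch
below is a generic two-pass pairing heap written for illustration only; it is not
jemalloc's ph.h macro implementation, and all names in it are made up.

#include <stddef.h>
#include <stdio.h>

/* Hypothetical intrusive node; real code embeds it in the element, the way
 * arena_chunk_map_misc_t embeds phn(...) in jemalloc. */
typedef struct phn_s {
	struct phn_s *child;	/* leftmost child */
	struct phn_s *sibling;	/* next sibling in the child list */
	unsigned key;		/* ordering key */
} phn_t;

/* Merge two heaps: the larger root becomes the leftmost child of the smaller
 * root, so merge (and therefore insert) is O(1). */
static phn_t *
ph_merge(phn_t *a, phn_t *b)
{
	if (a == NULL) return (b);
	if (b == NULL) return (a);
	if (b->key < a->key) { phn_t *t = a; a = b; b = t; }
	b->sibling = a->child;
	a->child = b;
	return (a);
}

static phn_t *
ph_insert(phn_t *heap, phn_t *node)
{
	node->child = node->sibling = NULL;
	return (ph_merge(heap, node));
}

/* Remove the minimum: merge the children pairwise onto a work list, then fold
 * the list back into a single heap (the classic two-pass scheme). */
static phn_t *
ph_remove_min(phn_t *heap, phn_t **min)
{
	phn_t *list = NULL, *a, *b;

	*min = heap;
	if (heap == NULL)
		return (NULL);
	for (a = heap->child; a != NULL;) {
		b = a->sibling;
		if (b != NULL) {
			phn_t *next = b->sibling;
			a = ph_merge(a, b);
			a->sibling = list;
			list = a;
			a = next;
		} else {
			a->sibling = list;
			list = a;
			break;
		}
	}
	heap = NULL;
	while (list != NULL) {
		phn_t *next = list->sibling;
		heap = ph_merge(heap, list);
		list = next;
	}
	(*min)->child = (*min)->sibling = NULL;
	return (heap);
}

int
main(void)
{
	phn_t nodes[4] = {{0, 0, 42}, {0, 0, 7}, {0, 0, 19}, {0, 0, 3}};
	phn_t *heap = NULL, *min;
	int i;

	for (i = 0; i < 4; i++)
		heap = ph_insert(heap, &nodes[i]);
	heap = ph_remove_min(heap, &min);
	(void)heap;
	printf("min key: %u\n", min->key);	/* prints 3 */
	return (0);
}

In jemalloc the heaps are keyed on run address, so first-best-fit picks the run
lowest in memory, matching the "address-ordered heaps" comments in the hunk above.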