Author: jasone
Date: Mon Jul  3 23:27:57 2017
New Revision: 320623
URL: https://svnweb.freebsd.org/changeset/base/320623

Log:
  Update jemalloc to 5.0.1.

Modified:
  head/contrib/jemalloc/ChangeLog
  head/contrib/jemalloc/VERSION
  head/contrib/jemalloc/doc/jemalloc.3
  head/contrib/jemalloc/include/jemalloc/internal/arena_externs.h
  head/contrib/jemalloc/include/jemalloc/internal/background_thread_inlines.h
  head/contrib/jemalloc/include/jemalloc/internal/base_externs.h
  head/contrib/jemalloc/include/jemalloc/internal/ctl.h
  head/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h
  head/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h
  head/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_a.h
  head/contrib/jemalloc/include/jemalloc/internal/private_namespace.h
  head/contrib/jemalloc/include/jemalloc/internal/tcache_externs.h
  head/contrib/jemalloc/include/jemalloc/internal/tsd.h
  head/contrib/jemalloc/include/jemalloc/jemalloc.h
  head/contrib/jemalloc/src/arena.c
  head/contrib/jemalloc/src/background_thread.c
  head/contrib/jemalloc/src/base.c
  head/contrib/jemalloc/src/ctl.c
  head/contrib/jemalloc/src/extent.c
  head/contrib/jemalloc/src/jemalloc.c
  head/contrib/jemalloc/src/prof.c
  head/contrib/jemalloc/src/tcache.c
  head/contrib/jemalloc/src/tsd.c

Modified: head/contrib/jemalloc/ChangeLog
==============================================================================
--- head/contrib/jemalloc/ChangeLog     Mon Jul  3 22:21:44 2017        (r320622)
+++ head/contrib/jemalloc/ChangeLog     Mon Jul  3 23:27:57 2017        (r320623)
@@ -4,6 +4,41 @@ brevity.  Much more detail can be found in the git rev
 
     https://github.com/jemalloc/jemalloc
 
+* 5.0.1 (July 1, 2017)
+
+  This bugfix release fixes several issues, most of which are obscure enough
+  that typical applications are not impacted.
+
+  Bug fixes:
+  - Update decay->nunpurged before purging, in order to avoid potential update
+    races and subsequent incorrect purging volume.  (@interwq)
+  - Only abort on dlsym(3) error if the failure impacts an enabled feature (lazy
+    locking and/or background threads).  This mitigates an initialization
+    failure bug for which we still do not have a clear reproduction test case.
+    (@interwq)
+  - Modify tsd management so that it neither crashes nor leaks if a thread's
+    only allocation activity is to call free() after TLS destructors have been
+    executed.  This behavior was observed when operating with GNU libc, and is
+    unlikely to be an issue with other libc implementations.  (@interwq)
+  - Mask signals during background thread creation.  This prevents signals from
+    being inadvertently delivered to background threads.  (@jasone,
+    @davidtgoldblatt, @interwq)
+  - Avoid inactivity checks within background threads, in order to prevent
+    recursive mutex acquisition.  (@interwq)
+  - Fix extent_grow_retained() to use the specified hooks when the
+    arena.<i>.extent_hooks mallctl is used to override the default hooks.
+    (@interwq)
+  - Add missing reentrancy support for custom extent hooks which allocate.
+    (@interwq)
+  - Post-fork(2), re-initialize the list of tcaches associated with each arena
+    to contain no tcaches except the forking thread's.  (@interwq)
+  - Add missing post-fork(2) mutex reinitialization for extent_grow_mtx.  This
+    fixes potential deadlocks after fork(2).  (@interwq)
+  - Enforce minimum autoconf version (currently 2.68), since 2.63 is known to
+    generate corrupt configure scripts.  (@jasone)
+  - Ensure that the configured page size (--with-lg-page) is no larger than the
+    configured huge page size (--with-lg-hugepage).  (@jasone)
+
 * 5.0.0 (June 13, 2017)
 
   Unlike all previous jemalloc releases, this release does not use naturally

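A note on the tsd fix above: the failure mode "a thread's only allocation
activity is to call free() after TLS destructors have been executed"
corresponds to roughly the scenario sketched below.  This is a hypothetical
reproduction, not a known-good test case (the ChangeLog notes the behavior
was observed with GNU libc, where destructor ordering can run a pthreads TLS
destructor after jemalloc's own tsd destructor); all names here are invented
for illustration.

    #include <pthread.h>
    #include <stdlib.h>

    static void *main_thread_ptr;
    static pthread_key_t key;

    static void
    tls_dtor(void *arg) {
            /*
             * By this point jemalloc's tsd destructor may already have run
             * on this thread, so jemalloc must service free() with only a
             * minimal, cleanup-free tsd (tsd_fetch_min() in the tsd.h hunk
             * below).
             */
            free(main_thread_ptr);
    }

    static void *
    thd_start(void *arg) {
            /* The only allocator activity on this thread is the free()
             * performed by the destructor above. */
            pthread_setspecific(key, (void *)1);    /* Arm the destructor. */
            return NULL;
    }

    int
    main(void) {
            main_thread_ptr = malloc(16);
            pthread_key_create(&key, tls_dtor);
            pthread_t thd;
            pthread_create(&thd, NULL, thd_start, NULL);
            pthread_join(thd, NULL);
            return 0;
    }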
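Similarly, "reentrancy support for custom extent hooks which allocate" refers
to hooks along the lines of this sketch, which call back into malloc() while
jemalloc is in the middle of an extent operation; the extent.c hunks below
wrap such hook invocations in pre/post reentrancy guards.  Only the
extent_hooks_t alloc signature is taken from the jemalloc 5 API; the hook
body and its bookkeeping are assumptions for illustration.

    #include <stdbool.h>
    #include <stdlib.h>
    #include <sys/mman.h>
    #include <jemalloc/jemalloc.h>

    /* Hypothetical per-extent log record. */
    typedef struct {
            void    *addr;
            size_t  size;
    } extent_log_t;

    static void *
    logging_extent_alloc(extent_hooks_t *extent_hooks, void *new_addr,
        size_t size, size_t alignment, bool *zero, bool *commit,
        unsigned arena_ind) {
            if (new_addr != NULL) {
                    return NULL;    /* Placement requests elided. */
            }
            /* mmap() only guarantees page alignment; a real hook must honor
             * the alignment argument or return NULL. */
            void *ret = mmap(NULL, size, PROT_READ | PROT_WRITE,
                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (ret == MAP_FAILED) {
                    return NULL;
            }
            *zero = true;
            *commit = true;
            /*
             * This malloc() re-enters the allocator from inside an extent
             * hook; without the reentrancy guards it could recurse into the
             * arena that is currently being serviced.
             */
            extent_log_t *rec = malloc(sizeof(*rec));
            if (rec != NULL) {
                    rec->addr = ret;
                    rec->size = size;
                    /* ... record rec somewhere ... */
            }
            return ret;
    }
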
Modified: head/contrib/jemalloc/VERSION
==============================================================================
--- head/contrib/jemalloc/VERSION       Mon Jul  3 22:21:44 2017        (r320622)
+++ head/contrib/jemalloc/VERSION       Mon Jul  3 23:27:57 2017        (r320623)
@@ -1 +1 @@
-5.0.0-4-g84f6c2cae0fb1399377ef6aea9368444c4987cc6
+5.0.1-0-g896ed3a8b3f41998d4fb4d625d30ac63ef2d51fb

Modified: head/contrib/jemalloc/doc/jemalloc.3
==============================================================================
--- head/contrib/jemalloc/doc/jemalloc.3        Mon Jul  3 22:21:44 2017        (r320622)
+++ head/contrib/jemalloc/doc/jemalloc.3        Mon Jul  3 23:27:57 2017        (r320623)
@@ -2,12 +2,12 @@
 .\"     Title: JEMALLOC
 .\"    Author: Jason Evans
 .\" Generator: DocBook XSL Stylesheets v1.76.1 <http://docbook.sf.net/>
-.\"      Date: 06/29/2017
+.\"      Date: 07/01/2017
 .\"    Manual: User Manual
-.\"    Source: jemalloc 5.0.0-4-g84f6c2cae0fb1399377ef6aea9368444c4987cc6
+.\"    Source: jemalloc 5.0.1-0-g896ed3a8b3f41998d4fb4d625d30ac63ef2d51fb
 .\"  Language: English
 .\"
-.TH "JEMALLOC" "3" "06/29/2017" "jemalloc 5.0.0-4-g84f6c2cae0fb" "User Manual"
+.TH "JEMALLOC" "3" "07/01/2017" "jemalloc 5.0.1-0-g896ed3a8b3f4" "User Manual"
 .\" -----------------------------------------------------------------
 .\" * Define some portability stuff
 .\" -----------------------------------------------------------------
@@ -31,7 +31,7 @@
 jemalloc \- general purpose memory allocation functions
 .SH "LIBRARY"
 .PP
-This manual describes jemalloc 5\&.0\&.0\-4\-g84f6c2cae0fb1399377ef6aea9368444c4987cc6\&. More information can be found at the
+This manual describes jemalloc 5\&.0\&.1\-0\-g896ed3a8b3f41998d4fb4d625d30ac63ef2d51fb\&. More information can be found at the
 \m[blue]\fBjemalloc website\fR\m[]\&\s-2\u[1]\d\s+2\&.
 .PP
 The following configuration options are enabled in libc\*(Aqs built\-in jemalloc:

Modified: head/contrib/jemalloc/include/jemalloc/internal/arena_externs.h
==============================================================================
--- head/contrib/jemalloc/include/jemalloc/internal/arena_externs.h     Mon Jul  3 22:21:44 2017        (r320622)
+++ head/contrib/jemalloc/include/jemalloc/internal/arena_externs.h     Mon Jul  3 23:27:57 2017        (r320623)
@@ -90,6 +90,7 @@ void arena_prefork3(tsdn_t *tsdn, arena_t *arena);
 void arena_prefork4(tsdn_t *tsdn, arena_t *arena);
 void arena_prefork5(tsdn_t *tsdn, arena_t *arena);
 void arena_prefork6(tsdn_t *tsdn, arena_t *arena);
+void arena_prefork7(tsdn_t *tsdn, arena_t *arena);
 void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena);
 void arena_postfork_child(tsdn_t *tsdn, arena_t *arena);
 

Modified: head/contrib/jemalloc/include/jemalloc/internal/background_thread_inlines.h
==============================================================================
--- head/contrib/jemalloc/include/jemalloc/internal/background_thread_inlines.h Mon Jul  3 22:21:44 2017        (r320622)
+++ head/contrib/jemalloc/include/jemalloc/internal/background_thread_inlines.h Mon Jul  3 23:27:57 2017        (r320623)
@@ -41,8 +41,9 @@ background_thread_indefinite_sleep(background_thread_i
 }
 
 JEMALLOC_ALWAYS_INLINE void
-arena_background_thread_inactivity_check(tsdn_t *tsdn, arena_t *arena) {
-       if (!background_thread_enabled()) {
+arena_background_thread_inactivity_check(tsdn_t *tsdn, arena_t *arena,
+    bool is_background_thread) {
+       if (!background_thread_enabled() || is_background_thread) {
                return;
        }
        background_thread_info_t *info =

Modified: head/contrib/jemalloc/include/jemalloc/internal/base_externs.h
==============================================================================
--- head/contrib/jemalloc/include/jemalloc/internal/base_externs.h      Mon Jul  3 22:21:44 2017        (r320622)
+++ head/contrib/jemalloc/include/jemalloc/internal/base_externs.h      Mon Jul  3 23:27:57 2017        (r320623)
@@ -3,7 +3,7 @@
 
 base_t *b0get(void);
 base_t *base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
-void base_delete(base_t *base);
+void base_delete(tsdn_t *tsdn, base_t *base);
 extent_hooks_t *base_extent_hooks_get(base_t *base);
 extent_hooks_t *base_extent_hooks_set(base_t *base,
     extent_hooks_t *extent_hooks);

Modified: head/contrib/jemalloc/include/jemalloc/internal/ctl.h
==============================================================================
--- head/contrib/jemalloc/include/jemalloc/internal/ctl.h       Mon Jul  3 22:21:44 2017        (r320622)
+++ head/contrib/jemalloc/include/jemalloc/internal/ctl.h       Mon Jul  3 23:27:57 2017        (r320623)
@@ -91,8 +91,7 @@ typedef struct ctl_arenas_s {
 
 int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
     void *newp, size_t newlen);
-int ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp,
-    size_t *miblenp);
+int ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp);
 
 int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
     size_t *oldlenp, void *newp, size_t newlen);

Modified: head/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h
==============================================================================
--- head/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h   Mon Jul  3 22:21:44 2017        (r320622)
+++ head/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h   Mon Jul  3 23:27:57 2017        (r320623)
@@ -25,6 +25,7 @@
 #    include <sys/uio.h>
 #  endif
 #  include <pthread.h>
+#  include <signal.h>
 #  ifdef JEMALLOC_OS_UNFAIR_LOCK
 #    include <os/lock.h>
 #  endif

Modified: head/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h
==============================================================================
--- head/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h    Mon Jul  3 22:21:44 2017        (r320622)
+++ head/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h    Mon Jul  3 23:27:57 2017        (r320623)
@@ -99,6 +99,9 @@
 /* Defined if pthread_atfork(3) is available. */
 #define JEMALLOC_HAVE_PTHREAD_ATFORK 
 
+/* Defined if pthread_setname_np(3) is available. */
+/* #undef JEMALLOC_HAVE_PTHREAD_SETNAME_NP */
+
 /*
  * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
  */

Modified: head/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_a.h
==============================================================================
--- head/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_a.h       Mon Jul  3 22:21:44 2017        (r320622)
+++ head/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_a.h       Mon Jul  3 23:27:57 2017        (r320623)
@@ -146,7 +146,10 @@ tcache_get(tsd_t *tsd) {
 }
 
 static inline void
-pre_reentrancy(tsd_t *tsd) {
+pre_reentrancy(tsd_t *tsd, arena_t *arena) {
+       /* arena is the current context.  Reentry from a0 is not allowed. */
+       assert(arena != arena_get(tsd_tsdn(tsd), 0, false));
+
        bool fast = tsd_fast(tsd);
        ++*tsd_reentrancy_levelp_get(tsd);
        if (fast) {

Modified: head/contrib/jemalloc/include/jemalloc/internal/private_namespace.h
==============================================================================
--- head/contrib/jemalloc/include/jemalloc/internal/private_namespace.h Mon Jul  3 22:21:44 2017        (r320622)
+++ head/contrib/jemalloc/include/jemalloc/internal/private_namespace.h Mon Jul  3 23:27:57 2017        (r320623)
@@ -69,6 +69,7 @@
 #define arena_prefork4 JEMALLOC_N(arena_prefork4)
 #define arena_prefork5 JEMALLOC_N(arena_prefork5)
 #define arena_prefork6 JEMALLOC_N(arena_prefork6)
+#define arena_prefork7 JEMALLOC_N(arena_prefork7)
 #define arena_prof_promote JEMALLOC_N(arena_prof_promote)
 #define arena_ralloc JEMALLOC_N(arena_ralloc)
 #define arena_ralloc_no_move JEMALLOC_N(arena_ralloc_no_move)

Modified: head/contrib/jemalloc/include/jemalloc/internal/tcache_externs.h
==============================================================================
--- head/contrib/jemalloc/include/jemalloc/internal/tcache_externs.h    Mon Jul  3 22:21:44 2017        (r320622)
+++ head/contrib/jemalloc/include/jemalloc/internal/tcache_externs.h    Mon Jul  3 23:27:57 2017        (r320623)
@@ -48,7 +48,7 @@ void tcache_arena_associate(tsdn_t *tsdn, tcache_t *tc
 void tcache_prefork(tsdn_t *tsdn);
 void tcache_postfork_parent(tsdn_t *tsdn);
 void tcache_postfork_child(tsdn_t *tsdn);
-void tcache_flush(void);
+void tcache_flush(tsd_t *tsd);
 bool tsd_tcache_data_init(tsd_t *tsd);
 bool tsd_tcache_enabled_data_init(tsd_t *tsd);
 

Modified: head/contrib/jemalloc/include/jemalloc/internal/tsd.h
==============================================================================
--- head/contrib/jemalloc/include/jemalloc/internal/tsd.h       Mon Jul  3 22:21:44 2017        (r320622)
+++ head/contrib/jemalloc/include/jemalloc/internal/tsd.h       Mon Jul  3 23:27:57 2017        (r320623)
@@ -99,9 +99,10 @@ enum {
        tsd_state_nominal_slow = 1, /* Initialized but on slow path. */
        /* the above 2 nominal states should be lower values. */
        tsd_state_nominal_max = 1, /* used for comparison only. */
-       tsd_state_purgatory = 2,
-       tsd_state_reincarnated = 3,
-       tsd_state_uninitialized = 4
+       tsd_state_minimal_initialized = 2,
+       tsd_state_purgatory = 3,
+       tsd_state_reincarnated = 4,
+       tsd_state_uninitialized = 5
 };
 
 /* Manually limit tsd_state_t to a single byte. */
@@ -190,7 +191,8 @@ JEMALLOC_ALWAYS_INLINE t *				\
 tsd_##n##p_get(tsd_t *tsd) {                                           \
        assert(tsd->state == tsd_state_nominal ||                       \
            tsd->state == tsd_state_nominal_slow ||                     \
-           tsd->state == tsd_state_reincarnated);                      \
+           tsd->state == tsd_state_reincarnated ||                     \
+           tsd->state == tsd_state_minimal_initialized);               \
        return tsd_##n##p_get_unsafe(tsd);                              \
 }
 MALLOC_TSD
@@ -225,7 +227,8 @@ MALLOC_TSD
 #define O(n, t, nt)                                                    \
 JEMALLOC_ALWAYS_INLINE void                                            \
 tsd_##n##_set(tsd_t *tsd, t val) {                                     \
-       assert(tsd->state != tsd_state_reincarnated);                   \
+       assert(tsd->state != tsd_state_reincarnated &&                  \
+           tsd->state != tsd_state_minimal_initialized);               \
        *tsd_##n##p_get(tsd) = val;                                     \
 }
 MALLOC_TSD
@@ -248,7 +251,7 @@ tsd_fast(tsd_t *tsd) {
 }
 
 JEMALLOC_ALWAYS_INLINE tsd_t *
-tsd_fetch_impl(bool init, bool internal) {
+tsd_fetch_impl(bool init, bool minimal) {
        tsd_t *tsd = tsd_get(init);
 
        if (!init && tsd_get_allocates() && tsd == NULL) {
@@ -257,7 +260,7 @@ tsd_fetch_impl(bool init, bool internal) {
        assert(tsd != NULL);
 
        if (unlikely(tsd->state != tsd_state_nominal)) {
-               return tsd_fetch_slow(tsd, internal);
+               return tsd_fetch_slow(tsd, minimal);
        }
        assert(tsd_fast(tsd));
        tsd_assert_fast(tsd);
@@ -265,9 +268,20 @@ tsd_fetch_impl(bool init, bool internal) {
        return tsd;
 }
 
+/* Get a minimal TSD that requires no cleanup.  See comments in free(). */
 JEMALLOC_ALWAYS_INLINE tsd_t *
-tsd_internal_fetch(void) {
+tsd_fetch_min(void) {
        return tsd_fetch_impl(true, true);
+}
+
+/* For internal background threads use only. */
+JEMALLOC_ALWAYS_INLINE tsd_t *
+tsd_internal_fetch(void) {
+       tsd_t *tsd = tsd_fetch_min();
+       /* Use reincarnated state to prevent full initialization. */
+       tsd->state = tsd_state_reincarnated;
+
+       return tsd;
 }
 
 JEMALLOC_ALWAYS_INLINE tsd_t *

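A note on the tsd_state renumbering above: the nominal states intentionally
keep the lowest values, with tsd_state_nominal_max provided "for comparison
only", so that classifying a thread as fully nominal remains a single compare
even as new states are added.  Roughly (a sketch; the helper's actual name
and form in jemalloc may differ):

    static inline bool
    tsd_in_nominal_state(tsd_t *tsd) {
            /*
             * Relies on tsd_state_nominal (0) and tsd_state_nominal_slow (1)
             * being the two lowest values; inserting
             * tsd_state_minimal_initialized at 2 preserves the invariant.
             */
            return tsd->state <= tsd_state_nominal_max;
    }
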
Modified: head/contrib/jemalloc/include/jemalloc/jemalloc.h
==============================================================================
--- head/contrib/jemalloc/include/jemalloc/jemalloc.h   Mon Jul  3 22:21:44 2017        (r320622)
+++ head/contrib/jemalloc/include/jemalloc/jemalloc.h   Mon Jul  3 23:27:57 2017        (r320623)
@@ -87,12 +87,12 @@ extern "C" {
 #include <limits.h>
 #include <strings.h>
 
-#define JEMALLOC_VERSION "5.0.0-4-g84f6c2cae0fb1399377ef6aea9368444c4987cc6"
+#define JEMALLOC_VERSION "5.0.1-0-g896ed3a8b3f41998d4fb4d625d30ac63ef2d51fb"
 #define JEMALLOC_VERSION_MAJOR 5
 #define JEMALLOC_VERSION_MINOR 0
-#define JEMALLOC_VERSION_BUGFIX 0
-#define JEMALLOC_VERSION_NREV 4
-#define JEMALLOC_VERSION_GID "84f6c2cae0fb1399377ef6aea9368444c4987cc6"
+#define JEMALLOC_VERSION_BUGFIX 1
+#define JEMALLOC_VERSION_NREV 0
+#define JEMALLOC_VERSION_GID "896ed3a8b3f41998d4fb4d625d30ac63ef2d51fb"
 
 #define MALLOCX_LG_ALIGN(la)   ((int)(la))
 #if LG_SIZEOF_PTR == 2

Modified: head/contrib/jemalloc/src/arena.c
==============================================================================
--- head/contrib/jemalloc/src/arena.c   Mon Jul  3 22:21:44 2017        (r320622)
+++ head/contrib/jemalloc/src/arena.c   Mon Jul  3 23:27:57 2017        (r320623)
@@ -61,7 +61,8 @@ const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = {
  */
 
 static void arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena,
-    arena_decay_t *decay, extents_t *extents, bool all, size_t npages_limit);
+    arena_decay_t *decay, extents_t *extents, bool all, size_t npages_limit,
+    bool is_background_thread);
 static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena,
     bool is_background_thread, bool all);
 static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
@@ -378,7 +379,7 @@ arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *aren
        if (arena_dirty_decay_ms_get(arena) == 0) {
                arena_decay_dirty(tsdn, arena, false, true);
        } else {
-               arena_background_thread_inactivity_check(tsdn, arena);
+               arena_background_thread_inactivity_check(tsdn, arena, false);
        }
 }
 
@@ -687,10 +688,11 @@ arena_decay_backlog_update(arena_decay_t *decay, uint6
 
 static void
 arena_decay_try_purge(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
-    extents_t *extents, size_t current_npages, size_t npages_limit) {
+    extents_t *extents, size_t current_npages, size_t npages_limit,
+    bool is_background_thread) {
        if (current_npages > npages_limit) {
                arena_decay_to_limit(tsdn, arena, decay, extents, false,
-                   npages_limit);
+                   npages_limit, is_background_thread);
        }
 }
 
@@ -720,7 +722,7 @@ arena_decay_epoch_advance_helper(arena_decay_t *decay,
 
 static void
 arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
-    extents_t *extents, const nstime_t *time, bool purge) {
+    extents_t *extents, const nstime_t *time, bool is_background_thread) {
        size_t current_npages = extents_npages_get(extents);
        arena_decay_epoch_advance_helper(decay, time, current_npages);
 
@@ -728,9 +730,10 @@ arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena
        /* We may unlock decay->mtx when try_purge(). Finish logging first. */
        decay->nunpurged = (npages_limit > current_npages) ? npages_limit :
            current_npages;
-       if (purge) {
+
+       if (!background_thread_enabled() || is_background_thread) {
                arena_decay_try_purge(tsdn, arena, decay, extents,
-                   current_npages, npages_limit);
+                   current_npages, npages_limit, is_background_thread);
        }
 }
 
@@ -795,7 +798,7 @@ arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_
        if (decay_ms <= 0) {
                if (decay_ms == 0) {
                        arena_decay_to_limit(tsdn, arena, decay, extents, false,
-                           0);
+                           0, is_background_thread);
                }
                return false;
        }
@@ -830,14 +833,13 @@ arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_
         */
        bool advance_epoch = arena_decay_deadline_reached(decay, &time);
        if (advance_epoch) {
-               bool should_purge = is_background_thread ||
-                   !background_thread_enabled();
                arena_decay_epoch_advance(tsdn, arena, decay, extents, &time,
-                   should_purge);
+                   is_background_thread);
        } else if (is_background_thread) {
                arena_decay_try_purge(tsdn, arena, decay, extents,
                    extents_npages_get(extents),
-                   arena_decay_backlog_npages_limit(decay));
+                   arena_decay_backlog_npages_limit(decay),
+                   is_background_thread);
        }
 
        return advance_epoch;
@@ -916,7 +918,7 @@ arena_stash_decayed(tsdn_t *tsdn, arena_t *arena,
 static size_t
 arena_decay_stashed(tsdn_t *tsdn, arena_t *arena,
     extent_hooks_t **r_extent_hooks, arena_decay_t *decay, extents_t *extents,
-    bool all, extent_list_t *decay_extents) {
+    bool all, extent_list_t *decay_extents, bool is_background_thread) {
        UNUSED size_t nmadvise, nunmapped;
        size_t npurged;
 
@@ -946,7 +948,7 @@ arena_decay_stashed(tsdn_t *tsdn, arena_t *arena,
                                extents_dalloc(tsdn, arena, r_extent_hooks,
                                    &arena->extents_muzzy, extent);
                                arena_background_thread_inactivity_check(tsdn,
-                                   arena);
+                                   arena, is_background_thread);
                                break;
                        }
                        /* Fall through. */
@@ -985,7 +987,8 @@ arena_decay_stashed(tsdn_t *tsdn, arena_t *arena,
  */
 static void
 arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
-    extents_t *extents, bool all, size_t npages_limit) {
+    extents_t *extents, bool all, size_t npages_limit,
+    bool is_background_thread) {
        witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
            WITNESS_RANK_CORE, 1);
        malloc_mutex_assert_owner(tsdn, &decay->mtx);
@@ -1005,7 +1008,8 @@ arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, are
            npages_limit, &decay_extents);
        if (npurge != 0) {
                UNUSED size_t npurged = arena_decay_stashed(tsdn, arena,
-                   &extent_hooks, decay, extents, all, &decay_extents);
+                   &extent_hooks, decay, extents, all, &decay_extents,
+                   is_background_thread);
                assert(npurged == npurge);
        }
 
@@ -1018,7 +1022,8 @@ arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_d
     extents_t *extents, bool is_background_thread, bool all) {
        if (all) {
                malloc_mutex_lock(tsdn, &decay->mtx);
-               arena_decay_to_limit(tsdn, arena, decay, extents, all, 0);
+               arena_decay_to_limit(tsdn, arena, decay, extents, all, 0,
+                   is_background_thread);
                malloc_mutex_unlock(tsdn, &decay->mtx);
 
                return false;
@@ -1252,7 +1257,7 @@ arena_destroy(tsd_t *tsd, arena_t *arena) {
         * Destroy the base allocator, which manages all metadata ever mapped by
         * this arena.
         */
-       base_delete(arena->base);
+       base_delete(tsd_tsdn(tsd), arena->base);
 }
 
 static extent_t *
@@ -2046,7 +2051,7 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *
                 * is done enough that we should have tsd.
                 */
                assert(!tsdn_null(tsdn));
-               pre_reentrancy(tsdn_tsd(tsdn));
+               pre_reentrancy(tsdn_tsd(tsdn), arena);
                if (hooks_arena_new_hook) {
                        hooks_arena_new_hook();
                }
@@ -2056,7 +2061,7 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *
        return arena;
 label_error:
        if (ind != 0) {
-               base_delete(base);
+               base_delete(tsdn, base);
        }
        return NULL;
 }
@@ -2082,28 +2087,33 @@ arena_prefork1(tsdn_t *tsdn, arena_t *arena) {
 
 void
 arena_prefork2(tsdn_t *tsdn, arena_t *arena) {
+       malloc_mutex_prefork(tsdn, &arena->extent_grow_mtx);
+}
+
+void
+arena_prefork3(tsdn_t *tsdn, arena_t *arena) {
        extents_prefork(tsdn, &arena->extents_dirty);
        extents_prefork(tsdn, &arena->extents_muzzy);
        extents_prefork(tsdn, &arena->extents_retained);
 }
 
 void
-arena_prefork3(tsdn_t *tsdn, arena_t *arena) {
+arena_prefork4(tsdn_t *tsdn, arena_t *arena) {
        malloc_mutex_prefork(tsdn, &arena->extent_avail_mtx);
 }
 
 void
-arena_prefork4(tsdn_t *tsdn, arena_t *arena) {
+arena_prefork5(tsdn_t *tsdn, arena_t *arena) {
        base_prefork(tsdn, arena->base);
 }
 
 void
-arena_prefork5(tsdn_t *tsdn, arena_t *arena) {
+arena_prefork6(tsdn_t *tsdn, arena_t *arena) {
        malloc_mutex_prefork(tsdn, &arena->large_mtx);
 }
 
 void
-arena_prefork6(tsdn_t *tsdn, arena_t *arena) {
+arena_prefork7(tsdn_t *tsdn, arena_t *arena) {
        for (unsigned i = 0; i < NBINS; i++) {
                malloc_mutex_prefork(tsdn, &arena->bins[i].lock);
        }
@@ -2122,6 +2132,7 @@ arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
        extents_postfork_parent(tsdn, &arena->extents_dirty);
        extents_postfork_parent(tsdn, &arena->extents_muzzy);
        extents_postfork_parent(tsdn, &arena->extents_retained);
+       malloc_mutex_postfork_parent(tsdn, &arena->extent_grow_mtx);
        malloc_mutex_postfork_parent(tsdn, &arena->decay_dirty.mtx);
        malloc_mutex_postfork_parent(tsdn, &arena->decay_muzzy.mtx);
        if (config_stats) {
@@ -2133,6 +2144,23 @@ void
 arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
        unsigned i;
 
+       atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
+       atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
+       if (tsd_arena_get(tsdn_tsd(tsdn)) == arena) {
+               arena_nthreads_inc(arena, false);
+       }
+       if (tsd_iarena_get(tsdn_tsd(tsdn)) == arena) {
+               arena_nthreads_inc(arena, true);
+       }
+       if (config_stats) {
+               ql_new(&arena->tcache_ql);
+               tcache_t *tcache = tcache_get(tsdn_tsd(tsdn));
+               if (tcache != NULL && tcache->arena == arena) {
+                       ql_elm_new(tcache, link);
+                       ql_tail_insert(&arena->tcache_ql, tcache, link);
+               }
+       }
+
        for (i = 0; i < NBINS; i++) {
                malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock);
        }
@@ -2142,6 +2170,7 @@ arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
        extents_postfork_child(tsdn, &arena->extents_dirty);
        extents_postfork_child(tsdn, &arena->extents_muzzy);
        extents_postfork_child(tsdn, &arena->extents_retained);
+       malloc_mutex_postfork_child(tsdn, &arena->extent_grow_mtx);
        malloc_mutex_postfork_child(tsdn, &arena->decay_dirty.mtx);
        malloc_mutex_postfork_child(tsdn, &arena->decay_muzzy.mtx);
        if (config_stats) {

Modified: head/contrib/jemalloc/src/background_thread.c
==============================================================================
--- head/contrib/jemalloc/src/background_thread.c       Mon Jul  3 22:21:44 2017        (r320622)
+++ head/contrib/jemalloc/src/background_thread.c       Mon Jul  3 23:27:57 2017        (r320623)
@@ -316,7 +316,7 @@ background_threads_disable_single(tsd_t *tsd, backgrou
                    &background_thread_lock);
        }
 
-       pre_reentrancy(tsd);
+       pre_reentrancy(tsd, NULL);
        malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
        bool has_thread;
        assert(info->state != background_thread_paused);
@@ -347,6 +347,38 @@ background_threads_disable_single(tsd_t *tsd, backgrou
 
 static void *background_thread_entry(void *ind_arg);
 
+static int
+background_thread_create_signals_masked(pthread_t *thread,
+    const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg) {
+       /*
+        * Mask signals during thread creation so that the thread inherits
+        * an empty signal set.
+        */
+       sigset_t set;
+       sigfillset(&set);
+       sigset_t oldset;
+       int mask_err = pthread_sigmask(SIG_SETMASK, &set, &oldset);
+       if (mask_err != 0) {
+               return mask_err;
+       }
+       int create_err = pthread_create_wrapper(thread, attr, start_routine,
+           arg);
+       /*
+        * Restore the signal mask.  Failure to restore the signal mask here
+        * changes program behavior.
+        */
+       int restore_err = pthread_sigmask(SIG_SETMASK, &oldset, NULL);
+       if (restore_err != 0) {
+               malloc_printf("<jemalloc>: background thread creation "
+                   "failed (%d), and signal mask restoration failed "
+                   "(%d)\n", create_err, restore_err);
+               if (opt_abort) {
+                       abort();
+               }
+       }
+       return create_err;
+}
+
 static void
 check_background_thread_creation(tsd_t *tsd, unsigned *n_created,
     bool *created_threads) {
@@ -376,9 +408,9 @@ label_restart:
                 */
                malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
 
-               pre_reentrancy(tsd);
-               int err = pthread_create_wrapper(&info->thread, NULL,
-                   background_thread_entry, (void *)(uintptr_t)i);
+               pre_reentrancy(tsd, NULL);
+               int err = background_thread_create_signals_masked(&info->thread,
+                   NULL, background_thread_entry, (void *)(uintptr_t)i);
                post_reentrancy(tsd);
 
                if (err == 0) {
@@ -467,7 +499,9 @@ static void *
 background_thread_entry(void *ind_arg) {
        unsigned thread_ind = (unsigned)(uintptr_t)ind_arg;
        assert(thread_ind < ncpus);
-
+#ifdef JEMALLOC_HAVE_PTHREAD_SETNAME_NP
+       pthread_setname_np(pthread_self(), "jemalloc_bg_thd");
+#endif
        if (opt_percpu_arena != percpu_arena_disabled) {
                set_current_thread_affinity((int)thread_ind);
        }
@@ -523,12 +557,12 @@ background_thread_create(tsd_t *tsd, unsigned arena_in
                return false;
        }
 
-       pre_reentrancy(tsd);
+       pre_reentrancy(tsd, NULL);
        /*
         * To avoid complications (besides reentrancy), create internal
         * background threads with the underlying pthread_create.
         */
-       int err = pthread_create_wrapper(&info->thread, NULL,
+       int err = background_thread_create_signals_masked(&info->thread, NULL,
            background_thread_entry, (void *)thread_ind);
        post_reentrancy(tsd);
 

Modified: head/contrib/jemalloc/src/base.c
==============================================================================
--- head/contrib/jemalloc/src/base.c    Mon Jul  3 22:21:44 2017        (r320622)
+++ head/contrib/jemalloc/src/base.c    Mon Jul  3 23:27:57 2017        (r320623)
@@ -15,7 +15,7 @@ static base_t *b0;
 
/******************************************************************************/
 
 static void *
-base_map(extent_hooks_t *extent_hooks, unsigned ind, size_t size) {
+base_map(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, size_t size) {
        void *addr;
        bool zero = true;
        bool commit = true;
@@ -25,15 +25,19 @@ base_map(extent_hooks_t *extent_hooks, unsigned ind, s
        if (extent_hooks == &extent_hooks_default) {
                addr = extent_alloc_mmap(NULL, size, PAGE, &zero, &commit);
        } else {
+               /* No arena context as we are creating new arenas. */
+               tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
+               pre_reentrancy(tsd, NULL);
                addr = extent_hooks->alloc(extent_hooks, NULL, size, PAGE,
                    &zero, &commit, ind);
+               post_reentrancy(tsd);
        }
 
        return addr;
 }
 
 static void
-base_unmap(extent_hooks_t *extent_hooks, unsigned ind, void *addr,
+base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr,
     size_t size) {
        /*
         * Cascade through dalloc, decommit, purge_forced, and purge_lazy,
@@ -61,27 +65,32 @@ base_unmap(extent_hooks_t *extent_hooks, unsigned ind,
                /* Nothing worked.  This should never happen. */
                not_reached();
        } else {
+               tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
+               pre_reentrancy(tsd, NULL);
                if (extent_hooks->dalloc != NULL &&
                    !extent_hooks->dalloc(extent_hooks, addr, size, true,
                    ind)) {
-                       return;
+                       goto label_done;
                }
                if (extent_hooks->decommit != NULL &&
                    !extent_hooks->decommit(extent_hooks, addr, size, 0, size,
                    ind)) {
-                       return;
+                       goto label_done;
                }
                if (extent_hooks->purge_forced != NULL &&
                    !extent_hooks->purge_forced(extent_hooks, addr, size, 0,
                    size, ind)) {
-                       return;
+                       goto label_done;
                }
                if (extent_hooks->purge_lazy != NULL &&
                    !extent_hooks->purge_lazy(extent_hooks, addr, size, 0, size,
                    ind)) {
-                       return;
+                       goto label_done;
                }
                /* Nothing worked.  That's the application's problem. */
+       label_done:
+               post_reentrancy(tsd);
+               return;
        }
 }
 
@@ -157,7 +166,7 @@ base_extent_bump_alloc(tsdn_t *tsdn, base_t *base, ext
  * On success a pointer to the initialized base_block_t header is returned.
  */
 static base_block_t *
-base_block_alloc(extent_hooks_t *extent_hooks, unsigned ind,
+base_block_alloc(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind,
     pszind_t *pind_last, size_t *extent_sn_next, size_t size,
     size_t alignment) {
        alignment = ALIGNMENT_CEILING(alignment, QUANTUM);
@@ -179,7 +188,7 @@ base_block_alloc(extent_hooks_t *extent_hooks, unsigne
        size_t next_block_size = HUGEPAGE_CEILING(sz_pind2sz(pind_next));
        size_t block_size = (min_block_size > next_block_size) ? min_block_size
            : next_block_size;
-       base_block_t *block = (base_block_t *)base_map(extent_hooks, ind,
+       base_block_t *block = (base_block_t *)base_map(tsdn, extent_hooks, ind,
            block_size);
        if (block == NULL) {
                return NULL;
@@ -207,8 +216,9 @@ base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t s
         * called.
         */
        malloc_mutex_unlock(tsdn, &base->mtx);
-       base_block_t *block = base_block_alloc(extent_hooks, base_ind_get(base),
-           &base->pind_last, &base->extent_sn_next, size, alignment);
+       base_block_t *block = base_block_alloc(tsdn, extent_hooks,
+           base_ind_get(base), &base->pind_last, &base->extent_sn_next, size,
+           alignment);
        malloc_mutex_lock(tsdn, &base->mtx);
        if (block == NULL) {
                return NULL;
@@ -234,8 +244,8 @@ base_t *
 base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
        pszind_t pind_last = 0;
        size_t extent_sn_next = 0;
-       base_block_t *block = base_block_alloc(extent_hooks, ind, &pind_last,
-           &extent_sn_next, sizeof(base_t), QUANTUM);
+       base_block_t *block = base_block_alloc(tsdn, extent_hooks, ind,
+           &pind_last, &extent_sn_next, sizeof(base_t), QUANTUM);
        if (block == NULL) {
                return NULL;
        }
@@ -249,7 +259,7 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *e
        atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELAXED);
        if (malloc_mutex_init(&base->mtx, "base", WITNESS_RANK_BASE,
            malloc_mutex_rank_exclusive)) {
-               base_unmap(extent_hooks, ind, block, block->size);
+               base_unmap(tsdn, extent_hooks, ind, block, block->size);
                return NULL;
        }
        base->pind_last = pind_last;
@@ -272,13 +282,13 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *e
 }
 
 void
-base_delete(base_t *base) {
+base_delete(tsdn_t *tsdn, base_t *base) {
        extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
        base_block_t *next = base->blocks;
        do {
                base_block_t *block = next;
                next = block->next;
-               base_unmap(extent_hooks, base_ind_get(base), block,
+               base_unmap(tsdn, extent_hooks, base_ind_get(base), block,
                    block->size);
        } while (next != NULL);
 }

Modified: head/contrib/jemalloc/src/ctl.c
==============================================================================
--- head/contrib/jemalloc/src/ctl.c     Mon Jul  3 22:21:44 2017        (r320622)
+++ head/contrib/jemalloc/src/ctl.c     Mon Jul  3 23:27:57 2017        (r320623)
@@ -622,7 +622,7 @@ arenas_i2a(size_t i) {
 }
 
 static ctl_arena_t *
-arenas_i_impl(tsdn_t *tsdn, size_t i, bool compat, bool init) {
+arenas_i_impl(tsd_t *tsd, size_t i, bool compat, bool init) {
        ctl_arena_t *ret;
 
        assert(!compat || !init);
@@ -635,15 +635,15 @@ arenas_i_impl(tsdn_t *tsdn, size_t i, bool compat, boo
                                ctl_arena_stats_t       astats;
                        };
                        struct container_s *cont =
-                           (struct container_s *)base_alloc(tsdn, b0get(),
-                           sizeof(struct container_s), QUANTUM);
+                           (struct container_s *)base_alloc(tsd_tsdn(tsd),
+                           b0get(), sizeof(struct container_s), QUANTUM);
                        if (cont == NULL) {
                                return NULL;
                        }
                        ret = &cont->ctl_arena;
                        ret->astats = &cont->astats;
                } else {
-                       ret = (ctl_arena_t *)base_alloc(tsdn, b0get(),
+                       ret = (ctl_arena_t *)base_alloc(tsd_tsdn(tsd), b0get(),
                            sizeof(ctl_arena_t), QUANTUM);
                        if (ret == NULL) {
                                return NULL;
@@ -659,7 +659,7 @@ arenas_i_impl(tsdn_t *tsdn, size_t i, bool compat, boo
 
 static ctl_arena_t *
 arenas_i(size_t i) {
-       ctl_arena_t *ret = arenas_i_impl(TSDN_NULL, i, true, false);
+       ctl_arena_t *ret = arenas_i_impl(tsd_fetch(), i, true, false);
        assert(ret != NULL);
        return ret;
 }
@@ -863,7 +863,7 @@ ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, ctl_ar
 }
 
 static unsigned
-ctl_arena_init(tsdn_t *tsdn, extent_hooks_t *extent_hooks) {
+ctl_arena_init(tsd_t *tsd, extent_hooks_t *extent_hooks) {
        unsigned arena_ind;
        ctl_arena_t *ctl_arena;
 
@@ -876,12 +876,12 @@ ctl_arena_init(tsdn_t *tsdn, extent_hooks_t *extent_ho
        }
 
        /* Trigger stats allocation. */
-       if (arenas_i_impl(tsdn, arena_ind, false, true) == NULL) {
+       if (arenas_i_impl(tsd, arena_ind, false, true) == NULL) {
                return UINT_MAX;
        }
 
        /* Initialize new arena. */
-       if (arena_init(tsdn, arena_ind, extent_hooks) == NULL) {
+       if (arena_init(tsd_tsdn(tsd), arena_ind, extent_hooks) == NULL) {
                return UINT_MAX;
        }
 
@@ -975,8 +975,9 @@ ctl_refresh(tsdn_t *tsdn) {
 }
 
 static bool
-ctl_init(tsdn_t *tsdn) {
+ctl_init(tsd_t *tsd) {
        bool ret;
+       tsdn_t *tsdn = tsd_tsdn(tsd);
 
        malloc_mutex_lock(tsdn, &ctl_mtx);
        if (!ctl_initialized) {
@@ -1010,14 +1011,14 @@ ctl_init(tsdn_t *tsdn) {
                 * here rather than doing it lazily elsewhere, in order
                 * to limit when OOM-caused errors can occur.
                 */
-               if ((ctl_sarena = arenas_i_impl(tsdn, MALLCTL_ARENAS_ALL, false,
+               if ((ctl_sarena = arenas_i_impl(tsd, MALLCTL_ARENAS_ALL, false,
                    true)) == NULL) {
                        ret = true;
                        goto label_return;
                }
                ctl_sarena->initialized = true;
 
-               if ((ctl_darena = arenas_i_impl(tsdn, MALLCTL_ARENAS_DESTROYED,
+               if ((ctl_darena = arenas_i_impl(tsd, MALLCTL_ARENAS_DESTROYED,
                    false, true)) == NULL) {
                        ret = true;
                        goto label_return;
@@ -1031,7 +1032,7 @@ ctl_init(tsdn_t *tsdn) {
 
                ctl_arenas->narenas = narenas_total_get();
                for (i = 0; i < ctl_arenas->narenas; i++) {
-                       if (arenas_i_impl(tsdn, i, false, true) == NULL) {
+                       if (arenas_i_impl(tsd, i, false, true) == NULL) {
                                ret = true;
                                goto label_return;
                        }
@@ -1156,7 +1157,7 @@ ctl_byname(tsd_t *tsd, const char *name, void *oldp, s
        size_t mib[CTL_MAX_DEPTH];
        const ctl_named_node_t *node;
 
-       if (!ctl_initialized && ctl_init(tsd_tsdn(tsd))) {
+       if (!ctl_initialized && ctl_init(tsd)) {
                ret = EAGAIN;
                goto label_return;
        }
@@ -1180,15 +1181,15 @@ label_return:
 }
 
 int
-ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp, size_t *miblenp) {
+ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp) {
        int ret;
 
-       if (!ctl_initialized && ctl_init(tsdn)) {
+       if (!ctl_initialized && ctl_init(tsd)) {
                ret = EAGAIN;
                goto label_return;
        }
 
-       ret = ctl_lookup(tsdn, name, NULL, mibp, miblenp);
+       ret = ctl_lookup(tsd_tsdn(tsd), name, NULL, mibp, miblenp);
 label_return:
        return(ret);
 }
@@ -1200,7 +1201,7 @@ ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen
        const ctl_named_node_t *node;
        size_t i;
 
-       if (!ctl_initialized && ctl_init(tsd_tsdn(tsd))) {
+       if (!ctl_initialized && ctl_init(tsd)) {
                ret = EAGAIN;
                goto label_return;
        }
@@ -1696,7 +1697,7 @@ thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib,
        READONLY();
        WRITEONLY();
 
-       tcache_flush();
+       tcache_flush(tsd);
 
        ret = 0;
 label_return:
@@ -1970,7 +1971,7 @@ arena_reset_finish_background_thread(tsd_t *tsd, unsig
                        unsigned ind = arena_ind % ncpus;
                        background_thread_info_t *info =
                            &background_thread_info[ind];
-                       assert(info->state = background_thread_paused);
+                       assert(info->state == background_thread_paused);
                        malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
                        info->state = background_thread_started;
                        malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
@@ -2312,8 +2313,7 @@ arenas_create_ctl(tsd_t *tsd, const size_t *mib, size_
 
        extent_hooks = (extent_hooks_t *)&extent_hooks_default;
        WRITE(extent_hooks, extent_hooks_t *);
-       if ((arena_ind = ctl_arena_init(tsd_tsdn(tsd), extent_hooks)) ==
-           UINT_MAX) {
+       if ((arena_ind = ctl_arena_init(tsd, extent_hooks)) == UINT_MAX) {
                ret = EAGAIN;
                goto label_return;
        }

Modified: head/contrib/jemalloc/src/extent.c
==============================================================================
--- head/contrib/jemalloc/src/extent.c  Mon Jul  3 22:21:44 2017        (r320622)
+++ head/contrib/jemalloc/src/extent.c  Mon Jul  3 23:27:57 2017        (r320623)
@@ -1025,6 +1025,18 @@ extent_alloc_default(extent_hooks_t *extent_hooks, voi
            alignment, zero, commit);
 }
 
+static void
+extent_hook_pre_reentrancy(tsdn_t *tsdn, arena_t *arena) {
+       tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
+       pre_reentrancy(tsd, arena);
+}
+
+static void
+extent_hook_post_reentrancy(tsdn_t *tsdn) {
+       tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
+       post_reentrancy(tsd);
+}
+
 /*
  * If virtual memory is retained, create increasingly larger extents from which
  * to split requested extents in order to limit the total number of disjoint
@@ -1073,9 +1085,11 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
                    &zeroed, &committed, (dss_prec_t)atomic_load_u(
                    &arena->dss_prec, ATOMIC_RELAXED));
        } else {
+               extent_hook_pre_reentrancy(tsdn, arena);
                ptr = (*r_extent_hooks)->alloc(*r_extent_hooks, NULL,
                    alloc_size, PAGE, &zeroed, &committed,
                    arena_ind_get(arena));
+               extent_hook_post_reentrancy(tsdn);
        }
 
        extent_init(extent, arena, ptr, alloc_size, false, NSIZES,
@@ -1247,8 +1261,10 @@ extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena
                addr = extent_alloc_default_impl(tsdn, arena, new_addr, esize,
                    alignment, zero, commit);
        } else {
+               extent_hook_pre_reentrancy(tsdn, arena);
                addr = (*r_extent_hooks)->alloc(*r_extent_hooks, new_addr,
                    esize, alignment, zero, commit, arena_ind_get(arena));
+               extent_hook_post_reentrancy(tsdn);
        }
        if (addr == NULL) {
                extent_dalloc(tsdn, arena, extent);
@@ -1486,10 +1502,12 @@ extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena
                err = extent_dalloc_default_impl(extent_base_get(extent),
                    extent_size_get(extent));
        } else {
+               extent_hook_pre_reentrancy(tsdn, arena);
                err = ((*r_extent_hooks)->dalloc == NULL ||
                    (*r_extent_hooks)->dalloc(*r_extent_hooks,
                    extent_base_get(extent), extent_size_get(extent),
                    extent_committed_get(extent), arena_ind_get(arena)));
+               extent_hook_post_reentrancy(tsdn);
        }
 
        if (!err) {
@@ -1515,6 +1533,9 @@ extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
        }
 
        extent_reregister(tsdn, extent);
+       if (*r_extent_hooks != &extent_hooks_default) {
+               extent_hook_pre_reentrancy(tsdn, arena);
+       }
        /* Try to decommit; purge if that fails. */
        bool zeroed;
        if (!extent_committed_get(extent)) {
@@ -1536,6 +1557,9 @@ extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
        } else {
                zeroed = false;
        }
+       if (*r_extent_hooks != &extent_hooks_default) {
+               extent_hook_post_reentrancy(tsdn);
+       }
        extent_zeroed_set(extent, zeroed);
 
        if (config_prof) {
@@ -1579,9 +1603,11 @@ extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena,
                extent_destroy_default_impl(extent_base_get(extent),
                    extent_size_get(extent));
        } else if ((*r_extent_hooks)->destroy != NULL) {
+               extent_hook_pre_reentrancy(tsdn, arena);
                (*r_extent_hooks)->destroy(*r_extent_hooks,
                    extent_base_get(extent), extent_size_get(extent),
                    extent_committed_get(extent), arena_ind_get(arena));
+               extent_hook_post_reentrancy(tsdn);
        }
 
        extent_dalloc(tsdn, arena, extent);
@@ -1602,9 +1628,15 @@ extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
            WITNESS_RANK_CORE, growing_retained ? 1 : 0);
 
        extent_hooks_assure_initialized(arena, r_extent_hooks);
+       if (*r_extent_hooks != &extent_hooks_default) {
+               extent_hook_pre_reentrancy(tsdn, arena);
+       }
        bool err = ((*r_extent_hooks)->commit == NULL ||
            (*r_extent_hooks)->commit(*r_extent_hooks, extent_base_get(extent),
            extent_size_get(extent), offset, length, arena_ind_get(arena)));

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***