Author: jasone
Date: Thu Jun  9 06:10:20 2016
New Revision: 301718
URL: https://svnweb.freebsd.org/changeset/base/301718

Log:
  Update jemalloc to 4.2.1.

Modified:
  head/contrib/jemalloc/ChangeLog
  head/contrib/jemalloc/FREEBSD-diffs
  head/contrib/jemalloc/VERSION
  head/contrib/jemalloc/doc/jemalloc.3
  head/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h
  head/contrib/jemalloc/include/jemalloc/internal/prof.h
  head/contrib/jemalloc/include/jemalloc/jemalloc.h
  head/contrib/jemalloc/src/arena.c
  head/contrib/jemalloc/src/chunk.c
  head/contrib/jemalloc/src/chunk_mmap.c
  head/contrib/jemalloc/src/huge.c
  head/contrib/jemalloc/src/jemalloc.c
  head/contrib/jemalloc/src/nstime.c

Modified: head/contrib/jemalloc/ChangeLog
==============================================================================
--- head/contrib/jemalloc/ChangeLog     Thu Jun  9 05:48:34 2016        (r301717)
+++ head/contrib/jemalloc/ChangeLog     Thu Jun  9 06:10:20 2016        (r301718)
@@ -4,6 +4,17 @@ brevity.  Much more detail can be found 
 
     https://github.com/jemalloc/jemalloc
 
+* 4.2.1 (June 8, 2016)
+
+  Bug fixes:
+  - Fix bootstrapping issues for configurations that require allocation during
+    tsd initialization (e.g. --disable-tls).  (@cferris1000, @jasone)
+  - Fix gettimeofday() version of nstime_update().  (@ronawho)
+  - Fix Valgrind regressions in calloc() and chunk_alloc_wrapper().  (@ronawho)
+  - Fix potential VM map fragmentation regression.  (@jasone)
+  - Fix opt_zero-triggered in-place huge reallocation zeroing.  (@jasone)
+  - Fix heap profiling context leaks in reallocation edge cases.  (@jasone)
+
 * 4.2.0 (May 12, 2016)
 
   New features:

Modified: head/contrib/jemalloc/FREEBSD-diffs
==============================================================================
--- head/contrib/jemalloc/FREEBSD-diffs Thu Jun  9 05:48:34 2016        (r301717)
+++ head/contrib/jemalloc/FREEBSD-diffs Thu Jun  9 06:10:20 2016        (r301718)
@@ -79,7 +79,7 @@ index b1de2b6..da6b6d2 100644
  
  JEMALLOC_ALWAYS_INLINE size_t
 diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
-index 51bf897..7de22ea 100644
+index 8f82edd..78e2df2 100644
 --- a/include/jemalloc/internal/jemalloc_internal.h.in
 +++ b/include/jemalloc/internal/jemalloc_internal.h.in
 @@ -8,6 +8,9 @@
@@ -335,7 +335,7 @@ index f943891..47d032c 100755
 +#include "jemalloc_FreeBSD.h"
  EOF
 diff --git a/src/jemalloc.c b/src/jemalloc.c
-index 40eb2ea..666c49d 100644
+index 5d1f493..46dd1d1 100644
 --- a/src/jemalloc.c
 +++ b/src/jemalloc.c
 @@ -4,6 +4,10 @@

Modified: head/contrib/jemalloc/VERSION
==============================================================================
--- head/contrib/jemalloc/VERSION       Thu Jun  9 05:48:34 2016        (r301717)
+++ head/contrib/jemalloc/VERSION       Thu Jun  9 06:10:20 2016        (r301718)
@@ -1 +1 @@
-4.2.0-1-gdc7ff6306d7a15b53479e2fb8e5546404b82e6fc
+4.2.1-0-g3de035335255d553bdb344c32ffdb603816195d8

Modified: head/contrib/jemalloc/doc/jemalloc.3
==============================================================================
--- head/contrib/jemalloc/doc/jemalloc.3        Thu Jun  9 05:48:34 2016        (r301717)
+++ head/contrib/jemalloc/doc/jemalloc.3        Thu Jun  9 06:10:20 2016        (r301718)
@@ -2,12 +2,12 @@
 .\"     Title: JEMALLOC
 .\"    Author: Jason Evans
 .\" Generator: DocBook XSL Stylesheets v1.76.1 <http://docbook.sf.net/>
-.\"      Date: 05/12/2016
+.\"      Date: 06/08/2016
 .\"    Manual: User Manual
-.\"    Source: jemalloc 4.2.0-1-gdc7ff6306d7a15b53479e2fb8e5546404b82e6fc
+.\"    Source: jemalloc 4.2.1-0-g3de035335255d553bdb344c32ffdb603816195d8
 .\"  Language: English
 .\"
-.TH "JEMALLOC" "3" "05/12/2016" "jemalloc 4.2.0-1-gdc7ff6306d7a" "User Manual"
+.TH "JEMALLOC" "3" "06/08/2016" "jemalloc 4.2.1-0-g3de035335255" "User Manual"
 .\" -----------------------------------------------------------------
 .\" * Define some portability stuff
 .\" -----------------------------------------------------------------
@@ -31,7 +31,7 @@
 jemalloc \- general purpose memory allocation functions
 .SH "LIBRARY"
 .PP
-This manual describes jemalloc 4\&.2\&.0\-1\-gdc7ff6306d7a15b53479e2fb8e5546404b82e6fc\&. More information can be found at the
+This manual describes jemalloc 4\&.2\&.1\-0\-g3de035335255d553bdb344c32ffdb603816195d8\&. More information can be found at the
 \m[blue]\fBjemalloc website\fR\m[]\&\s-2\u[1]\d\s+2\&.
 .PP
 The following configuration options are enabled in libc\*(Aqs built\-in jemalloc:

Modified: head/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h
==============================================================================
--- head/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h Thu Jun  9 05:48:34 2016        (r301717)
+++ head/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h Thu Jun  9 06:10:20 2016        (r301718)
@@ -751,7 +751,7 @@ sa2u(size_t size, size_t alignment)
                 * Calculate the size of the over-size run that arena_palloc()
                 * would need to allocate in order to guarantee the alignment.
                 */
-               if (usize + large_pad + alignment <= arena_maxrun)
+               if (usize + large_pad + alignment - PAGE <= arena_maxrun)
                        return (usize);
        }
 
@@ -781,7 +781,7 @@ sa2u(size_t size, size_t alignment)
         * Calculate the multi-chunk mapping that huge_palloc() would need in
         * order to guarantee the alignment.
         */
-       if (usize + alignment < usize) {
+       if (usize + alignment - PAGE < usize) {
                /* size_t overflow. */
                return (0);
        }

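The sa2u() hunks above, together with the arena.c and chunk_mmap.c hunks
below, correspond to the ChangeLog's "potential VM map fragmentation
regression" fix: when alignment is a multiple of the page size, a
page-aligned run of usize + alignment - PAGE bytes is already guaranteed
to contain an alignment-aligned span of usize bytes, so requesting the
extra PAGE only inflated the over-size run and its overflow check.  A
minimal standalone sketch of that worst-case arithmetic; PAGE and
align_up() here are illustrative stand-ins, not jemalloc's definitions:

  #include <assert.h>
  #include <stdint.h>
  #include <stdio.h>

  #define PAGE ((uintptr_t)4096)        /* assumed page size for this sketch */

  /* Round addr up to the next multiple of alignment (a power of two). */
  static uintptr_t
  align_up(uintptr_t addr, uintptr_t alignment)
  {
          return ((addr + alignment - 1) & ~(alignment - 1));
  }

  int
  main(void)
  {
          uintptr_t alignment = 16 * PAGE;
          uintptr_t usize = 3 * PAGE;
          uintptr_t base;

          /*
           * For any page-aligned base, the padding needed to reach the
           * next alignment-aligned address is at most alignment - PAGE,
           * so usize + alignment - PAGE bytes always suffice.
           */
          for (base = 0; base < alignment; base += PAGE) {
                  uintptr_t pad = align_up(base, alignment) - base;
                  assert(pad <= alignment - PAGE);
                  assert(pad + usize <= usize + alignment - PAGE);
          }
          printf("usize + alignment - PAGE suffices for any page-aligned base\n");
          return (0);
  }
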
Modified: head/contrib/jemalloc/include/jemalloc/internal/prof.h
==============================================================================
--- head/contrib/jemalloc/include/jemalloc/internal/prof.h      Thu Jun  9 05:48:34 2016        (r301717)
+++ head/contrib/jemalloc/include/jemalloc/internal/prof.h      Thu Jun  9 06:10:20 2016        (r301718)
@@ -513,6 +513,7 @@ prof_realloc(tsd_t *tsd, const void *ptr
                         * though its actual usize was insufficient to cross the
                         * sample threshold.
                         */
+                       prof_alloc_rollback(tsd, tctx, true);
                        tctx = (prof_tctx_t *)(uintptr_t)1U;
                }
        }

Modified: head/contrib/jemalloc/include/jemalloc/jemalloc.h
==============================================================================
--- head/contrib/jemalloc/include/jemalloc/jemalloc.h   Thu Jun  9 05:48:34 2016        (r301717)
+++ head/contrib/jemalloc/include/jemalloc/jemalloc.h   Thu Jun  9 06:10:20 2016        (r301718)
@@ -87,12 +87,12 @@ extern "C" {
 #include <limits.h>
 #include <strings.h>
 
-#define        JEMALLOC_VERSION "4.2.0-1-gdc7ff6306d7a15b53479e2fb8e5546404b82e6fc"
+#define        JEMALLOC_VERSION "4.2.1-0-g3de035335255d553bdb344c32ffdb603816195d8"
 #define        JEMALLOC_VERSION_MAJOR 4
 #define        JEMALLOC_VERSION_MINOR 2
-#define        JEMALLOC_VERSION_BUGFIX 0
-#define        JEMALLOC_VERSION_NREV 1
-#define        JEMALLOC_VERSION_GID "dc7ff6306d7a15b53479e2fb8e5546404b82e6fc"
+#define        JEMALLOC_VERSION_BUGFIX 1
+#define        JEMALLOC_VERSION_NREV 0
+#define        JEMALLOC_VERSION_GID "3de035335255d553bdb344c32ffdb603816195d8"
 
 #  define MALLOCX_LG_ALIGN(la) ((int)(la))
 #  if LG_SIZEOF_PTR == 2

Modified: head/contrib/jemalloc/src/arena.c
==============================================================================
--- head/contrib/jemalloc/src/arena.c   Thu Jun  9 05:48:34 2016        (r301717)
+++ head/contrib/jemalloc/src/arena.c   Thu Jun  9 06:10:20 2016        (r301718)
@@ -2687,7 +2687,7 @@ arena_palloc_large(tsdn_t *tsdn, arena_t
                return (NULL);
 
        alignment = PAGE_CEILING(alignment);
-       alloc_size = usize + large_pad + alignment;
+       alloc_size = usize + large_pad + alignment - PAGE;
 
        malloc_mutex_lock(tsdn, &arena->lock);
        run = arena_run_alloc_large(tsdn, arena, alloc_size, false);

Modified: head/contrib/jemalloc/src/chunk.c
==============================================================================
--- head/contrib/jemalloc/src/chunk.c   Thu Jun  9 05:48:34 2016        (r301717)
+++ head/contrib/jemalloc/src/chunk.c   Thu Jun  9 06:10:20 2016        (r301718)
@@ -421,15 +421,11 @@ chunk_arena_get(tsdn_t *tsdn, unsigned a
 }
 
 static void *
-chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
-    bool *commit, unsigned arena_ind)
+chunk_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
+    size_t size, size_t alignment, bool *zero, bool *commit)
 {
        void *ret;
-       tsdn_t *tsdn;
-       arena_t *arena;
 
-       tsdn = tsdn_fetch();
-       arena = chunk_arena_get(tsdn, arena_ind);
        ret = chunk_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
            commit, arena->dss_prec);
        if (ret == NULL)
@@ -441,6 +437,20 @@ chunk_alloc_default(void *new_addr, size
 }
 
 static void *
+chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
+    bool *commit, unsigned arena_ind)
+{
+       tsdn_t *tsdn;
+       arena_t *arena;
+
+       tsdn = tsdn_fetch();
+       arena = chunk_arena_get(tsdn, arena_ind);
+
+       return (chunk_alloc_default_impl(tsdn, arena, new_addr, size, alignment,
+           zero, commit));
+}
+
+static void *
 chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
     void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit)
 {
@@ -472,14 +482,23 @@ chunk_alloc_wrapper(tsdn_t *tsdn, arena_
        ret = chunk_alloc_retained(tsdn, arena, chunk_hooks, new_addr, size,
            alignment, zero, commit);
        if (ret == NULL) {
-               ret = chunk_hooks->alloc(new_addr, size, alignment, zero,
-                   commit, arena->ind);
+               if (chunk_hooks->alloc == chunk_alloc_default) {
+                       /* Call directly to propagate tsdn. */
+                       ret = chunk_alloc_default_impl(tsdn, arena, new_addr,
+                           size, alignment, zero, commit);
+               } else {
+                       ret = chunk_hooks->alloc(new_addr, size, alignment,
+                           zero, commit, arena->ind);
+               }
+
                if (ret == NULL)
                        return (NULL);
+
+               if (config_valgrind && chunk_hooks->alloc !=
+                   chunk_alloc_default)
+                       JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
        }
 
-       if (config_valgrind && chunk_hooks->alloc != chunk_alloc_default)
-               JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
        return (ret);
 }
 
@@ -591,19 +610,30 @@ chunk_dalloc_cache(tsdn_t *tsdn, arena_t
 }
 
 static bool
-chunk_dalloc_default(void *chunk, size_t size, bool committed,
-    unsigned arena_ind)
+chunk_dalloc_default_impl(tsdn_t *tsdn, void *chunk, size_t size)
 {
 
-       if (!have_dss || !chunk_in_dss(tsdn_fetch(), chunk))
+       if (!have_dss || !chunk_in_dss(tsdn, chunk))
                return (chunk_dalloc_mmap(chunk, size));
        return (true);
 }
 
+static bool
+chunk_dalloc_default(void *chunk, size_t size, bool committed,
+    unsigned arena_ind)
+{
+       tsdn_t *tsdn;
+
+       tsdn = tsdn_fetch();
+
+       return (chunk_dalloc_default_impl(tsdn, chunk, size));
+}
+
 void
 chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
     void *chunk, size_t size, bool zeroed, bool committed)
 {
+       bool err;
 
        assert(chunk != NULL);
        assert(CHUNK_ADDR2BASE(chunk) == chunk);
@@ -612,7 +642,13 @@ chunk_dalloc_wrapper(tsdn_t *tsdn, arena
 
        chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
        /* Try to deallocate. */
-       if (!chunk_hooks->dalloc(chunk, size, committed, arena->ind))
+       if (chunk_hooks->dalloc == chunk_dalloc_default) {
+               /* Call directly to propagate tsdn. */
+               err = chunk_dalloc_default_impl(tsdn, chunk, size);
+       } else
+               err = chunk_hooks->dalloc(chunk, size, committed, arena->ind);
+
+       if (!err)
                return;
        /* Try to decommit; purge if that fails. */
        if (committed) {
@@ -681,26 +717,34 @@ chunk_split_default(void *chunk, size_t 
 }
 
 static bool
-chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
-    bool committed, unsigned arena_ind)
+chunk_merge_default_impl(tsdn_t *tsdn, void *chunk_a, void *chunk_b)
 {
 
        if (!maps_coalesce)
                return (true);
-       if (have_dss) {
-               tsdn_t *tsdn = tsdn_fetch();
-               if (chunk_in_dss(tsdn, chunk_a) != chunk_in_dss(tsdn, chunk_b))
-                       return (true);
-       }
+       if (have_dss && chunk_in_dss(tsdn, chunk_a) != chunk_in_dss(tsdn,
+           chunk_b))
+               return (true);
 
        return (false);
 }
 
+static bool
+chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
+    bool committed, unsigned arena_ind)
+{
+       tsdn_t *tsdn;
+
+       tsdn = tsdn_fetch();
+
+       return (chunk_merge_default_impl(tsdn, chunk_a, chunk_b));
+}
+
 static rtree_node_elm_t *
 chunks_rtree_node_alloc(size_t nelms)
 {
 
-       return ((rtree_node_elm_t *)base_alloc(tsdn_fetch(), nelms *
+       return ((rtree_node_elm_t *)base_alloc(TSDN_NULL, nelms *
            sizeof(rtree_node_elm_t)));
 }
 

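The chunk.c changes above follow one pattern: each default hook (alloc,
dalloc, merge) is split into a *_impl() function that takes the
already-known tsdn/arena, and the wrappers compare the installed hook
against the default so they can call the *_impl() directly instead of
re-fetching thread-specific data via tsdn_fetch() (which appears to be
part of the tsd bootstrapping fix, along with chunks_rtree_node_alloc()
now passing TSDN_NULL to base_alloc()).  The Valgrind annotation in
chunk_alloc_wrapper() also moves inside the fallback branch, so it is
only applied to chunks freshly returned by a non-default hook rather
than to retained chunks.  A hedged, self-contained sketch of the
dispatch idea, using invented ctx/hook names rather than jemalloc's:

  #include <stddef.h>
  #include <stdio.h>

  typedef struct { int id; } ctx_t;     /* stand-in for tsdn_t/arena_t state */

  /* Hook signature cannot carry a ctx_t, mirroring chunk_hooks_t. */
  typedef void *(*alloc_hook_t)(size_t size, unsigned arena_ind);

  static ctx_t *
  ctx_fetch(void)
  {
          static ctx_t global = { 42 }; /* pretend this is a TSD lookup */

          return (&global);
  }

  /* Core implementation takes the context explicitly. */
  static void *
  alloc_default_impl(ctx_t *ctx, size_t size)
  {
          printf("impl: ctx %d, %zu bytes\n", ctx->id, size);
          return (NULL);                /* real allocation elided */
  }

  /* Default hook: fetches the context only when called via the hook table. */
  static void *
  alloc_default(size_t size, unsigned arena_ind)
  {
          (void)arena_ind;
          return (alloc_default_impl(ctx_fetch(), size));
  }

  static void *
  alloc_wrapper(ctx_t *ctx, alloc_hook_t hook, size_t size, unsigned arena_ind)
  {
          if (hook == alloc_default) {
                  /* Call directly to propagate the already-known context. */
                  return (alloc_default_impl(ctx, size));
          }
          return (hook(size, arena_ind));
  }

  int
  main(void)
  {
          (void)alloc_wrapper(ctx_fetch(), alloc_default, 4096, 0);
          return (0);
  }
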
Modified: head/contrib/jemalloc/src/chunk_mmap.c
==============================================================================
--- head/contrib/jemalloc/src/chunk_mmap.c      Thu Jun  9 05:48:34 2016        (r301717)
+++ head/contrib/jemalloc/src/chunk_mmap.c      Thu Jun  9 06:10:20 2016        (r301718)
@@ -9,7 +9,7 @@ chunk_alloc_mmap_slow(size_t size, size_
        void *ret;
        size_t alloc_size;
 
-       alloc_size = size + alignment;
+       alloc_size = size + alignment - PAGE;
        /* Beware size_t wrap-around. */
        if (alloc_size < size)
                return (NULL);

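chunk_alloc_mmap_slow() gets the same one-page reduction: it over-maps,
picks the first aligned address inside the mapping, and trims the rest,
so size + alignment - PAGE bytes (plus the wrap-around check) are enough
when size and alignment are both page multiples.  A rough standalone
illustration of that over-allocate-and-trim technique using plain
mmap()/munmap() rather than jemalloc's pages_* helpers:

  #include <stdint.h>
  #include <stdio.h>
  #include <sys/mman.h>
  #include <unistd.h>

  /* Map an alignment-aligned region of size bytes; both are page multiples. */
  static void *
  alloc_aligned_slow(size_t size, size_t alignment)
  {
          size_t page = (size_t)sysconf(_SC_PAGESIZE);
          size_t alloc_size = size + alignment - page;
          char *base, *ret;
          size_t trail;

          /* Beware size_t wrap-around. */
          if (alloc_size < size)
                  return (NULL);
          base = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
              MAP_PRIVATE | MAP_ANON, -1, 0);
          if (base == MAP_FAILED)
                  return (NULL);
          /* First alignment-aligned address within the mapping. */
          ret = (char *)(((uintptr_t)base + alignment - 1) &
              ~((uintptr_t)alignment - 1));
          /* Unmap the unused leading and trailing portions. */
          if (ret != base)
                  munmap(base, (size_t)(ret - base));
          trail = alloc_size - (size_t)(ret - base) - size;
          if (trail != 0)
                  munmap(ret + size, trail);
          return (ret);
  }

  int
  main(void)
  {
          void *p = alloc_aligned_slow(1 << 16, 1 << 20);

          printf("aligned mapping at %p\n", p);
          return (0);
  }
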
Modified: head/contrib/jemalloc/src/huge.c
==============================================================================
--- head/contrib/jemalloc/src/huge.c    Thu Jun  9 05:48:34 2016        (r301717)
+++ head/contrib/jemalloc/src/huge.c    Thu Jun  9 06:10:20 2016        (r301718)
@@ -262,19 +262,19 @@ huge_ralloc_no_move_expand(tsdn_t *tsdn,
        malloc_mutex_unlock(tsdn, &arena->huge_mtx);
 
        /*
-        * Copy zero into is_zeroed_chunk and pass the copy to chunk_alloc(), so
-        * that it is possible to make correct junk/zero fill decisions below.
+        * Use is_zeroed_chunk to detect whether the trailing memory is zeroed,
+        * update extent's zeroed field, and zero as necessary.
         */
-       is_zeroed_chunk = zero;
-
+       is_zeroed_chunk = false;
        if (arena_chunk_ralloc_huge_expand(tsdn, arena, ptr, oldsize, usize,
             &is_zeroed_chunk))
                return (true);
 
        malloc_mutex_lock(tsdn, &arena->huge_mtx);
-       /* Update the size of the huge allocation. */
        huge_node_unset(ptr, node);
        extent_node_size_set(node, usize);
+       extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
+           is_zeroed_chunk);
        huge_node_reset(tsdn, ptr, node);
        malloc_mutex_unlock(tsdn, &arena->huge_mtx);
 

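The huge.c hunk is the "opt_zero-triggered in-place huge reallocation
zeroing" fix from the ChangeLog: is_zeroed_chunk now starts out false and
only ends up true if the newly committed trailing chunk memory really is
zeroed, and the extent node's zeroed flag is combined with it by logical
AND, so in-place expansion can no longer record unzeroed memory as
zeroed.  A toy model of that conservative bookkeeping; the names and the
single memset() are simplifications, not jemalloc's actual fill logic:

  #include <stdbool.h>
  #include <stdio.h>
  #include <string.h>

  /*
   * Grow buf in place from oldsize to newsize, zeroing on demand and
   * keeping the "zeroed" flag conservative.
   */
  static bool
  expand_in_place(char *buf, size_t oldsize, size_t newsize,
      bool node_zeroed, bool want_zero, bool trailing_is_zeroed)
  {
          if (want_zero && !trailing_is_zeroed) {
                  /* Trailing memory is not known to be zero: zero it now. */
                  memset(buf + oldsize, 0, newsize - oldsize);
          }
          /* The extent stays "zeroed" only if both old and new parts are. */
          return (node_zeroed && trailing_is_zeroed);
  }

  int
  main(void)
  {
          char buf[64];
          bool zeroed;

          memset(buf, 0xa5, sizeof(buf));       /* dirty trailing memory */
          zeroed = expand_in_place(buf, 16, sizeof(buf), true, true, false);
          printf("buf[16] = %d, extent zeroed = %d\n", buf[16], zeroed);
          return (0);
  }
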
Modified: head/contrib/jemalloc/src/jemalloc.c
==============================================================================
--- head/contrib/jemalloc/src/jemalloc.c        Thu Jun  9 05:48:34 2016        (r301717)
+++ head/contrib/jemalloc/src/jemalloc.c        Thu Jun  9 06:10:20 2016        (r301718)
@@ -1743,7 +1743,7 @@ je_calloc(size_t num, size_t size)
                ret = ialloc_body(num_size, true, &tsdn, &usize, true);
                ialloc_post_check(ret, tsdn, usize, "calloc", true, true);
                UTRACE(0, num_size, ret);
-               JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, false);
+               JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, true);
        }
 
        return (ret);
@@ -2226,7 +2226,7 @@ irallocx_prof(tsd_t *tsd, void *old_ptr,
 
        prof_active = prof_active_get_unlocked();
        old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr);
-       tctx = prof_alloc_prep(tsd, *usize, prof_active, true);
+       tctx = prof_alloc_prep(tsd, *usize, prof_active, false);
        if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
                p = irallocx_prof_sample(tsd, old_ptr, old_usize, *usize,
                    alignment, zero, tcache, arena, tctx);
@@ -2235,7 +2235,7 @@ irallocx_prof(tsd_t *tsd, void *old_ptr,
                    tcache, arena);
        }
        if (unlikely(p == NULL)) {
-               prof_alloc_rollback(tsd, tctx, true);
+               prof_alloc_rollback(tsd, tctx, false);
                return (NULL);
        }
 
@@ -2250,7 +2250,7 @@ irallocx_prof(tsd_t *tsd, void *old_ptr,
                 */
                *usize = isalloc(tsd_tsdn(tsd), p, config_prof);
        }
-       prof_realloc(tsd, p, *usize, tctx, prof_active, true, old_ptr,
+       prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr,
            old_usize, old_tctx);
 
        return (p);

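The irallocx_prof() hunks, together with the prof.h hunk earlier, belong
to the "heap profiling context leaks in reallocation edge cases" fix.
The je_calloc() hunk is the calloc() half of the Valgrind regression
fix: the final argument of JEMALLOC_VALGRIND_MALLOC is the "zeroed"
flag, and calloc() returns zero-filled memory, so reporting it as
undefined caused spurious uninitialized-value warnings.  Upstream's
macro appears to map onto Valgrind's malloc-like block client request;
a hedged sketch of the same idea against the public Valgrind API
(assuming the valgrind headers are installed, using a toy pool
allocator):

  #include <stdio.h>
  #include <string.h>
  #include <valgrind/valgrind.h>

  static char pool[4096];       /* stand-in for memory from the kernel */

  /* Minimal calloc-style carve from the pool (no free/reuse logic). */
  static void *
  pool_calloc(size_t num, size_t size)
  {
          static size_t used;
          size_t total = num * size;    /* overflow check omitted here */
          void *ret;

          if (total > sizeof(pool) - used)
                  return (NULL);
          ret = pool + used;
          used += total;
          memset(ret, 0, total);
          /*
           * The last argument (is_zeroed) is 1: the block really is
           * zero-filled, so Valgrind treats its contents as defined.
           * Passing 0 here is what produced the spurious warnings.
           */
          VALGRIND_MALLOCLIKE_BLOCK(ret, total, 0, 1);
          return (ret);
  }

  int
  main(void)
  {
          char *p = pool_calloc(4, 16);

          printf("first byte: %d\n", p != NULL ? p[0] : -1);
          return (0);
  }
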
Modified: head/contrib/jemalloc/src/nstime.c
==============================================================================
--- head/contrib/jemalloc/src/nstime.c  Thu Jun  9 05:48:34 2016        (r301717)
+++ head/contrib/jemalloc/src/nstime.c  Thu Jun  9 06:10:20 2016        (r301718)
@@ -128,9 +128,11 @@ nstime_update(nstime_t *time)
                time->ns = ts.tv_sec * BILLION + ts.tv_nsec;
        }
 #else
-       struct timeval tv;
-       gettimeofday(&tv, NULL);
-       time->ns = tv.tv_sec * BILLION + tv.tv_usec * 1000;
+       {
+               struct timeval tv;
+               gettimeofday(&tv, NULL);
+               time->ns = tv.tv_sec * BILLION + tv.tv_usec * 1000;
+       }
 #endif
 
        /* Handle non-monotonic clocks. */
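
The nstime.c hunk gives the gettimeofday() fallback of nstime_update()
its own block, corresponding to the ChangeLog's gettimeofday() fix for
that path.  The conversion itself is just seconds and microseconds
scaled to nanoseconds, as in this standalone sketch (BILLION as in the
jemalloc source; the helper name is made up):

  #include <stdint.h>
  #include <stdio.h>
  #include <sys/time.h>

  #define BILLION UINT64_C(1000000000)

  /* Wall-clock nanoseconds via the gettimeofday() fallback path. */
  static uint64_t
  now_ns_gettimeofday(void)
  {
          struct timeval tv;

          gettimeofday(&tv, NULL);
          return ((uint64_t)tv.tv_sec * BILLION + (uint64_t)tv.tv_usec * 1000);
  }

  int
  main(void)
  {
          uint64_t before = now_ns_gettimeofday();
          uint64_t after = now_ns_gettimeofday();

          /*
           * gettimeofday() is not monotonic, so callers (like
           * nstime_update()) must tolerate time going backwards.
           */
          printf("delta: %lld ns\n", (long long)(after - before));
          return (0);
  }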