On Fri, Jul 11, 2014 at 08:11, Ted Unangst wrote:

> I also think there's one simple case that can be added: the MMAP call
> at the bottom of map().

On further inspection, I think this needed a slight reordering to be
safe.

I have also been seeing random munmap errors running jruby:
java(3451) in free(): error: munmap 0x41cb4307000
Abort trap 

I don't see where the race is, but I am now running with this slightly
more conservative patch. The only unlock sections still enabled are
around the mmap() calls on paths that go straight to return, where no
internal state changes after the syscall. (On the bright side, I guess
this means the malloc lock really does work normally. :))
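
For anyone reading along, here is a minimal standalone sketch of that
pattern; it is not the libc code itself. A pthread mutex and a plain
counter stand in for the internal _MALLOC_LOCK()/_MALLOC_UNLOCK() and
malloc_active, and map_page() is a made-up wrapper:

/*
 * Sketch only: the lock may be dropped around mmap() because the
 * fresh mapping is not yet reachable through any shared allocator
 * state, so no other thread can observe or modify it.
 */
#include <sys/mman.h>
#include <pthread.h>
#include <stdio.h>
#include <stddef.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int active;			/* stand-in for malloc_active */

static void *
map_page(size_t sz)
{
	void *p;

	pthread_mutex_lock(&lock);
	active++;
	/* ... inspect/update shared allocator state here ... */

	active--;			/* KERNENTER(): drop the lock; */
	pthread_mutex_unlock(&lock);	/* mmap touches no shared state */

	p = mmap(NULL, sz, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);

	pthread_mutex_lock(&lock);	/* KERNEXIT(): retake the lock */
	active++;			/* before touching shared state */
	/* ... e.g. the STATS_ADD() accounting would go here ... */

	active--;
	pthread_mutex_unlock(&lock);
	return (p == MAP_FAILED ? NULL : p);
}

int
main(void)
{
	printf("mapped %p\n", map_page(4096));
	return 0;
}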

Index: malloc.c
===================================================================
RCS file: /cvs/src/lib/libc/stdlib/malloc.c,v
retrieving revision 1.170
diff -u -p -r1.170 malloc.c
--- malloc.c    9 Jul 2014 19:11:00 -0000       1.170
+++ malloc.c    12 Jul 2014 16:52:17 -0000
@@ -93,6 +93,15 @@
 #define MQUERY(a, sz)  mquery((a), (size_t)(sz), PROT_READ | PROT_WRITE, \
     MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, (off_t)0)
 
+#define KERNENTER() if (__isthreaded) do { \
+       malloc_active--; \
+       _MALLOC_UNLOCK(); \
+} while (0)
+#define KERNEXIT() if (__isthreaded) do { \
+       _MALLOC_LOCK(); \
+       malloc_active++; \
+} while (0)
+
 struct region_info {
        void *p;                /* page; low bits used to mark chunks */
        uintptr_t size;         /* size for pages, or chunk_info pointer */
@@ -312,7 +321,10 @@ unmap(struct dir_info *d, void *p, size_
        }
 
        if (psz > mopts.malloc_cache) {
-               if (munmap(p, sz))
+               /* KERNENTER(); */
+               i = munmap(p, sz);
+               /* KERNEXIT(); */
+               if (i)
                        wrterror("munmap", p);
                STATS_SUB(d->malloc_used, sz);
                return;
@@ -396,7 +408,9 @@ map(struct dir_info *d, size_t sz, int z
                return MAP_FAILED;
        }
        if (psz > d->free_regions_size) {
+               KERNENTER();
                p = MMAP(sz);
+               KERNEXIT();
                if (p != MAP_FAILED)
                        STATS_ADD(d->malloc_used, sz);
                /* zero fill not needed */
@@ -408,18 +422,20 @@ map(struct dir_info *d, size_t sz, int z
                if (r->p != NULL) {
                        if (r->size == psz) {
                                p = r->p;
+                               r->p = NULL;
+                               r->size = 0;
+                               d->free_regions_size -= psz;
+                               /* KERNENTER(); */
                                if (mopts.malloc_freeunmap)
                                        mprotect(p, sz, PROT_READ | PROT_WRITE);
                                if (mopts.malloc_hint)
                                        madvise(p, sz, MADV_NORMAL);
-                               r->p = NULL;
-                               r->size = 0;
-                               d->free_regions_size -= psz;
                                if (zero_fill)
                                        memset(p, 0, sz);
                                else if (mopts.malloc_junk == 2 &&
                                    mopts.malloc_freeunmap)
                                        memset(p, SOME_FREEJUNK, sz);
+                               /* KERNEXIT(); */
                                return p;
                        } else if (r->size > psz)
                                big = r;
@@ -440,11 +456,13 @@ map(struct dir_info *d, size_t sz, int z
                        memset(p, SOME_FREEJUNK, sz);
                return p;
        }
+       if (d->free_regions_size > mopts.malloc_cache)
+               wrterror("malloc cache", NULL);
+       KERNENTER();
        p = MMAP(sz);
+       KERNEXIT();
        if (p != MAP_FAILED)
                STATS_ADD(d->malloc_used, sz);
-       if (d->free_regions_size > mopts.malloc_cache)
-               wrterror("malloc cache", NULL);
        /* zero fill not needed */
        return p;
 }

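One note on the reordering in the second map() hunk: the cache-slot
bookkeeping (r->p = NULL, r->size = 0, the free_regions_size
adjustment) now happens before the section that could later be
unlocked. A hedged sketch of why, with a made-up free_region struct
standing in for the real region cache entry:

/*
 * Sketch, not libc code: claim the cached region while the lock is
 * still held, so that if the lock is later dropped (KERNENTER) for
 * the mprotect/madvise/memset work, no other thread can find the
 * same entry in the cache and hand the region out a second time.
 */
#include <stddef.h>

struct free_region {			/* hypothetical cache entry */
	void *p;
	size_t size;
};

void *
take_region(struct free_region *r, size_t psz, size_t *free_regions_size)
{
	void *p = r->p;

	/* claim the slot while still holding the lock ... */
	r->p = NULL;
	r->size = 0;
	*free_regions_size -= psz;

	/*
	 * ... only now would it be safe to unlock for the zero-fill
	 * or junk-fill work, since the region is no longer visible
	 * to other threads through the cache.
	 */
	return p;
}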