Hi,

Currently, junking is done like this:

- for a small chunk, the whole chunk is junked on free

- for page sized and larger, the first half of a page is filled

- after a delayed free, the first 32 bytes of small chunks are
validated to not be overwritten

- page sized and larger allocations are not validated at all, even if
they end up in the cache.

This diff changes the following:

- I make use of the fact that I know how the chunks are aligned, and
  write 8 bytes at a time by using a uint64_t pointer. For an
  allocation, a max of 4 such uint64_t's are written spread over the
  allocation. For page sized and larger, the first page is junked in
  such a way.

- Delayed free of a small chunk checks in the corresponding way.

- Pages ending up in the cache are validated upon unmapping or re-use.

The last point is the real gain: we also check for write-after-free
for large allocations, which we did not do before.

So we are catching more writes-after-frees. A price to pay is that
larger chunks are not completely junked, but only a total of 32 bytes
are. I chose this number after comparing performance with the current
code: we still gain a bit in speed.

Junk mode 0 (j) and junk mode 2 (J) are not changed.

Please test and review,

        -Otto

Index: stdlib/malloc.c
===================================================================
RCS file: /cvs/src/lib/libc/stdlib/malloc.c,v
retrieving revision 1.267
diff -u -p -r1.267 malloc.c
--- stdlib/malloc.c     23 Nov 2020 15:42:11 -0000      1.267
+++ stdlib/malloc.c     6 Feb 2021 20:03:49 -0000
@@ -89,6 +89,7 @@
  */
 #define SOME_JUNK              0xdb    /* deadbeef */
 #define SOME_FREEJUNK          0xdf    /* dead, free */
+#define SOME_FREEJUNK_ULL      0xdfdfdfdfdfdfdfdfULL
 
 #define MMAP(sz,f)     mmap(NULL, (sz), PROT_READ | PROT_WRITE, \
     MAP_ANON | MAP_PRIVATE | (f), -1, 0)
@@ -655,6 +656,46 @@ delete(struct dir_info *d, struct region
        }
 }
 
+static inline void
+junkfree(int junk, void *p, size_t sz)
+{
+       size_t byte, step = 1;
+
+       if (junk == 0 || sz == 0)
+               return;
+       if (junk == 1) {
+               if (sz > MALLOC_PAGESIZE)
+                       sz = MALLOC_PAGESIZE;
+               step = sz / 32;
+               if (step == 0)
+                       step = 1;
+       }
+       for (byte = 0; byte < sz; byte += step)
+               ((unsigned char *)p)[byte] = SOME_FREEJUNK;
+}
+
+static inline void
+validate_junk(struct dir_info *pool, void *p, size_t sz)
+{
+       size_t byte, step = 1;
+
+       if (pool->malloc_junk == 0 || sz == 0)
+               return;
+       if (pool->malloc_junk == 1) {
+               if (sz > MALLOC_PAGESIZE)
+                       sz = MALLOC_PAGESIZE;
+               step = sz / 32;
+               if (step == 0)
+                       step = 1;
+       }
+       for (byte = 0; byte < sz; byte += step) {
+               if (((unsigned char *)p)[byte] != SOME_FREEJUNK)
+                       wrterror(pool, "write after free %p %#zx@%#zx", p,
+                           byte, sz);
+       }
+}
+
+
 /*
  * Cache maintenance. We keep at most malloc_cache pages cached.
  * If the cache is becoming full, unmap pages in the cache for real,
@@ -663,7 +704,7 @@ delete(struct dir_info *d, struct region
  * cache are in MALLOC_PAGESIZE units.
  */
 static void
-unmap(struct dir_info *d, void *p, size_t sz, size_t clear, int junk)
+unmap(struct dir_info *d, void *p, size_t sz, size_t clear)
 {
        size_t psz = sz >> MALLOC_PAGESHIFT;
        size_t rsz;
@@ -716,12 +757,10 @@ unmap(struct dir_info *d, void *p, size_
                if (r->p == NULL) {
                        if (clear > 0)
                                memset(p, 0, clear);
-                       if (junk && !mopts.malloc_freeunmap) {
-                               size_t amt = junk == 1 ?  MALLOC_MAXCHUNK : sz;
-                               memset(p, SOME_FREEJUNK, amt);
-                       }
                        if (mopts.malloc_freeunmap)
                                mprotect(p, sz, PROT_NONE);
+                       else
+                               junkfree(d->malloc_junk, p, psz << MALLOC_PAGESHIFT);
                        r->p = p;
                        r->size = psz;
                        d->free_regions_size += psz;
@@ -760,15 +799,16 @@ map(struct dir_info *d, size_t sz, int z
                if (r->p != NULL) {
                        if (r->size == psz) {
                                p = r->p;
+                               if (!mopts.malloc_freeunmap)
+                                       validate_junk(d, p, psz << MALLOC_PAGESHIFT);
                                r->p = NULL;
                                d->free_regions_size -= psz;
                                if (mopts.malloc_freeunmap)
                                        mprotect(p, sz, PROT_READ | PROT_WRITE);
                                if (zero_fill)
                                        memset(p, 0, sz);
-                               else if (d->malloc_junk == 2 &&
-                                   mopts.malloc_freeunmap)
-                                       memset(p, SOME_FREEJUNK, sz);
+                               else if (mopts.malloc_freeunmap)
+                                       junkfree(d->malloc_junk, p, sz);
                                d->rotor += i + 1;
                                return p;
                        } else if (r->size > psz)
@@ -778,15 +818,19 @@ map(struct dir_info *d, size_t sz, int z
        if (big != NULL) {
                r = big;
                p = r->p;
+               if (!mopts.malloc_freeunmap)
+                       validate_junk(d, p, r->size << MALLOC_PAGESHIFT);
                r->p = (char *)r->p + (psz << MALLOC_PAGESHIFT);
-               if (mopts.malloc_freeunmap)
-                       mprotect(p, sz, PROT_READ | PROT_WRITE);
                r->size -= psz;
                d->free_regions_size -= psz;
+               if (mopts.malloc_freeunmap)
+                       mprotect(p, sz, PROT_READ | PROT_WRITE);
+               else
+                       junkfree(d->malloc_junk, r->p, r->size << MALLOC_PAGESHIFT);
                if (zero_fill)
                        memset(p, 0, sz);
-               else if (d->malloc_junk == 2 && mopts.malloc_freeunmap)
-                       memset(p, SOME_FREEJUNK, sz);
+               else if (mopts.malloc_freeunmap)
+                       junkfree(d->malloc_junk, p, sz);
                return p;
        }
        if (d->free_regions_size > d->malloc_cache)
@@ -892,7 +936,7 @@ omalloc_make_chunks(struct dir_info *d, 
        return bp;
 
 err:
-       unmap(d, pp, MALLOC_PAGESIZE, 0, d->malloc_junk);
+       unmap(d, pp, MALLOC_PAGESIZE, 0);
        return NULL;
 }
 
@@ -1091,7 +1135,7 @@ free_bytes(struct dir_info *d, struct re
 
        if (info->size == 0 && !mopts.malloc_freeunmap)
                mprotect(info->page, MALLOC_PAGESIZE, PROT_READ | PROT_WRITE);
-       unmap(d, info->page, MALLOC_PAGESIZE, 0, 0);
+       unmap(d, info->page, MALLOC_PAGESIZE, 0);
 
        delete(d, r);
        if (info->size != 0)
@@ -1122,7 +1166,7 @@ omalloc(struct dir_info *pool, size_t sz
                        return NULL;
                }
                if (insert(pool, p, sz, f)) {
-                       unmap(pool, p, psz, 0, 0);
+                       unmap(pool, p, psz, 0);
                        errno = ENOMEM;
                        return NULL;
                }
@@ -1282,27 +1326,6 @@ malloc_conceal(size_t size)
 }
 DEF_WEAK(malloc_conceal);
 
-static void
-validate_junk(struct dir_info *pool, void *p)
-{
-       struct region_info *r;
-       size_t byte, sz;
-
-       if (p == NULL)
-               return;
-       r = find(pool, p);
-       if (r == NULL)
-               wrterror(pool, "bogus pointer in validate_junk %p", p);
-       REALSIZE(sz, r);
-       if (sz > CHUNK_CHECK_LENGTH)
-               sz = CHUNK_CHECK_LENGTH;
-       for (byte = 0; byte < sz; byte++) {
-               if (((unsigned char *)p)[byte] != SOME_FREEJUNK)
-                       wrterror(pool, "use after free %p", p);
-       }
-}
-
-
 static struct region_info *
 findpool(void *p, struct dir_info *argpool, struct dir_info **foundpool,
     char **saved_function)
@@ -1402,8 +1425,7 @@ ofree(struct dir_info **argpool, void *p
                        }
                        STATS_SUB(pool->malloc_guarded, mopts.malloc_guard);
                }
-               unmap(pool, p, PAGEROUND(sz), clear ? argsz : 0,
-                   pool->malloc_junk);
+               unmap(pool, p, PAGEROUND(sz), clear ? argsz : 0);
                delete(pool, r);
        } else {
                /* Validate and optionally canary check */
@@ -1419,20 +1441,24 @@ ofree(struct dir_info **argpool, void *p
                                                wrterror(pool,
                                                    "double free %p", p);
                        }
-                       if (pool->malloc_junk && sz > 0)
-                               memset(p, SOME_FREEJUNK, sz);
+                       junkfree(pool->malloc_junk, p, sz);
                        i = getrbyte(pool) & MALLOC_DELAYED_CHUNK_MASK;
                        tmp = p;
                        p = pool->delayed_chunks[i];
                        if (tmp == p)
                                wrterror(pool, "double free %p", tmp);
                        pool->delayed_chunks[i] = tmp;
-                       if (pool->malloc_junk)
-                               validate_junk(pool, p);
-               } else if (argsz > 0)
+                       if (p != NULL) {
+                               r = find(pool, p);
+                               REALSIZE(sz, r);
+                               if (r != NULL)
+                                       validate_junk(pool, p, sz);
+                       }
+               } else if (argsz > 0) {
+                       r = find(pool, p);
                        memset(p, 0, argsz);
+               }
                if (p != NULL) {
-                       r = find(pool, p);
                        if (r == NULL)
                                wrterror(pool,
                                    "bogus pointer (double free?) %p", p);
@@ -1969,7 +1995,7 @@ omemalign(struct dir_info *pool, size_t 
        }
 
        if (insert(pool, p, sz, f)) {
-               unmap(pool, p, psz, 0, 0);
+               unmap(pool, p, psz, 0);
                errno = ENOMEM;
                return NULL;
        }

Reply via email to