With this patch on -current and
ln -sf CJ /etc/malloc.conf
a lot of things stopped working, like "man ls" and "tmux".
With only C or only J, the system seems to work ok, but with both it
doesn't.
Will check the resulting core files.
ntpd and tmux both crashed in memset called from inside realloc, if I read
the core dumps correctly:

Program terminated with signal 11, Segmentation fault.
(no debugging symbols found)
Loaded symbols for /usr/sbin/ntpd
Reading symbols from /usr/lib/libutil.so.12.1...done.
Loaded symbols for /usr/lib/libutil.so.12.1
Reading symbols from /usr/lib/libtls.so.9.0...done.
Loaded symbols for /usr/lib/libtls.so.9.0
Reading symbols from /usr/lib/libssl.so.37.0...done.
Loaded symbols for /usr/lib/libssl.so.37.0
Reading symbols from /usr/lib/libcrypto.so.36.0...done.
Loaded symbols for /usr/lib/libcrypto.so.36.0
Reading symbols from /usr/lib/libc.so.84.0...done.
Loaded symbols for /usr/lib/libc.so.84.0
Reading symbols from /usr/libexec/ld.so...done.
Loaded symbols for /usr/libexec/ld.so
#0  L1 () at /usr/src/lib/libc/arch/amd64/string/memset.S:51
51      L1:     rep
(gdb) bt full
#0  L1 () at /usr/src/lib/libc/arch/amd64/string/memset.S:51
No locals.
#1  0x000016e1a67748f2 in realloc (ptr=0x16e22de6fd20, size=28)
    at /usr/src/lib/libc/stdlib/malloc.c:1420
        r = Variable "r" is not available.



2015-10-23 7:18 GMT+02:00 Daniel Micay <danielmi...@gmail.com>:
>
> Hi,
>
> This patch adds an opt-in malloc configuration option placing canaries
> after small allocations to detect heap overflows on free(...). It's
> intended to be used alongside guard pages for large allocations. Since
> it's essentially adding extra padding to all small allocations, a small
> heap overflow will be rendered harmless.
>
> The current implementation uses pointer-size canaries, but it could be
> easily extended to allow bumping up the size of the canaries by passing
> the option multiple times. The entry points into malloc account for the
> canary size when it's enabled, and then it's generated on allocation and
> checked on free. Small allocations without room for a canary are simply
> turned into large allocations. Some care needs to be taken to avoid
> clobbering the canary in the junk filling code and realloc copying.
>
> The canary is placed at the very end of the memory allocations, so there
> will often be slack space in between the real allocation and the canary,
> preventing small overflows from being detected. It would be much better
> at detecting corruption with finer-grained size classes. The extreme
> would be every multiple of the alignment, but logarithmic growth would
> be more realistic (see jemalloc's size classes). Finer-grained size
> classes would also reduce the memory overhead caused by allocations
> being pushed into the next size class by the canary.
>
> The canaries are currently generated with canary_value ^ hash(canary_address).
> It would be best to avoid involving addresses, to avoid introducing
> address leaks via read overflows where there were none before, but it's
> the easiest way to get unique canaries and is a minor issue to improve
> down the road.
>
> I implemented this feature after porting OpenBSD malloc to Android (in
> CopperheadOS) and it has found a few bugs in the app ecosystem. Note
> that I've only heavily tested it there, not on OpenBSD itself. I'm not
> sure if you want this feature, but it seemed worth submitting.
>
> Hopefully you don't mind a patch generated with Git. :)
>
> diff --git a/stdlib/malloc.c b/stdlib/malloc.c
> index 424dd77..65b5027 100644
> --- a/stdlib/malloc.c
> +++ b/stdlib/malloc.c
> @@ -185,12 +185,14 @@ struct malloc_readonly {
>         int     malloc_move;            /* move allocations to end of page? */
>         int     malloc_realloc;         /* always realloc? */
>         int     malloc_xmalloc;         /* xmalloc behaviour? */
> +       size_t  malloc_canaries;        /* use canaries after chunks? */
>         size_t  malloc_guard;           /* use guard pages after allocations? */
>         u_int   malloc_cache;           /* free pages we cache */
>  #ifdef MALLOC_STATS
>         int     malloc_stats;           /* dump statistics at end */
>  #endif
>         u_int32_t malloc_canary;        /* Matched against ones in malloc_pool */
> +       uintptr_t malloc_chunk_canary;
>  };
>
>  /* This object is mapped PROT_READ after initialisation to prevent tampering */
> @@ -526,6 +528,12 @@ omalloc_init(struct dir_info **dp)
>                         case 'A':
>                                 mopts.malloc_abort = 1;
>                                 break;
> +                       case 'c':
> +                               mopts.malloc_canaries = 0;
> +                               break;
> +                       case 'C':
> +                               mopts.malloc_canaries = sizeof(void *);
> +                               break;
>  #ifdef MALLOC_STATS
>                         case 'd':
>                                 mopts.malloc_stats = 0;
> @@ -619,6 +627,9 @@ omalloc_init(struct dir_info **dp)
>         while ((mopts.malloc_canary = arc4random()) == 0)
>                 ;
>
> +       arc4random_buf(&mopts.malloc_chunk_canary,
> +           sizeof(mopts.malloc_chunk_canary));
> +
>         /*
>          * Allocate dir_info with a guard page on either side. Also
>          * randomise offset inside the page at which the dir_info
> @@ -984,8 +995,15 @@ malloc_bytes(struct dir_info *d, size_t size, void *f)
>         k += (lp - bp->bits) * MALLOC_BITS;
>         k <<= bp->shift;
>
> +       if (mopts.malloc_canaries && bp->size > 0) {
> +               char *end = (char *)bp->page + k + bp->size;
> +               uintptr_t *canary = (uintptr_t *)(end - mopts.malloc_canaries);
> +               *canary = mopts.malloc_chunk_canary ^ hash(canary);
> +       }
> +
>         if (mopts.malloc_junk == 2 && bp->size > 0)
> -               memset((char *)bp->page + k, SOME_JUNK, bp->size);
> +               memset((char *)bp->page + k, SOME_JUNK,
> +                   bp->size - mopts.malloc_canaries);
>         return ((char *)bp->page + k);
>  }
>
> @@ -999,6 +1017,13 @@ find_chunknum(struct dir_info *d, struct region_info *r, void *ptr)
>         if (info->canary != d->canary1)
>                 wrterror("chunk info corrupted", NULL);
>
> +       if (mopts.malloc_canaries && info->size > 0) {
> +               char *end = (char *)ptr + info->size;
> +               uintptr_t *canary = (uintptr_t *)(end - mopts.malloc_canaries);
> +               if (*canary != (mopts.malloc_chunk_canary ^ hash(canary)))
> +                       wrterror("chunk canary corrupted", ptr);
> +       }
> +
>         /* Find the chunk number on the page */
>         chunknum = ((uintptr_t)ptr & MALLOC_PAGEMASK) >> info->shift;
>
> @@ -1121,7 +1146,7 @@ omalloc(size_t sz, int zero_fill, void *f)
>                 /* takes care of SOME_JUNK */
>                 p = malloc_bytes(pool, sz, f);
>                 if (zero_fill && p != NULL && sz > 0)
> -                       memset(p, 0, sz);
> +                       memset(p, 0, sz - mopts.malloc_canaries);
>         }
>
>         return p;
> @@ -1176,6 +1201,8 @@ malloc(size_t size)
>                 malloc_recurse();
>                 return NULL;
>         }
> +       if (size > 0 && size <= MALLOC_MAXCHUNK)
> +               size += mopts.malloc_canaries;
>         r = omalloc(size, 0, CALLER);
>         malloc_active--;
>         _MALLOC_UNLOCK();
> @@ -1242,7 +1269,7 @@ ofree(void *p)
>                 int i;
>
>                 if (mopts.malloc_junk && sz > 0)
> -                       memset(p, SOME_FREEJUNK, sz);
> +                       memset(p, SOME_FREEJUNK, sz - mopts.malloc_canaries);
>                 if (!mopts.malloc_freenow) {
>                         if (find_chunknum(pool, r, p) == -1)
>                                 return;
> @@ -1386,16 +1413,24 @@ gotit:
>                 }
>         }
>         if (newsz <= oldsz && newsz > oldsz / 2 && !mopts.malloc_realloc) {
> -               if (mopts.malloc_junk == 2 && newsz > 0)
> -                       memset((char *)p + newsz, SOME_JUNK, oldsz - newsz);
> +               if (mopts.malloc_junk == 2 && newsz > 0 && newsz < oldsz) {
> +                       size_t clearsz = oldsz - newsz;
> +                       if (oldsz <= MALLOC_MAXCHUNK)
> +                               clearsz -= mopts.malloc_canaries;
> +                       memset((char *)p + newsz, SOME_JUNK, clearsz);
> +               }
>                 STATS_SETF(r, f);
>                 return p;
>         } else if (newsz != oldsz || mopts.malloc_realloc) {
>                 q = omalloc(newsz, 0, f);
>                 if (q == NULL)
>                         return NULL;
> -               if (newsz != 0 && oldsz != 0)
> -                       memcpy(q, p, oldsz < newsz ? oldsz : newsz);
> +               if (newsz != 0 && oldsz != 0) {
> +                       size_t copysz = oldsz < newsz ? oldsz : newsz;
> +                       if (copysz <= MALLOC_MAXCHUNK)
> +                               copysz -= mopts.malloc_canaries;
> +                       memcpy(q, p, copysz);
> +               }
>                 ofree(p);
>                 return q;
>         } else {
> @@ -1420,6 +1455,8 @@ realloc(void *ptr, size_t size)
>                 malloc_recurse();
>                 return NULL;
>         }
> +       if (size > 0 && size <= MALLOC_MAXCHUNK)
> +               size += mopts.malloc_canaries;
>         r = orealloc(ptr, size, CALLER);
>
>         malloc_active--;
> @@ -1468,6 +1505,8 @@ calloc(size_t nmemb, size_t size)
>         }
>
>         size *= nmemb;
> +       if (size > 0 && size <= MALLOC_MAXCHUNK)
> +               size += mopts.malloc_canaries;
>         r = omalloc(size, 1, CALLER);
>
>         malloc_active--;
> @@ -1595,6 +1634,8 @@ posix_memalign(void **memptr, size_t alignment, size_t size)
>                 malloc_recurse();
>                 goto err;
>         }
> +       if (size > 0 && size <= MALLOC_MAXCHUNK)
> +               size += mopts.malloc_canaries;
>         r = omemalign(alignment, size, 0, CALLER);
>         malloc_active--;
>         _MALLOC_UNLOCK();
>



--
May the most significant bit of your life be positive.

Reply via email to