This patch adds a form of use-after-free detection based on validating that the junk data is still in place when swapping out an allocation from the delayed chunk cache. It will probably nearly double the cost of the junk-fill-on-free feature that's enabled by default, since it needs to do a whole extra pass over the data, so it is split out into a separate option instead of always being enabled. It can catch use-after-free issues in cases where the free guard / free unmap features cannot, since those can only kick in when a whole page is cleared out.
This could be extended to also check that the data is *either* zeroed or junk when handing out a chunk as a new allocation. That would add an additional pass over the data, so perhaps this should be given 3 levels like the junk-fill feature: the behaviour of this patch by default, validation disabled with 'v', and additional validation on allocation with 'V'.

diff --git a/stdlib/malloc.c b/stdlib/malloc.c
index 424dd77..7c33a7a 100644
--- a/stdlib/malloc.c
+++ b/stdlib/malloc.c
@@ -182,6 +182,7 @@ struct malloc_readonly {
 	int	malloc_freeunmap;	/* mprotect free pages PROT_NONE? */
 	int	malloc_hint;		/* call madvice on free pages? */
 	int	malloc_junk;		/* junk fill? */
+	int	malloc_validate;	/* validate junk */
 	int	malloc_move;		/* move allocations to end of page? */
 	int	malloc_realloc;		/* always realloc? */
 	int	malloc_xmalloc;		/* xmalloc behaviour? */
@@ -560,6 +561,12 @@ omalloc_init(struct dir_info **dp)
 		case 'J':
 			mopts.malloc_junk = 2;
 			break;
+		case 'v':
+			mopts.malloc_validate = 0;
+			break;
+		case 'V':
+			mopts.malloc_validate = 1;
+			break;
 		case 'n':
 		case 'N':
 			break;
@@ -1253,6 +1260,17 @@ ofree(void *p)
 			wrterror("double free", p);
 			return;
 		}
+		if (mopts.malloc_junk && mopts.malloc_validate && p != NULL) {
+			size_t byte;
+			r = find(pool, p);
+			REALSIZE(sz, r);
+			for (byte = 0; byte < sz; byte++) {
+				if (((char *)p)[byte] != SOME_FREEJUNK) {
+					wrterror("use after free", p);
+					return;
+				}
+			}
+		}
 		pool->delayed_chunks[i] = tmp;
 	}
 	if (p != NULL) {