Module Name:    src
Committed By:   maxv
Date:           Mon Aug 20 15:04:52 UTC 2018

Modified Files:
        src/sys/arch/amd64/amd64: machdep.c
        src/sys/arch/amd64/conf: GENERIC Makefile.amd64 files.amd64
            kern.ldscript
        src/sys/arch/amd64/include: pmap.h types.h
        src/sys/arch/x86/include: pmap.h
        src/sys/arch/x86/x86: pmap.c
        src/sys/conf: files
        src/sys/kern: kern_malloc.c subr_kmem.c
        src/sys/lib/libkern: libkern.h
        src/sys/sys: Makefile
Added Files:
        src/sys/arch/amd64/amd64: asan.c
        src/sys/sys: asan.h

Log Message:
Add support for kASan on amd64. Written by me, with some parts inspired
by Siddharth Muralee's initial work. This feature can detect several
kinds of memory bugs at runtime, and is an excellent debugging tool.

It can be enabled by uncommenting these three lines in GENERIC:

        #makeoptions    KASAN=1         # Kernel Address Sanitizer
        #options        KASAN
        #no options     SVS

The kernel is compiled without SVS, without DMAP and without the PCPU
area. A shadow area is created at boot time, and it can cover the upper
128TB of the address space. This area is populated gradually as we
allocate memory, so the memory consumption is kept to a minimum.
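
The shadow uses one byte per 8 bytes of kernel virtual address. As a
point of reference, a minimal sketch of the sizing and of the
translation performed by kasan_addr_to_shad() in asan.c below:

        /*
         * kernel half  = 1ULL << 47             = 128TB
         * shadow size  = 128TB >> 3             = 16TB
         * L4 slots     = 16TB / NBPD_L4 (512GB) = 32  (NL4_SLOT_KASAN)
         */
        int8_t *shad = (int8_t *)(KASAN_SHADOW_START +
            ((va - CANONICAL_BASE) >> KASAN_SHADOW_SCALE_SHIFT));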

The compiler instruments each memory access with a call to one of the
__asan_* functions, where we verify whether the access is legal by
looking at the shadow area.
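
Roughly speaking, and depending on the compiler, a 4-byte store gets
rewritten as if the source were (p and val being placeholders):

        __asan_store4((unsigned long)p);  /* shadow check, report if illegal */
        *p = val;                         /* the original access */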

We provide our own special memcpy/memset/etc. functions, because the
compiler's builtins are not instrumented with __asan_* calls.
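
For instance, libkern.h redirects memcpy() to kasan_memcpy(), which
checks both buffers against the shadow before deferring to the builtin
(from asan.c below):

        kasan_shadow_check((unsigned long)src, len, false, __RET_ADDR);
        kasan_shadow_check((unsigned long)dst, len, true, __RET_ADDR);
        return __builtin_memcpy(dst, src, len);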

Initially, all the mappings are marked as valid. During dynamic
allocations, we add a redzone, which we mark as invalid. Any access to
it will trigger a kASan error message. Additionally, the compiler adds
a redzone after each global variable, and we mark these redzones as
invalid too. The illegal-access detection works with a 1-byte
granularity.
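
The caller-side pattern, as used in kern_malloc.c and subr_kmem.c below
(alloc_backend() being a placeholder for the actual allocation):

        size_t origsize = size;
        kasan_add_redzone(&size);        /* grow the request to fit a redzone */
        p = alloc_backend(size);
        kasan_alloc(p, origsize, size);  /* valid up to origsize, redzone invalid */
        ...
        kasan_free(p, size);             /* on free, mark the whole area valid again */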

For now, we cover three areas:

        - global variables
        - kmem_alloc-ated areas
        - malloc-ated areas

More will come, but that's a good start.


To generate a diff of this commit:
cvs rdiff -u -r0 -r1.1 src/sys/arch/amd64/amd64/asan.c
cvs rdiff -u -r1.314 -r1.315 src/sys/arch/amd64/amd64/machdep.c
cvs rdiff -u -r1.503 -r1.504 src/sys/arch/amd64/conf/GENERIC
cvs rdiff -u -r1.71 -r1.72 src/sys/arch/amd64/conf/Makefile.amd64
cvs rdiff -u -r1.105 -r1.106 src/sys/arch/amd64/conf/files.amd64
cvs rdiff -u -r1.26 -r1.27 src/sys/arch/amd64/conf/kern.ldscript
cvs rdiff -u -r1.54 -r1.55 src/sys/arch/amd64/include/pmap.h
cvs rdiff -u -r1.56 -r1.57 src/sys/arch/amd64/include/types.h
cvs rdiff -u -r1.84 -r1.85 src/sys/arch/x86/include/pmap.h
cvs rdiff -u -r1.303 -r1.304 src/sys/arch/x86/x86/pmap.c
cvs rdiff -u -r1.1203 -r1.1204 src/sys/conf/files
cvs rdiff -u -r1.147 -r1.148 src/sys/kern/kern_malloc.c
cvs rdiff -u -r1.68 -r1.69 src/sys/kern/subr_kmem.c
cvs rdiff -u -r1.127 -r1.128 src/sys/lib/libkern/libkern.h
cvs rdiff -u -r1.166 -r1.167 src/sys/sys/Makefile
cvs rdiff -u -r0 -r1.1 src/sys/sys/asan.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/amd64/amd64/machdep.c
diff -u src/sys/arch/amd64/amd64/machdep.c:1.314 src/sys/arch/amd64/amd64/machdep.c:1.315
--- src/sys/arch/amd64/amd64/machdep.c:1.314	Sun Aug 12 15:31:01 2018
+++ src/sys/arch/amd64/amd64/machdep.c	Mon Aug 20 15:04:51 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: machdep.c,v 1.314 2018/08/12 15:31:01 maxv Exp $	*/
+/*	$NetBSD: machdep.c,v 1.315 2018/08/20 15:04:51 maxv Exp $	*/
 
 /*
  * Copyright (c) 1996, 1997, 1998, 2000, 2006, 2007, 2008, 2011
@@ -110,7 +110,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.314 2018/08/12 15:31:01 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.315 2018/08/20 15:04:51 maxv Exp $");
 
 #include "opt_modular.h"
 #include "opt_user_ldt.h"
@@ -122,6 +122,7 @@ __KERNEL_RCSID(0, "$NetBSD: machdep.c,v 
 #include "opt_xen.h"
 #include "opt_svs.h"
 #include "opt_kaslr.h"
+#include "opt_kasan.h"
 #ifndef XEN
 #include "opt_physmem.h"
 #endif
@@ -1656,6 +1657,15 @@ init_slotspace(void)
 	slotspace.area[SLAREA_HYPV].dropmax = false;
 #endif
 
+#ifdef KASAN
+	/* ASAN. */
+	slotspace.area[SLAREA_ASAN].sslot = L4_SLOT_KASAN;
+	slotspace.area[SLAREA_ASAN].mslot = NL4_SLOT_KASAN;
+	slotspace.area[SLAREA_ASAN].nslot = NL4_SLOT_KASAN;
+	slotspace.area[SLAREA_ASAN].active = true;
+	slotspace.area[SLAREA_ASAN].dropmax = false;
+#endif
+
 	/* Kernel. */
 	slotspace.area[SLAREA_KERN].sslot = L4_SLOT_KERNBASE;
 	slotspace.area[SLAREA_KERN].mslot = 1;
@@ -1781,6 +1791,11 @@ init_x86_64(paddr_t first_avail)
 
 	init_x86_msgbuf();
 
+#ifdef KASAN
+	void kasan_init(void);
+	kasan_init();
+#endif
+
 	pmap_growkernel(VM_MIN_KERNEL_ADDRESS + 32 * 1024 * 1024);
 
 	kpreempt_disable();

Index: src/sys/arch/amd64/conf/GENERIC
diff -u src/sys/arch/amd64/conf/GENERIC:1.503 src/sys/arch/amd64/conf/GENERIC:1.504
--- src/sys/arch/amd64/conf/GENERIC:1.503	Tue Aug 14 06:37:59 2018
+++ src/sys/arch/amd64/conf/GENERIC	Mon Aug 20 15:04:51 2018
@@ -1,4 +1,4 @@
-# $NetBSD: GENERIC,v 1.503 2018/08/14 06:37:59 maxv Exp $
+# $NetBSD: GENERIC,v 1.504 2018/08/20 15:04:51 maxv Exp $
 #
 # GENERIC machine description file
 #
@@ -22,7 +22,7 @@ include 	"arch/amd64/conf/std.amd64"
 
 options 	INCLUDE_CONFIG_FILE	# embed config file in kernel binary
 
-#ident		"GENERIC-$Revision: 1.503 $"
+#ident		"GENERIC-$Revision: 1.504 $"
 
 maxusers	64		# estimated number of users
 
@@ -117,6 +117,11 @@ makeoptions	DEBUG="-g"	# compile full sy
 #options 	SYSCALL_TIMES_HASCOUNTER	# use 'broken' rdtsc (soekris)
 options 	KDTRACE_HOOKS	# kernel DTrace hooks
 
+# Kernel Address Sanitizer (kASan). You need to disable SVS to use it.
+#makeoptions 	KASAN=1		# Kernel Address Sanitizer
+#options 	KASAN
+#no options	SVS
+
 # Compatibility options
 # x86_64 never shipped with a.out binaries; the two options below are
 # only relevant to 32-bit i386 binaries

Index: src/sys/arch/amd64/conf/Makefile.amd64
diff -u src/sys/arch/amd64/conf/Makefile.amd64:1.71 src/sys/arch/amd64/conf/Makefile.amd64:1.72
--- src/sys/arch/amd64/conf/Makefile.amd64:1.71	Sat Jun  2 15:09:37 2018
+++ src/sys/arch/amd64/conf/Makefile.amd64	Mon Aug 20 15:04:51 2018
@@ -1,4 +1,4 @@
-#	$NetBSD: Makefile.amd64,v 1.71 2018/06/02 15:09:37 christos Exp $
+#	$NetBSD: Makefile.amd64,v 1.72 2018/08/20 15:04:51 maxv Exp $
 
 # Makefile for NetBSD
 #
@@ -49,6 +49,11 @@ CFLAGS+=      -mindirect-branch=thunk-in
 CFLAGS+=      -mindirect-branch-register
 .endif
 
+.if ${KASAN:U0} > 0 && ${HAVE_GCC:U0} > 0
+CFLAGS+=	-fsanitize=kernel-address --param asan-globals=1
+COPTS.asan.c+=	-fno-sanitize=kernel-address
+.endif
+
 ##
 ## (3) libkern and compat
 ##

Index: src/sys/arch/amd64/conf/files.amd64
diff -u src/sys/arch/amd64/conf/files.amd64:1.105 src/sys/arch/amd64/conf/files.amd64:1.106
--- src/sys/arch/amd64/conf/files.amd64:1.105	Fri Jul 13 09:37:32 2018
+++ src/sys/arch/amd64/conf/files.amd64	Mon Aug 20 15:04:51 2018
@@ -1,4 +1,4 @@
-#	$NetBSD: files.amd64,v 1.105 2018/07/13 09:37:32 maxv Exp $
+#	$NetBSD: files.amd64,v 1.106 2018/08/20 15:04:51 maxv Exp $
 #
 # new style config file for amd64 architecture
 #
@@ -39,6 +39,7 @@ file	arch/amd64/amd64/spl.S			machdep
 
 file	arch/amd64/amd64/amd64func.S		machdep
 file	arch/amd64/amd64/amd64_trap.S		machdep
+file	arch/amd64/amd64/asan.c			kasan
 file	arch/amd64/amd64/autoconf.c		machdep
 file	arch/amd64/amd64/busfunc.S		machdep
 file	arch/amd64/amd64/cpu_in_cksum.S		(inet | inet6) & cpu_in_cksum

Index: src/sys/arch/amd64/conf/kern.ldscript
diff -u src/sys/arch/amd64/conf/kern.ldscript:1.26 src/sys/arch/amd64/conf/kern.ldscript:1.27
--- src/sys/arch/amd64/conf/kern.ldscript:1.26	Sun Jan 21 11:21:40 2018
+++ src/sys/arch/amd64/conf/kern.ldscript	Mon Aug 20 15:04:51 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern.ldscript,v 1.26 2018/01/21 11:21:40 maxv Exp $	*/
+/*	$NetBSD: kern.ldscript,v 1.27 2018/08/20 15:04:51 maxv Exp $	*/
 
 #include "assym.h"
 
@@ -48,6 +48,10 @@ SECTIONS
 	{
 		*(.rodata)
 		*(.rodata.*)
+		. = ALIGN(COHERENCY_UNIT);
+		__CTOR_LIST__ = .;
+		*(.ctors)
+		__CTOR_END__ = .;
 	}
 
 	. = ALIGN(__LARGE_PAGE_SIZE);

Index: src/sys/arch/amd64/include/pmap.h
diff -u src/sys/arch/amd64/include/pmap.h:1.54 src/sys/arch/amd64/include/pmap.h:1.55
--- src/sys/arch/amd64/include/pmap.h:1.54	Fri Aug 17 14:39:51 2018
+++ src/sys/arch/amd64/include/pmap.h	Mon Aug 20 15:04:51 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.h,v 1.54 2018/08/17 14:39:51 maxv Exp $	*/
+/*	$NetBSD: pmap.h,v 1.55 2018/08/20 15:04:51 maxv Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -67,6 +67,7 @@
 
 #if defined(_KERNEL_OPT)
 #include "opt_xen.h"
+#include "opt_kasan.h"
 #endif
 
 #include <sys/atomic.h>
@@ -91,6 +92,11 @@
 /* XXXfvdl this one's not right. */
 #define VA_SIGN_POS(va)		((va) & ~VA_SIGN_MASK)
 
+#ifdef KASAN
+#define L4_SLOT_KASAN		256
+#define NL4_SLOT_KASAN		32
+#endif
+
 #ifndef XEN
 #define L4_SLOT_PTE		slotspace.area[SLAREA_PTE].sslot
 #else

Index: src/sys/arch/amd64/include/types.h
diff -u src/sys/arch/amd64/include/types.h:1.56 src/sys/arch/amd64/include/types.h:1.57
--- src/sys/arch/amd64/include/types.h:1.56	Thu Jul 12 10:46:41 2018
+++ src/sys/arch/amd64/include/types.h	Mon Aug 20 15:04:51 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: types.h,v 1.56 2018/07/12 10:46:41 maxv Exp $	*/
+/*	$NetBSD: types.h,v 1.57 2018/08/20 15:04:51 maxv Exp $	*/
 
 /*-
  * Copyright (c) 1990 The Regents of the University of California.
@@ -99,11 +99,14 @@ typedef	unsigned char		__cpu_simple_lock
 #define	__HAVE_RAS
 
 #include "opt_xen.h"
+#include "opt_kasan.h"
 #if defined(__x86_64__) && !defined(XEN)
+#if !defined(KASAN)
 #define	__HAVE_PCPU_AREA 1
 #define	__HAVE_DIRECT_MAP 1
 #define	__HAVE_MM_MD_DIRECT_MAPPED_IO
 #define	__HAVE_MM_MD_DIRECT_MAPPED_PHYS
+#endif
 #if !defined(NO_PCI_MSI_MSIX)
 #define	__HAVE_PCI_MSI_MSIX
 #endif

Index: src/sys/arch/x86/include/pmap.h
diff -u src/sys/arch/x86/include/pmap.h:1.84 src/sys/arch/x86/include/pmap.h:1.85
--- src/sys/arch/x86/include/pmap.h:1.84	Sun Aug 12 13:31:16 2018
+++ src/sys/arch/x86/include/pmap.h	Mon Aug 20 15:04:52 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.h,v 1.84 2018/08/12 13:31:16 maxv Exp $	*/
+/*	$NetBSD: pmap.h,v 1.85 2018/08/20 15:04:52 maxv Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -161,8 +161,9 @@ struct bootspace {
 #define SLAREA_PCPU	4
 #define SLAREA_DMAP	5
 #define SLAREA_HYPV	6
-#define SLAREA_KERN	7
-#define SLSPACE_NAREAS	8
+#define SLAREA_ASAN	7
+#define SLAREA_KERN	8
+#define SLSPACE_NAREAS	9
 
 struct slotspace {
 	struct {
@@ -552,6 +553,8 @@ int	pmap_enter_ma(struct pmap *, vaddr_t
 bool	pmap_extract_ma(pmap_t, vaddr_t, paddr_t *);
 void	pmap_free_ptps(struct vm_page *);
 
+paddr_t pmap_get_physpage(void);
+
 /*
  * Hooks for the pool allocator.
  */

Index: src/sys/arch/x86/x86/pmap.c
diff -u src/sys/arch/x86/x86/pmap.c:1.303 src/sys/arch/x86/x86/pmap.c:1.304
--- src/sys/arch/x86/x86/pmap.c:1.303	Sat Aug 18 08:45:55 2018
+++ src/sys/arch/x86/x86/pmap.c	Mon Aug 20 15:04:52 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.303 2018/08/18 08:45:55 maxv Exp $	*/
+/*	$NetBSD: pmap.c,v 1.304 2018/08/20 15:04:52 maxv Exp $	*/
 
 /*
  * Copyright (c) 2008, 2010, 2016, 2017 The NetBSD Foundation, Inc.
@@ -157,13 +157,14 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.303 2018/08/18 08:45:55 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.304 2018/08/20 15:04:52 maxv Exp $");
 
 #include "opt_user_ldt.h"
 #include "opt_lockdebug.h"
 #include "opt_multiprocessor.h"
 #include "opt_xen.h"
 #include "opt_svs.h"
+#include "opt_kasan.h"
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -570,7 +571,6 @@ static bool pmap_remove_pte(struct pmap 
 static void pmap_remove_ptes(struct pmap *, struct vm_page *, vaddr_t, vaddr_t,
     vaddr_t, struct pv_entry **);
 
-static paddr_t pmap_get_physpage(void);
 static void pmap_alloc_level(struct pmap *, vaddr_t, long *);
 
 static void pmap_reactivate(struct pmap *);
@@ -1386,7 +1386,7 @@ pmap_pagetree_nentries_range(vaddr_t sta
 }
 #endif
 
-#if defined(__HAVE_DIRECT_MAP)
+#if defined(__HAVE_DIRECT_MAP) || defined(KASAN)
 static inline void
 slotspace_copy(int type, pd_entry_t *dst, pd_entry_t *src)
 {
@@ -2377,6 +2377,9 @@ pmap_pdp_ctor(void *arg, void *v, int fl
 #ifdef __HAVE_DIRECT_MAP
 	slotspace_copy(SLAREA_DMAP, pdir, PDP_BASE);
 #endif
+#ifdef KASAN
+	slotspace_copy(SLAREA_ASAN, pdir, PDP_BASE);
+#endif
 #endif /* XEN  && __x86_64__*/
 
 #ifdef XEN
@@ -4470,7 +4473,7 @@ out:
 	return error;
 }
 
-static paddr_t
+paddr_t
 pmap_get_physpage(void)
 {
 	struct vm_page *ptp;
@@ -4649,6 +4652,12 @@ pmap_growkernel(vaddr_t maxkvaddr)
 	}
 #endif
 
+#ifdef KASAN
+	void kasan_shadow_map(void *, size_t);
+	kasan_shadow_map((void *)pmap_maxkvaddr,
+	    (size_t)(maxkvaddr - pmap_maxkvaddr));
+#endif
+
 	pmap_alloc_level(cpm, pmap_maxkvaddr, needed_kptp);
 
 	/*

Index: src/sys/conf/files
diff -u src/sys/conf/files:1.1203 src/sys/conf/files:1.1204
--- src/sys/conf/files:1.1203	Tue Aug 14 14:49:13 2018
+++ src/sys/conf/files	Mon Aug 20 15:04:52 2018
@@ -1,4 +1,4 @@
-#	$NetBSD: files,v 1.1203 2018/08/14 14:49:13 maxv Exp $
+#	$NetBSD: files,v 1.1204 2018/08/20 15:04:52 maxv Exp $
 #	@(#)files.newconf	7.5 (Berkeley) 5/10/93
 
 version 	20171118
@@ -29,6 +29,7 @@ defflag				KEYLOCK
 defparam opt_syslimits.h	CHILD_MAX OPEN_MAX
 defflag opt_diagnostic.h	_DIAGNOSTIC
 defflag				GPROF
+defflag				KASAN
 
 defparam opt_copy_symtab.h	makeoptions_COPY_SYMTAB
 

Index: src/sys/kern/kern_malloc.c
diff -u src/sys/kern/kern_malloc.c:1.147 src/sys/kern/kern_malloc.c:1.148
--- src/sys/kern/kern_malloc.c:1.147	Mon Aug 20 11:46:44 2018
+++ src/sys/kern/kern_malloc.c	Mon Aug 20 15:04:52 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_malloc.c,v 1.147 2018/08/20 11:46:44 maxv Exp $	*/
+/*	$NetBSD: kern_malloc.c,v 1.148 2018/08/20 15:04:52 maxv Exp $	*/
 
 /*
  * Copyright (c) 1987, 1991, 1993
@@ -70,12 +70,18 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_malloc.c,v 1.147 2018/08/20 11:46:44 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_malloc.c,v 1.148 2018/08/20 15:04:52 maxv Exp $");
+
+#include "opt_kasan.h"
 
 #include <sys/param.h>
 #include <sys/malloc.h>
 #include <sys/kmem.h>
 
+#ifdef KASAN
+#include <sys/asan.h>
+#endif
+
 /*
  * Built-in malloc types.  Note: ought to be removed.
  */
@@ -100,10 +106,17 @@ void *
 kern_malloc(unsigned long size, int flags)
 {
 	const int kmflags = (flags & M_NOWAIT) ? KM_NOSLEEP : KM_SLEEP;
+#ifdef KASAN
+	size_t origsize = size;
+#endif
 	size_t allocsize, hdroffset;
 	struct malloc_header *mh;
 	void *p;
 
+#ifdef KASAN
+	kasan_add_redzone(&size);
+#endif
+
 	if (size >= PAGE_SIZE) {
 		if (size > (ULONG_MAX-PAGE_SIZE))
 			allocsize = ULONG_MAX;	/* this will fail later */
@@ -126,6 +139,10 @@ kern_malloc(unsigned long size, int flag
 	mh->mh_size = allocsize - hdroffset;
 	mh++;
 
+#ifdef KASAN
+	kasan_alloc(mh, origsize, size);
+#endif
+
 	return mh;
 }
 
@@ -137,6 +154,10 @@ kern_free(void *addr)
 	mh = addr;
 	mh--;
 
+#ifdef KASAN
+	kasan_free(addr, mh->mh_size);
+#endif
+
 	if (mh->mh_size >= PAGE_SIZE + sizeof(struct malloc_header))
 		kmem_intr_free((char *)addr - PAGE_SIZE,
 		    mh->mh_size + PAGE_SIZE - sizeof(struct malloc_header));

Index: src/sys/kern/subr_kmem.c
diff -u src/sys/kern/subr_kmem.c:1.68 src/sys/kern/subr_kmem.c:1.69
--- src/sys/kern/subr_kmem.c:1.68	Mon Aug 20 11:46:44 2018
+++ src/sys/kern/subr_kmem.c	Mon Aug 20 15:04:52 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: subr_kmem.c,v 1.68 2018/08/20 11:46:44 maxv Exp $	*/
+/*	$NetBSD: subr_kmem.c,v 1.69 2018/08/20 15:04:52 maxv Exp $	*/
 
 /*-
  * Copyright (c) 2009-2015 The NetBSD Foundation, Inc.
@@ -92,10 +92,11 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_kmem.c,v 1.68 2018/08/20 11:46:44 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_kmem.c,v 1.69 2018/08/20 15:04:52 maxv Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_kmem.h"
+#include "opt_kasan.h"
 #endif
 
 #include <sys/param.h>
@@ -106,6 +107,10 @@ __KERNEL_RCSID(0, "$NetBSD: subr_kmem.c,
 #include <sys/lockdebug.h>
 #include <sys/cpu.h>
 
+#ifdef KASAN
+#include <sys/asan.h>
+#endif
+
 #include <uvm/uvm_extern.h>
 #include <uvm/uvm_map.h>
 
@@ -222,6 +227,9 @@ CTASSERT(KM_NOSLEEP == PR_NOWAIT);
 void *
 kmem_intr_alloc(size_t requested_size, km_flag_t kmflags)
 {
+#ifdef KASAN
+	size_t origsize = requested_size;
+#endif
 	size_t allocsz, index;
 	size_t size;
 	pool_cache_t pc;
@@ -239,6 +247,10 @@ kmem_intr_alloc(size_t requested_size, k
 	}
 #endif
 
+#ifdef KASAN
+	kasan_add_redzone(&requested_size);
+#endif
+
 	size = kmem_roundup_size(requested_size);
 	allocsz = size + SIZE_SIZE;
 
@@ -266,7 +278,9 @@ kmem_intr_alloc(size_t requested_size, k
 		FREECHECK_OUT(&kmem_freecheck, p);
 		kmem_size_set(p, requested_size);
 		p += SIZE_SIZE;
-
+#ifdef KASAN
+		kasan_alloc(p, origsize, size);
+#endif
 		return p;
 	}
 	return p;
@@ -309,9 +323,17 @@ kmem_intr_free(void *p, size_t requested
 	}
 #endif
 
+#ifdef KASAN
+	kasan_add_redzone(&requested_size);
+#endif
+
 	size = kmem_roundup_size(requested_size);
 	allocsz = size + SIZE_SIZE;
 
+#ifdef KASAN
+	kasan_free(p, size);
+#endif
+
 	if ((index = ((allocsz -1) >> KMEM_SHIFT))
 	    < kmem_cache_maxidx) {
 		pc = kmem_cache[index];

Index: src/sys/lib/libkern/libkern.h
diff -u src/sys/lib/libkern/libkern.h:1.127 src/sys/lib/libkern/libkern.h:1.128
--- src/sys/lib/libkern/libkern.h:1.127	Sun Jul  8 17:54:42 2018
+++ src/sys/lib/libkern/libkern.h	Mon Aug 20 15:04:52 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: libkern.h,v 1.127 2018/07/08 17:54:42 christos Exp $	*/
+/*	$NetBSD: libkern.h,v 1.128 2018/08/20 15:04:52 maxv Exp $	*/
 
 /*-
  * Copyright (c) 1992, 1993
@@ -36,6 +36,7 @@
 
 #ifdef _KERNEL_OPT
 #include "opt_diagnostic.h"
+#include "opt_kasan.h"
 #endif
 
 #include <sys/types.h>
@@ -368,11 +369,18 @@ int	 memcmp(const void *, const void *, 
 void	*memset(void *, int, size_t);
 void	*memmem(const void *, size_t, const void *, size_t);
 #if __GNUC_PREREQ__(2, 95) && !defined(_STANDALONE)
+#if defined(_KERNEL) && defined(KASAN)
+void	*kasan_memset(void *, int, size_t);
+int	kasan_memcmp(const void *, const void *, size_t);
+void	*kasan_memcpy(void *, const void *, size_t);
+#define	memcpy(d, s, l)		kasan_memcpy(d, s, l)
+#define	memcmp(a, b, l)		kasan_memcmp(a, b, l)
+#define	memset(d, v, l)		kasan_memset(d, v, l)
+#else
 #define	memcpy(d, s, l)		__builtin_memcpy(d, s, l)
 #define	memcmp(a, b, l)		__builtin_memcmp(a, b, l)
-#endif
-#if __GNUC_PREREQ__(2, 95) && !defined(_STANDALONE)
 #define	memset(d, v, l)		__builtin_memset(d, v, l)
+#endif /* _KERNEL && KASAN */
 #endif
 
 char	*strcpy(char *, const char *);

Index: src/sys/sys/Makefile
diff -u src/sys/sys/Makefile:1.166 src/sys/sys/Makefile:1.167
--- src/sys/sys/Makefile:1.166	Thu Jul 12 10:46:48 2018
+++ src/sys/sys/Makefile	Mon Aug 20 15:04:52 2018
@@ -1,10 +1,11 @@
-#	$NetBSD: Makefile,v 1.166 2018/07/12 10:46:48 maxv Exp $
+#	$NetBSD: Makefile,v 1.167 2018/08/20 15:04:52 maxv Exp $
 
 .include <bsd.own.mk>
 
 INCSDIR= /usr/include/sys
 
-INCS=	acct.h agpio.h aio.h ansi.h aout_mids.h ataio.h atomic.h audioio.h \
+INCS=	acct.h agpio.h aio.h ansi.h aout_mids.h asan.h ataio.h atomic.h \
+	audioio.h \
 	bitops.h bootblock.h bswap.h buf.h \
 	callback.h callout.h cdbr.h cdefs.h cdefs_aout.h \
 	cdefs_elf.h cdio.h chio.h clock.h clockctl.h \

Added files:

Index: src/sys/arch/amd64/amd64/asan.c
diff -u /dev/null src/sys/arch/amd64/amd64/asan.c:1.1
--- /dev/null	Mon Aug 20 15:04:52 2018
+++ src/sys/arch/amd64/amd64/asan.c	Mon Aug 20 15:04:51 2018
@@ -0,0 +1,591 @@
+/*	$NetBSD: asan.c,v 1.1 2018/08/20 15:04:51 maxv Exp $	*/
+
+/*
+ * Copyright (c) 2018 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Maxime Villard, and Siddharth Muralee.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__KERNEL_RCSID(0, "$NetBSD: asan.c,v 1.1 2018/08/20 15:04:51 maxv Exp $");
+
+#include <sys/param.h>
+#include <sys/device.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/param.h>
+#include <sys/conf.h>
+#include <sys/systm.h>
+#include <sys/types.h>
+#include <sys/asan.h>
+
+#include <uvm/uvm.h>
+#include <amd64/pmap.h>
+#include <amd64/vmparam.h>
+
+#define VIRTUAL_SHIFT		47	/* 48bit address space, cut half */
+#define CANONICAL_BASE		0xFFFF800000000000
+
+#define KASAN_SHADOW_SCALE_SHIFT	3
+#define KASAN_SHADOW_SCALE_SIZE		(1UL << KASAN_SHADOW_SCALE_SHIFT)
+#define KASAN_SHADOW_MASK		(KASAN_SHADOW_SCALE_SIZE - 1)
+
+#define KASAN_SHADOW_SIZE	(1ULL << (VIRTUAL_SHIFT - KASAN_SHADOW_SCALE_SHIFT))
+#define KASAN_SHADOW_START	(VA_SIGN_NEG((L4_SLOT_KASAN * NBPD_L4)))
+#define KASAN_SHADOW_END	(KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
+
+#define __RET_ADDR	(unsigned long)__builtin_return_address(0)
+
+void kasan_shadow_map(void *, size_t);
+void kasan_init(void);
+
+static bool kasan_enabled __read_mostly = false;
+
+static inline int8_t *kasan_addr_to_shad(const void *addr)
+{
+	vaddr_t va = (vaddr_t)addr;
+	return (int8_t *)(KASAN_SHADOW_START +
+	    ((va - CANONICAL_BASE) >> KASAN_SHADOW_SCALE_SHIFT));
+}
+
+static __always_inline bool
+kasan_unsupported(vaddr_t addr)
+{
+	return (addr >= (vaddr_t)PTE_BASE &&
+	    addr < ((vaddr_t)PTE_BASE + NBPD_L4));
+}
+
+static void
+kasan_shadow_map_page(vaddr_t va)
+{
+	paddr_t pa;
+
+	if (!pmap_valid_entry(L4_BASE[pl4_i(va)])) {
+		pa = pmap_get_physpage();
+		L4_BASE[pl4_i(va)] = pa | PG_KW | pmap_pg_nx | PG_V;
+	}
+	if (!pmap_valid_entry(L3_BASE[pl3_i(va)])) {
+		pa = pmap_get_physpage();
+		L3_BASE[pl3_i(va)] = pa | PG_KW | pmap_pg_nx | PG_V;
+	}
+	if (!pmap_valid_entry(L2_BASE[pl2_i(va)])) {
+		pa = pmap_get_physpage();
+		L2_BASE[pl2_i(va)] = pa | PG_KW | pmap_pg_nx | PG_V;
+	}
+	if (!pmap_valid_entry(L1_BASE[pl1_i(va)])) {
+		pa = pmap_get_physpage();
+		L1_BASE[pl1_i(va)] = pa | PG_KW | pmap_pg_g | pmap_pg_nx | PG_V;
+	}
+}
+
+/*
+ * Allocate the necessary stuff in the shadow, so that we can monitor the
+ * passed area.
+ */
+void
+kasan_shadow_map(void *addr, size_t size)
+{
+	size_t sz, npages, i;
+	vaddr_t va;
+
+	va = (vaddr_t)kasan_addr_to_shad(addr);
+	sz = roundup(size, KASAN_SHADOW_SCALE_SIZE) / KASAN_SHADOW_SCALE_SIZE;
+	va = rounddown(va, PAGE_SIZE);
+	npages = roundup(sz, PAGE_SIZE) / PAGE_SIZE;
+
+	KASSERT(va >= KASAN_SHADOW_START && va < KASAN_SHADOW_END);
+
+	for (i = 0; i < npages; i++) {
+		kasan_shadow_map_page(va + i * PAGE_SIZE);
+	}
+}
+
+/* -------------------------------------------------------------------------- */
+
+#ifdef __HAVE_PCPU_AREA
+#error "PCPU area not allowed with KASAN"
+#endif
+#ifdef __HAVE_DIRECT_MAP
+#error "DMAP not allowed with KASAN"
+#endif
+
+static void
+kasan_ctors(void)
+{
+	extern uint64_t __CTOR_LIST__, __CTOR_END__;
+	size_t nentries, i;
+	uint64_t *ptr;
+
+	nentries = ((size_t)&__CTOR_END__ - (size_t)&__CTOR_LIST__) /
+	    sizeof(uintptr_t);
+
+	ptr = &__CTOR_LIST__;
+	for (i = 0; i < nentries; i++) {
+		void (*func)(void);
+
+		func = (void *)(*ptr);
+		(*func)();
+
+		ptr++;
+	}
+}
+
+/*
+ * Create the shadow mapping. We don't create the 'User' area, because we
+ * exclude it from the monitoring. The 'Main' area is created dynamically
+ * in pmap_growkernel.
+ */
+void
+kasan_init(void)
+{
+	extern struct bootspace bootspace;
+	size_t i;
+
+	CTASSERT((KASAN_SHADOW_SIZE / NBPD_L4) == NL4_SLOT_KASAN);
+
+	/* Kernel. */
+	for (i = 0; i < BTSPACE_NSEGS; i++) {
+		if (bootspace.segs[i].type == BTSEG_NONE) {
+			continue;
+		}
+		kasan_shadow_map((void *)bootspace.segs[i].va,
+		    bootspace.segs[i].sz);
+	}
+
+	/* Boot region. */
+	kasan_shadow_map((void *)bootspace.boot.va, bootspace.boot.sz);
+
+	/* Module map. */
+	kasan_shadow_map((void *)bootspace.smodule,
+	    (size_t)(bootspace.emodule - bootspace.smodule));
+
+	/* The bootstrap spare va. */
+	kasan_shadow_map((void *)bootspace.spareva, PAGE_SIZE);
+
+	kasan_enabled = true;
+
+	/* Call the ASAN constructors. */
+	kasan_ctors();
+}
+
+/* -------------------------------------------------------------------------- */
+
+static void
+kasan_report(unsigned long addr, size_t size, bool write, unsigned long rip)
+{
+	printf("kASan: Unauthorized Access In %p: Addr %p [%zu byte%s, %s]\n",
+	    (void *)rip, (void *)addr, size, (size > 1 ? "s" : ""),
+	    (write ? "write" : "read"));
+}
+
+/* -------------------------------------------------------------------------- */
+
+/* Our redzone values. */
+#define KASAN_GLOBAL_REDZONE	0xFA
+#define KASAN_MEMORY_REDZONE	0xFB
+
+/* Stack redzone shadow values. Part of the compiler ABI. */
+#define KASAN_STACK_LEFT	0xF1
+#define KASAN_STACK_MID		0xF2
+#define KASAN_STACK_RIGHT	0xF3
+#define KASAN_STACK_PARTIAL	0xF4
+#define KASAN_USE_AFTER_SCOPE	0xF8
+
+static void
+kasan_shadow_fill(const void *addr, size_t size, uint8_t val)
+{
+	void *shad;
+
+	if (__predict_false(!kasan_enabled))
+		return;
+	if (__predict_false(size == 0))
+		return;
+	if (__predict_false(kasan_unsupported((vaddr_t)addr)))
+		return;
+
+	KASSERT((vaddr_t)addr % KASAN_SHADOW_SCALE_SIZE == 0);
+	KASSERT(size % KASAN_SHADOW_SCALE_SIZE == 0);
+
+	shad = (void *)kasan_addr_to_shad(addr);
+	size = size >> KASAN_SHADOW_SCALE_SHIFT;
+
+	__builtin_memset(shad, val, size);
+}
+
+static __always_inline void
+kasan_shadow_1byte_markvalid(unsigned long addr)
+{
+	int8_t *byte = kasan_addr_to_shad((void *)addr);
+	int8_t last = (addr & KASAN_SHADOW_MASK) + 1;
+
+	*byte = last;
+}
+
+void
+kasan_add_redzone(size_t *size)
+{
+	*size = roundup(*size, KASAN_SHADOW_SCALE_SIZE);
+	*size += KASAN_SHADOW_SCALE_SIZE;
+}
+
+static void
+kasan_markmem(const void *addr, size_t size, bool valid)
+{
+	size_t i;
+
+	KASSERT((vaddr_t)addr % KASAN_SHADOW_SCALE_SIZE == 0);
+
+	if (valid) {
+		for (i = 0; i < size; i++) {
+			kasan_shadow_1byte_markvalid((unsigned long)addr+i);
+		}
+	} else {
+		KASSERT(size % KASAN_SHADOW_SCALE_SIZE == 0);
+		kasan_shadow_fill(addr, size, KASAN_MEMORY_REDZONE);
+	}
+}
+
+void
+kasan_alloc(const void *addr, size_t size, size_t sz_with_redz)
+{
+	kasan_markmem(addr, sz_with_redz, false);
+	kasan_markmem(addr, size, true);
+}
+
+void
+kasan_free(const void *addr, size_t sz_with_redz)
+{
+	kasan_markmem(addr, sz_with_redz, true);
+}
+
+/* -------------------------------------------------------------------------- */
+
+#define ADDR_CROSSES_SCALE_BOUNDARY(addr, size) 		\
+	(addr >> KASAN_SHADOW_SCALE_SHIFT) !=			\
+	    ((addr + size - 1) >> KASAN_SHADOW_SCALE_SHIFT)
+
+static __always_inline bool
+kasan_shadow_1byte_isvalid(unsigned long addr)
+{
+	int8_t *byte = kasan_addr_to_shad((void *)addr);
+	int8_t last = (addr & KASAN_SHADOW_MASK) + 1;
+
+	return __predict_true(*byte == 0 || last <= *byte);
+}
+
+static __always_inline bool
+kasan_shadow_2byte_isvalid(unsigned long addr)
+{
+	int8_t *byte, last;
+
+	if (ADDR_CROSSES_SCALE_BOUNDARY(addr, 2)) {
+		return (kasan_shadow_1byte_isvalid(addr) &&
+		    kasan_shadow_1byte_isvalid(addr+1));
+	}
+
+	byte = kasan_addr_to_shad((void *)addr);
+	last = ((addr + 1) & KASAN_SHADOW_MASK) + 1;
+
+	return __predict_true(*byte == 0 || last <= *byte);
+}
+
+static __always_inline bool
+kasan_shadow_4byte_isvalid(unsigned long addr)
+{
+	int8_t *byte, last;
+
+	if (ADDR_CROSSES_SCALE_BOUNDARY(addr, 4)) {
+		return (kasan_shadow_2byte_isvalid(addr) &&
+		    kasan_shadow_2byte_isvalid(addr+2));
+	}
+
+	byte = kasan_addr_to_shad((void *)addr);
+	last = ((addr + 3) & KASAN_SHADOW_MASK) + 1;
+
+	return __predict_true(*byte == 0 || last <= *byte);
+}
+
+static __always_inline bool
+kasan_shadow_8byte_isvalid(unsigned long addr)
+{
+	int8_t *byte, last;
+
+	if (ADDR_CROSSES_SCALE_BOUNDARY(addr, 8)) {
+		return (kasan_shadow_4byte_isvalid(addr) &&
+		    kasan_shadow_4byte_isvalid(addr+4));
+	}
+
+	byte = kasan_addr_to_shad((void *)addr);
+	last = ((addr + 7) & KASAN_SHADOW_MASK) + 1;
+
+	return __predict_true(*byte == 0 || last <= *byte);
+}
+
+static __always_inline bool
+kasan_shadow_Nbyte_isvalid(unsigned long addr, size_t size)
+{
+	size_t i;
+
+	for (i = 0; i < size; i++) {
+		if (!kasan_shadow_1byte_isvalid(addr+i))
+			return false;
+	}
+
+	return true;
+}
+
+static __always_inline void
+kasan_shadow_check(unsigned long addr, size_t size, bool write,
+    unsigned long retaddr)
+{
+	bool valid;
+
+	if (__predict_false(!kasan_enabled))
+		return;
+	if (__predict_false(size == 0))
+		return;
+	if (__predict_false(kasan_unsupported(addr)))
+		return;
+
+	if (__builtin_constant_p(size)) {
+		switch (size) {
+		case 1:
+			valid = kasan_shadow_1byte_isvalid(addr);
+			break;
+		case 2:
+			valid = kasan_shadow_2byte_isvalid(addr);
+			break;
+		case 4:
+			valid = kasan_shadow_4byte_isvalid(addr);
+			break;
+		case 8:
+			valid = kasan_shadow_8byte_isvalid(addr);
+			break;
+		default:
+			valid = kasan_shadow_Nbyte_isvalid(addr, size);
+			break;
+		}
+	} else {
+		valid = kasan_shadow_Nbyte_isvalid(addr, size);
+	}
+
+	if (__predict_false(!valid)) {
+		kasan_report(addr, size, write, retaddr);
+	}
+}
+
+/* -------------------------------------------------------------------------- */
+
+void *
+kasan_memcpy(void *dst, const void *src, size_t len)
+{
+	kasan_shadow_check((unsigned long)src, len, false, __RET_ADDR);
+	kasan_shadow_check((unsigned long)dst, len, true, __RET_ADDR);
+	return __builtin_memcpy(dst, src, len);
+}
+
+int
+kasan_memcmp(const void *b1, const void *b2, size_t len)
+{
+	kasan_shadow_check((unsigned long)b1, len, false, __RET_ADDR);
+	kasan_shadow_check((unsigned long)b2, len, false, __RET_ADDR);
+	return __builtin_memcmp(b1, b2, len);
+}
+
+void *
+kasan_memset(void *b, int c, size_t len)
+{
+	kasan_shadow_check((unsigned long)b, len, true, __RET_ADDR);
+	return __builtin_memset(b, c, len);
+}
+
+/* -------------------------------------------------------------------------- */
+
+#if defined(__clang__) && (__clang_major__ - 0 >= 6)
+#define ASAN_ABI_VERSION	8
+#elif __GNUC_PREREQ__(7, 1) && !defined(__clang__)
+#define ASAN_ABI_VERSION	8
+#elif __GNUC_PREREQ__(6, 1) && !defined(__clang__)
+#define ASAN_ABI_VERSION	6
+#else
+#error "Unsupported compiler version"
+#endif
+
+/*
+ * Part of the compiler ABI.
+ */
+struct __asan_global_source_location {
+	const char *filename;
+	int line_no;
+	int column_no;
+};
+struct __asan_global {
+	const void *beg;		/* address of the global variable */
+	size_t size;			/* size of the global variable */
+	size_t size_with_redzone;	/* size with the redzone */
+	const void *name;		/* name of the variable */
+	const void *module_name;	/* name of the module where the var is declared */
+	unsigned long has_dynamic_init;	/* the var has dyn initializer (c++) */
+	struct __asan_global_source_location *location;
+#if ASAN_ABI_VERSION >= 7
+	uintptr_t odr_indicator;	/* the address of the ODR indicator symbol */
+#endif
+};
+
+void __asan_register_globals(struct __asan_global *, size_t);
+void __asan_unregister_globals(struct __asan_global *, size_t);
+
+static void
+kasan_register_global(struct __asan_global *global)
+{
+	size_t aligned_size = roundup(global->size, KASAN_SHADOW_SCALE_SIZE);
+
+	/* Poison the redzone following the var. */
+	kasan_shadow_fill((void *)((uintptr_t)global->beg + aligned_size),
+	    global->size_with_redzone - aligned_size, KASAN_GLOBAL_REDZONE);
+}
+
+void
+__asan_register_globals(struct __asan_global *globals, size_t size)
+{
+	size_t i;
+	for (i = 0; i < size; i++) {
+		kasan_register_global(&globals[i]);
+	}
+}
+
+void
+__asan_unregister_globals(struct __asan_global *globals, size_t size)
+{
+}
+
+#define ASAN_LOAD_STORE(size)					\
+	void __asan_load##size(unsigned long);			\
+	void __asan_load##size(unsigned long addr)		\
+	{							\
+		kasan_shadow_check(addr, size, false, __RET_ADDR);\
+	} 							\
+	void __asan_load##size##_noabort(unsigned long);	\
+	void __asan_load##size##_noabort(unsigned long addr)	\
+	{							\
+		kasan_shadow_check(addr, size, false, __RET_ADDR);\
+	}							\
+	void __asan_store##size(unsigned long);			\
+	void __asan_store##size(unsigned long addr)		\
+	{							\
+		kasan_shadow_check(addr, size, true, __RET_ADDR);\
+	}							\
+	void __asan_store##size##_noabort(unsigned long);	\
+	void __asan_store##size##_noabort(unsigned long addr)	\
+	{							\
+		kasan_shadow_check(addr, size, true, __RET_ADDR);\
+	}
+
+ASAN_LOAD_STORE(1);
+ASAN_LOAD_STORE(2);
+ASAN_LOAD_STORE(4);
+ASAN_LOAD_STORE(8);
+ASAN_LOAD_STORE(16);
+
+void __asan_loadN(unsigned long, size_t);
+void __asan_loadN_noabort(unsigned long, size_t);
+void __asan_storeN(unsigned long, size_t);
+void __asan_storeN_noabort(unsigned long, size_t);
+void __asan_handle_no_return(void);
+void __asan_poison_stack_memory(const void *, size_t);
+void __asan_unpoison_stack_memory(const void *, size_t);
+void __asan_alloca_poison(unsigned long, size_t);
+void __asan_allocas_unpoison(const void *, const void *);
+
+void
+__asan_loadN(unsigned long addr, size_t size)
+{
+	kasan_shadow_check(addr, size, false, __RET_ADDR);
+}
+
+void
+__asan_loadN_noabort(unsigned long addr, size_t size)
+{
+	kasan_shadow_check(addr, size, false, __RET_ADDR);
+}
+
+void
+__asan_storeN(unsigned long addr, size_t size)
+{
+	kasan_shadow_check(addr, size, true, __RET_ADDR);
+}
+
+void
+__asan_storeN_noabort(unsigned long addr, size_t size)
+{
+	kasan_shadow_check(addr, size, true, __RET_ADDR);
+}
+
+void
+__asan_handle_no_return(void)
+{
+	/* nothing */
+}
+
+void
+__asan_poison_stack_memory(const void *addr, size_t size)
+{
+	KASSERT((vaddr_t)addr % KASAN_SHADOW_SCALE_SIZE == 0);
+	kasan_shadow_fill(addr, size, KASAN_USE_AFTER_SCOPE);
+}
+
+void
+__asan_unpoison_stack_memory(const void *addr, size_t size)
+{
+	KASSERT((vaddr_t)addr % KASAN_SHADOW_SCALE_SIZE == 0);
+	kasan_shadow_fill(addr, size, 0);
+}
+
+void
+__asan_alloca_poison(unsigned long addr, size_t size)
+{
+	panic("%s: impossible!", __func__);
+}
+
+void
+__asan_allocas_unpoison(const void *stack_top, const void *stack_bottom)
+{
+	panic("%s: impossible!", __func__);
+}
+
+#define ASAN_SET_SHADOW(byte) \
+	void __asan_set_shadow_##byte(void *, size_t);			\
+	void __asan_set_shadow_##byte(void *addr, size_t size)		\
+	{								\
+		__builtin_memset((void *)addr, 0x##byte, size);		\
+	}
+
+ASAN_SET_SHADOW(00);
+ASAN_SET_SHADOW(f1);
+ASAN_SET_SHADOW(f2);
+ASAN_SET_SHADOW(f3);
+ASAN_SET_SHADOW(f5);
+ASAN_SET_SHADOW(f8);

Index: src/sys/sys/asan.h
diff -u /dev/null src/sys/sys/asan.h:1.1
--- /dev/null	Mon Aug 20 15:04:52 2018
+++ src/sys/sys/asan.h	Mon Aug 20 15:04:52 2018
@@ -0,0 +1,41 @@
+/*	$NetBSD: asan.h,v 1.1 2018/08/20 15:04:52 maxv Exp $	*/
+
+/*
+ * Copyright (c) 2018 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Maxime Villard.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SYS_ASAN_H_
+#define _SYS_ASAN_H_
+
+#include <sys/types.h>
+
+void kasan_add_redzone(size_t *);
+void kasan_alloc(const void *, size_t, size_t);
+void kasan_free(const void *, size_t);
+
+#endif /* !_SYS_ASAN_H_ */
