Module Name:    src
Committed By:   kamil
Date:           Sun Mar 10 12:54:39 UTC 2019

Modified Files:
        src/share/man/man4: kcov.4
        src/sys/kern: subr_kcov.c
        src/tests/modules: t_kcov.c

Log Message:
Add support for multiple threads in kcov(4)

Reuse the fd_clone() API to associate a kcov descriptor (KD) with each file
descriptor. Each fd (/dev/kcov) can be used by a single LWP at a time.
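
For illustration only (not part of the commit): a minimal per-thread usage
sketch, adapted from the kcov.4 example further below, where each thread
opens its own /dev/kcov and therefore owns its own KD. The tracer() worker
is a made-up helper for this sketch.

	#include <sys/ioctl.h>
	#include <sys/kcov.h>
	#include <sys/mman.h>

	#include <err.h>
	#include <fcntl.h>
	#include <pthread.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	/* Illustrative worker: each thread traces through its own fd. */
	static void *
	tracer(void *arg)
	{
		kcov_int_t *cover, n;
		uint64_t size = 1024 * 100;
		int fd;

		(void)arg;

		/* Per-thread descriptor: this LWP becomes the KD owner. */
		fd = open("/dev/kcov", O_RDWR);
		if (fd == -1)
			err(1, "open");
		if (ioctl(fd, KCOV_IOC_SETBUFSIZE, &size) == -1)
			err(1, "ioctl: KCOV_IOC_SETBUFSIZE");
		cover = mmap(NULL, size * KCOV_ENTRY_SIZE,
		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
		if (cover == MAP_FAILED)
			err(1, "mmap");
		if (ioctl(fd, KCOV_IOC_ENABLE) == -1)
			err(1, "ioctl: KCOV_IOC_ENABLE");
		KCOV_STORE(cover[0], 0);
		read(-1, NULL, 0);	/* syscall paths to be traced */
		n = KCOV_LOAD(cover[0]);
		if (ioctl(fd, KCOV_IOC_DISABLE) == -1)
			err(1, "ioctl: KCOV_IOC_DISABLE");
		printf("collected %ju PCs\n", (uintmax_t)n);
		munmap(cover, size * KCOV_ENTRY_SIZE);
		close(fd);
		return NULL;
	}

	int
	main(void)
	{
		pthread_t t[4];
		size_t i;

		for (i = 0; i < 4; i++)
			pthread_create(&t[i], NULL, tracer, NULL);
		for (i = 0; i < 4; i++)
			pthread_join(t[i], NULL);
		return 0;
	}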

Add new ATF regression tests and clean up the existing code there. All tests
pass.

Refresh the kcov(4) man page documentation.

Developed with help from <maxv>.


To generate a diff of this commit:
cvs rdiff -u -r1.2 -r1.3 src/share/man/man4/kcov.4
cvs rdiff -u -r1.3 -r1.4 src/sys/kern/subr_kcov.c
cvs rdiff -u -r1.4 -r1.5 src/tests/modules/t_kcov.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/share/man/man4/kcov.4
diff -u src/share/man/man4/kcov.4:1.2 src/share/man/man4/kcov.4:1.3
--- src/share/man/man4/kcov.4:1.2	Sat Feb 23 17:33:01 2019
+++ src/share/man/man4/kcov.4	Sun Mar 10 12:54:39 2019
@@ -1,4 +1,4 @@
-.\"	$NetBSD: kcov.4,v 1.2 2019/02/23 17:33:01 wiz Exp $
+.\"	$NetBSD: kcov.4,v 1.3 2019/03/10 12:54:39 kamil Exp $
 .\"
 .\" Copyright (c) 2018 Anton Lindqvist <an...@openbsd.org>
 .\"
@@ -14,7 +14,7 @@
 .\" ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 .\" OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 .\"
-.Dd November 16, 2018
+.Dd March 10, 2019
 .Dt KCOV 4
 .Os
 .Sh NAME
@@ -28,12 +28,35 @@
 The
 .Nm
 driver implements collection of code coverage inside the kernel.
-It can be enabled on a per process basis from userland,
+It can be enabled on a per thread basis from userland,
 allowing the kernel program counter to be collected during syscalls triggered by
-the same process.
+the same thread.
+.Pp
+The
+.Nm
+descriptors (KD) are allocated during
+.Xr open 2 ,
+and are associated with a file descriptor.
+A thread can enable the
+.Nm
+device.
+When this happens,
+this thread becomes the owner of the
+.Nm
+descriptors (KD),
+and no thread can disable this KD except the owner.
+.Pp
+A
+.Nm
+descriptor (KD)
+is freed when its file descriptor is closed iff the KD is not active on a thread.
+If it is,
+we ask the thread to free it when it exits.
+.Pp
 The collected coverage can be accessed by mapping the device
 using
 .Xr mmap 2 .
+The buffers are mapped without risk that the kernel frees a buffer still mapped in a process.
 .Pp
 By default,
 .Nm
@@ -94,7 +117,7 @@ int
 main(void)
 {
 	kcov_int_t *cover, i, n;
-	kcov_int_t size = 1024 * 100;
+	uint64_t size = 1024 * 100;
 	int fd;
 
 	fd = open("/dev/kcov", O_RDWR);
@@ -108,9 +131,9 @@ main(void)
 		err(1, "mmap");
 	if (ioctl(fd, KCOV_IOC_ENABLE) == -1)
 		err(1, "ioctl: KCOV_IOC_ENABLE");
-	__atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
+	KCOV_STORE(cover[0], 0);
 	read(-1, NULL, 0); /* syscall paths to be traced */
-	n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
+	n = KCOV_LOAD(cover[0]);
 	if (ioctl(fd, KCOV_IOC_DISABLE) == -1)
 		err(1, "ioctl: KCOV_IOC_DISABLE");
 	for (i = 0; i < cover[0]; i++)
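
Not part of the commit, but to show the ownership rule documented above in
code form (along the lines of the new kcov_multienable_on_the_same_thread
test): once a thread has enabled one KD, enabling a second one from the same
thread is rejected with EBUSY. The main() below is a standalone illustration.

	#include <sys/ioctl.h>
	#include <sys/kcov.h>

	#include <err.h>
	#include <errno.h>
	#include <fcntl.h>
	#include <stdint.h>
	#include <unistd.h>

	int
	main(void)
	{
		uint64_t size = 1024;
		int fd1, fd2;

		fd1 = open("/dev/kcov", O_RDWR);
		fd2 = open("/dev/kcov", O_RDWR);	/* independent KD */
		if (fd1 == -1 || fd2 == -1)
			err(1, "open");
		if (ioctl(fd1, KCOV_IOC_SETBUFSIZE, &size) == -1 ||
		    ioctl(fd2, KCOV_IOC_SETBUFSIZE, &size) == -1)
			err(1, "KCOV_IOC_SETBUFSIZE");

		if (ioctl(fd1, KCOV_IOC_ENABLE) == -1)
			err(1, "KCOV_IOC_ENABLE");
		/* This thread already owns fd1's KD. */
		if (ioctl(fd2, KCOV_IOC_ENABLE) == -1 && errno == EBUSY)
			warnx("second enable rejected with EBUSY");

		if (ioctl(fd1, KCOV_IOC_DISABLE) == -1)
			err(1, "KCOV_IOC_DISABLE");
		close(fd1);
		close(fd2);
		return 0;
	}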

Index: src/sys/kern/subr_kcov.c
diff -u src/sys/kern/subr_kcov.c:1.3 src/sys/kern/subr_kcov.c:1.4
--- src/sys/kern/subr_kcov.c:1.3	Sat Feb 23 12:07:40 2019
+++ src/sys/kern/subr_kcov.c	Sun Mar 10 12:54:39 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: subr_kcov.c,v 1.3 2019/02/23 12:07:40 kamil Exp $	*/
+/*	$NetBSD: subr_kcov.c,v 1.4 2019/03/10 12:54:39 kamil Exp $	*/
 
 /*
  * Copyright (c) 2019 The NetBSD Foundation, Inc.
@@ -38,7 +38,10 @@
 
 #include <sys/conf.h>
 #include <sys/condvar.h>
+#include <sys/file.h>
+#include <sys/filedesc.h>
 #include <sys/kmem.h>
+#include <sys/mman.h>
 #include <sys/mutex.h>
 #include <sys/queue.h>
 
@@ -47,28 +50,67 @@
 
 #define KCOV_BUF_MAX_ENTRIES	(256 << 10)
 
+static dev_type_open(kcov_open);
+
+const struct cdevsw kcov_cdevsw = {
+	.d_open = kcov_open,
+	.d_close = noclose,
+	.d_read = noread,
+	.d_write = nowrite,
+	.d_ioctl = noioctl,
+	.d_stop = nostop,
+	.d_tty = notty,
+	.d_poll = nopoll,
+	.d_mmap = nommap,
+	.d_kqfilter = nokqfilter,
+	.d_discard = nodiscard,
+	.d_flag = D_OTHER | D_MPSAFE
+};
+
+static int kcov_fops_ioctl(file_t *, u_long, void *);
+static int kcov_fops_close(file_t *);
+static int kcov_fops_mmap(file_t *, off_t *, size_t, int, int *, int *,
+    struct uvm_object **, int *);
+
+const struct fileops kcov_fileops = {
+	.fo_read = fbadop_read,
+	.fo_write = fbadop_write,
+	.fo_ioctl = kcov_fops_ioctl,
+	.fo_fcntl = fnullop_fcntl,
+	.fo_poll = fnullop_poll,
+	.fo_stat = fbadop_stat,
+	.fo_close = kcov_fops_close,
+	.fo_kqfilter = fnullop_kqfilter,
+	.fo_restart = fnullop_restart,
+	.fo_mmap = kcov_fops_mmap,
+};
+
 /*
- * The KCOV descriptors are allocated during open(), and are associated with
- * the calling proc. They are freed lazily when their refcount reaches zero,
- * only when the process exits; this guarantees that kd->buf is not mmapped
- * in a currently running LWP. A KCOV descriptor is active on only one LWP
- * at the same time within the proc.
+ * The KCOV descriptors (KD) are allocated during open(), and are associated
+ * with a file descriptor.
+ *
+ * An LWP can 'enable' a KD. When this happens, this LWP becomes the owner of
+ * the KD, and no LWP can 'disable' this KD except the owner.
  *
- * In the refcount, one ref is for the proc, and one ref is for the LWP where
- * the descriptor is active. In each case, the descriptor is pointed to in
- * the proc's and LWP's specificdata.
+ * A KD is freed when its file descriptor is closed _iff_ the KD is not active
+ * on an LWP. If it is, we ask the LWP to free it when it exits.
+ *
+ * The buffers mmapped are in a dedicated uobj, therefore there is no risk
+ * that the kernel frees a buffer still mmapped in a process: the uobj
+ * refcount will be non-zero, so the backing is not freed until an munmap
+ * occurs on said process.
  */
 
 typedef struct kcov_desc {
 	kmutex_t lock;
-	int refcnt;
 	kcov_int_t *buf;
+	struct uvm_object *uobj;
 	size_t bufnent;
 	size_t bufsize;
-	TAILQ_ENTRY(kcov_desc) entry;
+	bool enabled;
+	bool lwpfree;
 } kcov_t;
 
-static specificdata_key_t kcov_proc_key;
 static specificdata_key_t kcov_lwp_key;
 
 static void
@@ -76,7 +118,6 @@ kcov_lock(kcov_t *kd)
 {
 
 	mutex_enter(&kd->lock);
-	KASSERT(kd->refcnt > 0);
 }
 
 static void
@@ -87,60 +128,38 @@ kcov_unlock(kcov_t *kd)
 }
 
 static void
-kcov_lwp_take(kcov_t *kd)
-{
-
-	kd->refcnt++;
-	KASSERT(kd->refcnt == 2);
-	lwp_setspecific(kcov_lwp_key, kd);
-}
-
-static void
-kcov_lwp_release(kcov_t *kd)
-{
-
-	KASSERT(kd->refcnt == 2);
-	kd->refcnt--;
-	lwp_setspecific(kcov_lwp_key, NULL);
-}
-
-static inline bool
-kcov_is_owned(kcov_t *kd)
+kcov_free(kcov_t *kd)
 {
 
-	return (kd->refcnt > 1);
+	KASSERT(kd != NULL);
+	if (kd->buf != NULL) {
+		uvm_deallocate(kernel_map, (vaddr_t)kd->buf, kd->bufsize);
+	}
+	mutex_destroy(&kd->lock);
+	kmem_free(kd, sizeof(*kd));
 }
 
 static void
-kcov_free(void *arg)
+kcov_lwp_free(void *arg)
 {
 	kcov_t *kd = (kcov_t *)arg;
-	bool dofree;
 
 	if (kd == NULL) {
 		return;
 	}
-
 	kcov_lock(kd);
-	kd->refcnt--;
+	kd->enabled = false;
 	kcov_unlock(kd);
-	dofree = (kd->refcnt == 0);
-
-	if (!dofree) {
-		return;
-	}
-	if (kd->buf != NULL) {
-		uvm_km_free(kernel_map, (vaddr_t)kd->buf, kd->bufsize,
-		    UVM_KMF_WIRED);
+	if (kd->lwpfree) {
+		kcov_free(kd);
 	}
-	mutex_destroy(&kd->lock);
-	kmem_free(kd, sizeof(*kd));
 }
 
 static int
 kcov_allocbuf(kcov_t *kd, uint64_t nent)
 {
 	size_t size;
+	int error;
 
 	if (nent < 2 || nent > KCOV_BUF_MAX_ENTRIES)
 		return EINVAL;
@@ -148,13 +167,25 @@ kcov_allocbuf(kcov_t *kd, uint64_t nent)
 		return EEXIST;
 
 	size = roundup(nent * KCOV_ENTRY_SIZE, PAGE_SIZE);
-	kd->buf = (kcov_int_t *)uvm_km_alloc(kernel_map, size, 0,
-	    UVM_KMF_WIRED|UVM_KMF_ZERO);
-	if (kd->buf == NULL)
-		return ENOMEM;
-
 	kd->bufnent = nent - 1;
 	kd->bufsize = size;
+	kd->uobj = uao_create(kd->bufsize, 0);
+
+	/* Map the uobj into the kernel address space, as wired. */
+	kd->buf = NULL;
+	error = uvm_map(kernel_map, (vaddr_t *)&kd->buf, kd->bufsize, kd->uobj,
+	    0, 0, UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_SHARE,
+	    UVM_ADV_RANDOM, 0));
+	if (error) {
+		uao_detach(kd->uobj);
+		return error;
+	}
+	error = uvm_map_pageable(kernel_map, (vaddr_t)kd->buf,
+	    (vaddr_t)kd->buf + size, false, 0);
+	if (error) {
+		uvm_deallocate(kernel_map, (vaddr_t)kd->buf, size);
+		return error;
+	}
 
 	return 0;
 }
@@ -164,50 +195,63 @@ kcov_allocbuf(kcov_t *kd, uint64_t nent)
 static int
 kcov_open(dev_t dev, int flag, int mode, struct lwp *l)
 {
-	struct proc *p = l->l_proc;
+	struct file *fp;
+	int error, fd;
 	kcov_t *kd;
 
-	kd = proc_getspecific(p, kcov_proc_key);
-	if (kd != NULL)
-		return EBUSY;
+	error = fd_allocfile(&fp, &fd);
+	if (error)
+		return error;
 
 	kd = kmem_zalloc(sizeof(*kd), KM_SLEEP);
 	mutex_init(&kd->lock, MUTEX_DEFAULT, IPL_NONE);
-	kd->refcnt = 1;
-	proc_setspecific(p, kcov_proc_key, kd);
 
-	return 0;
+	return fd_clone(fp, fd, flag, &kcov_fileops, kd);
 }
 
 static int
-kcov_close(dev_t dev, int flag, int mode, struct lwp *l)
+kcov_fops_close(file_t *fp)
 {
+	kcov_t *kd = fp->f_data;
+
+	kcov_lock(kd);
+	if (kd->enabled) {
+		kd->lwpfree = true;
+		kcov_unlock(kd);
+	} else {
+		kcov_unlock(kd);
+		kcov_free(kd);
+	}
+	fp->f_data = NULL;
 
    	return 0;
 }
 
 static int
-kcov_ioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
+kcov_fops_ioctl(file_t *fp, u_long cmd, void *addr)
 {
-	struct proc *p = l->l_proc;
 	int error = 0;
 	kcov_t *kd;
 
-	kd = proc_getspecific(p, kcov_proc_key);
+	kd = fp->f_data;
 	if (kd == NULL)
 		return ENXIO;
 	kcov_lock(kd);
 
 	switch (cmd) {
 	case KCOV_IOC_SETBUFSIZE:
-		if (kcov_is_owned(kd)) {
+		if (kd->enabled) {
 			error = EBUSY;
 			break;
 		}
 		error = kcov_allocbuf(kd, *((uint64_t *)addr));
 		break;
 	case KCOV_IOC_ENABLE:
-		if (kcov_is_owned(kd)) {
+		if (kd->enabled) {
+			error = EBUSY;
+			break;
+		}
+		if (lwp_getspecific(kcov_lwp_key) != NULL) {
 			error = EBUSY;
 			break;
 		}
@@ -215,16 +259,20 @@ kcov_ioctl(dev_t dev, u_long cmd, void *
 			error = ENOBUFS;
 			break;
 		}
-		KASSERT(l == curlwp);
-		kcov_lwp_take(kd);
+		lwp_setspecific(kcov_lwp_key, kd);
+		kd->enabled = true;
 		break;
 	case KCOV_IOC_DISABLE:
-		if (lwp_getspecific(kcov_lwp_key) == NULL) {
+		if (!kd->enabled) {
 			error = ENOENT;
 			break;
 		}
-		KASSERT(l == curlwp);
-		kcov_lwp_release(kd);
+		if (lwp_getspecific(kcov_lwp_key) != kd) {
+			error = ENOENT;
+			break;
+		}
+		lwp_setspecific(kcov_lwp_key, NULL);
+		kd->enabled = false;
 		break;
 	default:
 		error = EINVAL;
@@ -234,28 +282,42 @@ kcov_ioctl(dev_t dev, u_long cmd, void *
 	return error;
 }
 
-static paddr_t
-kcov_mmap(dev_t dev, off_t offset, int prot)
+static int
+kcov_fops_mmap(file_t *fp, off_t *offp, size_t size, int prot, int *flagsp,
+    int *advicep, struct uvm_object **uobjp, int *maxprotp)
 {
+	off_t off = *offp;
 	kcov_t *kd;
-	paddr_t pa;
-	vaddr_t va;
+	int error = 0;
 
-	kd = proc_getspecific(curproc, kcov_proc_key);
-	KASSERT(kd != NULL);
+	if (prot & PROT_EXEC)
+		return EACCES;
+	if (off < 0)
+		return EINVAL;
+	if (size > KCOV_BUF_MAX_ENTRIES * KCOV_ENTRY_SIZE)
+		return EINVAL;
+	if (off > KCOV_BUF_MAX_ENTRIES * KCOV_ENTRY_SIZE)
+		return EINVAL;
 
-	if ((offset < 0) || (offset >= kd->bufnent * KCOV_ENTRY_SIZE)) {
-		return (paddr_t)-1;
-	}
-	if (offset & PAGE_MASK) {
-		return (paddr_t)-1;
-	}
-	va = (vaddr_t)kd->buf + offset;
-	if (!pmap_extract(pmap_kernel(), va, &pa)) {
-		return (paddr_t)-1;
+	kd = fp->f_data;
+	if (kd == NULL)
+		return ENXIO;
+	kcov_lock(kd);
+
+	if ((size + off) > kd->bufsize) {
+		error = ENOMEM;
+		goto out;
 	}
 
-	return atop(pa);
+	uao_reference(kd->uobj);
+
+	*uobjp = kd->uobj;
+	*maxprotp = prot;
+	*advicep = UVM_ADV_RANDOM;
+
+out:
+	kcov_unlock(kd);
+	return error;
 }
 
 static inline bool
@@ -289,38 +351,28 @@ __sanitizer_cov_trace_pc(void)
 		return;
 	}
 
-	idx = kd->buf[0];
+	if (!kd->enabled) {
+		/* Tracing not enabled */
+		return;
+	}
+
+	idx = KCOV_LOAD(kd->buf[0]);
 	if (idx < kd->bufnent) {
-		kd->buf[idx+1] = (intptr_t)__builtin_return_address(0);
-		kd->buf[0]++;
+		KCOV_STORE(kd->buf[idx+1],
+		    (intptr_t)__builtin_return_address(0));
+		KCOV_STORE(kd->buf[0], idx + 1);
 	}
 }
 
 /* -------------------------------------------------------------------------- */
 
-const struct cdevsw kcov_cdevsw = {
-	.d_open = kcov_open,
-	.d_close = kcov_close,
-	.d_read = noread,
-	.d_write = nowrite,
-	.d_ioctl = kcov_ioctl,
-	.d_stop = nostop,
-	.d_tty = notty,
-	.d_poll = nopoll,
-	.d_mmap = kcov_mmap,
-	.d_kqfilter = nokqfilter,
-	.d_discard = nodiscard,
-	.d_flag = D_OTHER | D_MPSAFE
-};
-
 MODULE(MODULE_CLASS_ANY, kcov, NULL);
 
 static void
 kcov_init(void)
 {
 
-	proc_specific_key_create(&kcov_proc_key, kcov_free);
-	lwp_specific_key_create(&kcov_lwp_key, kcov_free);
+	lwp_specific_key_create(&kcov_lwp_key, kcov_lwp_free);
 }
 
 static int
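
A side note on the "no risk that the kernel frees a buffer still mmapped"
comment above: since the pages live in the KD's own uobj, the userland
mapping holds its own reference and is expected to survive a close(). A small
sketch of that property (illustration only, not part of the commit, similar
in spirit to the kcov_mmap_no_munmap tests):

	#include <sys/ioctl.h>
	#include <sys/kcov.h>
	#include <sys/mman.h>

	#include <err.h>
	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	int
	main(void)
	{
		kcov_int_t *cover;
		uint64_t size = 1024;
		int fd;

		fd = open("/dev/kcov", O_RDWR);
		if (fd == -1)
			err(1, "open");
		if (ioctl(fd, KCOV_IOC_SETBUFSIZE, &size) == -1)
			err(1, "KCOV_IOC_SETBUFSIZE");
		cover = mmap(NULL, size * KCOV_ENTRY_SIZE,
		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
		if (cover == MAP_FAILED)
			err(1, "mmap");

		/*
		 * Closing the fd frees the KD, but the mapping stays valid:
		 * the uobj backing it is only released after munmap().
		 */
		close(fd);
		printf("cover[0] = %ju\n", (uintmax_t)KCOV_LOAD(cover[0]));
		munmap(cover, size * KCOV_ENTRY_SIZE);
		return 0;
	}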

Index: src/tests/modules/t_kcov.c
diff -u src/tests/modules/t_kcov.c:1.4 src/tests/modules/t_kcov.c:1.5
--- src/tests/modules/t_kcov.c:1.4	Mon Feb 25 10:23:01 2019
+++ src/tests/modules/t_kcov.c	Sun Mar 10 12:54:39 2019
@@ -35,6 +35,7 @@
 #include <sys/kcov.h>
 #include <sys/mman.h>
 
+#include <errno.h>
 #include <fcntl.h>
 #include <pthread.h>
 #include <semaphore.h>
@@ -55,11 +56,37 @@ open_kcov(void)
 	return fd;
 }
 
+ATF_TC_WITHOUT_HEAD(kcov_multiopen);
+ATF_TC_BODY(kcov_multiopen, tc)
+{
+	int fd1, fd2;
+	fd1 = open_kcov();
+
+	fd2 = open("/dev/kcov", O_RDWR);
+	ATF_REQUIRE(fd2 != -1);
+
+	close(fd1);
+	close(fd2);
+}
+
+ATF_TC_WITHOUT_HEAD(kcov_open_close_open);
+ATF_TC_BODY(kcov_open_close_open, tc)
+{
+	int fd;
+
+	fd = open_kcov();
+	close(fd);
+	fd = open("/dev/kcov", O_RDWR);
+	ATF_REQUIRE(fd != -1);
+
+	close(fd);
+}
+
 ATF_TC_WITHOUT_HEAD(kcov_bufsize);
 ATF_TC_BODY(kcov_bufsize, tc)
 {
 	int fd;
-	kcov_int_t size;
+	uint64_t size;
 	fd = open_kcov();
 
 	size = 0;
@@ -75,7 +102,7 @@ ATF_TC_BODY(kcov_mmap, tc)
 {
 	void *data;
 	int fd;
-	kcov_int_t size = 2 * PAGE_SIZE / KCOV_ENTRY_SIZE;
+	uint64_t size = 2 * PAGE_SIZE / KCOV_ENTRY_SIZE;
 
 	fd = open_kcov();
 
@@ -97,7 +124,7 @@ ATF_TC_WITHOUT_HEAD(kcov_mmap_no_munmap)
 ATF_TC_BODY(kcov_mmap_no_munmap, tc)
 {
 	int fd;
-	kcov_int_t size = PAGE_SIZE / KCOV_ENTRY_SIZE;
+	uint64_t size = PAGE_SIZE / KCOV_ENTRY_SIZE;
 
 	fd = open_kcov();
 
@@ -113,7 +140,7 @@ ATF_TC_WITHOUT_HEAD(kcov_mmap_no_munmap_
 ATF_TC_BODY(kcov_mmap_no_munmap_no_close, tc)
 {
 	int fd;
-	kcov_int_t size = PAGE_SIZE / KCOV_ENTRY_SIZE;
+	uint64_t size = PAGE_SIZE / KCOV_ENTRY_SIZE;
 
 	fd = open_kcov();
 
@@ -129,7 +156,7 @@ static void *
 kcov_mmap_enable_thread(void *data)
 {
 	int fd;
-	kcov_int_t size = PAGE_SIZE / KCOV_ENTRY_SIZE;
+	uint64_t size = PAGE_SIZE / KCOV_ENTRY_SIZE;
 
 	fd = open_kcov();
 	*(int *)data = fd;
@@ -165,7 +192,7 @@ ATF_TC_WITHOUT_HEAD(kcov_enable);
 ATF_TC_BODY(kcov_enable, tc)
 {
 	int fd;
-	kcov_int_t size = PAGE_SIZE / KCOV_ENTRY_SIZE;
+	uint64_t size = PAGE_SIZE / KCOV_ENTRY_SIZE;
 
 	fd = open_kcov();
 
@@ -195,7 +222,7 @@ ATF_TC_WITHOUT_HEAD(kcov_enable_no_disab
 ATF_TC_BODY(kcov_enable_no_disable, tc)
 {
 	int fd;
-	kcov_int_t size = PAGE_SIZE / KCOV_ENTRY_SIZE;
+	uint64_t size = PAGE_SIZE / KCOV_ENTRY_SIZE;
 
 	fd = open_kcov();
 	ATF_REQUIRE(ioctl(fd, KCOV_IOC_SETBUFSIZE, &size) ==0);
@@ -207,7 +234,7 @@ ATF_TC_WITHOUT_HEAD(kcov_enable_no_disab
 ATF_TC_BODY(kcov_enable_no_disable_no_close, tc)
 {
 	int fd;
-	kcov_int_t size = PAGE_SIZE / KCOV_ENTRY_SIZE;
+	uint64_t size = PAGE_SIZE / KCOV_ENTRY_SIZE;
 
 	fd = open_kcov();
 	ATF_REQUIRE(ioctl(fd, KCOV_IOC_SETBUFSIZE, &size) ==0);
@@ -219,7 +246,7 @@ common_head(int *fdp)
 {
 	void *data;
 	int fd;
-	kcov_int_t size = PAGE_SIZE / KCOV_ENTRY_SIZE;
+	uint64_t size = PAGE_SIZE / KCOV_ENTRY_SIZE;
 
 	fd = open_kcov();
 
@@ -264,15 +291,68 @@ ATF_TC_BODY(kcov_basic, tc)
 	common_tail(fd, buf);
 }
 
+ATF_TC_WITHOUT_HEAD(kcov_multienable_on_the_same_thread);
+ATF_TC_BODY(kcov_multienable_on_the_same_thread, tc)
+{
+	kcov_int_t *buf1, *buf2;
+	int fd1, fd2;
+
+	buf1 = common_head(&fd1);
+	buf2 = common_head(&fd2);
+	ATF_REQUIRE_MSG(ioctl(fd1, KCOV_IOC_ENABLE) == 0,
+	    "Unable to enable kcov");
+	ATF_REQUIRE_ERRNO(EBUSY, ioctl(fd2, KCOV_IOC_ENABLE) != 0);
+
+	ATF_REQUIRE_MSG(ioctl(fd1, KCOV_IOC_DISABLE) == 0,
+	    "Unable to disable kcov");
+
+	common_tail(fd1, buf1);
+	common_tail(fd2, buf2);
+}
+
 static void *
-thread_test_helper(void *ptr)
+thread_buffer_access_test_helper(void *ptr)
 {
 	kcov_int_t *buf = ptr;
 
-	KCOV_STORE(buf[0], 0);
-	sleep(0);
-	ATF_REQUIRE_MSG(KCOV_LOAD(buf[0]) == 0,
-	    "Records changed in blocked thread");
+	/* Test mapped buffer access from a custom thread */
+	KCOV_STORE(buf[0], KCOV_LOAD(buf[0]));
+
+	return NULL;
+}
+
+ATF_TC_WITHOUT_HEAD(kcov_buffer_access_from_custom_thread);
+ATF_TC_BODY(kcov_buffer_access_from_custom_thread, tc)
+{
+	pthread_t thread;
+	kcov_int_t *buf;
+	int fd;
+
+	buf = common_head(&fd);
+
+	ATF_REQUIRE_MSG(ioctl(fd, KCOV_IOC_ENABLE) == 0,
+	    "Unable to enable kcov ");
+
+	pthread_create(&thread, NULL, thread_buffer_access_test_helper,
+	    __UNVOLATILE(buf));
+	pthread_join(thread, NULL);
+
+	ATF_REQUIRE_MSG(ioctl(fd, KCOV_IOC_DISABLE) == 0,
+	    "Unable to disable kcov");
+
+	common_tail(fd, buf);
+}
+
+static void *
+thread_test_helper(void *ptr)
+{
+	volatile int i;
+
+	/* It does not matter what operation is in action. */
+	for (i = 0; i < 1000; i++) {
+		if (getpid() == 0)
+			break;
+	}
 
 	return NULL;
 }
@@ -283,24 +363,87 @@ ATF_TC_BODY(kcov_thread, tc)
 	pthread_t thread;
 	kcov_int_t *buf;
 	int fd;
+	volatile int i;
 
 	buf = common_head(&fd);
 
 	ATF_REQUIRE_MSG(ioctl(fd, KCOV_IOC_ENABLE) == 0,
 	    "Unable to enable kcov ");
 
+	/* The thread does something, does not matter what exactly. */
 	pthread_create(&thread, NULL, thread_test_helper, __UNVOLATILE(buf));
+
+	KCOV_STORE(buf[0], 0);
+	for (i = 0; i < 10000; i++)
+		continue;
+	ATF_REQUIRE_EQ_MSG(KCOV_LOAD(buf[0]), 0,
+	    "Records changed in blocked thread");
+
 	pthread_join(thread, NULL);
 
+	ATF_REQUIRE_EQ_MSG(ioctl(fd, KCOV_IOC_DISABLE), 0,
+	    "Unable to disable kcov");
+
+	common_tail(fd, buf);
+}
+
+static void *
+multiple_threads_helper(void *ptr __unused)
+{
+	kcov_int_t *buf;
+	int fd;
+
+	buf = common_head(&fd);
+	ATF_REQUIRE_MSG(ioctl(fd, KCOV_IOC_ENABLE) == 0,
+	    "Unable to enable kcov ");
+
+	KCOV_STORE(buf[0], 0);
+
+	sleep(0);
+	ATF_REQUIRE_MSG(KCOV_LOAD(buf[0]) != 0, "No records found");
+
 	ATF_REQUIRE_MSG(ioctl(fd, KCOV_IOC_DISABLE) == 0,
 	    "Unable to disable kcov");
 
 	common_tail(fd, buf);
+
+	return NULL;
 }
 
+static void
+kcov_multiple_threads(size_t N)
+{
+	pthread_t thread[32];
+	size_t i;
+
+	ATF_REQUIRE(__arraycount(thread) >= N);
+
+	for (i = 0; i < __arraycount(thread); i++)
+		pthread_create(&thread[i], NULL, multiple_threads_helper, NULL);
+
+	for (i = 0; i < __arraycount(thread); i++)
+		pthread_join(thread[i], NULL);
+}
+
+#define KCOV_MULTIPLE_THREADS(n)		\
+ATF_TC_WITHOUT_HEAD(kcov_multiple_threads##n);	\
+ATF_TC_BODY(kcov_multiple_threads##n, tc)	\
+{						\
+						\
+	kcov_multiple_threads(n);		\
+}
+
+KCOV_MULTIPLE_THREADS(2)
+KCOV_MULTIPLE_THREADS(4)
+KCOV_MULTIPLE_THREADS(8)
+KCOV_MULTIPLE_THREADS(16)
+KCOV_MULTIPLE_THREADS(32)
+
 ATF_TP_ADD_TCS(tp)
 {
 
+	ATF_TP_ADD_TC(tp, kcov_multiopen);
+	ATF_TP_ADD_TC(tp, kcov_open_close_open);
 	ATF_TP_ADD_TC(tp, kcov_bufsize);
 	ATF_TP_ADD_TC(tp, kcov_mmap);
 	ATF_TP_ADD_TC(tp, kcov_mmap_no_munmap);
@@ -310,6 +453,13 @@ ATF_TP_ADD_TCS(tp)
 	ATF_TP_ADD_TC(tp, kcov_enable_no_disable_no_close);
 	ATF_TP_ADD_TC(tp, kcov_mmap_enable_thread_close);
 	ATF_TP_ADD_TC(tp, kcov_basic);
+	ATF_TP_ADD_TC(tp, kcov_multienable_on_the_same_thread);
+	ATF_TP_ADD_TC(tp, kcov_buffer_access_from_custom_thread);
 	ATF_TP_ADD_TC(tp, kcov_thread);
+	ATF_TP_ADD_TC(tp, kcov_multiple_threads2);
+	ATF_TP_ADD_TC(tp, kcov_multiple_threads4);
+	ATF_TP_ADD_TC(tp, kcov_multiple_threads8);
+	ATF_TP_ADD_TC(tp, kcov_multiple_threads16);
+	ATF_TP_ADD_TC(tp, kcov_multiple_threads32);
 	return atf_no_error();
 }
