Module Name:    src
Committed By:   maxv
Date:           Thu Apr  4 17:33:47 UTC 2019

Modified Files:
        src/lib/libnvmm: libnvmm.3 libnvmm.c libnvmm_x86.c nvmm.h

Log Message:
Check the GPA permissions too in the Assists, because it is possible that
the guest traps on a page the virtualizer marked as read-only (even if it
appears as read-write in the HVA).


To generate a diff of this commit:
cvs rdiff -u -r1.12 -r1.13 src/lib/libnvmm/libnvmm.3
cvs rdiff -u -r1.7 -r1.8 src/lib/libnvmm/libnvmm.c
cvs rdiff -u -r1.27 -r1.28 src/lib/libnvmm/libnvmm_x86.c
cvs rdiff -u -r1.6 -r1.7 src/lib/libnvmm/nvmm.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/lib/libnvmm/libnvmm.3
diff -u src/lib/libnvmm/libnvmm.3:1.12 src/lib/libnvmm/libnvmm.3:1.13
--- src/lib/libnvmm/libnvmm.3:1.12	Thu Mar 21 20:21:40 2019
+++ src/lib/libnvmm/libnvmm.3	Thu Apr  4 17:33:47 2019
@@ -1,4 +1,4 @@
-.\"	$NetBSD: libnvmm.3,v 1.12 2019/03/21 20:21:40 maxv Exp $
+.\"	$NetBSD: libnvmm.3,v 1.13 2019/04/04 17:33:47 maxv Exp $
 .\"
 .\" Copyright (c) 2018, 2019 The NetBSD Foundation, Inc.
 .\" All rights reserved.
@@ -27,7 +27,7 @@
 .\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 .\" POSSIBILITY OF SUCH DAMAGE.
 .\"
-.Dd March 19, 2019
+.Dd April 4, 2019
 .Dt LIBNVMM 3
 .Os
 .Sh NAME
@@ -77,7 +77,7 @@
     "gvaddr_t gva" "gpaddr_t *gpa" "nvmm_prot_t *prot"
 .Ft int
 .Fn nvmm_gpa_to_hva "struct nvmm_machine *mach" "gpaddr_t gpa" \
-    "uintptr_t *hva"
+    "uintptr_t *hva" "nvmm_prot_t *prot"
 .Ft void
 .Fn nvmm_callbacks_register "const struct nvmm_callbacks *cbs"
 .Ft int
@@ -241,6 +241,8 @@ the guest physical address indicated in
 .Fa gpa
 into a host virtual address returned in
 .Fa hva .
 +The associated page permissions are returned in
+.Fa prot .
 .Fa gpa
 must be page-aligned.
 .Pp

Index: src/lib/libnvmm/libnvmm.c
diff -u src/lib/libnvmm/libnvmm.c:1.7 src/lib/libnvmm/libnvmm.c:1.8
--- src/lib/libnvmm/libnvmm.c:1.7	Thu Mar 21 20:21:40 2019
+++ src/lib/libnvmm/libnvmm.c	Thu Apr  4 17:33:47 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: libnvmm.c,v 1.7 2019/03/21 20:21:40 maxv Exp $	*/
+/*	$NetBSD: libnvmm.c,v 1.8 2019/04/04 17:33:47 maxv Exp $	*/
 
 /*
  * Copyright (c) 2018 The NetBSD Foundation, Inc.
@@ -50,6 +50,7 @@ typedef struct __area {
 	gpaddr_t gpa;
 	uintptr_t hva;
 	size_t size;
+	nvmm_prot_t prot;
 } area_t;
 
 typedef LIST_HEAD(, __area) area_list_t;
@@ -83,11 +84,21 @@ __area_isvalid(struct nvmm_machine *mach
 }
 
 static int
-__area_add(struct nvmm_machine *mach, uintptr_t hva, gpaddr_t gpa, size_t size)
+__area_add(struct nvmm_machine *mach, uintptr_t hva, gpaddr_t gpa, size_t size,
+    int prot)
 {
 	area_list_t *areas = mach->areas;
+	nvmm_prot_t nprot;
 	area_t *area;
 
+	nprot = 0;
+	if (prot & PROT_READ)
+		nprot |= NVMM_PROT_READ;
+	if (prot & PROT_WRITE)
+		nprot |= NVMM_PROT_WRITE;
+	if (prot & PROT_EXEC)
+		nprot |= NVMM_PROT_EXEC;
+
 	if (!__area_isvalid(mach, hva, gpa, size)) {
 		errno = EINVAL;
 		return -1;
@@ -99,6 +110,7 @@ __area_add(struct nvmm_machine *mach, ui
 	area->gpa = gpa;
 	area->hva = hva;
 	area->size = size;
+	area->prot = nprot;
 
 	LIST_INSERT_HEAD(areas, area, list);
 
@@ -383,7 +395,7 @@ nvmm_gpa_map(struct nvmm_machine *mach, 
 		return -1;
 	}
 
-	ret = __area_add(mach, hva, gpa, size);
+	ret = __area_add(mach, hva, gpa, size, prot);
 	if (ret == -1)
 		return -1;
 
@@ -477,7 +489,8 @@ nvmm_hva_unmap(struct nvmm_machine *mach
  */
 
 int
-nvmm_gpa_to_hva(struct nvmm_machine *mach, gpaddr_t gpa, uintptr_t *hva)
+nvmm_gpa_to_hva(struct nvmm_machine *mach, gpaddr_t gpa, uintptr_t *hva,
+    nvmm_prot_t *prot)
 {
 	area_list_t *areas = mach->areas;
 	area_t *ent;
@@ -485,6 +498,7 @@ nvmm_gpa_to_hva(struct nvmm_machine *mac
 	LIST_FOREACH(ent, areas, list) {
 		if (gpa >= ent->gpa && gpa < ent->gpa + ent->size) {
 			*hva = ent->hva + (gpa - ent->gpa);
+			*prot = ent->prot;
 			return 0;
 		}
 	}

Index: src/lib/libnvmm/libnvmm_x86.c
diff -u src/lib/libnvmm/libnvmm_x86.c:1.27 src/lib/libnvmm/libnvmm_x86.c:1.28
--- src/lib/libnvmm/libnvmm_x86.c:1.27	Thu Mar  7 15:47:34 2019
+++ src/lib/libnvmm/libnvmm_x86.c	Thu Apr  4 17:33:47 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: libnvmm_x86.c,v 1.27 2019/03/07 15:47:34 maxv Exp $	*/
+/*	$NetBSD: libnvmm_x86.c,v 1.28 2019/04/04 17:33:47 maxv Exp $	*/
 
 /*
  * Copyright (c) 2018 The NetBSD Foundation, Inc.
@@ -123,13 +123,14 @@ x86_gva_to_gpa_32bit(struct nvmm_machine
 	gpaddr_t L2gpa, L1gpa;
 	uintptr_t L2hva, L1hva;
 	pte_32bit_t *pdir, pte;
+	nvmm_prot_t pageprot;
 
 	/* We begin with an RWXU access. */
 	*prot = NVMM_PROT_ALL;
 
 	/* Parse L2. */
 	L2gpa = (cr3 & CR3_FRAME_32BIT);
-	if (nvmm_gpa_to_hva(mach, L2gpa, &L2hva) == -1)
+	if (nvmm_gpa_to_hva(mach, L2gpa, &L2hva, &pageprot) == -1)
 		return -1;
 	pdir = (pte_32bit_t *)L2hva;
 	pte = pdir[pte32_l2idx(gva)];
@@ -149,7 +150,7 @@ x86_gva_to_gpa_32bit(struct nvmm_machine
 
 	/* Parse L1. */
 	L1gpa = (pte & PG_FRAME);
-	if (nvmm_gpa_to_hva(mach, L1gpa, &L1hva) == -1)
+	if (nvmm_gpa_to_hva(mach, L1gpa, &L1hva, &pageprot) == -1)
 		return -1;
 	pdir = (pte_32bit_t *)L1hva;
 	pte = pdir[pte32_l1idx(gva)];
@@ -195,13 +196,14 @@ x86_gva_to_gpa_32bit_pae(struct nvmm_mac
 	gpaddr_t L3gpa, L2gpa, L1gpa;
 	uintptr_t L3hva, L2hva, L1hva;
 	pte_32bit_pae_t *pdir, pte;
+	nvmm_prot_t pageprot;
 
 	/* We begin with an RWXU access. */
 	*prot = NVMM_PROT_ALL;
 
 	/* Parse L3. */
 	L3gpa = (cr3 & CR3_FRAME_32BIT_PAE);
-	if (nvmm_gpa_to_hva(mach, L3gpa, &L3hva) == -1)
+	if (nvmm_gpa_to_hva(mach, L3gpa, &L3hva, &pageprot) == -1)
 		return -1;
 	pdir = (pte_32bit_pae_t *)L3hva;
 	pte = pdir[pte32_pae_l3idx(gva)];
@@ -214,7 +216,7 @@ x86_gva_to_gpa_32bit_pae(struct nvmm_mac
 
 	/* Parse L2. */
 	L2gpa = (pte & PG_FRAME);
-	if (nvmm_gpa_to_hva(mach, L2gpa, &L2hva) == -1)
+	if (nvmm_gpa_to_hva(mach, L2gpa, &L2hva, &pageprot) == -1)
 		return -1;
 	pdir = (pte_32bit_pae_t *)L2hva;
 	pte = pdir[pte32_pae_l2idx(gva)];
@@ -234,7 +236,7 @@ x86_gva_to_gpa_32bit_pae(struct nvmm_mac
 
 	/* Parse L1. */
 	L1gpa = (pte & PG_FRAME);
-	if (nvmm_gpa_to_hva(mach, L1gpa, &L1hva) == -1)
+	if (nvmm_gpa_to_hva(mach, L1gpa, &L1hva, &pageprot) == -1)
 		return -1;
 	pdir = (pte_32bit_pae_t *)L1hva;
 	pte = pdir[pte32_pae_l1idx(gva)];
@@ -294,6 +296,7 @@ x86_gva_to_gpa_64bit(struct nvmm_machine
 	gpaddr_t L4gpa, L3gpa, L2gpa, L1gpa;
 	uintptr_t L4hva, L3hva, L2hva, L1hva;
 	pte_64bit_t *pdir, pte;
+	nvmm_prot_t pageprot;
 
 	/* We begin with an RWXU access. */
 	*prot = NVMM_PROT_ALL;
@@ -303,7 +306,7 @@ x86_gva_to_gpa_64bit(struct nvmm_machine
 
 	/* Parse L4. */
 	L4gpa = (cr3 & CR3_FRAME_64BIT);
-	if (nvmm_gpa_to_hva(mach, L4gpa, &L4hva) == -1)
+	if (nvmm_gpa_to_hva(mach, L4gpa, &L4hva, &pageprot) == -1)
 		return -1;
 	pdir = (pte_64bit_t *)L4hva;
 	pte = pdir[pte64_l4idx(gva)];
@@ -320,7 +323,7 @@ x86_gva_to_gpa_64bit(struct nvmm_machine
 
 	/* Parse L3. */
 	L3gpa = (pte & PG_FRAME);
-	if (nvmm_gpa_to_hva(mach, L3gpa, &L3hva) == -1)
+	if (nvmm_gpa_to_hva(mach, L3gpa, &L3hva, &pageprot) == -1)
 		return -1;
 	pdir = (pte_64bit_t *)L3hva;
 	pte = pdir[pte64_l3idx(gva)];
@@ -340,7 +343,7 @@ x86_gva_to_gpa_64bit(struct nvmm_machine
 
 	/* Parse L2. */
 	L2gpa = (pte & PG_FRAME);
-	if (nvmm_gpa_to_hva(mach, L2gpa, &L2hva) == -1)
+	if (nvmm_gpa_to_hva(mach, L2gpa, &L2hva, &pageprot) == -1)
 		return -1;
 	pdir = (pte_64bit_t *)L2hva;
 	pte = pdir[pte64_l2idx(gva)];
@@ -360,7 +363,7 @@ x86_gva_to_gpa_64bit(struct nvmm_machine
 
 	/* Parse L1. */
 	L1gpa = (pte & PG_FRAME);
-	if (nvmm_gpa_to_hva(mach, L1gpa, &L1hva) == -1)
+	if (nvmm_gpa_to_hva(mach, L1gpa, &L1hva, &pageprot) == -1)
 		return -1;
 	pdir = (pte_64bit_t *)L1hva;
 	pte = pdir[pte64_l1idx(gva)];
@@ -568,7 +571,7 @@ read_guest_memory(struct nvmm_machine *m
 	}
 	size -= remain;
 
-	ret = nvmm_gpa_to_hva(mach, gpa, &hva);
+	ret = nvmm_gpa_to_hva(mach, gpa, &hva, &prot);
 	is_mmio = (ret == -1);
 
 	if (is_mmio) {
@@ -578,6 +581,10 @@ read_guest_memory(struct nvmm_machine *m
 		mem.size = size;
 		(*__callbacks.mem)(&mem);
 	} else {
+		if (__predict_false(!(prot & NVMM_PROT_READ))) {
+			errno = EFAULT;
+			return -1;
+		}
 		memcpy(data, (uint8_t *)hva, size);
 	}
 
@@ -618,7 +625,7 @@ write_guest_memory(struct nvmm_machine *
 	}
 	size -= remain;
 
-	ret = nvmm_gpa_to_hva(mach, gpa, &hva);
+	ret = nvmm_gpa_to_hva(mach, gpa, &hva, &prot);
 	is_mmio = (ret == -1);
 
 	if (is_mmio) {
@@ -628,6 +635,10 @@ write_guest_memory(struct nvmm_machine *
 		mem.size = size;
 		(*__callbacks.mem)(&mem);
 	} else {
+		if (__predict_false(!(prot & NVMM_PROT_WRITE))) {
+			errno = EFAULT;
+			return -1;
+		}
 		memcpy((uint8_t *)hva, data, size);
 	}
 

Index: src/lib/libnvmm/nvmm.h
diff -u src/lib/libnvmm/nvmm.h:1.6 src/lib/libnvmm/nvmm.h:1.7
--- src/lib/libnvmm/nvmm.h:1.6	Mon Jan  7 16:30:25 2019
+++ src/lib/libnvmm/nvmm.h	Thu Apr  4 17:33:47 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: nvmm.h,v 1.6 2019/01/07 16:30:25 maxv Exp $	*/
+/*	$NetBSD: nvmm.h,v 1.7 2019/04/04 17:33:47 maxv Exp $	*/
 
 /*
  * Copyright (c) 2018 The NetBSD Foundation, Inc.
@@ -92,7 +92,8 @@ int nvmm_hva_unmap(struct nvmm_machine *
 
 int nvmm_gva_to_gpa(struct nvmm_machine *, nvmm_cpuid_t, gvaddr_t, gpaddr_t *,
     nvmm_prot_t *);
-int nvmm_gpa_to_hva(struct nvmm_machine *, gpaddr_t, uintptr_t *);
+int nvmm_gpa_to_hva(struct nvmm_machine *, gpaddr_t, uintptr_t *,
+    nvmm_prot_t *);
 
 int nvmm_assist_io(struct nvmm_machine *, nvmm_cpuid_t, struct nvmm_exit *);
 int nvmm_assist_mem(struct nvmm_machine *, nvmm_cpuid_t, struct nvmm_exit *);

Reply via email to