Module Name:    src
Committed By:   maxv
Date:           Wed Oct 23 07:01:12 UTC 2019

Modified Files:
        src/lib/libnvmm: libnvmm.c libnvmm_x86.c nvmm.h
        src/sys/dev/nvmm: nvmm.c nvmm.h nvmm_internal.h nvmm_ioctl.h
        src/sys/dev/nvmm/x86: nvmm_x86.h nvmm_x86_svm.c nvmm_x86_vmx.c
        src/tests/lib/libnvmm: h_io_assist.c h_mem_assist.c

Log Message:
Miscellaneous changes in NVMM, to address several inconsistencies and
issues in the libnvmm API.

 - Rename NVMM_CAPABILITY_VERSION to NVMM_KERN_VERSION, and check it in
   libnvmm. Introduce NVMM_USER_VERSION, for future use.

 - In libnvmm, open "/dev/nvmm" as read-only and with O_CLOEXEC. This is to
   avoid sharing the VMs with the children if the process forks. In the
   NVMM driver, force O_CLOEXEC on open().

 - Rename the following things for consistency:
       nvmm_exit*              -> nvmm_vcpu_exit*
       nvmm_event*             -> nvmm_vcpu_event*
       NVMM_EXIT_*             -> NVMM_VCPU_EXIT_*
       NVMM_EVENT_INTERRUPT_HW -> NVMM_VCPU_EVENT_INTR
       NVMM_EVENT_EXCEPTION    -> NVMM_VCPU_EVENT_EXCP
   Delete NVMM_EVENT_INTERRUPT_SW, unused already.

 - Slightly reorganize the MI/MD definitions, for internal clarity.

 - Split NVMM_VCPU_EXIT_MSR in two: NVMM_VCPU_EXIT_{RD,WR}MSR. Also provide
   separate u.rdmsr and u.wrmsr fields. This is more consistent with the
   other exit reasons.

 - Change the types of several variables:
       event.type                  enum -> u_int
       event.vector                uint64_t -> uint8_t
       exit.u.*msr.msr:            uint64_t -> uint32_t
       exit.u.io.type:             enum -> bool
       exit.u.io.seg:              int -> int8_t
       cap.arch.mxcsr_mask:        uint64_t -> uint32_t
       cap.arch.conf_cpuid_maxops: uint64_t -> uint32_t

 - Delete NVMM_VCPU_EXIT_MWAIT_COND, it is AMD-only and confusing, and we
   already intercept 'monitor' so it is never armed.

 - Introduce vmx_exit_insn() for NVMM-Intel, similar to svm_exit_insn().
   The 'npc' field wasn't getting filled properly during certain VMEXITs.

 - Introduce nvmm_vcpu_configure(). Similar to nvmm_machine_configure(),
   but as its name indicates, the configuration is per-VCPU and not per-VM.
   Migrate and rename NVMM_MACH_CONF_X86_CPUID to NVMM_VCPU_CONF_CPUID.
   This becomes per-VCPU, which makes more sense than per-VM.

 - Extend the NVMM_VCPU_CONF_CPUID conf to allow triggering VMEXITs on
   specific leaves. Until now we could only mask the leaves. A uint32_t
   is added in the structure:
        uint32_t mask:1;
        uint32_t exit:1;
        uint32_t rsvd:30;
   The first two bits select the desired behavior on the leaf. Specifying
   zero on both resets the leaf to the default behavior. The new
   NVMM_VCPU_EXIT_CPUID exit reason is added.


To generate a diff of this commit:
cvs rdiff -u -r1.14 -r1.15 src/lib/libnvmm/libnvmm.c
cvs rdiff -u -r1.35 -r1.36 src/lib/libnvmm/libnvmm_x86.c
cvs rdiff -u -r1.12 -r1.13 src/lib/libnvmm/nvmm.h
cvs rdiff -u -r1.22 -r1.23 src/sys/dev/nvmm/nvmm.c
cvs rdiff -u -r1.10 -r1.11 src/sys/dev/nvmm/nvmm.h
cvs rdiff -u -r1.12 -r1.13 src/sys/dev/nvmm/nvmm_internal.h
cvs rdiff -u -r1.7 -r1.8 src/sys/dev/nvmm/nvmm_ioctl.h
cvs rdiff -u -r1.15 -r1.16 src/sys/dev/nvmm/x86/nvmm_x86.h
cvs rdiff -u -r1.50 -r1.51 src/sys/dev/nvmm/x86/nvmm_x86_svm.c
cvs rdiff -u -r1.39 -r1.40 src/sys/dev/nvmm/x86/nvmm_x86_vmx.c
cvs rdiff -u -r1.8 -r1.9 src/tests/lib/libnvmm/h_io_assist.c
cvs rdiff -u -r1.14 -r1.15 src/tests/lib/libnvmm/h_mem_assist.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/lib/libnvmm/libnvmm.c
diff -u src/lib/libnvmm/libnvmm.c:1.14 src/lib/libnvmm/libnvmm.c:1.15
--- src/lib/libnvmm/libnvmm.c:1.14	Sat Jun  8 07:27:44 2019
+++ src/lib/libnvmm/libnvmm.c	Wed Oct 23 07:01:11 2019
@@ -1,7 +1,7 @@
-/*	$NetBSD: libnvmm.c,v 1.14 2019/06/08 07:27:44 maxv Exp $	*/
+/*	$NetBSD: libnvmm.c,v 1.15 2019/10/23 07:01:11 maxv Exp $	*/
 
 /*
- * Copyright (c) 2018 The NetBSD Foundation, Inc.
+ * Copyright (c) 2018-2019 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
@@ -161,7 +161,7 @@ nvmm_init(void)
 {
 	if (nvmm_fd != -1)
 		return 0;
-	nvmm_fd = open("/dev/nvmm", O_RDWR);
+	nvmm_fd = open("/dev/nvmm", O_RDONLY | O_CLOEXEC);
 	if (nvmm_fd == -1)
 		return -1;
 	if (nvmm_capability(&__capability) == -1) {
@@ -169,6 +169,13 @@ nvmm_init(void)
 		nvmm_fd = -1;
 		return -1;
 	}
+	if (__capability.version != NVMM_KERN_VERSION) {
+		close(nvmm_fd);
+		nvmm_fd = -1;
+		errno = EPROGMISMATCH;
+		return -1;
+	}
+
 	return 0;
 }
 
@@ -322,6 +329,25 @@ nvmm_vcpu_destroy(struct nvmm_machine *m
 }
 
 int
+nvmm_vcpu_configure(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu,
+    uint64_t op, void *conf)
+{
+	struct nvmm_ioc_vcpu_configure args;
+	int ret;
+
+	args.machid = mach->machid;
+	args.cpuid = vcpu->cpuid;
+	args.op = op;
+	args.conf = conf;
+
+	ret = ioctl(nvmm_fd, NVMM_IOC_VCPU_CONFIGURE, &args);
+	if (ret == -1)
+		return -1;
+
+	return 0;
+}
+
+int
 nvmm_vcpu_setstate(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu,
     uint64_t flags)
 {

Index: src/lib/libnvmm/libnvmm_x86.c
diff -u src/lib/libnvmm/libnvmm_x86.c:1.35 src/lib/libnvmm/libnvmm_x86.c:1.36
--- src/lib/libnvmm/libnvmm_x86.c:1.35	Sat Oct 19 19:45:10 2019
+++ src/lib/libnvmm/libnvmm_x86.c	Wed Oct 23 07:01:11 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: libnvmm_x86.c,v 1.35 2019/10/19 19:45:10 maxv Exp $	*/
+/*	$NetBSD: libnvmm_x86.c,v 1.36 2019/10/23 07:01:11 maxv Exp $	*/
 
 /*
  * Copyright (c) 2018-2019 The NetBSD Foundation, Inc.
@@ -706,7 +706,7 @@ int
 nvmm_assist_io(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
 {
 	struct nvmm_x64_state *state = vcpu->state;
-	struct nvmm_exit *exit = vcpu->exit;
+	struct nvmm_vcpu_exit *exit = vcpu->exit;
 	struct nvmm_io io;
 	uint64_t cnt = 0; /* GCC */
 	uint8_t iobuf[8];
@@ -716,13 +716,13 @@ nvmm_assist_io(struct nvmm_machine *mach
 	int ret, seg;
 	bool psld = false;
 
-	if (__predict_false(exit->reason != NVMM_EXIT_IO)) {
+	if (__predict_false(exit->reason != NVMM_VCPU_EXIT_IO)) {
 		errno = EINVAL;
 		return -1;
 	}
 
 	io.port = exit->u.io.port;
-	io.in = (exit->u.io.type == NVMM_EXIT_IO_IN);
+	io.in = exit->u.io.in;
 	io.size = exit->u.io.operand_size;
 	io.data = iobuf;
 
@@ -3107,7 +3107,7 @@ fetch_segment(struct nvmm_machine *mach,
 
 static int
 fetch_instruction(struct nvmm_machine *mach, struct nvmm_x64_state *state,
-    struct nvmm_exit *exit)
+    struct nvmm_vcpu_exit *exit)
 {
 	size_t fetchsize;
 	gvaddr_t gva;
@@ -3170,7 +3170,7 @@ assist_mem_double(struct nvmm_machine *m
 
 static int
 assist_mem_single(struct nvmm_machine *mach, struct nvmm_x64_state *state,
-    struct x86_instr *instr, struct nvmm_exit *exit)
+    struct x86_instr *instr, struct nvmm_vcpu_exit *exit)
 {
 	struct nvmm_mem mem;
 	uint8_t membuf[8];
@@ -3292,12 +3292,12 @@ int
 nvmm_assist_mem(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
 {
 	struct nvmm_x64_state *state = vcpu->state;
-	struct nvmm_exit *exit = vcpu->exit;
+	struct nvmm_vcpu_exit *exit = vcpu->exit;
 	struct x86_instr instr;
 	uint64_t cnt = 0; /* GCC */
 	int ret;
 
-	if (__predict_false(exit->reason != NVMM_EXIT_MEMORY)) {
+	if (__predict_false(exit->reason != NVMM_VCPU_EXIT_MEMORY)) {
 		errno = EINVAL;
 		return -1;
 	}

Index: src/lib/libnvmm/nvmm.h
diff -u src/lib/libnvmm/nvmm.h:1.12 src/lib/libnvmm/nvmm.h:1.13
--- src/lib/libnvmm/nvmm.h:1.12	Sat Jun  8 07:27:44 2019
+++ src/lib/libnvmm/nvmm.h	Wed Oct 23 07:01:11 2019
@@ -1,7 +1,7 @@
-/*	$NetBSD: nvmm.h,v 1.12 2019/06/08 07:27:44 maxv Exp $	*/
+/*	$NetBSD: nvmm.h,v 1.13 2019/10/23 07:01:11 maxv Exp $	*/
 
 /*
- * Copyright (c) 2018 The NetBSD Foundation, Inc.
+ * Copyright (c) 2018-2019 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
@@ -38,6 +38,8 @@
 #include <dev/nvmm/nvmm.h>
 #include <dev/nvmm/nvmm_ioctl.h>
 
+#define NVMM_USER_VERSION	1
+
 struct nvmm_io {
 	uint64_t port;
 	bool in;
@@ -67,8 +69,8 @@ struct nvmm_machine {
 struct nvmm_vcpu {
 	nvmm_cpuid_t cpuid;
 	struct nvmm_vcpu_state *state;
-	struct nvmm_event *event;
-	struct nvmm_exit *exit;
+	struct nvmm_vcpu_event *event;
+	struct nvmm_vcpu_exit *exit;
 };
 
 #define NVMM_MACH_CONF_CALLBACKS	NVMM_MACH_CONF_LIBNVMM_BEGIN
@@ -88,6 +90,8 @@ int nvmm_machine_configure(struct nvmm_m
 
 int nvmm_vcpu_create(struct nvmm_machine *, nvmm_cpuid_t, struct nvmm_vcpu *);
 int nvmm_vcpu_destroy(struct nvmm_machine *, struct nvmm_vcpu *);
+int nvmm_vcpu_configure(struct nvmm_machine *, struct nvmm_vcpu *, uint64_t,
+    void *);
 int nvmm_vcpu_setstate(struct nvmm_machine *, struct nvmm_vcpu *, uint64_t);
 int nvmm_vcpu_getstate(struct nvmm_machine *, struct nvmm_vcpu *, uint64_t);
 int nvmm_vcpu_inject(struct nvmm_machine *, struct nvmm_vcpu *);

Index: src/sys/dev/nvmm/nvmm.c
diff -u src/sys/dev/nvmm/nvmm.c:1.22 src/sys/dev/nvmm/nvmm.c:1.23
--- src/sys/dev/nvmm/nvmm.c:1.22	Sat Jul  6 05:13:10 2019
+++ src/sys/dev/nvmm/nvmm.c	Wed Oct 23 07:01:11 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: nvmm.c,v 1.22 2019/07/06 05:13:10 maxv Exp $	*/
+/*	$NetBSD: nvmm.c,v 1.23 2019/10/23 07:01:11 maxv Exp $	*/
 
 /*
  * Copyright (c) 2018-2019 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: nvmm.c,v 1.22 2019/07/06 05:13:10 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: nvmm.c,v 1.23 2019/10/23 07:01:11 maxv Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -247,7 +247,7 @@ nvmm_kill_machines(struct nvmm_owner *ow
 static int
 nvmm_capability(struct nvmm_owner *owner, struct nvmm_ioc_capability *args)
 {
-	args->cap.version = NVMM_CAPABILITY_VERSION;
+	args->cap.version = NVMM_KERN_VERSION;
 	args->cap.state_size = nvmm_impl->state_size;
 	args->cap.max_machines = NVMM_MAX_MACHINES;
 	args->cap.max_vcpus = NVMM_MAX_VCPUS;
@@ -343,11 +343,11 @@ nvmm_machine_configure(struct nvmm_owner
 	int error;
 
 	op = NVMM_MACH_CONF_MD(args->op);
-	if (__predict_false(op >= nvmm_impl->conf_max)) {
+	if (__predict_false(op >= nvmm_impl->mach_conf_max)) {
 		return EINVAL;
 	}
 
-	allocsz = nvmm_impl->conf_sizes[op];
+	allocsz = nvmm_impl->mach_conf_sizes[op];
 	data = kmem_alloc(allocsz, KM_SLEEP);
 
 	error = nvmm_machine_get(owner, args->machid, &mach, true);
@@ -443,6 +443,51 @@ out:
 }
 
 static int
+nvmm_vcpu_configure(struct nvmm_owner *owner,
+    struct nvmm_ioc_vcpu_configure *args)
+{
+	struct nvmm_machine *mach;
+	struct nvmm_cpu *vcpu;
+	size_t allocsz;
+	uint64_t op;
+	void *data;
+	int error;
+
+	op = NVMM_VCPU_CONF_MD(args->op);
+	if (__predict_false(op >= nvmm_impl->vcpu_conf_max))
+		return EINVAL;
+
+	allocsz = nvmm_impl->vcpu_conf_sizes[op];
+	data = kmem_alloc(allocsz, KM_SLEEP);
+
+	error = nvmm_machine_get(owner, args->machid, &mach, false);
+	if (error) {
+		kmem_free(data, allocsz);
+		return error;
+	}
+
+	error = nvmm_vcpu_get(mach, args->cpuid, &vcpu);
+	if (error) {
+		nvmm_machine_put(mach);
+		kmem_free(data, allocsz);
+		return error;
+	}
+
+	error = copyin(args->conf, data, allocsz);
+	if (error) {
+		goto out;
+	}
+
+	error = (*nvmm_impl->vcpu_configure)(vcpu, op, data);
+
+out:
+	nvmm_vcpu_put(vcpu);
+	nvmm_machine_put(mach);
+	kmem_free(data, allocsz);
+	return error;
+}
+
+static int
 nvmm_vcpu_setstate(struct nvmm_owner *owner,
     struct nvmm_ioc_vcpu_setstate *args)
 {
@@ -515,7 +560,7 @@ out:
 
 static int
 nvmm_do_vcpu_run(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
-    struct nvmm_exit *exit)
+    struct nvmm_vcpu_exit *exit)
 {
 	struct vmspace *vm = mach->vm;
 	int ret;
@@ -526,7 +571,7 @@ nvmm_do_vcpu_run(struct nvmm_machine *ma
 			return ret;
 		}
 
-		if (__predict_true(exit->reason != NVMM_EXIT_MEMORY)) {
+		if (__predict_true(exit->reason != NVMM_VCPU_EXIT_MEMORY)) {
 			break;
 		}
 		if (exit->u.mem.gpa >= mach->gpa_end) {
@@ -996,6 +1041,8 @@ nvmm_open(dev_t dev, int flags, int type
 
 	if (minor(dev) != 0)
 		return EXDEV;
+	if (!(flags & O_CLOEXEC))
+		return EINVAL;
 	error = fd_allocfile(&fp, &fd);
 	if (error)
 		return error;
@@ -1073,6 +1120,8 @@ nvmm_ioctl(file_t *fp, u_long cmd, void 
 		return nvmm_vcpu_create(owner, data);
 	case NVMM_IOC_VCPU_DESTROY:
 		return nvmm_vcpu_destroy(owner, data);
+	case NVMM_IOC_VCPU_CONFIGURE:
+		return nvmm_vcpu_configure(owner, data);
 	case NVMM_IOC_VCPU_SETSTATE:
 		return nvmm_vcpu_setstate(owner, data);
 	case NVMM_IOC_VCPU_GETSTATE:

Index: src/sys/dev/nvmm/nvmm.h
diff -u src/sys/dev/nvmm/nvmm.h:1.10 src/sys/dev/nvmm/nvmm.h:1.11
--- src/sys/dev/nvmm/nvmm.h:1.10	Sat May 11 07:31:56 2019
+++ src/sys/dev/nvmm/nvmm.h	Wed Oct 23 07:01:11 2019
@@ -1,7 +1,7 @@
-/*	$NetBSD: nvmm.h,v 1.10 2019/05/11 07:31:56 maxv Exp $	*/
+/*	$NetBSD: nvmm.h,v 1.11 2019/10/23 07:01:11 maxv Exp $	*/
 
 /*
- * Copyright (c) 2018 The NetBSD Foundation, Inc.
+ * Copyright (c) 2018-2019 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
@@ -48,42 +48,7 @@ typedef uint32_t	nvmm_cpuid_t;
 #include <dev/nvmm/x86/nvmm_x86.h>
 #endif
 
-#define NVMM_EXIT_NONE		0x0000000000000000ULL
-#define NVMM_EXIT_MEMORY	0x0000000000000001ULL
-#define NVMM_EXIT_IO		0x0000000000000002ULL
-#define NVMM_EXIT_MSR		0x0000000000000003ULL /* x86 only? */
-#define NVMM_EXIT_INT_READY	0x0000000000000004ULL
-#define NVMM_EXIT_NMI_READY	0x0000000000000005ULL
-#define NVMM_EXIT_HALTED	0x0000000000000006ULL
-#define NVMM_EXIT_SHUTDOWN	0x0000000000000007ULL
-/* Range 0x1000-0x10000 is MD. */
-#define NVMM_EXIT_INVALID	0xFFFFFFFFFFFFFFFFULL
-
-struct nvmm_exit {
-	uint64_t reason;
-	union nvmm_exit_md u;
-	uint64_t exitstate[8];
-};
-
-enum nvmm_event_type {
-	NVMM_EVENT_INTERRUPT_HW,
-	NVMM_EVENT_INTERRUPT_SW,
-	NVMM_EVENT_EXCEPTION
-};
-
-struct nvmm_event {
-	enum nvmm_event_type type;
-	uint64_t vector;
-	union {
-		/* NVMM_EVENT_INTERRUPT_HW */
-		uint8_t prio;
-
-		/* NVMM_EVENT_EXCEPTION */
-		uint64_t error;
-	} u;
-};
-
-#define NVMM_CAPABILITY_VERSION		1
+#define NVMM_KERN_VERSION		1
 
 struct nvmm_capability {
 	uint64_t version;
@@ -94,13 +59,18 @@ struct nvmm_capability {
 	struct nvmm_cap_md arch;
 };
 
-/* Configuration slots. */
+/* Machine configuration slots. */
 #define NVMM_MACH_CONF_LIBNVMM_BEGIN	0
 #define NVMM_MACH_CONF_MI_BEGIN		100
 #define NVMM_MACH_CONF_MD_BEGIN		200
-
 #define NVMM_MACH_CONF_MD(op)		(op - NVMM_MACH_CONF_MD_BEGIN)
 
+/* VCPU configuration slots. */
+#define NVMM_VCPU_CONF_LIBNVMM_BEGIN	0
+#define NVMM_VCPU_CONF_MI_BEGIN		100
+#define NVMM_VCPU_CONF_MD_BEGIN		200
+#define NVMM_VCPU_CONF_MD(op)		(op - NVMM_VCPU_CONF_MD_BEGIN)
+
 struct nvmm_comm_page {
 	/* State. */
 	uint64_t state_wanted;
@@ -110,7 +80,7 @@ struct nvmm_comm_page {
 
 	/* Event. */
 	bool event_commit;
-	struct nvmm_event event;
+	struct nvmm_vcpu_event event;
 };
 
 /*

Index: src/sys/dev/nvmm/nvmm_internal.h
diff -u src/sys/dev/nvmm/nvmm_internal.h:1.12 src/sys/dev/nvmm/nvmm_internal.h:1.13
--- src/sys/dev/nvmm/nvmm_internal.h:1.12	Sat Jul  6 05:13:10 2019
+++ src/sys/dev/nvmm/nvmm_internal.h	Wed Oct 23 07:01:11 2019
@@ -1,7 +1,7 @@
-/*	$NetBSD: nvmm_internal.h,v 1.12 2019/07/06 05:13:10 maxv Exp $	*/
+/*	$NetBSD: nvmm_internal.h,v 1.13 2019/10/23 07:01:11 maxv Exp $	*/
 
 /*
- * Copyright (c) 2018 The NetBSD Foundation, Inc.
+ * Copyright (c) 2018-2019 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
@@ -95,8 +95,12 @@ struct nvmm_impl {
 	void (*fini)(void);
 	void (*capability)(struct nvmm_capability *);
 
-	size_t conf_max;
-	const size_t *conf_sizes;
+	size_t mach_conf_max;
+	const size_t *mach_conf_sizes;
+
+	size_t vcpu_conf_max;
+	const size_t *vcpu_conf_sizes;
+
 	size_t state_size;
 
 	void (*machine_create)(struct nvmm_machine *);
@@ -105,11 +109,12 @@ struct nvmm_impl {
 
 	int (*vcpu_create)(struct nvmm_machine *, struct nvmm_cpu *);
 	void (*vcpu_destroy)(struct nvmm_machine *, struct nvmm_cpu *);
+	int (*vcpu_configure)(struct nvmm_cpu *, uint64_t, void *);
 	void (*vcpu_setstate)(struct nvmm_cpu *);
 	void (*vcpu_getstate)(struct nvmm_cpu *);
 	int (*vcpu_inject)(struct nvmm_cpu *);
 	int (*vcpu_run)(struct nvmm_machine *, struct nvmm_cpu *,
-	    struct nvmm_exit *);
+	    struct nvmm_vcpu_exit *);
 };
 
 extern const struct nvmm_impl nvmm_x86_svm;

Index: src/sys/dev/nvmm/nvmm_ioctl.h
diff -u src/sys/dev/nvmm/nvmm_ioctl.h:1.7 src/sys/dev/nvmm/nvmm_ioctl.h:1.8
--- src/sys/dev/nvmm/nvmm_ioctl.h:1.7	Wed May  1 09:20:21 2019
+++ src/sys/dev/nvmm/nvmm_ioctl.h	Wed Oct 23 07:01:11 2019
@@ -1,7 +1,7 @@
-/*	$NetBSD: nvmm_ioctl.h,v 1.7 2019/05/01 09:20:21 maxv Exp $	*/
+/*	$NetBSD: nvmm_ioctl.h,v 1.8 2019/10/23 07:01:11 maxv Exp $	*/
 
 /*
- * Copyright (c) 2018 The NetBSD Foundation, Inc.
+ * Copyright (c) 2018-2019 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
@@ -62,6 +62,13 @@ struct nvmm_ioc_vcpu_destroy {
 	nvmm_cpuid_t cpuid;
 };
 
+struct nvmm_ioc_vcpu_configure {
+	nvmm_machid_t machid;
+	nvmm_cpuid_t cpuid;
+	uint64_t op;
+	void *conf;
+};
+
 struct nvmm_ioc_vcpu_setstate {
 	nvmm_machid_t machid;
 	nvmm_cpuid_t cpuid;
@@ -82,7 +89,7 @@ struct nvmm_ioc_vcpu_run {
 	nvmm_machid_t machid;
 	nvmm_cpuid_t cpuid;
 	/* output */
-	struct nvmm_exit exit;
+	struct nvmm_vcpu_exit exit;
 };
 
 struct nvmm_ioc_hva_map {
@@ -134,14 +141,15 @@ struct nvmm_ioc_ctl {
 #define NVMM_IOC_MACHINE_CONFIGURE	_IOW ('N',  3, struct nvmm_ioc_machine_configure)
 #define NVMM_IOC_VCPU_CREATE		_IOW ('N',  4, struct nvmm_ioc_vcpu_create)
 #define NVMM_IOC_VCPU_DESTROY		_IOW ('N',  5, struct nvmm_ioc_vcpu_destroy)
-#define NVMM_IOC_VCPU_SETSTATE		_IOW ('N',  6, struct nvmm_ioc_vcpu_setstate)
-#define NVMM_IOC_VCPU_GETSTATE		_IOW ('N',  7, struct nvmm_ioc_vcpu_getstate)
-#define NVMM_IOC_VCPU_INJECT		_IOW ('N',  8, struct nvmm_ioc_vcpu_inject)
-#define NVMM_IOC_VCPU_RUN		_IOWR('N',  9, struct nvmm_ioc_vcpu_run)
-#define NVMM_IOC_GPA_MAP		_IOW ('N', 10, struct nvmm_ioc_gpa_map)
-#define NVMM_IOC_GPA_UNMAP		_IOW ('N', 11, struct nvmm_ioc_gpa_unmap)
-#define NVMM_IOC_HVA_MAP		_IOW ('N', 12, struct nvmm_ioc_hva_map)
-#define NVMM_IOC_HVA_UNMAP		_IOW ('N', 13, struct nvmm_ioc_hva_unmap)
+#define NVMM_IOC_VCPU_CONFIGURE		_IOW ('N',  6, struct nvmm_ioc_vcpu_configure)
+#define NVMM_IOC_VCPU_SETSTATE		_IOW ('N',  7, struct nvmm_ioc_vcpu_setstate)
+#define NVMM_IOC_VCPU_GETSTATE		_IOW ('N',  8, struct nvmm_ioc_vcpu_getstate)
+#define NVMM_IOC_VCPU_INJECT		_IOW ('N',  9, struct nvmm_ioc_vcpu_inject)
+#define NVMM_IOC_VCPU_RUN		_IOWR('N', 10, struct nvmm_ioc_vcpu_run)
+#define NVMM_IOC_GPA_MAP		_IOW ('N', 11, struct nvmm_ioc_gpa_map)
+#define NVMM_IOC_GPA_UNMAP		_IOW ('N', 12, struct nvmm_ioc_gpa_unmap)
+#define NVMM_IOC_HVA_MAP		_IOW ('N', 13, struct nvmm_ioc_hva_map)
+#define NVMM_IOC_HVA_UNMAP		_IOW ('N', 14, struct nvmm_ioc_hva_unmap)
 
 #define NVMM_IOC_CTL			_IOW ('N', 20, struct nvmm_ioc_ctl)
 

Index: src/sys/dev/nvmm/x86/nvmm_x86.h
diff -u src/sys/dev/nvmm/x86/nvmm_x86.h:1.15 src/sys/dev/nvmm/x86/nvmm_x86.h:1.16
--- src/sys/dev/nvmm/x86/nvmm_x86.h:1.15	Sat May 11 07:31:56 2019
+++ src/sys/dev/nvmm/x86/nvmm_x86.h	Wed Oct 23 07:01:11 2019
@@ -1,7 +1,7 @@
-/*	$NetBSD: nvmm_x86.h,v 1.15 2019/05/11 07:31:56 maxv Exp $	*/
+/*	$NetBSD: nvmm_x86.h,v 1.16 2019/10/23 07:01:11 maxv Exp $	*/
 
 /*
- * Copyright (c) 2018 The NetBSD Foundation, Inc.
+ * Copyright (c) 2018-2019 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
@@ -32,26 +32,21 @@
 #ifndef _NVMM_X86_H_
 #define _NVMM_X86_H_
 
-/* --------------------------------------------------------------------- */
+/* -------------------------------------------------------------------------- */
 
 #ifndef ASM_NVMM
 
-struct nvmm_exit_memory {
+struct nvmm_x86_exit_memory {
 	int prot;
 	gpaddr_t gpa;
 	uint8_t inst_len;
 	uint8_t inst_bytes[15];
 };
 
-enum nvmm_exit_io_type {
-	NVMM_EXIT_IO_IN,
-	NVMM_EXIT_IO_OUT
-};
-
-struct nvmm_exit_io {
-	enum nvmm_exit_io_type type;
+struct nvmm_x86_exit_io {
+	bool in;
 	uint16_t port;
-	int seg;
+	int8_t seg;
 	uint8_t address_size;
 	uint8_t operand_size;
 	bool rep;
@@ -59,48 +54,84 @@ struct nvmm_exit_io {
 	uint64_t npc;
 };
 
-enum nvmm_exit_msr_type {
-	NVMM_EXIT_MSR_RDMSR,
-	NVMM_EXIT_MSR_WRMSR
+struct nvmm_x86_exit_rdmsr {
+	uint32_t msr;
+	uint64_t npc;
 };
 
-struct nvmm_exit_msr {
-	enum nvmm_exit_msr_type type;
-	uint64_t msr;
+struct nvmm_x86_exit_wrmsr {
+	uint32_t msr;
 	uint64_t val;
 	uint64_t npc;
 };
 
-struct nvmm_exit_insn {
+struct nvmm_x86_exit_insn {
 	uint64_t npc;
 };
 
-struct nvmm_exit_invalid {
+struct nvmm_x86_exit_invalid {
 	uint64_t hwcode;
 };
 
-union nvmm_exit_md {
-	struct nvmm_exit_memory mem;
-	struct nvmm_exit_io io;
-	struct nvmm_exit_msr msr;
-	struct nvmm_exit_insn insn;
-	struct nvmm_exit_invalid inv;
+/* Generic. */
+#define NVMM_VCPU_EXIT_NONE		0x0000000000000000ULL
+#define NVMM_VCPU_EXIT_INVALID		0xFFFFFFFFFFFFFFFFULL
+/* x86: operations. */
+#define NVMM_VCPU_EXIT_MEMORY		0x0000000000000001ULL
+#define NVMM_VCPU_EXIT_IO		0x0000000000000002ULL
+/* x86: changes in VCPU state. */
+#define NVMM_VCPU_EXIT_SHUTDOWN		0x0000000000001000ULL
+#define NVMM_VCPU_EXIT_INT_READY	0x0000000000001001ULL
+#define NVMM_VCPU_EXIT_NMI_READY	0x0000000000001002ULL
+#define NVMM_VCPU_EXIT_HALTED		0x0000000000001003ULL
+/* x86: instructions. */
+#define NVMM_VCPU_EXIT_RDMSR		0x0000000000002000ULL
+#define NVMM_VCPU_EXIT_WRMSR		0x0000000000002001ULL
+#define NVMM_VCPU_EXIT_MONITOR		0x0000000000002002ULL
+#define NVMM_VCPU_EXIT_MWAIT		0x0000000000002003ULL
+#define NVMM_VCPU_EXIT_CPUID		0x0000000000002004ULL
+
+struct nvmm_x86_exit {
+	uint64_t reason;
+	union {
+		struct nvmm_x86_exit_memory mem;
+		struct nvmm_x86_exit_io io;
+		struct nvmm_x86_exit_rdmsr rdmsr;
+		struct nvmm_x86_exit_wrmsr wrmsr;
+		struct nvmm_x86_exit_insn insn;
+		struct nvmm_x86_exit_invalid inv;
+	} u;
+	uint64_t exitstate[8];
+};
+
+#define NVMM_VCPU_EVENT_EXCP	0
+#define NVMM_VCPU_EVENT_INTR	1
+
+struct nvmm_x86_event {
+	u_int type;
+	uint8_t vector;
+	union {
+		struct {
+			uint64_t error;
+		} excp;
+	} u;
 };
 
-#define NVMM_EXIT_MONITOR	0x0000000000001000ULL
-#define NVMM_EXIT_MWAIT		0x0000000000001001ULL
-#define NVMM_EXIT_MWAIT_COND	0x0000000000001002ULL
-
 struct nvmm_cap_md {
 	uint64_t xcr0_mask;
-	uint64_t mxcsr_mask;
-	uint64_t conf_cpuid_maxops;
-	uint64_t rsvd[5];
+	uint32_t mxcsr_mask;
+	uint32_t conf_cpuid_maxops;
+	uint64_t rsvd[6];
 };
 
 #endif
 
-/* --------------------------------------------------------------------- */
+/* -------------------------------------------------------------------------- */
+
+/*
+ * Segment state indexes. We use X64 as naming convention, not to confuse with
+ * X86 which originally implied 32bit.
+ */
 
 /* Segments. */
 #define NVMM_X64_SEG_ES			0
@@ -229,28 +260,43 @@ struct nvmm_x64_state {
 	struct fxsave fpu;
 };
 
-#define nvmm_vcpu_state nvmm_x64_state
+#define NVMM_VCPU_CONF_CPUID	NVMM_VCPU_CONF_MD_BEGIN
 
-#define NVMM_MACH_CONF_X86_CPUID	NVMM_MACH_CONF_MD_BEGIN
-#define NVMM_X86_NCONF			1
+struct nvmm_vcpu_conf_cpuid {
+	/* The options. */
+	uint32_t mask:1;
+	uint32_t exit:1;
+	uint32_t rsvd:30;
 
-struct nvmm_mach_conf_x86_cpuid {
+	/* The leaf. */
 	uint32_t leaf;
-	struct {
-		uint32_t eax;
-		uint32_t ebx;
-		uint32_t ecx;
-		uint32_t edx;
-	} set;
-	struct {
-		uint32_t eax;
-		uint32_t ebx;
-		uint32_t ecx;
-		uint32_t edx;
-	} del;
-};
+
+	/* The params. */
+	union {
+		struct {
+			struct {
+				uint32_t eax;
+				uint32_t ebx;
+				uint32_t ecx;
+				uint32_t edx;
+			} set;
+			struct {
+				uint32_t eax;
+				uint32_t ebx;
+				uint32_t ecx;
+				uint32_t edx;
+			} del;
+		} mask;
+	} u;
+};
+
+#define nvmm_vcpu_exit		nvmm_x86_exit
+#define nvmm_vcpu_event		nvmm_x86_event
+#define nvmm_vcpu_state		nvmm_x64_state
 
 #ifdef _KERNEL
+#define NVMM_X86_MACH_NCONF	0
+#define NVMM_X86_VCPU_NCONF	1
 struct nvmm_x86_cpuid_mask {
 	uint32_t eax;
 	uint32_t ebx;

Index: src/sys/dev/nvmm/x86/nvmm_x86_svm.c
diff -u src/sys/dev/nvmm/x86/nvmm_x86_svm.c:1.50 src/sys/dev/nvmm/x86/nvmm_x86_svm.c:1.51
--- src/sys/dev/nvmm/x86/nvmm_x86_svm.c:1.50	Sat Oct 12 06:31:04 2019
+++ src/sys/dev/nvmm/x86/nvmm_x86_svm.c	Wed Oct 23 07:01:11 2019
@@ -1,7 +1,7 @@
-/*	$NetBSD: nvmm_x86_svm.c,v 1.50 2019/10/12 06:31:04 maxv Exp $	*/
+/*	$NetBSD: nvmm_x86_svm.c,v 1.51 2019/10/23 07:01:11 maxv Exp $	*/
 
 /*
- * Copyright (c) 2018 The NetBSD Foundation, Inc.
+ * Copyright (c) 2018-2019 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
@@ -30,7 +30,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: nvmm_x86_svm.c,v 1.50 2019/10/12 06:31:04 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: nvmm_x86_svm.c,v 1.51 2019/10/23 07:01:11 maxv Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -504,14 +504,12 @@ static uint64_t svm_xcr0_mask __read_mos
 /* -------------------------------------------------------------------------- */
 
 struct svm_machdata {
-	bool cpuidpresent[SVM_NCPUIDS];
-	struct nvmm_mach_conf_x86_cpuid cpuid[SVM_NCPUIDS];
 	volatile uint64_t mach_htlb_gen;
 };
 
-static const size_t svm_conf_sizes[NVMM_X86_NCONF] = {
-	[NVMM_MACH_CONF_MD(NVMM_MACH_CONF_X86_CPUID)] =
-	    sizeof(struct nvmm_mach_conf_x86_cpuid)
+static const size_t svm_vcpu_conf_sizes[NVMM_X86_VCPU_NCONF] = {
+	[NVMM_VCPU_CONF_MD(NVMM_VCPU_CONF_CPUID)] =
+	    sizeof(struct nvmm_vcpu_conf_cpuid)
 };
 
 struct svm_cpudata {
@@ -553,6 +551,10 @@ struct svm_cpudata {
 	uint64_t drs[NVMM_X64_NDR];
 	uint64_t gtsc;
 	struct xsave_header gfpu __aligned(64);
+
+	/* VCPU configuration. */
+	bool cpuidpresent[SVM_NCPUIDS];
+	struct nvmm_vcpu_conf_cpuid cpuid[SVM_NCPUIDS];
 };
 
 static void
@@ -651,7 +653,7 @@ svm_event_waitexit_disable(struct nvmm_c
 }
 
 static inline int
-svm_event_has_error(uint64_t vector)
+svm_event_has_error(uint8_t vector)
 {
 	switch (vector) {
 	case 8:		/* #DF */
@@ -674,29 +676,18 @@ svm_vcpu_inject(struct nvmm_cpu *vcpu)
 	struct nvmm_comm_page *comm = vcpu->comm;
 	struct svm_cpudata *cpudata = vcpu->cpudata;
 	struct vmcb *vmcb = cpudata->vmcb;
-	enum nvmm_event_type evtype;
-	uint64_t vector, error;
+	u_int evtype;
+	uint8_t vector;
+	uint64_t error;
 	int type = 0, err = 0;
 
 	evtype = comm->event.type;
 	vector = comm->event.vector;
-	error = comm->event.u.error;
+	error = comm->event.u.excp.error;
 	__insn_barrier();
 
-	if (__predict_false(vector >= 256)) {
-		return EINVAL;
-	}
-
 	switch (evtype) {
-	case NVMM_EVENT_INTERRUPT_HW:
-		type = SVM_EVENT_TYPE_HW_INT;
-		if (vector == 2) {
-			type = SVM_EVENT_TYPE_NMI;
-			svm_event_waitexit_enable(vcpu, true);
-		}
-		err = 0;
-		break;
-	case NVMM_EVENT_EXCEPTION:
+	case NVMM_VCPU_EVENT_EXCP:
 		type = SVM_EVENT_TYPE_EXC;
 		if (vector == 2 || vector >= 32)
 			return EINVAL;
@@ -704,16 +695,24 @@ svm_vcpu_inject(struct nvmm_cpu *vcpu)
 			return EINVAL;
 		err = svm_event_has_error(vector);
 		break;
+	case NVMM_VCPU_EVENT_INTR:
+		type = SVM_EVENT_TYPE_HW_INT;
+		if (vector == 2) {
+			type = SVM_EVENT_TYPE_NMI;
+			svm_event_waitexit_enable(vcpu, true);
+		}
+		err = 0;
+		break;
 	default:
 		return EINVAL;
 	}
 
 	vmcb->ctrl.eventinj =
-	    __SHIFTIN(vector, VMCB_CTRL_EVENTINJ_VECTOR) |
-	    __SHIFTIN(type, VMCB_CTRL_EVENTINJ_TYPE) |
-	    __SHIFTIN(err, VMCB_CTRL_EVENTINJ_EV) |
-	    __SHIFTIN(1, VMCB_CTRL_EVENTINJ_V) |
-	    __SHIFTIN(error, VMCB_CTRL_EVENTINJ_ERRORCODE);
+	    __SHIFTIN((uint64_t)vector, VMCB_CTRL_EVENTINJ_VECTOR) |
+	    __SHIFTIN((uint64_t)type, VMCB_CTRL_EVENTINJ_TYPE) |
+	    __SHIFTIN((uint64_t)err, VMCB_CTRL_EVENTINJ_EV) |
+	    __SHIFTIN((uint64_t)1, VMCB_CTRL_EVENTINJ_V) |
+	    __SHIFTIN((uint64_t)error, VMCB_CTRL_EVENTINJ_ERRORCODE);
 
 	cpudata->evt_pending = true;
 
@@ -726,9 +725,9 @@ svm_inject_ud(struct nvmm_cpu *vcpu)
 	struct nvmm_comm_page *comm = vcpu->comm;
 	int ret __diagused;
 
-	comm->event.type = NVMM_EVENT_EXCEPTION;
+	comm->event.type = NVMM_VCPU_EVENT_EXCP;
 	comm->event.vector = 6;
-	comm->event.u.error = 0;
+	comm->event.u.excp.error = 0;
 
 	ret = svm_vcpu_inject(vcpu);
 	KASSERT(ret == 0);
@@ -740,9 +739,9 @@ svm_inject_gp(struct nvmm_cpu *vcpu)
 	struct nvmm_comm_page *comm = vcpu->comm;
 	int ret __diagused;
 
-	comm->event.type = NVMM_EVENT_EXCEPTION;
+	comm->event.type = NVMM_VCPU_EVENT_EXCP;
 	comm->event.vector = 13;
-	comm->event.u.error = 0;
+	comm->event.u.excp.error = 0;
 
 	ret = svm_vcpu_inject(vcpu);
 	KASSERT(ret == 0);
@@ -849,12 +848,18 @@ svm_inkernel_handle_cpuid(struct nvmm_cp
 }
 
 static void
+svm_exit_insn(struct vmcb *vmcb, struct nvmm_vcpu_exit *exit, uint64_t reason)
+{
+	exit->u.insn.npc = vmcb->ctrl.nrip;
+	exit->reason = reason;
+}
+
+static void
 svm_exit_cpuid(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
-    struct nvmm_exit *exit)
+    struct nvmm_vcpu_exit *exit)
 {
-	struct svm_machdata *machdata = mach->machdata;
 	struct svm_cpudata *cpudata = vcpu->cpudata;
-	struct nvmm_mach_conf_x86_cpuid *cpuid;
+	struct nvmm_vcpu_conf_cpuid *cpuid;
 	uint64_t eax, ecx;
 	u_int descs[4];
 	size_t i;
@@ -871,36 +876,42 @@ svm_exit_cpuid(struct nvmm_machine *mach
 	svm_inkernel_handle_cpuid(vcpu, eax, ecx);
 
 	for (i = 0; i < SVM_NCPUIDS; i++) {
-		cpuid = &machdata->cpuid[i];
-		if (!machdata->cpuidpresent[i]) {
+		if (!cpudata->cpuidpresent[i]) {
 			continue;
 		}
+		cpuid = &cpudata->cpuid[i];
 		if (cpuid->leaf != eax) {
 			continue;
 		}
 
+		if (cpuid->exit) {
+			svm_exit_insn(cpudata->vmcb, exit, NVMM_VCPU_EXIT_CPUID);
+			return;
+		}
+		KASSERT(cpuid->mask);
+
 		/* del */
-		cpudata->vmcb->state.rax &= ~cpuid->del.eax;
-		cpudata->gprs[NVMM_X64_GPR_RBX] &= ~cpuid->del.ebx;
-		cpudata->gprs[NVMM_X64_GPR_RCX] &= ~cpuid->del.ecx;
-		cpudata->gprs[NVMM_X64_GPR_RDX] &= ~cpuid->del.edx;
+		cpudata->vmcb->state.rax &= ~cpuid->u.mask.del.eax;
+		cpudata->gprs[NVMM_X64_GPR_RBX] &= ~cpuid->u.mask.del.ebx;
+		cpudata->gprs[NVMM_X64_GPR_RCX] &= ~cpuid->u.mask.del.ecx;
+		cpudata->gprs[NVMM_X64_GPR_RDX] &= ~cpuid->u.mask.del.edx;
 
 		/* set */
-		cpudata->vmcb->state.rax |= cpuid->set.eax;
-		cpudata->gprs[NVMM_X64_GPR_RBX] |= cpuid->set.ebx;
-		cpudata->gprs[NVMM_X64_GPR_RCX] |= cpuid->set.ecx;
-		cpudata->gprs[NVMM_X64_GPR_RDX] |= cpuid->set.edx;
+		cpudata->vmcb->state.rax |= cpuid->u.mask.set.eax;
+		cpudata->gprs[NVMM_X64_GPR_RBX] |= cpuid->u.mask.set.ebx;
+		cpudata->gprs[NVMM_X64_GPR_RCX] |= cpuid->u.mask.set.ecx;
+		cpudata->gprs[NVMM_X64_GPR_RDX] |= cpuid->u.mask.set.edx;
 
 		break;
 	}
 
 	svm_inkernel_advance(cpudata->vmcb);
-	exit->reason = NVMM_EXIT_NONE;
+	exit->reason = NVMM_VCPU_EXIT_NONE;
 }
 
 static void
 svm_exit_hlt(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
-    struct nvmm_exit *exit)
+    struct nvmm_vcpu_exit *exit)
 {
 	struct svm_cpudata *cpudata = vcpu->cpudata;
 	struct vmcb *vmcb = cpudata->vmcb;
@@ -910,7 +921,7 @@ svm_exit_hlt(struct nvmm_machine *mach, 
 	}
 
 	svm_inkernel_advance(cpudata->vmcb);
-	exit->reason = NVMM_EXIT_HALTED;
+	exit->reason = NVMM_VCPU_EXIT_HALTED;
 }
 
 #define SVM_EXIT_IO_PORT	__BITS(31,16)
@@ -927,20 +938,15 @@ svm_exit_hlt(struct nvmm_machine *mach, 
 
 static void
 svm_exit_io(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
-    struct nvmm_exit *exit)
+    struct nvmm_vcpu_exit *exit)
 {
 	struct svm_cpudata *cpudata = vcpu->cpudata;
 	uint64_t info = cpudata->vmcb->ctrl.exitinfo1;
 	uint64_t nextpc = cpudata->vmcb->ctrl.exitinfo2;
 
-	exit->reason = NVMM_EXIT_IO;
-
-	if (info & SVM_EXIT_IO_IN) {
-		exit->u.io.type = NVMM_EXIT_IO_IN;
-	} else {
-		exit->u.io.type = NVMM_EXIT_IO_OUT;
-	}
+	exit->reason = NVMM_VCPU_EXIT_IO;
 
+	exit->u.io.in = (info & SVM_EXIT_IO_IN) != 0;
 	exit->u.io.port = __SHIFTOUT(info, SVM_EXIT_IO_PORT);
 
 	if (svm_decode_assist) {
@@ -984,54 +990,51 @@ static const uint64_t msr_ignore_list[] 
 
 static bool
 svm_inkernel_handle_msr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
-    struct nvmm_exit *exit)
+    struct nvmm_vcpu_exit *exit)
 {
 	struct svm_cpudata *cpudata = vcpu->cpudata;
 	struct vmcb *vmcb = cpudata->vmcb;
 	uint64_t val;
 	size_t i;
 
-	switch (exit->u.msr.type) {
-	case NVMM_EXIT_MSR_RDMSR:
-		if (exit->u.msr.msr == MSR_NB_CFG) {
+	if (exit->reason == NVMM_VCPU_EXIT_RDMSR) {
+		if (exit->u.rdmsr.msr == MSR_NB_CFG) {
 			val = NB_CFG_INITAPICCPUIDLO;
 			vmcb->state.rax = (val & 0xFFFFFFFF);
 			cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
 			goto handled;
 		}
 		for (i = 0; i < __arraycount(msr_ignore_list); i++) {
-			if (msr_ignore_list[i] != exit->u.msr.msr)
+			if (msr_ignore_list[i] != exit->u.rdmsr.msr)
 				continue;
 			val = 0;
 			vmcb->state.rax = (val & 0xFFFFFFFF);
 			cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
 			goto handled;
 		}
-		break;
-	case NVMM_EXIT_MSR_WRMSR:
-		if (exit->u.msr.msr == MSR_EFER) {
-			if (__predict_false(exit->u.msr.val & ~EFER_VALID)) {
+	} else {
+		if (exit->u.wrmsr.msr == MSR_EFER) {
+			if (__predict_false(exit->u.wrmsr.val & ~EFER_VALID)) {
 				goto error;
 			}
-			if ((vmcb->state.efer ^ exit->u.msr.val) &
+			if ((vmcb->state.efer ^ exit->u.wrmsr.val) &
 			     EFER_TLB_FLUSH) {
 				cpudata->gtlb_want_flush = true;
 			}
-			vmcb->state.efer = exit->u.msr.val | EFER_SVME;
+			vmcb->state.efer = exit->u.wrmsr.val | EFER_SVME;
 			svm_vmcb_cache_flush(vmcb, VMCB_CTRL_VMCB_CLEAN_CR);
 			goto handled;
 		}
-		if (exit->u.msr.msr == MSR_TSC) {
-			cpudata->gtsc = exit->u.msr.val;
+		if (exit->u.wrmsr.msr == MSR_TSC) {
+			cpudata->gtsc = exit->u.wrmsr.val;
 			cpudata->gtsc_want_update = true;
 			goto handled;
 		}
 		for (i = 0; i < __arraycount(msr_ignore_list); i++) {
-			if (msr_ignore_list[i] != exit->u.msr.msr)
+			if (msr_ignore_list[i] != exit->u.wrmsr.msr)
 				continue;
 			goto handled;
 		}
-		break;
 	}
 
 	return false;
@@ -1045,49 +1048,69 @@ error:
 	return true;
 }
 
-static void
-svm_exit_msr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
-    struct nvmm_exit *exit)
+static inline void
+svm_exit_rdmsr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
+    struct nvmm_vcpu_exit *exit)
 {
 	struct svm_cpudata *cpudata = vcpu->cpudata;
-	uint64_t info = cpudata->vmcb->ctrl.exitinfo1;
 
-	if (info == 0) {
-		exit->u.msr.type = NVMM_EXIT_MSR_RDMSR;
-	} else {
-		exit->u.msr.type = NVMM_EXIT_MSR_WRMSR;
+	exit->reason = NVMM_VCPU_EXIT_RDMSR;
+	exit->u.rdmsr.msr = (cpudata->gprs[NVMM_X64_GPR_RCX] & 0xFFFFFFFF);
+	exit->u.rdmsr.npc = cpudata->vmcb->ctrl.nrip;
+
+	if (svm_inkernel_handle_msr(mach, vcpu, exit)) {
+		exit->reason = NVMM_VCPU_EXIT_NONE;
+		return;
 	}
 
-	exit->u.msr.msr = (cpudata->gprs[NVMM_X64_GPR_RCX] & 0xFFFFFFFF);
+	svm_vcpu_state_provide(vcpu, NVMM_X64_STATE_GPRS);
+}
 
-	if (info == 1) {
-		uint64_t rdx, rax;
-		rdx = cpudata->gprs[NVMM_X64_GPR_RDX];
-		rax = cpudata->vmcb->state.rax;
-		exit->u.msr.val = (rdx << 32) | (rax & 0xFFFFFFFF);
-	} else {
-		exit->u.msr.val = 0;
-	}
+static inline void
+svm_exit_wrmsr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
+    struct nvmm_vcpu_exit *exit)
+{
+	struct svm_cpudata *cpudata = vcpu->cpudata;
+	uint64_t rdx, rax;
+
+	rdx = cpudata->gprs[NVMM_X64_GPR_RDX];
+	rax = cpudata->vmcb->state.rax;
+
+	exit->reason = NVMM_VCPU_EXIT_WRMSR;
+	exit->u.wrmsr.msr = (cpudata->gprs[NVMM_X64_GPR_RCX] & 0xFFFFFFFF);
+	exit->u.wrmsr.val = (rdx << 32) | (rax & 0xFFFFFFFF);
+	exit->u.wrmsr.npc = cpudata->vmcb->ctrl.nrip;
 
 	if (svm_inkernel_handle_msr(mach, vcpu, exit)) {
-		exit->reason = NVMM_EXIT_NONE;
+		exit->reason = NVMM_VCPU_EXIT_NONE;
 		return;
 	}
 
-	exit->reason = NVMM_EXIT_MSR;
-	exit->u.msr.npc = cpudata->vmcb->ctrl.nrip;
-
 	svm_vcpu_state_provide(vcpu, NVMM_X64_STATE_GPRS);
 }
 
 static void
+svm_exit_msr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
+    struct nvmm_vcpu_exit *exit)
+{
+	struct svm_cpudata *cpudata = vcpu->cpudata;
+	uint64_t info = cpudata->vmcb->ctrl.exitinfo1;
+
+	if (info == 0) {
+		svm_exit_rdmsr(mach, vcpu, exit);
+	} else {
+		svm_exit_wrmsr(mach, vcpu, exit);
+	}
+}
+
+static void
 svm_exit_npf(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
-    struct nvmm_exit *exit)
+    struct nvmm_vcpu_exit *exit)
 {
 	struct svm_cpudata *cpudata = vcpu->cpudata;
 	gpaddr_t gpa = cpudata->vmcb->ctrl.exitinfo2;
 
-	exit->reason = NVMM_EXIT_MEMORY;
+	exit->reason = NVMM_VCPU_EXIT_MEMORY;
 	if (cpudata->vmcb->ctrl.exitinfo1 & PGEX_W)
 		exit->u.mem.prot = PROT_WRITE;
 	else if (cpudata->vmcb->ctrl.exitinfo1 & PGEX_X)
@@ -1105,21 +1128,14 @@ svm_exit_npf(struct nvmm_machine *mach, 
 }
 
 static void
-svm_exit_insn(struct vmcb *vmcb, struct nvmm_exit *exit, uint64_t reason)
-{
-	exit->u.insn.npc = vmcb->ctrl.nrip;
-	exit->reason = reason;
-}
-
-static void
 svm_exit_xsetbv(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
-    struct nvmm_exit *exit)
+    struct nvmm_vcpu_exit *exit)
 {
 	struct svm_cpudata *cpudata = vcpu->cpudata;
 	struct vmcb *vmcb = cpudata->vmcb;
 	uint64_t val;
 
-	exit->reason = NVMM_EXIT_NONE;
+	exit->reason = NVMM_VCPU_EXIT_NONE;
 
 	val = (cpudata->gprs[NVMM_X64_GPR_RDX] << 32) |
 	    (vmcb->state.rax & 0xFFFFFFFF);
@@ -1147,10 +1163,10 @@ error:
 }
 
 static void
-svm_exit_invalid(struct nvmm_exit *exit, uint64_t code)
+svm_exit_invalid(struct nvmm_vcpu_exit *exit, uint64_t code)
 {
 	exit->u.inv.hwcode = code;
-	exit->reason = NVMM_EXIT_INVALID;
+	exit->reason = NVMM_VCPU_EXIT_INVALID;
 }
 
 /* -------------------------------------------------------------------------- */
@@ -1293,7 +1309,7 @@ svm_exit_evt(struct svm_cpudata *cpudata
 
 static int
 svm_vcpu_run(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
-    struct nvmm_exit *exit)
+    struct nvmm_vcpu_exit *exit)
 {
 	struct nvmm_comm_page *comm = vcpu->comm;
 	struct svm_machdata *machdata = mach->machdata;
@@ -1353,15 +1369,15 @@ svm_vcpu_run(struct nvmm_machine *mach, 
 		switch (vmcb->ctrl.exitcode) {
 		case VMCB_EXITCODE_INTR:
 		case VMCB_EXITCODE_NMI:
-			exit->reason = NVMM_EXIT_NONE;
+			exit->reason = NVMM_VCPU_EXIT_NONE;
 			break;
 		case VMCB_EXITCODE_VINTR:
 			svm_event_waitexit_disable(vcpu, false);
-			exit->reason = NVMM_EXIT_INT_READY;
+			exit->reason = NVMM_VCPU_EXIT_INT_READY;
 			break;
 		case VMCB_EXITCODE_IRET:
 			svm_event_waitexit_disable(vcpu, true);
-			exit->reason = NVMM_EXIT_NMI_READY;
+			exit->reason = NVMM_VCPU_EXIT_NMI_READY;
 			break;
 		case VMCB_EXITCODE_CPUID:
 			svm_exit_cpuid(mach, vcpu, exit);
@@ -1376,7 +1392,7 @@ svm_vcpu_run(struct nvmm_machine *mach, 
 			svm_exit_msr(mach, vcpu, exit);
 			break;
 		case VMCB_EXITCODE_SHUTDOWN:
-			exit->reason = NVMM_EXIT_SHUTDOWN;
+			exit->reason = NVMM_VCPU_EXIT_SHUTDOWN;
 			break;
 		case VMCB_EXITCODE_RDPMC:
 		case VMCB_EXITCODE_RSM:
@@ -1390,16 +1406,14 @@ svm_vcpu_run(struct nvmm_machine *mach, 
 		case VMCB_EXITCODE_SKINIT:
 		case VMCB_EXITCODE_RDTSCP:
 			svm_inject_ud(vcpu);
-			exit->reason = NVMM_EXIT_NONE;
+			exit->reason = NVMM_VCPU_EXIT_NONE;
 			break;
 		case VMCB_EXITCODE_MONITOR:
-			svm_exit_insn(vmcb, exit, NVMM_EXIT_MONITOR);
+			svm_exit_insn(vmcb, exit, NVMM_VCPU_EXIT_MONITOR);
 			break;
 		case VMCB_EXITCODE_MWAIT:
-			svm_exit_insn(vmcb, exit, NVMM_EXIT_MWAIT);
-			break;
 		case VMCB_EXITCODE_MWAIT_CONDITIONAL:
-			svm_exit_insn(vmcb, exit, NVMM_EXIT_MWAIT_COND);
+			svm_exit_insn(vmcb, exit, NVMM_VCPU_EXIT_MWAIT);
 			break;
 		case VMCB_EXITCODE_XSETBV:
 			svm_exit_xsetbv(mach, vcpu, exit);
@@ -1423,7 +1437,7 @@ svm_vcpu_run(struct nvmm_machine *mach, 
 		if (curlwp->l_flag & LW_USERRET) {
 			break;
 		}
-		if (exit->reason != NVMM_EXIT_NONE) {
+		if (exit->reason != NVMM_VCPU_EXIT_NONE) {
 			break;
 		}
 	}
@@ -2114,6 +2128,67 @@ svm_vcpu_destroy(struct nvmm_machine *ma
 	    roundup(sizeof(*cpudata), PAGE_SIZE), UVM_KMF_WIRED);
 }
 
+static int
+svm_vcpu_configure(struct nvmm_cpu *vcpu, uint64_t op, void *data)
+{
+	struct svm_cpudata *cpudata = vcpu->cpudata;
+	struct nvmm_vcpu_conf_cpuid *cpuid;
+	size_t i;
+
+	if (__predict_false(op != NVMM_VCPU_CONF_MD(NVMM_VCPU_CONF_CPUID))) {
+		return EINVAL;
+	}
+	cpuid = data;
+
+	if (__predict_false(cpuid->mask && cpuid->exit)) {
+		return EINVAL;
+	}
+	if (__predict_false(cpuid->mask &&
+	    ((cpuid->u.mask.set.eax & cpuid->u.mask.del.eax) ||
+	     (cpuid->u.mask.set.ebx & cpuid->u.mask.del.ebx) ||
+	     (cpuid->u.mask.set.ecx & cpuid->u.mask.del.ecx) ||
+	     (cpuid->u.mask.set.edx & cpuid->u.mask.del.edx)))) {
+		return EINVAL;
+	}
+
+	/* If unset, delete, to restore the default behavior. */
+	if (!cpuid->mask && !cpuid->exit) {
+		for (i = 0; i < SVM_NCPUIDS; i++) {
+			if (!cpudata->cpuidpresent[i]) {
+				continue;
+			}
+			if (cpudata->cpuid[i].leaf == cpuid->leaf) {
+				cpudata->cpuidpresent[i] = false;
+			}
+		}
+		return 0;
+	}
+
+	/* If already here, replace. */
+	for (i = 0; i < SVM_NCPUIDS; i++) {
+		if (!cpudata->cpuidpresent[i]) {
+			continue;
+		}
+		if (cpudata->cpuid[i].leaf == cpuid->leaf) {
+			memcpy(&cpudata->cpuid[i], cpuid,
+			    sizeof(struct nvmm_vcpu_conf_cpuid));
+			return 0;
+		}
+	}
+
+	/* Not here, insert. */
+	for (i = 0; i < SVM_NCPUIDS; i++) {
+		if (!cpudata->cpuidpresent[i]) {
+			cpudata->cpuidpresent[i] = true;
+			memcpy(&cpudata->cpuid[i], cpuid,
+			    sizeof(struct nvmm_vcpu_conf_cpuid));
+			return 0;
+		}
+	}
+
+	return ENOBUFS;
+}
+
 /* -------------------------------------------------------------------------- */
 
 static void
@@ -2153,44 +2228,7 @@ svm_machine_destroy(struct nvmm_machine 
 static int
 svm_machine_configure(struct nvmm_machine *mach, uint64_t op, void *data)
 {
-	struct nvmm_mach_conf_x86_cpuid *cpuid = data;
-	struct svm_machdata *machdata = (struct svm_machdata *)mach->machdata;
-	size_t i;
-
-	if (__predict_false(op != NVMM_MACH_CONF_MD(NVMM_MACH_CONF_X86_CPUID))) {
-		return EINVAL;
-	}
-
-	if (__predict_false((cpuid->set.eax & cpuid->del.eax) ||
-	    (cpuid->set.ebx & cpuid->del.ebx) ||
-	    (cpuid->set.ecx & cpuid->del.ecx) ||
-	    (cpuid->set.edx & cpuid->del.edx))) {
-		return EINVAL;
-	}
-
-	/* If already here, replace. */
-	for (i = 0; i < SVM_NCPUIDS; i++) {
-		if (!machdata->cpuidpresent[i]) {
-			continue;
-		}
-		if (machdata->cpuid[i].leaf == cpuid->leaf) {
-			memcpy(&machdata->cpuid[i], cpuid,
-			    sizeof(struct nvmm_mach_conf_x86_cpuid));
-			return 0;
-		}
-	}
-
-	/* Not here, insert. */
-	for (i = 0; i < SVM_NCPUIDS; i++) {
-		if (!machdata->cpuidpresent[i]) {
-			machdata->cpuidpresent[i] = true;
-			memcpy(&machdata->cpuid[i], cpuid,
-			    sizeof(struct nvmm_mach_conf_x86_cpuid));
-			return 0;
-		}
-	}
-
-	return ENOBUFS;
+	panic("%s: impossible", __func__);
 }
 
 /* -------------------------------------------------------------------------- */
@@ -2359,14 +2397,17 @@ const struct nvmm_impl nvmm_x86_svm = {
 	.init = svm_init,
 	.fini = svm_fini,
 	.capability = svm_capability,
-	.conf_max = NVMM_X86_NCONF,
-	.conf_sizes = svm_conf_sizes,
+	.mach_conf_max = NVMM_X86_MACH_NCONF,
+	.mach_conf_sizes = NULL,
+	.vcpu_conf_max = NVMM_X86_VCPU_NCONF,
+	.vcpu_conf_sizes = svm_vcpu_conf_sizes,
 	.state_size = sizeof(struct nvmm_x64_state),
 	.machine_create = svm_machine_create,
 	.machine_destroy = svm_machine_destroy,
 	.machine_configure = svm_machine_configure,
 	.vcpu_create = svm_vcpu_create,
 	.vcpu_destroy = svm_vcpu_destroy,
+	.vcpu_configure = svm_vcpu_configure,
 	.vcpu_setstate = svm_vcpu_setstate,
 	.vcpu_getstate = svm_vcpu_getstate,
 	.vcpu_inject = svm_vcpu_inject,

Index: src/sys/dev/nvmm/x86/nvmm_x86_vmx.c
diff -u src/sys/dev/nvmm/x86/nvmm_x86_vmx.c:1.39 src/sys/dev/nvmm/x86/nvmm_x86_vmx.c:1.40
--- src/sys/dev/nvmm/x86/nvmm_x86_vmx.c:1.39	Sat Oct 12 06:31:04 2019
+++ src/sys/dev/nvmm/x86/nvmm_x86_vmx.c	Wed Oct 23 07:01:11 2019
@@ -1,7 +1,7 @@
-/*	$NetBSD: nvmm_x86_vmx.c,v 1.39 2019/10/12 06:31:04 maxv Exp $	*/
+/*	$NetBSD: nvmm_x86_vmx.c,v 1.40 2019/10/23 07:01:11 maxv Exp $	*/
 
 /*
- * Copyright (c) 2018 The NetBSD Foundation, Inc.
+ * Copyright (c) 2018-2019 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
@@ -30,7 +30,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: nvmm_x86_vmx.c,v 1.39 2019/10/12 06:31:04 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: nvmm_x86_vmx.c,v 1.40 2019/10/23 07:01:11 maxv Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -697,14 +697,12 @@ static uint64_t vmx_xcr0_mask __read_mos
 /* -------------------------------------------------------------------------- */
 
 struct vmx_machdata {
-	bool cpuidpresent[VMX_NCPUIDS];
-	struct nvmm_mach_conf_x86_cpuid cpuid[VMX_NCPUIDS];
 	volatile uint64_t mach_htlb_gen;
 };
 
-static const size_t vmx_conf_sizes[NVMM_X86_NCONF] = {
-	[NVMM_MACH_CONF_MD(NVMM_MACH_CONF_X86_CPUID)] =
-	    sizeof(struct nvmm_mach_conf_x86_cpuid)
+static const size_t vmx_vcpu_conf_sizes[NVMM_X86_VCPU_NCONF] = {
+	[NVMM_VCPU_CONF_MD(NVMM_VCPU_CONF_CPUID)] =
+	    sizeof(struct nvmm_vcpu_conf_cpuid)
 };
 
 struct vmx_cpudata {
@@ -750,6 +748,10 @@ struct vmx_cpudata {
 	uint64_t drs[NVMM_X64_NDR];
 	uint64_t gtsc;
 	struct xsave_header gfpu __aligned(64);
+
+	/* VCPU configuration. */
+	bool cpuidpresent[VMX_NCPUIDS];
+	struct nvmm_vcpu_conf_cpuid cpuid[VMX_NCPUIDS];
 };
 
 static const struct {
@@ -973,7 +975,7 @@ vmx_event_waitexit_disable(struct nvmm_c
 }
 
 static inline int
-vmx_event_has_error(uint64_t vector)
+vmx_event_has_error(uint8_t vector)
 {
 	switch (vector) {
 	case 8:		/* #DF */
@@ -996,30 +998,19 @@ vmx_vcpu_inject(struct nvmm_cpu *vcpu)
 	struct nvmm_comm_page *comm = vcpu->comm;
 	struct vmx_cpudata *cpudata = vcpu->cpudata;
 	int type = 0, err = 0, ret = EINVAL;
-	enum nvmm_event_type evtype;
-	uint64_t info, vector, error;
+	u_int evtype;
+	uint8_t vector;
+	uint64_t info, error;
 
 	evtype = comm->event.type;
 	vector = comm->event.vector;
-	error = comm->event.u.error;
+	error = comm->event.u.excp.error;
 	__insn_barrier();
 
-	if (__predict_false(vector >= 256)) {
-		return EINVAL;
-	}
-
 	vmx_vmcs_enter(vcpu);
 
 	switch (evtype) {
-	case NVMM_EVENT_INTERRUPT_HW:
-		type = INTR_TYPE_EXT_INT;
-		if (vector == 2) {
-			type = INTR_TYPE_NMI;
-			vmx_event_waitexit_enable(vcpu, true);
-		}
-		err = 0;
-		break;
-	case NVMM_EVENT_EXCEPTION:
+	case NVMM_VCPU_EVENT_EXCP:
 		if (vector == 2 || vector >= 32)
 			goto out;
 		if (vector == 3 || vector == 0)
@@ -1027,15 +1018,23 @@ vmx_vcpu_inject(struct nvmm_cpu *vcpu)
 		type = INTR_TYPE_HW_EXC;
 		err = vmx_event_has_error(vector);
 		break;
+	case NVMM_VCPU_EVENT_INTR:
+		type = INTR_TYPE_EXT_INT;
+		if (vector == 2) {
+			type = INTR_TYPE_NMI;
+			vmx_event_waitexit_enable(vcpu, true);
+		}
+		err = 0;
+		break;
 	default:
 		goto out;
 	}
 
 	info =
-	    __SHIFTIN(vector, INTR_INFO_VECTOR) |
-	    __SHIFTIN(type, INTR_INFO_TYPE) |
-	    __SHIFTIN(err, INTR_INFO_ERROR) |
-	    __SHIFTIN(1, INTR_INFO_VALID);
+	    __SHIFTIN((uint64_t)vector, INTR_INFO_VECTOR) |
+	    __SHIFTIN((uint64_t)type, INTR_INFO_TYPE) |
+	    __SHIFTIN((uint64_t)err, INTR_INFO_ERROR) |
+	    __SHIFTIN((uint64_t)1, INTR_INFO_VALID);
 	vmx_vmwrite(VMCS_ENTRY_INTR_INFO, info);
 	vmx_vmwrite(VMCS_ENTRY_EXCEPTION_ERROR, error);
 
@@ -1053,9 +1052,9 @@ vmx_inject_ud(struct nvmm_cpu *vcpu)
 	struct nvmm_comm_page *comm = vcpu->comm;
 	int ret __diagused;
 
-	comm->event.type = NVMM_EVENT_EXCEPTION;
+	comm->event.type = NVMM_VCPU_EVENT_EXCP;
 	comm->event.vector = 6;
-	comm->event.u.error = 0;
+	comm->event.u.excp.error = 0;
 
 	ret = vmx_vcpu_inject(vcpu);
 	KASSERT(ret == 0);
@@ -1067,9 +1066,9 @@ vmx_inject_gp(struct nvmm_cpu *vcpu)
 	struct nvmm_comm_page *comm = vcpu->comm;
 	int ret __diagused;
 
-	comm->event.type = NVMM_EVENT_EXCEPTION;
+	comm->event.type = NVMM_VCPU_EVENT_EXCP;
 	comm->event.vector = 13;
-	comm->event.u.error = 0;
+	comm->event.u.excp.error = 0;
 
 	ret = vmx_vcpu_inject(vcpu);
 	KASSERT(ret == 0);
@@ -1104,15 +1103,15 @@ vmx_inkernel_advance(void)
 }
 
 static void
-vmx_exit_invalid(struct nvmm_exit *exit, uint64_t code)
+vmx_exit_invalid(struct nvmm_vcpu_exit *exit, uint64_t code)
 {
 	exit->u.inv.hwcode = code;
-	exit->reason = NVMM_EXIT_INVALID;
+	exit->reason = NVMM_VCPU_EXIT_INVALID;
 }
 
 static void
 vmx_exit_exc_nmi(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
-    struct nvmm_exit *exit)
+    struct nvmm_vcpu_exit *exit)
 {
 	uint64_t qual;
 
@@ -1125,7 +1124,7 @@ vmx_exit_exc_nmi(struct nvmm_machine *ma
 		goto error;
 	}
 
-	exit->reason = NVMM_EXIT_NONE;
+	exit->reason = NVMM_VCPU_EXIT_NONE;
 	return;
 
 error:
@@ -1211,12 +1210,22 @@ vmx_inkernel_handle_cpuid(struct nvmm_cp
 }
 
 static void
+vmx_exit_insn(struct nvmm_vcpu_exit *exit, uint64_t reason)
+{
+	uint64_t inslen, rip;
+
+	inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
+	rip = vmx_vmread(VMCS_GUEST_RIP);
+	exit->u.insn.npc = rip + inslen;
+	exit->reason = reason;
+}
+
+static void
 vmx_exit_cpuid(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
-    struct nvmm_exit *exit)
+    struct nvmm_vcpu_exit *exit)
 {
-	struct vmx_machdata *machdata = mach->machdata;
 	struct vmx_cpudata *cpudata = vcpu->cpudata;
-	struct nvmm_mach_conf_x86_cpuid *cpuid;
+	struct nvmm_vcpu_conf_cpuid *cpuid;
 	uint64_t eax, ecx;
 	u_int descs[4];
 	size_t i;
@@ -1233,36 +1242,42 @@ vmx_exit_cpuid(struct nvmm_machine *mach
 	vmx_inkernel_handle_cpuid(vcpu, eax, ecx);
 
 	for (i = 0; i < VMX_NCPUIDS; i++) {
-		cpuid = &machdata->cpuid[i];
-		if (!machdata->cpuidpresent[i]) {
+		if (!cpudata->cpuidpresent[i]) {
 			continue;
 		}
+		cpuid = &cpudata->cpuid[i];
 		if (cpuid->leaf != eax) {
 			continue;
 		}
 
+		if (cpuid->exit) {
+			vmx_exit_insn(exit, NVMM_VCPU_EXIT_CPUID);
+			return;
+		}
+		KASSERT(cpuid->mask);
+
 		/* del */
-		cpudata->gprs[NVMM_X64_GPR_RAX] &= ~cpuid->del.eax;
-		cpudata->gprs[NVMM_X64_GPR_RBX] &= ~cpuid->del.ebx;
-		cpudata->gprs[NVMM_X64_GPR_RCX] &= ~cpuid->del.ecx;
-		cpudata->gprs[NVMM_X64_GPR_RDX] &= ~cpuid->del.edx;
+		cpudata->gprs[NVMM_X64_GPR_RAX] &= ~cpuid->u.mask.del.eax;
+		cpudata->gprs[NVMM_X64_GPR_RBX] &= ~cpuid->u.mask.del.ebx;
+		cpudata->gprs[NVMM_X64_GPR_RCX] &= ~cpuid->u.mask.del.ecx;
+		cpudata->gprs[NVMM_X64_GPR_RDX] &= ~cpuid->u.mask.del.edx;
 
 		/* set */
-		cpudata->gprs[NVMM_X64_GPR_RAX] |= cpuid->set.eax;
-		cpudata->gprs[NVMM_X64_GPR_RBX] |= cpuid->set.ebx;
-		cpudata->gprs[NVMM_X64_GPR_RCX] |= cpuid->set.ecx;
-		cpudata->gprs[NVMM_X64_GPR_RDX] |= cpuid->set.edx;
+		cpudata->gprs[NVMM_X64_GPR_RAX] |= cpuid->u.mask.set.eax;
+		cpudata->gprs[NVMM_X64_GPR_RBX] |= cpuid->u.mask.set.ebx;
+		cpudata->gprs[NVMM_X64_GPR_RCX] |= cpuid->u.mask.set.ecx;
+		cpudata->gprs[NVMM_X64_GPR_RDX] |= cpuid->u.mask.set.edx;
 
 		break;
 	}
 
 	vmx_inkernel_advance();
-	exit->reason = NVMM_EXIT_NONE;
+	exit->reason = NVMM_VCPU_EXIT_NONE;
 }
 
 static void
 vmx_exit_hlt(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
-    struct nvmm_exit *exit)
+    struct nvmm_vcpu_exit *exit)
 {
 	struct vmx_cpudata *cpudata = vcpu->cpudata;
 	uint64_t rflags;
@@ -1275,7 +1290,7 @@ vmx_exit_hlt(struct nvmm_machine *mach, 
 	}
 
 	vmx_inkernel_advance();
-	exit->reason = NVMM_EXIT_HALTED;
+	exit->reason = NVMM_VCPU_EXIT_HALTED;
 }
 
 #define VMX_QUAL_CR_NUM		__BITS(3,0)
@@ -1427,7 +1442,7 @@ vmx_inkernel_handle_cr8(struct nvmm_mach
 
 static void
 vmx_exit_cr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
-    struct nvmm_exit *exit)
+    struct nvmm_vcpu_exit *exit)
 {
 	uint64_t qual;
 	int ret;
@@ -1453,7 +1468,7 @@ vmx_exit_cr(struct nvmm_machine *mach, s
 		vmx_inject_gp(vcpu);
 	}
 
-	exit->reason = NVMM_EXIT_NONE;
+	exit->reason = NVMM_VCPU_EXIT_NONE;
 }
 
 #define VMX_QUAL_IO_SIZE	__BITS(2,0)
@@ -1474,21 +1489,16 @@ vmx_exit_cr(struct nvmm_machine *mach, s
 
 static void
 vmx_exit_io(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
-    struct nvmm_exit *exit)
+    struct nvmm_vcpu_exit *exit)
 {
 	uint64_t qual, info, inslen, rip;
 
 	qual = vmx_vmread(VMCS_EXIT_QUALIFICATION);
 	info = vmx_vmread(VMCS_EXIT_INSTRUCTION_INFO);
 
-	exit->reason = NVMM_EXIT_IO;
-
-	if (qual & VMX_QUAL_IO_IN) {
-		exit->u.io.type = NVMM_EXIT_IO_IN;
-	} else {
-		exit->u.io.type = NVMM_EXIT_IO_OUT;
-	}
+	exit->reason = NVMM_VCPU_EXIT_IO;
 
+	exit->u.io.in = (qual & VMX_QUAL_IO_IN) != 0;
 	exit->u.io.port = __SHIFTOUT(qual, VMX_QUAL_IO_PORT);
 
 	KASSERT(__SHIFTOUT(info, VMX_INFO_IO_SEG) < 6);
@@ -1513,7 +1523,7 @@ vmx_exit_io(struct nvmm_machine *mach, s
 	exit->u.io.rep = (qual & VMX_QUAL_IO_REP) != 0;
 	exit->u.io.str = (qual & VMX_QUAL_IO_STR) != 0;
 
-	if ((exit->u.io.type == NVMM_EXIT_IO_IN) && exit->u.io.str) {
+	if (exit->u.io.in && exit->u.io.str) {
 		exit->u.io.seg = NVMM_X64_SEG_ES;
 	}
 
@@ -1533,59 +1543,56 @@ static const uint64_t msr_ignore_list[] 
 
 static bool
 vmx_inkernel_handle_msr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
-    struct nvmm_exit *exit)
+    struct nvmm_vcpu_exit *exit)
 {
 	struct vmx_cpudata *cpudata = vcpu->cpudata;
 	uint64_t val;
 	size_t i;
 
-	switch (exit->u.msr.type) {
-	case NVMM_EXIT_MSR_RDMSR:
-		if (exit->u.msr.msr == MSR_CR_PAT) {
+	if (exit->reason == NVMM_VCPU_EXIT_RDMSR) {
+		if (exit->u.rdmsr.msr == MSR_CR_PAT) {
 			val = vmx_vmread(VMCS_GUEST_IA32_PAT);
 			cpudata->gprs[NVMM_X64_GPR_RAX] = (val & 0xFFFFFFFF);
 			cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
 			goto handled;
 		}
-		if (exit->u.msr.msr == MSR_MISC_ENABLE) {
+		if (exit->u.rdmsr.msr == MSR_MISC_ENABLE) {
 			val = cpudata->gmsr_misc_enable;
 			cpudata->gprs[NVMM_X64_GPR_RAX] = (val & 0xFFFFFFFF);
 			cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
 			goto handled;
 		}
 		for (i = 0; i < __arraycount(msr_ignore_list); i++) {
-			if (msr_ignore_list[i] != exit->u.msr.msr)
+			if (msr_ignore_list[i] != exit->u.rdmsr.msr)
 				continue;
 			val = 0;
 			cpudata->gprs[NVMM_X64_GPR_RAX] = (val & 0xFFFFFFFF);
 			cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
 			goto handled;
 		}
-		break;
-	case NVMM_EXIT_MSR_WRMSR:
-		if (exit->u.msr.msr == MSR_TSC) {
-			cpudata->gtsc = exit->u.msr.val;
+	} else {
+		if (exit->u.wrmsr.msr == MSR_TSC) {
+			cpudata->gtsc = exit->u.wrmsr.val;
 			cpudata->gtsc_want_update = true;
 			goto handled;
 		}
-		if (exit->u.msr.msr == MSR_CR_PAT) {
-			val = exit->u.msr.val;
+		if (exit->u.wrmsr.msr == MSR_CR_PAT) {
+			val = exit->u.wrmsr.val;
 			if (__predict_false(!nvmm_x86_pat_validate(val))) {
 				goto error;
 			}
 			vmx_vmwrite(VMCS_GUEST_IA32_PAT, val);
 			goto handled;
 		}
-		if (exit->u.msr.msr == MSR_MISC_ENABLE) {
+		if (exit->u.wrmsr.msr == MSR_MISC_ENABLE) {
 			/* Don't care. */
 			goto handled;
 		}
 		for (i = 0; i < __arraycount(msr_ignore_list); i++) {
-			if (msr_ignore_list[i] != exit->u.msr.msr)
+			if (msr_ignore_list[i] != exit->u.wrmsr.msr)
 				continue;
 			goto handled;
 		}
-		break;
 	}
 
 	return false;
@@ -1600,50 +1607,61 @@ error:
 }
 
 static void
-vmx_exit_msr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
-    struct nvmm_exit *exit, bool rdmsr)
+vmx_exit_rdmsr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
+    struct nvmm_vcpu_exit *exit)
 {
 	struct vmx_cpudata *cpudata = vcpu->cpudata;
 	uint64_t inslen, rip;
 
-	if (rdmsr) {
-		exit->u.msr.type = NVMM_EXIT_MSR_RDMSR;
-	} else {
-		exit->u.msr.type = NVMM_EXIT_MSR_WRMSR;
+	exit->reason = NVMM_VCPU_EXIT_RDMSR;
+	exit->u.rdmsr.msr = (cpudata->gprs[NVMM_X64_GPR_RCX] & 0xFFFFFFFF);
+
+	if (vmx_inkernel_handle_msr(mach, vcpu, exit)) {
+		exit->reason = NVMM_VCPU_EXIT_NONE;
+		return;
 	}
 
-	exit->u.msr.msr = (cpudata->gprs[NVMM_X64_GPR_RCX] & 0xFFFFFFFF);
+	inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
+	rip = vmx_vmread(VMCS_GUEST_RIP);
+	exit->u.rdmsr.npc = rip + inslen;
 
-	if (rdmsr) {
-		exit->u.msr.val = 0;
-	} else {
-		uint64_t rdx, rax;
-		rdx = cpudata->gprs[NVMM_X64_GPR_RDX];
-		rax = cpudata->gprs[NVMM_X64_GPR_RAX];
-		exit->u.msr.val = (rdx << 32) | (rax & 0xFFFFFFFF);
-	}
+	vmx_vcpu_state_provide(vcpu, NVMM_X64_STATE_GPRS);
+}
+
+static void
+vmx_exit_wrmsr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
+    struct nvmm_vcpu_exit *exit)
+{
+	struct vmx_cpudata *cpudata = vcpu->cpudata;
+	uint64_t rdx, rax, inslen, rip;
+
+	rdx = cpudata->gprs[NVMM_X64_GPR_RDX];
+	rax = cpudata->gprs[NVMM_X64_GPR_RAX];
+
+	exit->reason = NVMM_VCPU_EXIT_WRMSR;
+	exit->u.wrmsr.msr = (cpudata->gprs[NVMM_X64_GPR_RCX] & 0xFFFFFFFF);
+	exit->u.wrmsr.val = (rdx << 32) | (rax & 0xFFFFFFFF);
 
 	if (vmx_inkernel_handle_msr(mach, vcpu, exit)) {
-		exit->reason = NVMM_EXIT_NONE;
+		exit->reason = NVMM_VCPU_EXIT_NONE;
 		return;
 	}
 
-	exit->reason = NVMM_EXIT_MSR;
 	inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
 	rip = vmx_vmread(VMCS_GUEST_RIP);
-	exit->u.msr.npc = rip + inslen;
+	exit->u.wrmsr.npc = rip + inslen;
 
 	vmx_vcpu_state_provide(vcpu, NVMM_X64_STATE_GPRS);
 }
 
 static void
 vmx_exit_xsetbv(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
-    struct nvmm_exit *exit)
+    struct nvmm_vcpu_exit *exit)
 {
 	struct vmx_cpudata *cpudata = vcpu->cpudata;
 	uint16_t val;
 
-	exit->reason = NVMM_EXIT_NONE;
+	exit->reason = NVMM_VCPU_EXIT_NONE;
 
 	val = (cpudata->gprs[NVMM_X64_GPR_RDX] << 32) |
 	    (cpudata->gprs[NVMM_X64_GPR_RAX] & 0xFFFFFFFF);
@@ -1674,14 +1692,14 @@ error:
 
 static void
 vmx_exit_epf(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
-    struct nvmm_exit *exit)
+    struct nvmm_vcpu_exit *exit)
 {
 	uint64_t perm;
 	gpaddr_t gpa;
 
 	gpa = vmx_vmread(VMCS_GUEST_PHYSICAL_ADDRESS);
 
-	exit->reason = NVMM_EXIT_MEMORY;
+	exit->reason = NVMM_VCPU_EXIT_MEMORY;
 	perm = vmx_vmread(VMCS_EXIT_QUALIFICATION);
 	if (perm & VMX_EPT_VIOLATION_WRITE)
 		exit->u.mem.prot = PROT_WRITE;
@@ -1865,7 +1883,7 @@ vmx_exit_evt(struct vmx_cpudata *cpudata
 
 static int
 vmx_vcpu_run(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
-    struct nvmm_exit *exit)
+    struct nvmm_vcpu_exit *exit)
 {
 	struct nvmm_comm_page *comm = vcpu->comm;
 	struct vmx_machdata *machdata = mach->machdata;
@@ -1948,7 +1966,7 @@ vmx_vcpu_run(struct nvmm_machine *mach, 
 			vmx_exit_exc_nmi(mach, vcpu, exit);
 			break;
 		case VMCS_EXITCODE_EXT_INT:
-			exit->reason = NVMM_EXIT_NONE;
+			exit->reason = NVMM_VCPU_EXIT_NONE;
 			break;
 		case VMCS_EXITCODE_CPUID:
 			vmx_exit_cpuid(mach, vcpu, exit);
@@ -1963,19 +1981,19 @@ vmx_vcpu_run(struct nvmm_machine *mach, 
 			vmx_exit_io(mach, vcpu, exit);
 			break;
 		case VMCS_EXITCODE_RDMSR:
-			vmx_exit_msr(mach, vcpu, exit, true);
+			vmx_exit_rdmsr(mach, vcpu, exit);
 			break;
 		case VMCS_EXITCODE_WRMSR:
-			vmx_exit_msr(mach, vcpu, exit, false);
+			vmx_exit_wrmsr(mach, vcpu, exit);
 			break;
 		case VMCS_EXITCODE_SHUTDOWN:
-			exit->reason = NVMM_EXIT_SHUTDOWN;
+			exit->reason = NVMM_VCPU_EXIT_SHUTDOWN;
 			break;
 		case VMCS_EXITCODE_MONITOR:
-			exit->reason = NVMM_EXIT_MONITOR;
+			vmx_exit_insn(exit, NVMM_VCPU_EXIT_MONITOR);
 			break;
 		case VMCS_EXITCODE_MWAIT:
-			exit->reason = NVMM_EXIT_MWAIT;
+			vmx_exit_insn(exit, NVMM_VCPU_EXIT_MWAIT);
 			break;
 		case VMCS_EXITCODE_XSETBV:
 			vmx_exit_xsetbv(mach, vcpu, exit);
@@ -1995,18 +2013,18 @@ vmx_vcpu_run(struct nvmm_machine *mach, 
 		case VMCS_EXITCODE_VMXOFF:
 		case VMCS_EXITCODE_VMXON:
 			vmx_inject_ud(vcpu);
-			exit->reason = NVMM_EXIT_NONE;
+			exit->reason = NVMM_VCPU_EXIT_NONE;
 			break;
 		case VMCS_EXITCODE_EPT_VIOLATION:
 			vmx_exit_epf(mach, vcpu, exit);
 			break;
 		case VMCS_EXITCODE_INT_WINDOW:
 			vmx_event_waitexit_disable(vcpu, false);
-			exit->reason = NVMM_EXIT_INT_READY;
+			exit->reason = NVMM_VCPU_EXIT_INT_READY;
 			break;
 		case VMCS_EXITCODE_NMI_WINDOW:
 			vmx_event_waitexit_disable(vcpu, true);
-			exit->reason = NVMM_EXIT_NMI_READY;
+			exit->reason = NVMM_VCPU_EXIT_NMI_READY;
 			break;
 		default:
 			vmx_exit_invalid(exit, exitcode);
@@ -2023,7 +2041,7 @@ vmx_vcpu_run(struct nvmm_machine *mach, 
 		if (curlwp->l_flag & LW_USERRET) {
 			break;
 		}
-		if (exit->reason != NVMM_EXIT_NONE) {
+		if (exit->reason != NVMM_VCPU_EXIT_NONE) {
 			break;
 		}
 	}
@@ -2732,6 +2750,67 @@ vmx_vcpu_destroy(struct nvmm_machine *ma
 	    roundup(sizeof(*cpudata), PAGE_SIZE), UVM_KMF_WIRED);
 }
 
+static int
+vmx_vcpu_configure(struct nvmm_cpu *vcpu, uint64_t op, void *data)
+{
+	struct vmx_cpudata *cpudata = vcpu->cpudata;
+	struct nvmm_vcpu_conf_cpuid *cpuid;
+	size_t i;
+
+	if (__predict_false(op != NVMM_VCPU_CONF_MD(NVMM_VCPU_CONF_CPUID))) {
+		return EINVAL;
+	}
+	cpuid = data;
+
+	if (__predict_false(cpuid->mask && cpuid->exit)) {
+		return EINVAL;
+	}
+	if (__predict_false(cpuid->mask &&
+	    ((cpuid->u.mask.set.eax & cpuid->u.mask.del.eax) ||
+	     (cpuid->u.mask.set.ebx & cpuid->u.mask.del.ebx) ||
+	     (cpuid->u.mask.set.ecx & cpuid->u.mask.del.ecx) ||
+	     (cpuid->u.mask.set.edx & cpuid->u.mask.del.edx)))) {
+		return EINVAL;
+	}
+
+	/* If unset, delete, to restore the default behavior. */
+	if (!cpuid->mask && !cpuid->exit) {
+		for (i = 0; i < VMX_NCPUIDS; i++) {
+			if (!cpudata->cpuidpresent[i]) {
+				continue;
+			}
+			if (cpudata->cpuid[i].leaf == cpuid->leaf) {
+				cpudata->cpuidpresent[i] = false;
+			}
+		}
+		return 0;
+	}
+
+	/* If already here, replace. */
+	for (i = 0; i < VMX_NCPUIDS; i++) {
+		if (!cpudata->cpuidpresent[i]) {
+			continue;
+		}
+		if (cpudata->cpuid[i].leaf == cpuid->leaf) {
+			memcpy(&cpudata->cpuid[i], cpuid,
+			    sizeof(struct nvmm_vcpu_conf_cpuid));
+			return 0;
+		}
+	}
+
+	/* Not here, insert. */
+	for (i = 0; i < VMX_NCPUIDS; i++) {
+		if (!cpudata->cpuidpresent[i]) {
+			cpudata->cpuidpresent[i] = true;
+			memcpy(&cpudata->cpuid[i], cpuid,
+			    sizeof(struct nvmm_vcpu_conf_cpuid));
+			return 0;
+		}
+	}
+
+	return ENOBUFS;
+}
+
 /* -------------------------------------------------------------------------- */
 
 static void
@@ -2777,44 +2856,7 @@ vmx_machine_destroy(struct nvmm_machine 
 static int
 vmx_machine_configure(struct nvmm_machine *mach, uint64_t op, void *data)
 {
-	struct nvmm_mach_conf_x86_cpuid *cpuid = data;
-	struct vmx_machdata *machdata = (struct vmx_machdata *)mach->machdata;
-	size_t i;
-
-	if (__predict_false(op != NVMM_MACH_CONF_MD(NVMM_MACH_CONF_X86_CPUID))) {
-		return EINVAL;
-	}
-
-	if (__predict_false((cpuid->set.eax & cpuid->del.eax) ||
-	    (cpuid->set.ebx & cpuid->del.ebx) ||
-	    (cpuid->set.ecx & cpuid->del.ecx) ||
-	    (cpuid->set.edx & cpuid->del.edx))) {
-		return EINVAL;
-	}
-
-	/* If already here, replace. */
-	for (i = 0; i < VMX_NCPUIDS; i++) {
-		if (!machdata->cpuidpresent[i]) {
-			continue;
-		}
-		if (machdata->cpuid[i].leaf == cpuid->leaf) {
-			memcpy(&machdata->cpuid[i], cpuid,
-			    sizeof(struct nvmm_mach_conf_x86_cpuid));
-			return 0;
-		}
-	}
-
-	/* Not here, insert. */
-	for (i = 0; i < VMX_NCPUIDS; i++) {
-		if (!machdata->cpuidpresent[i]) {
-			machdata->cpuidpresent[i] = true;
-			memcpy(&machdata->cpuid[i], cpuid,
-			    sizeof(struct nvmm_mach_conf_x86_cpuid));
-			return 0;
-		}
-	}
-
-	return ENOBUFS;
+	panic("%s: impossible", __func__);
 }
 
 /* -------------------------------------------------------------------------- */
@@ -3138,14 +3180,17 @@ const struct nvmm_impl nvmm_x86_vmx = {
 	.init = vmx_init,
 	.fini = vmx_fini,
 	.capability = vmx_capability,
-	.conf_max = NVMM_X86_NCONF,
-	.conf_sizes = vmx_conf_sizes,
+	.mach_conf_max = NVMM_X86_MACH_NCONF,
+	.mach_conf_sizes = NULL,
+	.vcpu_conf_max = NVMM_X86_VCPU_NCONF,
+	.vcpu_conf_sizes = vmx_vcpu_conf_sizes,
 	.state_size = sizeof(struct nvmm_x64_state),
 	.machine_create = vmx_machine_create,
 	.machine_destroy = vmx_machine_destroy,
 	.machine_configure = vmx_machine_configure,
 	.vcpu_create = vmx_vcpu_create,
 	.vcpu_destroy = vmx_vcpu_destroy,
+	.vcpu_configure = vmx_vcpu_configure,
 	.vcpu_setstate = vmx_vcpu_setstate,
 	.vcpu_getstate = vmx_vcpu_getstate,
 	.vcpu_inject = vmx_vcpu_inject,

Index: src/tests/lib/libnvmm/h_io_assist.c
diff -u src/tests/lib/libnvmm/h_io_assist.c:1.8 src/tests/lib/libnvmm/h_io_assist.c:1.9
--- src/tests/lib/libnvmm/h_io_assist.c:1.8	Sat Jun  8 07:27:44 2019
+++ src/tests/lib/libnvmm/h_io_assist.c	Wed Oct 23 07:01:12 2019
@@ -1,7 +1,7 @@
-/*	$NetBSD: h_io_assist.c,v 1.8 2019/06/08 07:27:44 maxv Exp $	*/
+/*	$NetBSD: h_io_assist.c,v 1.9 2019/10/23 07:01:12 maxv Exp $	*/
 
 /*
- * Copyright (c) 2018 The NetBSD Foundation, Inc.
+ * Copyright (c) 2018-2019 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
@@ -242,25 +242,25 @@ handle_io(struct nvmm_machine *mach, str
 static void
 run_machine(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
 {
-	struct nvmm_exit *exit = vcpu->exit;
+	struct nvmm_vcpu_exit *exit = vcpu->exit;
 
 	while (1) {
 		if (nvmm_vcpu_run(mach, vcpu) == -1)
 			err(errno, "nvmm_vcpu_run");
 
 		switch (exit->reason) {
-		case NVMM_EXIT_NONE:
+		case NVMM_VCPU_EXIT_NONE:
 			break;
 
-		case NVMM_EXIT_MSR:
+		case NVMM_VCPU_EXIT_RDMSR:
 			/* Stop here. */
 			return;
 
-		case NVMM_EXIT_IO:
+		case NVMM_VCPU_EXIT_IO:
 			handle_io(mach, vcpu);
 			break;
 
-		case NVMM_EXIT_SHUTDOWN:
+		case NVMM_VCPU_EXIT_SHUTDOWN:
 			printf("Shutting down!\n");
 			return;
 

Index: src/tests/lib/libnvmm/h_mem_assist.c
diff -u src/tests/lib/libnvmm/h_mem_assist.c:1.14 src/tests/lib/libnvmm/h_mem_assist.c:1.15
--- src/tests/lib/libnvmm/h_mem_assist.c:1.14	Mon Oct 14 10:39:24 2019
+++ src/tests/lib/libnvmm/h_mem_assist.c	Wed Oct 23 07:01:12 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: h_mem_assist.c,v 1.14 2019/10/14 10:39:24 maxv Exp $	*/
+/*	$NetBSD: h_mem_assist.c,v 1.15 2019/10/23 07:01:12 maxv Exp $	*/
 
 /*
  * Copyright (c) 2018-2019 The NetBSD Foundation, Inc.
@@ -90,25 +90,25 @@ handle_memory(struct nvmm_machine *mach,
 static void
 run_machine(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
 {
-	struct nvmm_exit *exit = vcpu->exit;
+	struct nvmm_vcpu_exit *exit = vcpu->exit;
 
 	while (1) {
 		if (nvmm_vcpu_run(mach, vcpu) == -1)
 			err(errno, "nvmm_vcpu_run");
 
 		switch (exit->reason) {
-		case NVMM_EXIT_NONE:
+		case NVMM_VCPU_EXIT_NONE:
 			break;
 
-		case NVMM_EXIT_MSR:
+		case NVMM_VCPU_EXIT_RDMSR:
 			/* Stop here. */
 			return;
 
-		case NVMM_EXIT_MEMORY:
+		case NVMM_VCPU_EXIT_MEMORY:
 			handle_memory(mach, vcpu);
 			break;
 
-		case NVMM_EXIT_SHUTDOWN:
+		case NVMM_VCPU_EXIT_SHUTDOWN:
 			printf("Shutting down!\n");
 			return;
 

Reply via email to