Hello tech@,

Here is the patch discussed in the previous email. This part covers
the exit handlers and the changes to the header file.
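
The handlers are meant to be reached from the nested page fault / EPT
violation exit paths once the faulting GPA has been classified as MMIO;
those dispatch hunks are in the other part of the patch. Roughly, the
call site could look like the sketch below (not part of this diff; it
assumes the existing vmm_get_guest_memtype() helper and a fault address
already extracted into "gpa"):

    /* Sketch only: route MMIO faults to the new handler. */
    switch (vmm_get_guest_memtype(vcpu->vc_parent, gpa)) {
    case VMM_MEM_TYPE_MMIO:
        ret = vmx_handle_mmio(vcpu, gpa);
        break;
    case VMM_MEM_TYPE_REGULAR:
        ret = vmx_handle_np_fault(vcpu);
        break;
    default:
        ret = EINVAL;
        break;
    }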

@@ -5480,6 +5830,32 @@ svm_handle_inout(struct vcpu *vcpu)
 }

 /*
+ * svm_handle_mmio
+ *
+ * Exit handler for memory accesses to PCI MMIO region.
+ */
+int
+svm_handle_mmio(struct vcpu *vcpu, paddr_t iomem)
+{
+    struct vmcb *vmcb = (struct vmcb *)vcpu->vc_control_va;
+    struct vmm_emul_instruction info;
+    vaddr_t insn_hva;
+    int ret;
+
+    /* With decode assist, the VMCB carries the faulting insn bytes. */
+    insn_hva = (vaddr_t)vmcb->v_guest_ins_bytes;
+
+    ret = emul_mmio(vcpu, iomem, insn_hva, &info);
+    if (ret) {
+        DPRINTF("%s: failed to handle MMIO @ 0x%lx\n", __func__, iomem);
+        return (ret);
+    }
+
+    vcpu->vc_gueststate.vg_rip += info.len;
+    return (ret);
+}
+
+/*
  * vmx_handle_inout
  *
  * Exit handler for IN/OUT instructions.
@@ -5564,6 +5940,75 @@ vmx_handle_inout(struct vcpu *vcpu)
     }

     return (ret);
+}
+
+/*
+ * vmx_handle_mmio
+ *
+ * Exit handler for memory accesses to PCI MMIO region.
+ */
+int
+vmx_handle_mmio(struct vcpu *vcpu, paddr_t iomem)
+{
+    struct vmm_emul_instruction info;
+    uint64_t insn_length, insn_gpa;
+    vaddr_t insn_hva, insn_gva;
+    paddr_t insn_hpa;
+    int translation_failure, ret;
+
+    if (vmread(VMCS_INSTRUCTION_LENGTH, &insn_length)) {
+        DPRINTF("%s: can't obtain instruction length\n", __func__);
+        return (EINVAL);
+    }
+
+    /* Find the insn bytes the guest rip points to. First, GVA -> GPA. */
+    insn_gva = vcpu->vc_gueststate.vg_rip;
+
+    translation_failure = vmm_translate_gva(vcpu,
+            insn_gva, &insn_gpa, PROT_READ);
+    if (translation_failure) {
+        DPRINTF("%s: failted to translate guest rip"
+                "%lx, reason: %d\n",
+                __func__, insn_gva, translation_failure);
+        return (EINVAL);
+    }
+
+    /* Next, GPA -> HPA. */
+    if (!pmap_extract(vcpu->vc_parent->vm_map->pmap, insn_gpa, &insn_hpa)) {
+        DPRINTF("%s: failted to extract gpa %llx\n",
+                __func__, insn_gpa);
+        return (EINVAL);
+    }
+
+    /* Finally, map it into the kernel: HPA -> HVA. */
+    insn_hva = PMAP_DIRECT_MAP(insn_hpa);
+
+#ifdef VMM_DEBUG
+    DPRINTF("%s: GVA 0x%lx -> HPA 0x%llx -> HVA 0x%llx\n",
+        __func__, insn_gva, (uint64_t)insn_hpa, (uint64_t)insn_hva);
+
+    DPRINTF("mmio: inst length: %lld, insn:\n", insn_length);
+
+    for (int i = -4; i < 4; i++) {
+        DPRINTF("\t%4d: ", i * 16);
+        for (int j = 0; j < 16; j++)
+            DPRINTF(" %02x", *((uint8_t *)insn_hva + i * 16 + j));
+        DPRINTF("\n");
+    }
+#endif /* VMM_DEBUG */
+
+    ret = emul_mmio(vcpu, iomem, insn_hva, &info);
+    if (ret) {
+        DPRINTF("%s: failted to handle MMIO@%lx\n", __func__, iomem);
+        return (ret);
+    }
+
+    /*
+     * emul_mmio() left the vcpu as if the insn had executed; we advance rip.
+     */
+    vcpu->vc_gueststate.vg_rip += insn_length;
+
+    return (EAGAIN);
 }

 /*
Index: sys/arch/amd64/include/vmmvar.h
===================================================================
RCS file: /cvs/src/sys/arch/amd64/include/vmmvar.h,v
retrieving revision 1.67
diff -u -p -r1.67 vmmvar.h
--- sys/arch/amd64/include/vmmvar.h    17 Jul 2019 05:51:07 -0000    1.67
+++ sys/arch/amd64/include/vmmvar.h    23 Oct 2019 23:04:30 -0000
@@ -683,6 +683,7 @@ enum {

 enum {
     VMM_MEM_TYPE_REGULAR,
+    VMM_MEM_TYPE_MMIO,
     VMM_MEM_TYPE_UNKNOWN
 };

@@ -958,6 +959,12 @@ int    svm_enter_guest(uint64_t, struct vcp
     struct region_descriptor *);
 void    start_vmm_on_cpu(struct cpu_info *);
 void    stop_vmm_on_cpu(struct cpu_info *);
+
+/*
+ * MMIO-capable virtual devices; only small, common ones belong in the kernel.
+ */
+#define VMM_PCI_MMIO_TPM_BASE 0xFED40000ULL
+#define VMM_PCI_MMIO_TPM_CAP  0x14

 #endif /* _KERNEL */

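
A note on the new defines: VMM_PCI_MMIO_TPM_BASE and VMM_PCI_MMIO_TPM_CAP
describe the fixed TPM window. Assuming VMM_PCI_MMIO_TPM_CAP is the length
of the region in bytes, the guest memtype lookup could recognize the window
with a simple range check along these lines (sketch only, not part of the
diff):

    /* Sketch: classify GPAs inside the TPM window as MMIO. */
    if (gpa >= VMM_PCI_MMIO_TPM_BASE &&
        gpa < VMM_PCI_MMIO_TPM_BASE + VMM_PCI_MMIO_TPM_CAP)
        return (VMM_MEM_TYPE_MMIO);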