Register a vmstate post_load handler to call IOMMU_IOAS_CHANGE_PROCESS and
update the virtual addresses of all DMA mappings after CPR.

Signed-off-by: Steve Sistare <steven.sistare@oracle.com>
---
 backends/iommufd.c | 81 +++++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 80 insertions(+), 1 deletion(-)

diff --git a/backends/iommufd.c b/backends/iommufd.c
index 86fd9db..2e72b6f 100644
--- a/backends/iommufd.c
+++ b/backends/iommufd.c
@@ -17,7 +17,9 @@
 #include "qom/object_interfaces.h"
 #include "qemu/error-report.h"
 #include "migration/cpr.h"
+#include "migration/vmstate.h"
 #include "monitor/monitor.h"
+#include "exec/ramblock.h"
 #include "trace.h"
 #include <sys/ioctl.h>
 #include <linux/iommufd.h>
@@ -81,6 +83,83 @@ bool iommufd_change_process_capable(IOMMUFDBackend *be)
     return (errno != ENOTTY);
 }
 
+static int iommufd_change_process(IOMMUFDBackend *be,
+                                 struct iommu_ioas_change_process *args)
+{
+    int ret, fd = be->fd;
+
+    ret = ioctl(fd, IOMMU_IOAS_CHANGE_PROCESS, args);
+    if (ret) {
+        ret = -errno;
+        error_report("IOMMU_IOAS_CHANGE_PROCESS failed: %m");
+    }
+    return ret;
+}
+
+static int count_umap(RAMBlock *rb, void *opaque)
+{
+    if (qemu_ram_is_migratable(rb)) {
+        (*(int *)opaque)++;
+    }
+    return 0;
+}
+
+static int fill_umap(RAMBlock *rb, void *opaque)
+{
+    if (qemu_ram_is_migratable(rb)) {
+        struct iommu_ioas_change_process *args = opaque;
+        struct iommu_ioas_userspace_map *umap = (void *)args->umap;
+        int i = args->n_umap++;
+
+        assert(rb->host_old && rb->host);
+        umap[i].addr_old = (__u64)rb->host_old;
+        umap[i].addr_new = (__u64)rb->host;
+        umap[i].size = rb->max_length;
+    }
+    return 0;
+}
+
+static int cmp_umap(const void *elem1, const void *elem2)
+{
+    const struct iommu_ioas_userspace_map *e1 = elem1;
+    const struct iommu_ioas_userspace_map *e2 = elem2;
+
+    return (e1->addr_old < e2->addr_old) ? -1 :
+           (e1->addr_old > e2->addr_old);
+}
+
+static int iommufd_cpr_post_load(void *opaque, int version_id)
+{
+    IOMMUFDBackend *be = opaque;
+    struct iommu_ioas_change_process args = {
+        .size = sizeof(args),
+        .flags = 0,
+        .n_umap = 0,
+        .umap = 0,
+    };
+    int n = 0;
+    g_autofree struct iommu_ioas_userspace_map *umap = NULL;
+
+    RCU_READ_LOCK_GUARD();
+    qemu_ram_foreach_block(count_umap, &n);
+    umap = g_malloc_n(n, sizeof(*umap));
+    args.umap = (__u64)umap;
+    qemu_ram_foreach_block(fill_umap, &args);
+    qsort(umap, args.n_umap, sizeof(*umap), cmp_umap);
+    return iommufd_change_process(be, &args);
+}
+
/*
 * CPR state for an iommufd backend.  No fields are transferred; this
 * descriptor exists solely so that iommufd_cpr_post_load runs on the
 * target to rebind the preserved iommufd to the new process.  Included
 * only when CPR with fd reuse is active (see cpr_needed_for_reuse).
 */
static const VMStateDescription iommufd_cpr_vmstate = {
    .name = "iommufd",
    .version_id = 0,
    .minimum_version_id = 0,
    .post_load = iommufd_cpr_post_load,
    .needed = cpr_needed_for_reuse,
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    }
};
+
 bool iommufd_backend_connect(IOMMUFDBackend *be, const char *name, Error 
**errp)
 {
     int fd;
@@ -100,7 +179,7 @@ bool iommufd_backend_connect(IOMMUFDBackend *be, const char 
*name, Error **errp)
         be->fd = fd;
     }
     be->users++;
-
+    vmstate_register(NULL, -1, &iommufd_cpr_vmstate, be);
     trace_iommufd_backend_connect(be->fd, be->owned, be->users);
     return true;
 }
-- 
1.8.3.1


Reply via email to