When attaching a group to the container, handle the group's
reserved regions, in particular the IOMMU_RESV_MSI region, which
requires an IOVA allocator to be initialized through the
iommu_get_msi_cookie() API. This allows MSI IOVAs to be allocated
transparently when the MSI controller's compose() callback runs.
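
For context (not part of the diff below), here is a simplified,
hypothetical sketch of how the cookie set up at attach time is
consumed on the MSI controller side in kernels of this vintage.
The callback name and the DOORBELL_PA constant are illustrative
only; the sketch is loosely modeled on how ITS-style irqchip
drivers of that era call iommu_dma_map_msi_msg(), which rewrites
the doorbell address with an MSI IOVA allocated from the cookie:

    #include <linux/kernel.h>
    #include <linux/irq.h>
    #include <linux/msi.h>
    #include <linux/dma-iommu.h>

    /* Hypothetical doorbell physical address, for illustration only. */
    #define DOORBELL_PA	0x08020040UL

    /*
     * Simplified sketch of an MSI controller's compose() callback.
     * iommu_dma_map_msi_msg() transparently replaces the physical
     * doorbell address in msg with an MSI IOVA allocated from the
     * cookie initialized by iommu_get_msi_cookie() at attach time.
     */
    static void example_irq_compose_msi_msg(struct irq_data *d,
                                            struct msi_msg *msg)
    {
            msg->address_lo = lower_32_bits(DOORBELL_PA);
            msg->address_hi = upper_32_bits(DOORBELL_PA);
            msg->data = d->hwirq;

            /* no-op if the domain has no MSI cookie */
            iommu_dma_map_msi_msg(d->irq, msg);
    }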

Signed-off-by: Eric Auger <[email protected]>
---
 drivers/vfio/vfio_iommu_type1.c | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+)

diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 2ba1942..701d8a8 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -36,6 +36,7 @@
 #include <linux/uaccess.h>
 #include <linux/vfio.h>
 #include <linux/workqueue.h>
+#include <linux/dma-iommu.h>
 
 #define DRIVER_VERSION  "0.2"
 #define DRIVER_AUTHOR   "Alex Williamson <[email protected]>"
@@ -734,6 +735,27 @@ static void vfio_test_domain_fgsp(struct vfio_domain *domain)
        __free_pages(pages, order);
 }
 
+static int vfio_iommu_handle_resv_regions(struct iommu_domain *domain,
+                                         struct iommu_group *group)
+{
+       struct list_head group_resv_regions;
+       struct iommu_resv_region *region, *next;
+       int ret = 0;
+
+       INIT_LIST_HEAD(&group_resv_regions);
+       iommu_get_group_resv_regions(group, &group_resv_regions);
+       list_for_each_entry(region, &group_resv_regions, list) {
+               if (region->prot & IOMMU_RESV_MSI) {
+                       ret = iommu_get_msi_cookie(domain, region->start);
+                       if (ret)
+                               break;
+               }
+       }
+       list_for_each_entry_safe(region, next, &group_resv_regions, list)
+               kfree(region);
+       return ret;
+}
+
 static int vfio_iommu_type1_attach_group(void *iommu_data,
                                         struct iommu_group *iommu_group)
 {
@@ -834,6 +856,10 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
        if (ret)
                goto out_detach;
 
+       ret = vfio_iommu_handle_resv_regions(domain->domain, iommu_group);
+       if (ret)
+               goto out_detach;
+
        list_add(&domain->next, &iommu->domain_list);
 
        mutex_unlock(&iommu->lock);
-- 
1.9.1
