From: jiangkunkun <jiangkun...@huawei.com>

Previously, if the vfio_iommu is not of pinned_page_dirty_scope and the
vfio_dma is iommu_mapped, we populate the full dirty bitmap for this
vfio_dma. Now we first try to get the dirty log from the IOMMU before
falling back to that pessimistic decision.
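
Roughly, the new ordering in update_user_bitmap() is the one sketched
below. This is only a simplified, standalone illustration of the decision
flow, not the code in the diff; the function and enum names are made up
here, and only mirror the iommu/dma fields used in the patch:

  #include <stdbool.h>

  /* Illustration only: which source feeds dma->bitmap for one vfio_dma. */
  enum dirty_source { DIRTY_FROM_PINNING, DIRTY_FROM_IOMMU_HWDBM, DIRTY_ALL_SET };

  static enum dirty_source pick_dirty_source(bool pinned_page_dirty_scope,
                                             bool iommu_mapped,
                                             unsigned int num_non_hwdbm_groups)
  {
          /* Pinned-page scope only (or not IOMMU mapped): the bitmap
           * already holds the dirty bits recorded at pin time.
           */
          if (pinned_page_dirty_scope || !iommu_mapped)
                  return DIRTY_FROM_PINNING;

          /* Every group supports HWDBM: pull the dirty log from the IOMMU. */
          if (num_non_hwdbm_groups == 0)
                  return DIRTY_FROM_IOMMU_HWDBM;

          /* Otherwise fall back to marking the whole range dirty. */
          return DIRTY_ALL_SET;
  }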

Co-developed-by: Keqian Zhu <zhukeqi...@huawei.com>
Signed-off-by: Kunkun Jiang <jiangkun...@huawei.com>
---
 drivers/vfio/vfio_iommu_type1.c | 97 ++++++++++++++++++++++++++++++++-
 1 file changed, 94 insertions(+), 3 deletions(-)

diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 3b8522ebf955..1cd10f3e7ed4 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -999,6 +999,25 @@ static bool vfio_group_supports_hwdbm(struct vfio_group *group)
        return true;
 }
 
+static int vfio_iommu_dirty_log_clear(struct vfio_iommu *iommu,
+                                     dma_addr_t start_iova, size_t size,
+                                     unsigned long *bitmap_buffer,
+                                     dma_addr_t base_iova, size_t pgsize)
+{
+       struct vfio_domain *d;
+       unsigned long pgshift = __ffs(pgsize);
+       int ret;
+
+       list_for_each_entry(d, &iommu->domain_list, next) {
+               ret = iommu_clear_dirty_log(d->domain, start_iova, size,
+                                           bitmap_buffer, base_iova, pgshift);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
 static int update_user_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,
                              struct vfio_dma *dma, dma_addr_t base_iova,
                              size_t pgsize)
@@ -1010,13 +1029,28 @@ static int update_user_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,
        unsigned long shift = bit_offset % BITS_PER_LONG;
        unsigned long leftover;
 
+       if (iommu->pinned_page_dirty_scope || !dma->iommu_mapped)
+               goto bitmap_done;
+
+       /* try to get dirty log from IOMMU */
+       if (!iommu->num_non_hwdbm_groups) {
+               struct vfio_domain *d;
+
+               list_for_each_entry(d, &iommu->domain_list, next) {
+                       if (iommu_sync_dirty_log(d->domain, dma->iova, dma->size,
+                                                dma->bitmap, dma->iova, pgshift))
+                               return -EFAULT;
+               }
+               goto bitmap_done;
+       }
+
        /*
         * mark all pages dirty if any IOMMU capable device is not able
         * to report dirty pages and all pages are pinned and mapped.
         */
-       if (!iommu->pinned_page_dirty_scope && dma->iommu_mapped)
-               bitmap_set(dma->bitmap, 0, nbits);
+       bitmap_set(dma->bitmap, 0, nbits);
 
+bitmap_done:
        if (shift) {
                bitmap_shift_left(dma->bitmap, dma->bitmap, shift,
                                  nbits + shift);
@@ -1078,6 +1112,18 @@ static int vfio_iova_dirty_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,
                 */
                bitmap_clear(dma->bitmap, 0, dma->size >> pgshift);
                vfio_dma_populate_bitmap(dma, pgsize);
+
+               /* Clear iommu dirty log to re-enable dirty log tracking */
+               if (!iommu->pinned_page_dirty_scope &&
+                   dma->iommu_mapped && !iommu->num_non_hwdbm_groups) {
+                       ret = vfio_iommu_dirty_log_clear(iommu, dma->iova,
+                                       dma->size, dma->bitmap, dma->iova,
+                                       pgsize);
+                       if (ret) {
+                               pr_warn("dma dirty log clear failed!\n");
+                               return ret;
+                       }
+               }
        }
        return 0;
 }
@@ -2780,6 +2826,48 @@ static int vfio_iommu_type1_unmap_dma(struct vfio_iommu *iommu,
                        -EFAULT : 0;
 }
 
+static void vfio_dma_dirty_log_start(struct vfio_iommu *iommu,
+                                    struct vfio_dma *dma)
+{
+       struct vfio_domain *d;
+
+       list_for_each_entry(d, &iommu->domain_list, next) {
+               /* Go through all domains even if one fails */
+               iommu_split_block(d->domain, dma->iova, dma->size);
+       }
+}
+
+static void vfio_dma_dirty_log_stop(struct vfio_iommu *iommu,
+                                   struct vfio_dma *dma)
+{
+       struct vfio_domain *d;
+
+       list_for_each_entry(d, &iommu->domain_list, next) {
+               /* Go through all domains even if one fails */
+               iommu_merge_page(d->domain, dma->iova, dma->size,
+                                d->prot | dma->prot);
+       }
+}
+
+static void vfio_iommu_dirty_log_switch(struct vfio_iommu *iommu, bool start)
+{
+       struct rb_node *n;
+
+       /* Split and merge even if not all iommus support HWDBM now */
+       for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
+               struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
+
+               if (!dma->iommu_mapped)
+                       continue;
+
+               /* Go through all dma ranges even if one fails */
+               if (start)
+                       vfio_dma_dirty_log_start(iommu, dma);
+               else
+                       vfio_dma_dirty_log_stop(iommu, dma);
+       }
+}
+
 static int vfio_iommu_type1_dirty_pages(struct vfio_iommu *iommu,
                                        unsigned long arg)
 {
@@ -2812,8 +2900,10 @@ static int vfio_iommu_type1_dirty_pages(struct vfio_iommu *iommu,
                pgsize = 1 << __ffs(iommu->pgsize_bitmap);
                if (!iommu->dirty_page_tracking) {
                        ret = vfio_dma_bitmap_alloc_all(iommu, pgsize);
-                       if (!ret)
+                       if (!ret) {
                                iommu->dirty_page_tracking = true;
+                               vfio_iommu_dirty_log_switch(iommu, true);
+                       }
                }
                mutex_unlock(&iommu->lock);
                return ret;
@@ -2822,6 +2912,7 @@ static int vfio_iommu_type1_dirty_pages(struct vfio_iommu *iommu,
                if (iommu->dirty_page_tracking) {
                        iommu->dirty_page_tracking = false;
                        vfio_dma_bitmap_free_all(iommu);
+                       vfio_iommu_dirty_log_switch(iommu, false);
                }
                mutex_unlock(&iommu->lock);
                return 0;
-- 
2.19.1
