To avoid pinning pages while they are mapped in IOMMU page tables, we
add an MMU notifier that is notified of the virtual addresses which are
no longer valid and unmaps the corresponding ranges from the IOMMU.

Signed-off-by: Shenming Lu <lushenm...@huawei.com>
---
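Note: the mmu_notifier registration itself is not shown in this hunk. A
minimal sketch of how the notifier would presumably be wired up (the
actual call site in the series may differ):

        iommu->mn.ops = &vfio_iommu_type1_mn_ops;
        /* track the mm of the task that set up the DMA mapping */
        ret = mmu_notifier_register(&iommu->mn, current->mm);
        if (ret)
                return ret;
        /* paired with mmu_notifier_unregister() on release */
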
 drivers/vfio/vfio_iommu_type1.c | 68 +++++++++++++++++++++++++++++++++
 1 file changed, 68 insertions(+)

diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 4bb162c1d649..03ccc11057af 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -40,6 +40,7 @@
 #include <linux/notifier.h>
 #include <linux/dma-iommu.h>
 #include <linux/irqdomain.h>
+#include <linux/mmu_notifier.h>
 
 #define DRIVER_VERSION  "0.2"
 #define DRIVER_AUTHOR   "Alex Williamson <alex.william...@redhat.com>"
@@ -69,6 +70,7 @@ struct vfio_iommu {
        struct mutex            lock;
        struct rb_root          dma_list;
        struct blocking_notifier_head notifier;
+       struct mmu_notifier     mn;
        unsigned int            dma_avail;
        unsigned int            vaddr_invalid_count;
        uint64_t                pgsize_bitmap;
@@ -101,6 +103,7 @@ struct vfio_dma {
        struct task_struct      *task;
        struct rb_root          pfn_list;       /* Ex-user pinned pfn list */
        unsigned long           *bitmap;
+       unsigned long           *iopf_mapped_bitmap; /* IOPF mapped pages */
 };
 
 struct vfio_batch {
@@ -157,6 +160,10 @@ struct vfio_regions {
 #define DIRTY_BITMAP_PAGES_MAX  ((u64)INT_MAX)
 #define DIRTY_BITMAP_SIZE_MAX   DIRTY_BITMAP_BYTES(DIRTY_BITMAP_PAGES_MAX)
 
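+/* Test whether page (i) of @dma's IOVA range is currently IOPF-mapped */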
+#define IOPF_MAPPED_BITMAP_GET(dma, i) \
+                             ((dma->iopf_mapped_bitmap[(i) / BITS_PER_LONG] \
+                              >> ((i) % BITS_PER_LONG)) & 0x1)
+
 #define WAITED 1
 
 static int put_pfn(unsigned long pfn, int prot);
@@ -1149,6 +1156,35 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
        return unlocked;
 }
 
+static void vfio_unmap_partial_iopf(struct vfio_iommu *iommu,
+                                   struct vfio_dma *dma,
+                                   dma_addr_t start, dma_addr_t end)
+{
+       unsigned long bit_offset;
+       size_t len;
+       struct vfio_domain *d;
+
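+       /*
+        * Walk [start, end) in runs of contiguously IOPF-mapped pages:
+        * unmap each run from every domain and clear it in the bitmap.
+        */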
+       while (start < end) {
+               bit_offset = (start - dma->iova) >> PAGE_SHIFT;
+
+               for (len = 0; start + len < end; len += PAGE_SIZE) {
+                       if (!IOPF_MAPPED_BITMAP_GET(dma,
+                                       bit_offset + (len >> PAGE_SHIFT)))
+                               break;
+               }
+
+               if (len) {
+                       list_for_each_entry(d, &iommu->domain_list, next)
+                               iommu_unmap(d->domain, start, len);
+
+                       bitmap_clear(dma->iopf_mapped_bitmap,
+                                    bit_offset, len >> PAGE_SHIFT);
+               }
+
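+               /* Skip past the run plus the unmapped page that ended it */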
+               start += (len + PAGE_SIZE);
+       }
+}
+
 static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
 {
        WARN_ON(!RB_EMPTY_ROOT(&dma->pfn_list));
@@ -3096,6 +3132,38 @@ static int vfio_iommu_type1_dirty_pages(struct vfio_iommu *iommu,
        return -EINVAL;
 }
 
+static void mn_invalidate_range(struct mmu_notifier *mn, struct mm_struct *mm,
+                               unsigned long start, unsigned long end)
+{
+       struct vfio_iommu *iommu = container_of(mn, struct vfio_iommu, mn);
+       struct rb_node *n;
+
+       mutex_lock(&iommu->lock);
+
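+       /* Unmap whatever part of each DMA range intersects [start, end) */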
+       for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
+               struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
+               unsigned long start_n, end_n;
+
+               if (end <= dma->vaddr || start >= dma->vaddr + dma->size)
+                       continue;
+
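+               /*
+                * Clamp the invalidated range to this vfio_dma, round
+                * out to page boundaries, and translate vaddrs to IOVAs.
+                */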
+               start_n = ALIGN_DOWN(max_t(unsigned long, start, dma->vaddr),
+                                    PAGE_SIZE);
+               end_n = ALIGN(min_t(unsigned long, end, dma->vaddr + dma->size),
+                             PAGE_SIZE);
+
+               vfio_unmap_partial_iopf(iommu, dma,
+                                       start_n - dma->vaddr + dma->iova,
+                                       end_n - dma->vaddr + dma->iova);
+       }
+
+       mutex_unlock(&iommu->lock);
+}
+
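+/*
+ * Called when a range of the monitored mm is invalidated, so that stale
+ * IOPF mappings can be torn down instead of keeping the pages pinned.
+ */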
+static const struct mmu_notifier_ops vfio_iommu_type1_mn_ops = {
+       .invalidate_range       = mn_invalidate_range,
+};
+
 static long vfio_iommu_type1_ioctl(void *iommu_data,
                                   unsigned int cmd, unsigned long arg)
 {
-- 
2.19.1
