On 2020/9/30 7:26 PM, Eli Cohen wrote:
On Thu, Sep 24, 2020 at 11:21:06AM +0800, Jason Wang wrote:
To prepare for the ASID support for vhost-vdpa, try to pass IOTLB
object to dma helpers.
Maybe it's worth mentioning here that this patch does not change any
functionality and is presented as a preparation for passing different
iotlbs instead of using dev->iotlb.


Right, let me add them in the next version.

Thanks



Signed-off-by: Jason Wang <jasow...@redhat.com>
---
  drivers/vhost/vdpa.c | 40 ++++++++++++++++++++++------------------
  1 file changed, 22 insertions(+), 18 deletions(-)

diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 9c641274b9f3..74bef1c15a70 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -489,10 +489,11 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
        return r;
  }
-static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
+static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v,
+                                  struct vhost_iotlb *iotlb,
+                                  u64 start, u64 last)
  {
        struct vhost_dev *dev = &v->vdev;
-       struct vhost_iotlb *iotlb = dev->iotlb;
        struct vhost_iotlb_map *map;
        struct page *page;
        unsigned long pfn, pinned;
@@ -514,8 +515,9 @@ static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
  static void vhost_vdpa_iotlb_free(struct vhost_vdpa *v)
  {
        struct vhost_dev *dev = &v->vdev;
+       struct vhost_iotlb *iotlb = dev->iotlb;
-       vhost_vdpa_iotlb_unmap(v, 0ULL, 0ULL - 1);
+       vhost_vdpa_iotlb_unmap(v, iotlb, 0ULL, 0ULL - 1);
        kfree(dev->iotlb);
        dev->iotlb = NULL;
  }
@@ -542,15 +544,14 @@ static int perm_to_iommu_flags(u32 perm)
        return flags | IOMMU_CACHE;
  }
-static int vhost_vdpa_map(struct vhost_vdpa *v,
+static int vhost_vdpa_map(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
                          u64 iova, u64 size, u64 pa, u32 perm)
  {
-       struct vhost_dev *dev = &v->vdev;
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
        int r = 0;
-       r = vhost_iotlb_add_range(dev->iotlb, iova, iova + size - 1,
+       r = vhost_iotlb_add_range(iotlb, iova, iova + size - 1,
                                  pa, perm);
        if (r)
                return r;
@@ -559,7 +560,7 @@ static int vhost_vdpa_map(struct vhost_vdpa *v,
                r = ops->dma_map(vdpa, iova, size, pa, perm);
        } else if (ops->set_map) {
                if (!v->in_batch)
-                       r = ops->set_map(vdpa, dev->iotlb);
+                       r = ops->set_map(vdpa, iotlb);
        } else {
                r = iommu_map(v->domain, iova, pa, size,
                              perm_to_iommu_flags(perm));
@@ -568,29 +569,30 @@ static int vhost_vdpa_map(struct vhost_vdpa *v,
        return r;
  }
-static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
+static void vhost_vdpa_unmap(struct vhost_vdpa *v,
+                            struct vhost_iotlb *iotlb,
+                            u64 iova, u64 size)
  {
-       struct vhost_dev *dev = &v->vdev;
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
-       vhost_vdpa_iotlb_unmap(v, iova, iova + size - 1);
+       vhost_vdpa_iotlb_unmap(v, iotlb, iova, iova + size - 1);
        if (ops->dma_map) {
                ops->dma_unmap(vdpa, iova, size);
        } else if (ops->set_map) {
                if (!v->in_batch)
-                       ops->set_map(vdpa, dev->iotlb);
+                       ops->set_map(vdpa, iotlb);
        } else {
                iommu_unmap(v->domain, iova, size);
        }
  }
  static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
+                                          struct vhost_iotlb *iotlb,
                                           struct vhost_iotlb_msg *msg)
  {
        struct vhost_dev *dev = &v->vdev;
-       struct vhost_iotlb *iotlb = dev->iotlb;
        struct page **page_list;
        unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
        unsigned int gup_flags = FOLL_LONGTERM;
@@ -644,7 +646,7 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
                        if (last_pfn && (this_pfn != last_pfn + 1)) {
                                /* Pin a contiguous chunk of memory */
                                csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
-                               if (vhost_vdpa_map(v, iova, csize,
+                               if (vhost_vdpa_map(v, iotlb, iova, csize,
                                                   map_pfn << PAGE_SHIFT,
                                                   msg->perm))
                                        goto out;
@@ -660,11 +662,12 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
        }
        /* Pin the rest chunk */
-       ret = vhost_vdpa_map(v, iova, (last_pfn - map_pfn + 1) << PAGE_SHIFT,
+       ret = vhost_vdpa_map(v, iotlb, iova,
+                            (last_pfn - map_pfn + 1) << PAGE_SHIFT,
                             map_pfn << PAGE_SHIFT, msg->perm);
  out:
        if (ret) {
-               vhost_vdpa_unmap(v, msg->iova, msg->size);
+               vhost_vdpa_unmap(v, iotlb, msg->iova, msg->size);
                atomic64_sub(npages, &dev->mm->pinned_vm);
        }
        mmap_read_unlock(dev->mm);
@@ -678,6 +681,7 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
        struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
+       struct vhost_iotlb *iotlb = dev->iotlb;
        int r = 0;
        r = vhost_dev_check_owner(dev);
@@ -686,17 +690,17 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
        switch (msg->type) {
        case VHOST_IOTLB_UPDATE:
-               r = vhost_vdpa_process_iotlb_update(v, msg);
+               r = vhost_vdpa_process_iotlb_update(v, iotlb, msg);
                break;
        case VHOST_IOTLB_INVALIDATE:
-               vhost_vdpa_unmap(v, msg->iova, msg->size);
+               vhost_vdpa_unmap(v, iotlb, msg->iova, msg->size);
                break;
        case VHOST_IOTLB_BATCH_BEGIN:
                v->in_batch = true;
                break;
        case VHOST_IOTLB_BATCH_END:
                if (v->in_batch && ops->set_map)
-                       ops->set_map(vdpa, dev->iotlb);
+                       ops->set_map(vdpa, iotlb);
                v->in_batch = false;
                break;
        default:
--
2.20.1


Reply via email to