The dma-mapping IOVA alloc, link, and sync APIs (dma_iova_try_alloc(),
dma_iova_link(), dma_iova_sync()) perform significantly better than
per-page dma_map_page() / dma_unmap_page(), as they avoid costly
per-page IOMMU synchronizations. The difference is especially
noticeable when mapping a 2MB region in 4KB pages (512 separate
mappings).

Switch GPU SVM, which creates DMA mappings between the CPU and GPU,
over to the IOVA alloc, link, and sync APIs, falling back to
dma_map_page() when an IOVA allocation cannot be used.
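
For reference, a minimal sketch of the batched mapping pattern the new
path follows (illustrative only: map_system_pages() is a made-up
helper, it assumes order-0 system pages with a single DMA direction,
and it omits the dma_map_page() fallback and the unwind handling the
real code needs):

        #include <linux/dma-mapping.h>

        static int map_system_pages(struct device *dev, struct page **pages,
                                    unsigned long npages,
                                    struct dma_iova_state *state)
        {
                unsigned long i;
                int err;

                /* Reserve one contiguous IOVA range covering the whole region. */
                if (!dma_iova_try_alloc(dev, state, 0, npages * PAGE_SIZE))
                        return -EOPNOTSUPP;     /* caller falls back to dma_map_page() */

                /* Link each page at its offset; no per-page IOTLB flush here. */
                for (i = 0; i < npages; i++) {
                        err = dma_iova_link(dev, state, page_to_phys(pages[i]),
                                            i * PAGE_SIZE, PAGE_SIZE,
                                            DMA_BIDIRECTIONAL, 0);
                        if (err)
                                return err;
                }

                /* A single sync then makes the whole range usable by the device. */
                return dma_iova_sync(dev, state, 0, npages * PAGE_SIZE);
        }

Device addresses are then state->addr plus the per-page offset, and
dma_iova_destroy() tears the whole range down at unmap time.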

Signed-off-by: Matthew Brost <[email protected]>
---
 drivers/gpu/drm/drm_gpusvm.c | 63 +++++++++++++++++++++++++-----------
 include/drm/drm_gpusvm.h     |  3 ++
 2 files changed, 47 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c
index 585d913d3d19..eff764445108 100644
--- a/drivers/gpu/drm/drm_gpusvm.c
+++ b/drivers/gpu/drm/drm_gpusvm.c
@@ -1139,19 +1139,26 @@ static void __drm_gpusvm_unmap_pages(struct drm_gpusvm *gpusvm,
                struct drm_gpusvm_pages_flags flags = {
                        .__flags = svm_pages->flags.__flags,
                };
+               struct dma_iova_state __state = {};
 
-               for (i = 0, j = 0; i < npages; j++) {
-                       struct drm_pagemap_addr *addr = &svm_pages->dma_addr[j];
-
-                       if (addr->proto == DRM_INTERCONNECT_SYSTEM)
-                               dma_unmap_page(dev,
-                                              addr->addr,
-                                              PAGE_SIZE << addr->order,
-                                              addr->dir);
-                       else if (dpagemap && dpagemap->ops->device_unmap)
-                               dpagemap->ops->device_unmap(dpagemap,
-                                                           dev, *addr);
-                       i += 1 << addr->order;
+               if (dma_use_iova(&svm_pages->state)) {
+                       dma_iova_destroy(dev, &svm_pages->state,
+                                        npages * PAGE_SIZE,
+                                        svm_pages->dma_addr[0].dir, 0);
+               } else {
+                       for (i = 0, j = 0; i < npages; j++) {
+                               struct drm_pagemap_addr *addr = &svm_pages->dma_addr[j];
+
+                               if (addr->proto == DRM_INTERCONNECT_SYSTEM)
+                                       dma_unmap_page(dev,
+                                                      addr->addr,
+                                                      PAGE_SIZE << addr->order,
+                                                      addr->dir);
+                               else if (dpagemap && dpagemap->ops->device_unmap)
+                                       dpagemap->ops->device_unmap(dpagemap,
+                                                                   dev, *addr);
+                               i += 1 << addr->order;
+                       }
                }
 
                /* WRITE_ONCE pairs with READ_ONCE for opportunistic checks */
@@ -1161,6 +1168,7 @@ static void __drm_gpusvm_unmap_pages(struct drm_gpusvm *gpusvm,
 
                drm_pagemap_put(svm_pages->dpagemap);
                svm_pages->dpagemap = NULL;
+               svm_pages->state = __state;
        }
 }
 
@@ -1408,6 +1416,7 @@ int drm_gpusvm_get_pages(struct drm_gpusvm *gpusvm,
        struct drm_gpusvm_pages_flags flags;
        enum dma_data_direction dma_dir = ctx->read_only ? DMA_TO_DEVICE :
                                                           DMA_BIDIRECTIONAL;
+       struct dma_iova_state *state = &svm_pages->state;
 
 retry:
        if (time_after(jiffies, timeout))
@@ -1539,13 +1548,29 @@ int drm_gpusvm_get_pages(struct drm_gpusvm *gpusvm,
                                goto err_unmap;
                        }
 
-                       addr = dma_map_page(gpusvm->drm->dev,
-                                           page, 0,
-                                           PAGE_SIZE << order,
-                                           dma_dir);
-                       if (dma_mapping_error(gpusvm->drm->dev, addr)) {
-                               err = -EFAULT;
-                               goto err_unmap;
+                       if (!i)
+                               dma_iova_try_alloc(gpusvm->drm->dev, state, 0,
+                                                  npages * PAGE_SIZE);
+
+                       if (dma_use_iova(state)) {
+                               err = dma_iova_link(gpusvm->drm->dev, state,
+                                                   page_to_phys(page),
+                                                   i * PAGE_SIZE,
+                                                   PAGE_SIZE << order,
+                                                   dma_dir, 0);
+                               if (err)
+                                       goto err_unmap;
+
+                               addr = state->addr + i * PAGE_SIZE;
+                       } else {
+                               addr = dma_map_page(gpusvm->drm->dev,
+                                                   page, 0,
+                                                   PAGE_SIZE << order,
+                                                   dma_dir);
+                               if (dma_mapping_error(gpusvm->drm->dev, addr)) {
+                                       err = -EFAULT;
+                                       goto err_unmap;
+                               }
                        }
 
                        svm_pages->dma_addr[j] = drm_pagemap_addr_encode
diff --git a/include/drm/drm_gpusvm.h b/include/drm/drm_gpusvm.h
index 2578ac92a8d4..6772d8a92788 100644
--- a/include/drm/drm_gpusvm.h
+++ b/include/drm/drm_gpusvm.h
@@ -6,6 +6,7 @@
 #ifndef __DRM_GPUSVM_H__
 #define __DRM_GPUSVM_H__
 
+#include <linux/dma-mapping.h>
 #include <linux/kref.h>
 #include <linux/interval_tree.h>
 #include <linux/mmu_notifier.h>
@@ -136,6 +137,7 @@ struct drm_gpusvm_pages_flags {
  * @dma_addr: Device address array
  * @dpagemap: The struct drm_pagemap of the device pages we're dma-mapping.
  *            Note this is assuming only one drm_pagemap per range is allowed.
+ * @state: DMA IOVA state for mapping.
  * @notifier_seq: Notifier sequence number of the range's pages
  * @flags: Flags for range
  * @flags.migrate_devmem: Flag indicating whether the range can be migrated to device memory
@@ -147,6 +149,7 @@ struct drm_gpusvm_pages_flags {
 struct drm_gpusvm_pages {
        struct drm_pagemap_addr *dma_addr;
        struct drm_pagemap *dpagemap;
+       struct dma_iova_state state;
        unsigned long notifier_seq;
        struct drm_gpusvm_pages_flags flags;
 };
-- 
2.34.1
