The DMA IOVA alloc, link, and sync APIs perform significantly better
than dma_map_page() / dma_unmap_page(), as they avoid repeated, costly
IOMMU synchronizations. The difference is especially noticeable when
mapping a 2MB region in 4KB pages.

Use the IOVA alloc, link, and sync APIs in GPU SVM, which creates DMA
mappings of CPU memory for GPU access.
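
For illustration, the adopted flow looks roughly like this. This is a
minimal sketch, not the patched code: dev, pages, npages, and
example_map() are hypothetical, pages are assumed to be 4KB, and error
handling is abbreviated (the dma_iova_* calls are the ones used in the
diff below, from <linux/dma-mapping.h>):

  static int example_map(struct device *dev, struct page **pages,
                         unsigned long npages)
  {
          struct dma_iova_state state = {};
          size_t offset = 0;
          unsigned long i;
          int err;

          /* Reserve IOVA space once for the whole region. */
          if (!dma_iova_try_alloc(dev, &state, 0, npages * PAGE_SIZE))
                  return -ENOMEM; /* fall back to dma_map_page() here */

          /* Link each page into the reservation; no per-page sync. */
          for (i = 0; i < npages; i++) {
                  err = dma_iova_link(dev, &state,
                                      page_to_phys(pages[i]), offset,
                                      PAGE_SIZE, DMA_BIDIRECTIONAL, 0);
                  if (err)
                          goto err_destroy;
                  offset += PAGE_SIZE;
          }

          /* A single IOTLB sync covers every link made above. */
          err = dma_iova_sync(dev, &state, 0, offset);
          if (err)
                  goto err_destroy;

          /* A real caller must store state for teardown. */
          return 0;

  err_destroy:
          /* Unlink anything linked so far and free the IOVA space. */
          dma_iova_destroy(dev, &state, offset, DMA_BIDIRECTIONAL, 0);
          return err;
  }

The patch follows this shape, but links at hmm_pfn_to_phys()
granularity with per-entry order, and tears the mapping down with
dma_iova_unlink() plus dma_iova_free() (see the unmap hunk below).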

Signed-off-by: Matthew Brost <[email protected]>
---
v3:
 - Always link IOVA in mixed mappings
 - Sync IOVA
v4:
 - Initialize IOVA state in get_pages
 - Use packed IOVA linking (Jason)
 - s/page_to_phys/hmm_pfn_to_phys (Leon)

 drivers/gpu/drm/drm_gpusvm.c | 55 ++++++++++++++++++++++++++++++------
 include/drm/drm_gpusvm.h     |  5 ++++
 2 files changed, 52 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c
index 4b8130a4ce95..800caaf0a783 100644
--- a/drivers/gpu/drm/drm_gpusvm.c
+++ b/drivers/gpu/drm/drm_gpusvm.c
@@ -1139,11 +1139,19 @@ static void __drm_gpusvm_unmap_pages(struct drm_gpusvm *gpusvm,
                struct drm_gpusvm_pages_flags flags = {
                        .__flags = svm_pages->flags.__flags,
                };
+               bool use_iova = dma_use_iova(&svm_pages->state);
+
+               if (use_iova) {
+                       dma_iova_unlink(dev, &svm_pages->state, 0,
+                                       svm_pages->state_offset,
+                                       svm_pages->dma_addr[0].dir, 0);
+                       dma_iova_free(dev, &svm_pages->state);
+               }
 
                for (i = 0, j = 0; i < npages; j++) {
                        struct drm_pagemap_addr *addr = &svm_pages->dma_addr[j];
 
-                       if (addr->proto == DRM_INTERCONNECT_SYSTEM)
+                       if (!use_iova && addr->proto == DRM_INTERCONNECT_SYSTEM)
                                dma_unmap_page(dev,
                                               addr->addr,
                                               PAGE_SIZE << addr->order,
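
The teardown above, in isolation (a minimal sketch: dev and state are
the same hypothetical names as in the commit-message example,
mapped_len stands in for svm_pages->state_offset, and DMA_BIDIRECTIONAL
is assumed where the patch reads dma_addr[0].dir):

  if (dma_use_iova(&state)) {
          /* Unlink the entire linked range in one call... */
          dma_iova_unlink(dev, &state, 0, mapped_len,
                          DMA_BIDIRECTIONAL, 0);
          /* ...then return the IOVA space itself. */
          dma_iova_free(dev, &state);
  }
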
@@ -1408,6 +1416,7 @@ int drm_gpusvm_get_pages(struct drm_gpusvm *gpusvm,
        struct drm_gpusvm_pages_flags flags;
        enum dma_data_direction dma_dir = ctx->read_only ? DMA_TO_DEVICE :
                                                           DMA_BIDIRECTIONAL;
+       struct dma_iova_state *state = &svm_pages->state;
 
 retry:
        if (time_after(jiffies, timeout))
@@ -1446,6 +1455,9 @@ int drm_gpusvm_get_pages(struct drm_gpusvm *gpusvm,
        if (err)
                goto err_free;
 
+       *state = (struct dma_iova_state){};
+       svm_pages->state_offset = 0;
+
 map_pages:
        /*
         * Perform all dma mappings under the notifier lock to not
@@ -1539,13 +1551,33 @@ int drm_gpusvm_get_pages(struct drm_gpusvm *gpusvm,
                                goto err_unmap;
                        }
 
-                       addr = dma_map_page(gpusvm->drm->dev,
-                                           page, 0,
-                                           PAGE_SIZE << order,
-                                           dma_dir);
-                       if (dma_mapping_error(gpusvm->drm->dev, addr)) {
-                               err = -EFAULT;
-                               goto err_unmap;
+                       if (!i)
+                               dma_iova_try_alloc(gpusvm->drm->dev, state,
+                                                  npages * PAGE_SIZE >=
+                                                  HPAGE_PMD_SIZE ?
+                                                  HPAGE_PMD_SIZE : 0,
+                                                  npages * PAGE_SIZE);
+
+                       if (dma_use_iova(state)) {
+                               err = dma_iova_link(gpusvm->drm->dev, state,
+                                                   hmm_pfn_to_phys(pfns[i]),
+                                                   svm_pages->state_offset,
+                                                   PAGE_SIZE << order,
+                                                   dma_dir, 0);
+                               if (err)
+                                       goto err_unmap;
+
+                               addr = state->addr + svm_pages->state_offset;
+                               svm_pages->state_offset += PAGE_SIZE << order;
+                       } else {
+                               addr = dma_map_page(gpusvm->drm->dev,
+                                                   page, 0,
+                                                   PAGE_SIZE << order,
+                                                   dma_dir);
+                               if (dma_mapping_error(gpusvm->drm->dev, addr)) {
+                                       err = -EFAULT;
+                                       goto err_unmap;
+                               }
                        }
 
                        svm_pages->dma_addr[j] = drm_pagemap_addr_encode
@@ -1557,6 +1589,13 @@ int drm_gpusvm_get_pages(struct drm_gpusvm *gpusvm,
                flags.has_dma_mapping = true;
        }
 
+       if (dma_use_iova(state)) {
+               err = dma_iova_sync(gpusvm->drm->dev, state, 0,
+                                   svm_pages->state_offset);
+               if (err)
+                       goto err_unmap;
+       }
+
        if (pagemap) {
                flags.has_devmem_pages = true;
                drm_pagemap_get(dpagemap);
diff --git a/include/drm/drm_gpusvm.h b/include/drm/drm_gpusvm.h
index 2578ac92a8d4..cd94bb2ee6ee 100644
--- a/include/drm/drm_gpusvm.h
+++ b/include/drm/drm_gpusvm.h
@@ -6,6 +6,7 @@
 #ifndef __DRM_GPUSVM_H__
 #define __DRM_GPUSVM_H__
 
+#include <linux/dma-mapping.h>
 #include <linux/kref.h>
 #include <linux/interval_tree.h>
 #include <linux/mmu_notifier.h>
@@ -136,6 +137,8 @@ struct drm_gpusvm_pages_flags {
  * @dma_addr: Device address array
  * @dpagemap: The struct drm_pagemap of the device pages we're dma-mapping.
  *            Note this is assuming only one drm_pagemap per range is allowed.
+ * @state: DMA IOVA state for mapping.
+ * @state_offset: DMA IOVA offset for mapping.
  * @notifier_seq: Notifier sequence number of the range's pages
  * @flags: Flags for range
  * @flags.migrate_devmem: Flag indicating whether the range can be migrated to device memory
@@ -147,6 +150,8 @@ struct drm_gpusvm_pages_flags {
 struct drm_gpusvm_pages {
        struct drm_pagemap_addr *dma_addr;
        struct drm_pagemap *dpagemap;
+       struct dma_iova_state state;
+       unsigned long state_offset;
        unsigned long notifier_seq;
        struct drm_gpusvm_pages_flags flags;
 };
-- 
2.34.1
