We should be able to export VRAM buffers over DMA-buf now after checking all
the prerequisites. When the importer can do peer to peer the buffer is kept in
VRAM and mapped through the PCI BAR instead of being moved to GTT.

v2: fix entry count in the sgt

Signed-off-by: Christian König <christian.koe...@amd.com>
---
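Not part of the change itself, just a sketch of what this enables on the
importer side: with peer2peer set on the attachment the exported sg table can
now describe VRAM through the PCI BAR, so it carries DMA addresses only and no
struct pages. A minimal, hypothetical importer loop could look roughly like
this (importer_dev and program_device_pages are made-up names for
illustration):

        struct sg_table *sgt;
        struct scatterlist *sg;
        int i;

        /* For a VRAM export this returns BAR ranges, not pages. */
        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sgt))
                return PTR_ERR(sgt);

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                /*
                 * Only the DMA address and length are valid here, never
                 * sg_page() - VRAM exports are not backed by struct pages.
                 */
                program_device_pages(importer_dev, sg_dma_address(sg),
                                     sg_dma_len(sg));
        }
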
 drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c    | 46 ++++++++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h      |  9 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 96 ++++++++++++++++++++
 3 files changed, 142 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index a290ae830b11..55bb39281c5d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -318,22 +318,45 @@ amdgpu_gem_map_dma_buf(struct dma_buf_attachment *attach,
        }
 
        if (attach->invalidate) {
-               /* move buffer into GTT */
+               /* move buffer into GTT or VRAM */
                struct ttm_operation_ctx ctx = { false, false };
+               unsigned domains = AMDGPU_GEM_DOMAIN_GTT;
 
-               amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+               if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM &&
+                   attach->peer2peer) {
+                       bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+                       domains |= AMDGPU_GEM_DOMAIN_VRAM;
+               }
+               amdgpu_bo_placement_from_domain(bo, domains);
                r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                if (r)
                        return ERR_PTR(r);
        }
 
-       sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages, bo->tbo.num_pages);
-       if (IS_ERR(sgt))
-               return sgt;
+       switch (bo->tbo.mem.mem_type) {
+       case TTM_PL_TT:
+               sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages,
+                                           bo->tbo.num_pages);
+               if (IS_ERR(sgt))
+                       return sgt;
+
+               if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
+                                     DMA_ATTR_SKIP_CPU_SYNC)) {
+                       r = -EINVAL;
+                       goto error_free;
+               }
+               break;
 
-       if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
-                             DMA_ATTR_SKIP_CPU_SYNC))
+       case TTM_PL_VRAM:
+               r = amdgpu_vram_mgr_alloc_sgt(adev, &bo->tbo.mem, attach->dev,
+                                             dir, &sgt);
+               if (r)
+                       goto error_free;
+               break;
+       default:
+               r = -EINVAL;
                goto error_free;
+       }
 
        if (attach->dev->driver != adev->dev->driver)
                bo->prime_shared_count++;
@@ -343,7 +366,7 @@ amdgpu_gem_map_dma_buf(struct dma_buf_attachment *attach,
 error_free:
        sg_free_table(sgt);
        kfree(sgt);
-       return ERR_PTR(-ENOMEM);
+       return ERR_PTR(r);
 }
 
 /**
@@ -367,10 +390,15 @@ static void amdgpu_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
        if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
                bo->prime_shared_count--;
 
-       if (sgt) {
+       if (!sgt)
+               return;
+
+       if (sgt->sgl->page_link) {
                dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
                sg_free_table(sgt);
                kfree(sgt);
+       } else {
+               amdgpu_vram_mgr_free_sgt(adev, attach->dev, dir, sgt);
        }
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index c2b7669004ba..0b4cdbe867e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -72,6 +72,15 @@ uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
 int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man);
 
 u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo);
+int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
+                             struct ttm_mem_reg *mem,
+                             struct device *dev,
+                             enum dma_data_direction dir,
+                             struct sg_table **sgt);
+void amdgpu_vram_mgr_free_sgt(struct amdgpu_device *adev,
+                             struct device *dev,
+                             enum dma_data_direction dir,
+                             struct sg_table *sgt);
 uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
 uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index ec9ea3fdbb4a..520cea4dbdab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -399,6 +399,102 @@ static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
        mem->mm_node = NULL;
 }
 
+/**
+ * amdgpu_vram_mgr_alloc_sgt - allocate and fill a sg table
+ *
+ * @adev: amdgpu device pointer
+ * @mem: TTM memory object
+ * @dev: device the sg table is mapped for
+ * @dir: dma direction
+ * @sgt: resulting sg table
+ *
+ * Allocate and fill a sg table from a VRAM allocation.
+ */
+int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
+                             struct ttm_mem_reg *mem,
+                             struct device *dev,
+                             enum dma_data_direction dir,
+                             struct sg_table **sgt)
+{
+       struct drm_mm_node *node;
+       struct scatterlist *sg;
+       int num_entries = 0;
+       unsigned int pages;
+       int i, r;
+
+       *sgt = kmalloc(sizeof(**sgt), GFP_KERNEL);
+       if (!*sgt)
+               return -ENOMEM;
+
+       for (pages = mem->num_pages, node = mem->mm_node;
+            pages; pages -= node->size, ++node)
+               ++num_entries;
+
+       r = sg_alloc_table(*sgt, num_entries, GFP_KERNEL);
+       if (r)
+               goto error_free;
+
+       for_each_sg((*sgt)->sgl, sg, num_entries, i)
+               sg->length = 0;
+
+       node = mem->mm_node;
+       for_each_sg((*sgt)->sgl, sg, num_entries, i) {
+               phys_addr_t phys = (node->start << PAGE_SHIFT) +
+                       adev->gmc.aper_base;
+               size_t size = node->size << PAGE_SHIFT;
+               dma_addr_t addr;
+
+               ++node;
+               addr = dma_map_resource(dev, phys, size, dir,
+                                       DMA_ATTR_SKIP_CPU_SYNC);
+               r = dma_mapping_error(dev, addr);
+               if (r)
+                       goto error_unmap;
+
+               sg_set_dma_addr(sg, addr, size, 0);
+       }
+       return 0;
+
+error_unmap:
+       for_each_sg((*sgt)->sgl, sg, num_entries, i) {
+               if (!sg->length)
+                       continue;
+
+               dma_unmap_resource(dev, sg->dma_address,
+                                  sg->length, dir,
+                                  DMA_ATTR_SKIP_CPU_SYNC);
+       }
+       sg_free_table(*sgt);
+
+error_free:
+       kfree(*sgt);
+       return r;
+}
+
+/**
+ * amdgpu_vram_mgr_free_sgt - free an sg table
+ *
+ * @adev: amdgpu device pointer
+ * @sgt: sg table to free
+ *
+ * Free a previously allocated sg table and unmap its DMA addresses.
+ */
+void amdgpu_vram_mgr_free_sgt(struct amdgpu_device *adev,
+                             struct device *dev,
+                             enum dma_data_direction dir,
+                             struct sg_table *sgt)
+{
+       struct scatterlist *sg;
+       int i;
+
+       for_each_sg(sgt->sgl, sg, sgt->nents, i)
+               dma_unmap_resource(dev, sg->dma_address,
+                                  sg->length, dir,
+                                  DMA_ATTR_SKIP_CPU_SYNC);
+       sg_free_table(sgt);
+       kfree(sgt);
+}
+
 /**
  * amdgpu_vram_mgr_usage - how many bytes are used in this domain
  *
-- 
2.17.1
