On 19/09/17 11:17 AM, Deucher, Alexander wrote:
-----Original Message-----
From: amd-gfx [mailto:amd-gfx-boun...@lists.freedesktop.org] On Behalf
Of Tom St Denis
Sent: Tuesday, September 19, 2017 8:13 AM
To: amd-gfx@lists.freedesktop.org
Cc: StDenis, Tom
Subject: [PATCH] drm/amd/amdgpu: add support for iova_to_phys to
replace TTM trace (v5)

Signed-off-by: Tom St Denis <tom.stde...@amd.com>

(v2): Add domain to iova debugfs
(v3): Add true read/write methods to access system memory of pages
       mapped to the device
(v4): Move get_domain call out of loop and return on error
(v5): Just use kmap/kunmap
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 99
+++++++++++++++++++++++++++++++++
  1 file changed, 99 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 50d20903de4f..c7f8e081a772 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -43,6 +43,7 @@
  #include <linux/swap.h>
  #include <linux/pagemap.h>
  #include <linux/debugfs.h>
+#include <linux/iommu.h>

Just to double check, do you need any compile guards for the IOMMU disabled 
case?

I honestly don't know. Is there an easy way to setup a cross build that won't automagically enable the iommu in the configuration?

(also, if the iommu team co-operated from the get go...).

Looking at the cavium thunder ethernet source (I grepped for iova_to_phys) it seems the return of dev_to_domain might be NULL if IOMMU is disabled in which case these read/writes will return -EFAULT...

Ideally, iommu_iova_to_phys() would behave as an identity function when the domain is NULL — but it doesn't; it will OOPS if domain == NULL.

So the fix might be to skip the iova_to_phys call if domain==NULL and act on the address as a physical address.

On my carrizo with IOMMU enabled I can read the gfx ring buffer directly with --vm-read for both the Carrizo and Polaris10 (the Carrizo doesn't do mapping, the Polaris10 does). I can try a boot with iommu disabled and see what happens.

Tom



Alex

  #include "amdgpu.h"
  #include "amdgpu_trace.h"
  #include "bif/bif_4_1_d.h"
@@ -1809,7 +1810,104 @@ static const struct file_operations
amdgpu_ttm_gtt_fops = {

  #endif

+static ssize_t amdgpu_iova_to_phys_read(struct file *f, char __user *buf,
+                                  size_t size, loff_t *pos)
+{
+       struct amdgpu_device *adev = file_inode(f)->i_private;
+       ssize_t result, n;
+       int r;
+       uint64_t phys;
+       void *ptr;
+       struct iommu_domain *dom;
+
+       dom = iommu_get_domain_for_dev(adev->dev);
+       if (!dom)
+               return -EFAULT;
+
+       result = 0;
+       while (size) {
+               // get physical address and map
+               phys = iommu_iova_to_phys(dom, *pos);
+
+               // copy upto one page
+               if (size > PAGE_SIZE)
+                       n = PAGE_SIZE;
+               else
+                       n = size;
+
+               // to end of the page
+               if (((*pos & (PAGE_SIZE - 1)) + n) >= PAGE_SIZE)
+                       n = PAGE_SIZE - (*pos & (PAGE_SIZE - 1));
+
+               ptr = kmap(pfn_to_page(PFN_DOWN(phys)));
+               if (!ptr)
+                       return -EFAULT;
+
+               r = copy_to_user(buf, ptr, n);
+               kunmap(pfn_to_page(PFN_DOWN(phys)));
+               if (r)
+                       return -EFAULT;
+
+               *pos += n;
+               size -= n;
+               result += n;
+       }
+
+       return result;
+}
+
+static ssize_t amdgpu_iova_to_phys_write(struct file *f, const char __user
*buf,
+                                  size_t size, loff_t *pos)
+{
+       struct amdgpu_device *adev = file_inode(f)->i_private;
+       ssize_t result, n;
+       int r;
+       uint64_t phys;
+       void *ptr;
+       struct iommu_domain *dom;
+
+       dom = iommu_get_domain_for_dev(adev->dev);
+       if (!dom)
+               return -EFAULT;
+
+       result = 0;
+       while (size) {
+               // get physical address and map
+               phys = iommu_iova_to_phys(dom, *pos);

+               // copy upto one page
+               if (size > PAGE_SIZE)
+                       n = PAGE_SIZE;
+               else
+                       n = size;
+
+               // to end of the page
+               if (((*pos & (PAGE_SIZE - 1)) + n) >= PAGE_SIZE)
+                       n = PAGE_SIZE - (*pos & (PAGE_SIZE - 1));
+
+               ptr = kmap(pfn_to_page(PFN_DOWN(phys)));
+               if (!ptr)
+                       return -EFAULT;
+
+               r = copy_from_user(ptr, buf, n);
+               kunmap(pfn_to_page(PFN_DOWN(phys)));
+               if (r)
+                       return -EFAULT;
+
+               *pos += n;
+               size -= n;
+               result += n;
+       }
+
+       return result;
+}
+
+static const struct file_operations amdgpu_ttm_iova_fops = {
+       .owner = THIS_MODULE,
+       .read = amdgpu_iova_to_phys_read,
+       .write = amdgpu_iova_to_phys_write,
+       .llseek = default_llseek
+};

  static const struct {
        char *name;
@@ -1820,6 +1918,7 @@ static const struct {
  #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
        { "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT },
  #endif
+       { "amdgpu_iova", &amdgpu_ttm_iova_fops, TTM_PL_SYSTEM },
  };

  #endif
--
2.12.0

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Reply via email to