A GEM object's buffer is allocated with dma_alloc_writecombine and is currently
assumed to be physically contiguous. If an IOMMU mapping is created for DRM,
the buffer may be non-contiguous, so the map functions are changed to call
dma_mmap_writecombine, which works for both contiguous and non-contiguous
buffers.
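
For reference, a minimal sketch of the alloc/mmap pairing this change relies
on, assuming a generic device pointer 'dev' and the vma passed to an mmap
handler (the identifiers below are placeholders, not the driver's own fields):

	void *kvaddr;
	dma_addr_t dma_addr;
	size_t size = vma->vm_end - vma->vm_start;
	int ret;

	/* Allocation: pages may be non-contiguous behind an IOMMU. */
	kvaddr = dma_alloc_writecombine(dev, size, &dma_addr, GFP_KERNEL);
	if (!kvaddr)
		return -ENOMEM;

	/* Mapping: handles both contiguous and non-contiguous buffers. */
	ret = dma_mmap_writecombine(dev, vma, kvaddr, dma_addr, size);
	if (ret)
		dma_free_writecombine(dev, size, kvaddr, dma_addr);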

Signed-off-by: Prathyush K <prathyush.k@samsung.com>
---
 drivers/gpu/drm/exynos/exynos_drm_gem.c |   35 ++++++++++++++----------------
 1 files changed, 16 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 5c8b683..59240f7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -162,17 +162,22 @@ static int exynos_drm_gem_map_pages(struct drm_gem_object *obj,
 {
        struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
        struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
-       unsigned long pfn;

        if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
+               unsigned long pfn;
                if (!buf->pages)
                        return -EINTR;

                pfn = page_to_pfn(buf->pages[page_offset++]);
-       } else
-               pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;
-
-       return vm_insert_mixed(vma, f_vaddr, pfn);
+               return vm_insert_mixed(vma, f_vaddr, pfn);
+       } else {
+               int ret;
+               ret = dma_mmap_writecombine(obj->dev->dev, vma, buf->kvaddr,
+                                       buf->dma_addr, buf->size);
+               if (ret)
+                       DRM_ERROR("dma_mmap_writecombine failed\n");
+               return ret;
+       }
 }

 static int exynos_drm_gem_get_pages(struct drm_gem_object *obj)
@@ -503,7 +508,7 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
        struct drm_gem_object *obj = filp->private_data;
        struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
        struct exynos_drm_gem_buf *buffer;
-       unsigned long pfn, vm_size, usize, uaddr = vma->vm_start;
+       unsigned long vm_size, usize, uaddr = vma->vm_start;
        int ret;

        DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -543,19 +548,11 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
                        usize -= PAGE_SIZE;
                } while (usize > 0);
        } else {
-               /*
-                * get page frame number to physical memory to be mapped
-                * to user space.
-                */
-               pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
-                                                               PAGE_SHIFT;
-
-               DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);
-
-               if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
-                                       vma->vm_page_prot)) {
-                       DRM_ERROR("failed to remap pfn range.\n");
-                       return -EAGAIN;
+               ret = dma_mmap_writecombine(obj->dev->dev, vma, buffer->kvaddr,
+                                       buffer->dma_addr, buffer->size);
+               if (ret) {
+                       DRM_ERROR("dma_mmap_writecombine failed\n");
+                       return ret;
                }
        }

-- 
1.7.0.4