OMAP GEM objects backed by dma_buf reuse the current OMAP GEM object
support as much as possible. If the imported buffer is physically
contiguous its physical address will be used directly, reusing the
OMAP_BO_MEM_DMA_API code paths. Otherwise it will be mapped through the
TILER using a pages list created from the scatterlist instead of the
shmem backing storage.

Disallow exporting imported buffers for now as those code paths haven't
been verified. Use cases of such a feature are suspicious anyway.

Signed-off-by: Laurent Pinchart <laurent.pinchart at ideasonboard.com>
---
 drivers/gpu/drm/omapdrm/omap_drv.h        |   2 +
 drivers/gpu/drm/omapdrm/omap_gem.c        | 138 ++++++++++++++++++++++++------
 drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c |  57 +++++++++---
 3 files changed, 163 insertions(+), 34 deletions(-)

diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 97fcb892fdd7..6615b7d51ee3 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -200,6 +200,8 @@ void omap_gem_deinit(struct drm_device *dev);

 struct drm_gem_object *omap_gem_new(struct drm_device *dev,
                union omap_gem_size gsize, uint32_t flags);
+struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
+               struct sg_table *sgt);
 int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
                union omap_gem_size gsize, uint32_t flags, uint32_t *handle);
 void omap_gem_free_object(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 11df4f78d8a7..8a54d333a47b 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -33,6 +33,7 @@
 #define OMAP_BO_MEM_DMA_API    0x01000000      /* memory allocated with the dma_alloc_* API */
 #define OMAP_BO_MEM_SHMEM      0x02000000      /* memory allocated through shmem backing */
 #define OMAP_BO_MEM_EXT                0x04000000      /* memory allocated externally */
+#define OMAP_BO_MEM_DMABUF     0x08000000      /* memory imported from a dmabuf */
 #define OMAP_BO_EXT_SYNC       0x10000000      /* externally allocated sync object */

 struct omap_gem_object {
@@ -49,17 +50,25 @@ struct omap_gem_object {
        uint32_t roll;

        /**
-        * If buffer is allocated physically contiguous, the OMAP_BO_MEM_DMA_API
-        * flag is set and the paddr is valid.  Also if the buffer is remapped
-        * in TILER and paddr_cnt > 0, then paddr is valid. But if you are using
-        * the physical address and OMAP_BO_MEM_DMA_API is not set, then you
-        * should be going thru omap_gem_{get,put}_paddr() to ensure the mapping
-        * is not removed from under your feet.
+        * paddr contains the buffer DMA address. It is valid for
         *
-        * Note that OMAP_BO_SCANOUT is a hint from userspace that DMA capable
-        * buffer is requested, but doesn't mean that it is. Use the
-        * OMAP_BO_MEM_DMA_API flag to determine if the buffer has a DMA capable
-        * physical address.
+        * - buffers allocated through the DMA mapping API (with the
+        *   OMAP_BO_MEM_DMA_API flag set)
+        *
+        * - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set)
+        *   if they are physically contiguous (when sgt->orig_nents == 1)
+        *
+        * - buffers mapped through the TILER when paddr_cnt is not zero, in
+        *   which case the DMA address points to the TILER aperture
+        *
+        * Physically contiguous buffers have their DMA address equal to the
+        * physical address as we don't remap those buffers through the TILER.
+        *
+        * Buffers mapped to the TILER have their DMA address pointing to the
+        * TILER aperture. As TILER mappings are refcounted (through paddr_cnt)
+        * the DMA address must be accessed through omap_gem_get_paddr() to
+        * ensure that the mapping won't disappear unexpectedly. References must
+        * be released with omap_gem_put_paddr().
         */
        dma_addr_t paddr;

@@ -69,6 +78,12 @@ struct omap_gem_object {
        uint32_t paddr_cnt;

        /**
+        * If the buffer has been imported from a dmabuf the OMAP_BO_MEM_DMABUF flag
+        * is set and the sgt field is valid.
+        */
+       struct sg_table *sgt;
+
+       /**
         * tiler block used when buffer is remapped in DMM/TILER.
         */
        struct tiler_block *block;
@@ -166,6 +181,17 @@ static uint64_t mmap_offset(struct drm_gem_object *obj)
        return drm_vma_node_offset_addr(&obj->vma_node);
 }

+static bool is_contiguous(struct omap_gem_object *omap_obj)
+{
+       if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
+               return true;
+
+       if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) && omap_obj->sgt->nents == 1)
+               return true;
+
+       return false;
+}
+
 /* -----------------------------------------------------------------------------
  * Eviction
  */
@@ -384,7 +410,7 @@ static int fault_1d(struct drm_gem_object *obj,
                omap_gem_cpu_sync(obj, pgoff);
                pfn = page_to_pfn(omap_obj->pages[pgoff]);
        } else {
-               BUG_ON(!(omap_obj->flags & OMAP_BO_MEM_DMA_API));
+               BUG_ON(!is_contiguous(omap_obj));
                pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
        }

@@ -774,8 +800,7 @@ int omap_gem_get_paddr(struct drm_gem_object *obj,

        mutex_lock(&obj->dev->struct_mutex);

-       if (!(omap_obj->flags & OMAP_BO_MEM_DMA_API) &&
-           remap && priv->usergart) {
+       if (!is_contiguous(omap_obj) && remap && priv->usergart) {
                if (omap_obj->paddr_cnt == 0) {
                        struct page **pages;
                        uint32_t npages = obj->size >> PAGE_SHIFT;
@@ -822,7 +847,7 @@ int omap_gem_get_paddr(struct drm_gem_object *obj,
                omap_obj->paddr_cnt++;

                *paddr = omap_obj->paddr;
-       } else if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
+       } else if (is_contiguous(omap_obj)) {
                *paddr = omap_obj->paddr;
        } else {
                ret = -EINVAL;
@@ -1290,9 +1315,6 @@ unlock:
  * Constructor & Destructor
  */

-/* don't call directly.. called from GEM core when it is time to actually
- * free the object..
- */
 void omap_gem_free_object(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
@@ -1314,14 +1336,20 @@ void omap_gem_free_object(struct drm_gem_object *obj)

        /* don't free externally allocated backing memory */
        if (!(omap_obj->flags & OMAP_BO_MEM_EXT)) {
-               if (omap_obj->pages)
-                       omap_gem_detach_pages(obj);
+               if (omap_obj->pages) {
+                       if (omap_obj->flags & OMAP_BO_MEM_DMABUF)
+                               kfree(omap_obj->pages);
+                       else
+                               omap_gem_detach_pages(obj);
+               }

                if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
                        dma_free_writecombine(dev->dev, obj->size,
                                        omap_obj->vaddr, omap_obj->paddr);
                } else if (omap_obj->vaddr) {
                        vunmap(omap_obj->vaddr);
+               } else if (obj->import_attach) {
+                       drm_prime_gem_destroy(obj, omap_obj->sgt);
                }
        }

@@ -1367,14 +1395,15 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
                flags |= tiler_get_cpu_cache_flags();
        } else if ((flags & OMAP_BO_SCANOUT) && !priv->usergart) {
                /*
-                * Use contiguous memory if we don't have DMM to remap
-                * discontiguous buffers.
+                * OMAP_BO_SCANOUT hints that the buffer doesn't need to be
+                * tiled. However, to lower the pressure on memory allocation,
+                * use contiguous memory only if no TILER is available.
                 */
                flags |= OMAP_BO_MEM_DMA_API;
-       } else if (!(flags & OMAP_BO_MEM_EXT)) {
+       } else if (!(flags & (OMAP_BO_MEM_EXT | OMAP_BO_MEM_DMABUF))) {
                /*
-                * All other buffers not backed with external memory are
-                * shmem-backed.
+                * All other buffers not backed by external memory or dma_buf
+                * are shmem-backed.
                 */
                flags |= OMAP_BO_MEM_SHMEM;
        }
@@ -1436,6 +1465,67 @@ fail:
        return NULL;
 }

+struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
+                                          struct sg_table *sgt)
+{
+       struct omap_drm_private *priv = dev->dev_private;
+       struct omap_gem_object *omap_obj;
+       struct drm_gem_object *obj;
+       union omap_gem_size gsize;
+
+       /* Without a DMM only physically contiguous buffers can be supported. */
+       if (sgt->orig_nents != 1 && !priv->usergart)
+               return ERR_PTR(-EINVAL);
+
+       mutex_lock(&dev->struct_mutex);
+
+       gsize.bytes = PAGE_ALIGN(size);
+       obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC);
+       if (!obj) {
+               obj = ERR_PTR(-ENOMEM);
+               goto done;
+       }
+
+       omap_obj = to_omap_bo(obj);
+       omap_obj->sgt = sgt;
+
+       if (sgt->orig_nents == 1) {
+               omap_obj->paddr = sg_dma_address(sgt->sgl);
+       } else {
+               /* Create pages list from sgt */
+               struct sg_page_iter iter;
+               struct page **pages;
+               unsigned int npages;
+               unsigned int i = 0;
+
+               npages = DIV_ROUND_UP(size, PAGE_SIZE);
+               pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
+               if (!pages) {
+                       omap_gem_free_object(obj);
+                       obj = ERR_PTR(-ENOMEM);
+                       goto done;
+               }
+
+               omap_obj->pages = pages;
+
+               for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
+                       pages[i++] = sg_page_iter_page(&iter);
+                       if (i > npages)
+                               break;
+               }
+
+               if (WARN_ON(i != npages)) {
+                       omap_gem_free_object(obj);
+                       obj = ERR_PTR(-ENOMEM);
+                       goto done;
+               }
+       }
+
+done:
+       mutex_unlock(&dev->struct_mutex);
+       return obj;
+}
+
 /* convenience method to construct a GEM buffer object, and userspace handle */
 int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
                union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index 27c297672076..3f7d25a8baf1 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -21,6 +21,10 @@

 #include "omap_drv.h"

+/* -----------------------------------------------------------------------------
+ * DMABUF Export
+ */
+
 static struct sg_table *omap_gem_map_dma_buf(
                struct dma_buf_attachment *attachment,
                enum dma_data_direction dir)
@@ -170,6 +174,10 @@ struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
 {
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

+       /* Don't allow exporting buffers that have been imported. */
+       if (obj->import_attach)
+               return ERR_PTR(-EINVAL);
+
        exp_info.ops = &omap_dmabuf_ops;
        exp_info.size = obj->size;
        exp_info.flags = flags;
@@ -178,15 +186,20 @@ struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
        return dma_buf_export(&exp_info);
 }

+/* -----------------------------------------------------------------------------
+ * DMABUF Import
+ */
+
 struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
-               struct dma_buf *buffer)
+                                            struct dma_buf *dma_buf)
 {
+       struct dma_buf_attachment *attach;
        struct drm_gem_object *obj;
+       struct sg_table *sgt;
+       int ret;

-       /* is this one of own objects? */
-       if (buffer->ops == &omap_dmabuf_ops) {
-               obj = buffer->priv;
-               /* is it from our device? */
+       if (dma_buf->ops == &omap_dmabuf_ops) {
+               obj = dma_buf->priv;
                if (obj->dev == dev) {
                        /*
                         * Importing dmabuf exported from out own gem increases
@@ -197,9 +210,33 @@ struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
                }
        }

-       /*
-        * TODO add support for importing buffers from other devices..
-        * for now we don't need this but would be nice to add eventually
-        */
-       return ERR_PTR(-EINVAL);
+       attach = dma_buf_attach(dma_buf, dev->dev);
+       if (IS_ERR(attach))
+               return ERR_CAST(attach);
+
+       get_dma_buf(dma_buf);
+
+       sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+       if (IS_ERR(sgt)) {
+               ret = PTR_ERR(sgt);
+               goto fail_detach;
+       }
+
+       obj = omap_gem_new_dmabuf(dev, dma_buf->size, sgt);
+       if (IS_ERR(obj)) {
+               ret = PTR_ERR(obj);
+               goto fail_unmap;
+       }
+
+       obj->import_attach = attach;
+
+       return obj;
+
+fail_unmap:
+       dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+fail_detach:
+       dma_buf_detach(dma_buf, attach);
+       dma_buf_put(dma_buf);
+
+       return ERR_PTR(ret);
 }
-- 
2.4.10

Reply via email to