We now have two implementations for vmapping a whole object, one for
dma-buf and one for the ringbuffer. If we couple the vmapping into the
obj->pages lifetime, then we can reuse an obj->vmapping for both and at
the same time couple it into the shrinker.

Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_drv.h         | 12 ++++---
 drivers/gpu/drm/i915/i915_gem.c         | 41 ++++++++++++++++++++++++
 drivers/gpu/drm/i915/i915_gem_dmabuf.c  | 55 +++++----------------------------
 drivers/gpu/drm/i915/intel_ringbuffer.c | 53 ++++++++++---------------------
 4 files changed, 72 insertions(+), 89 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b36c97c8c486..600b6d4a0139 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2004,10 +2004,7 @@ struct drm_i915_gem_object {
                struct scatterlist *sg;
                int last;
        } get_page;
-
-       /* prime dma-buf support */
-       void *dma_buf_vmapping;
-       int vmapping_count;
+       void *vmapping;
 
        /** Breadcrumb of last rendering to the buffer.
         * There can only be one writer, but we allow for multiple readers.
@@ -2706,12 +2703,19 @@ static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
        BUG_ON(obj->pages == NULL);
        obj->pages_pin_count++;
 }
+
 static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
 {
        BUG_ON(obj->pages_pin_count == 0);
        obj->pages_pin_count--;
 }
 
+void *__must_check i915_gem_object_pin_vmap(struct drm_i915_gem_object *obj);
+static inline void i915_gem_object_unpin_vmap(struct drm_i915_gem_object *obj)
+{
+       i915_gem_object_unpin_pages(obj);
+}
+
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
                         struct intel_engine_cs *to);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 5ab974a19779..1f07cd17be04 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2150,6 +2150,11 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
        ops->put_pages(obj);
        obj->pages = NULL;
 
+       if (obj->vmapping) {
+               vunmap(obj->vmapping);
+               obj->vmapping = NULL;
+       }
+
        i915_gem_object_invalidate(obj);
 
        return 0;
@@ -2309,6 +2314,42 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
        return 0;
 }
 
+void *i915_gem_object_pin_vmap(struct drm_i915_gem_object *obj)
+{
+       int ret;
+
+       ret = i915_gem_object_get_pages(obj);
+       if (ret)
+               return ERR_PTR(ret);
+
+       i915_gem_object_pin_pages(obj);
+
+       if (obj->vmapping == NULL) {
+               struct sg_page_iter sg_iter;
+               struct page **pages;
+               int n;
+
+               n = obj->base.size >> PAGE_SHIFT;
+               pages = kmalloc(n*sizeof(*pages), GFP_TEMPORARY);
+               if (pages == NULL)
+                       pages = drm_malloc_ab(n, sizeof(*pages));
+               if (pages != NULL) {
+                       n = 0;
+                       for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
+                               pages[n++] = sg_page_iter_page(&sg_iter);
+
+                       obj->vmapping = vmap(pages, n, 0, PAGE_KERNEL);
+                       drm_free_large(pages);
+               }
+               if (obj->vmapping == NULL) {
+                       i915_gem_object_unpin_pages(obj);
+                       return ERR_PTR(-ENOMEM);
+               }
+       }
+
+       return obj->vmapping;
+}
+
 void i915_vma_move_to_active(struct i915_vma *vma,
                             struct intel_engine_cs *ring)
 {
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 82a1f4b57778..18bdad6a54d2 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -95,14 +95,12 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
 {
        struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
 
-       mutex_lock(&obj->base.dev->struct_mutex);
-
        dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
        sg_free_table(sg);
        kfree(sg);
 
-       i915_gem_object_unpin_pages(obj);
-
+       mutex_lock(&obj->base.dev->struct_mutex);
+       i915_gem_object_unpin_vmap(obj);
        mutex_unlock(&obj->base.dev->struct_mutex);
 }
 
@@ -110,51 +108,17 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
 {
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        struct drm_device *dev = obj->base.dev;
-       struct sg_page_iter sg_iter;
-       struct page **pages;
-       int ret, i;
+       void *addr;
+       int ret;
 
        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ERR_PTR(ret);
 
-       if (obj->dma_buf_vmapping) {
-               obj->vmapping_count++;
-               goto out_unlock;
-       }
-
-       ret = i915_gem_object_get_pages(obj);
-       if (ret)
-               goto err;
-
-       i915_gem_object_pin_pages(obj);
-
-       ret = -ENOMEM;
-
-       pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
-       if (pages == NULL)
-               goto err_unpin;
-
-       i = 0;
-       for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
-               pages[i++] = sg_page_iter_page(&sg_iter);
-
-       obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
-       drm_free_large(pages);
-
-       if (!obj->dma_buf_vmapping)
-               goto err_unpin;
-
-       obj->vmapping_count = 1;
-out_unlock:
+       addr = i915_gem_object_pin_vmap(obj);
        mutex_unlock(&dev->struct_mutex);
-       return obj->dma_buf_vmapping;
 
-err_unpin:
-       i915_gem_object_unpin_pages(obj);
-err:
-       mutex_unlock(&dev->struct_mutex);
-       return ERR_PTR(ret);
+       return addr;
 }
 
 static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
@@ -163,12 +127,7 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
        struct drm_device *dev = obj->base.dev;
 
        mutex_lock(&dev->struct_mutex);
-       if (--obj->vmapping_count == 0) {
-               vunmap(obj->dma_buf_vmapping);
-               obj->dma_buf_vmapping = NULL;
-
-               i915_gem_object_unpin_pages(obj);
-       }
+       i915_gem_object_unpin_pages(obj);
        mutex_unlock(&dev->struct_mutex);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 5b837ed842f6..99a1fdff4924 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1908,34 +1908,12 @@ static int init_phys_status_page(struct intel_engine_cs *ring)
 void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
 {
        if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen)
-               vunmap(ringbuf->virtual_start);
+               i915_gem_object_unpin_vmap(ringbuf->obj);
        else
                iounmap(ringbuf->virtual_start);
-       ringbuf->virtual_start = NULL;
        i915_gem_object_ggtt_unpin(ringbuf->obj);
 }
 
-static u32 *vmap_obj(struct drm_i915_gem_object *obj)
-{
-       struct sg_page_iter sg_iter;
-       struct page **pages;
-       void *addr;
-       int i;
-
-       pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
-       if (pages == NULL)
-               return NULL;
-
-       i = 0;
-       for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
-               pages[i++] = sg_page_iter_page(&sg_iter);
-
-       addr = vmap(pages, i, 0, PAGE_KERNEL);
-       drm_free_large(pages);
-
-       return addr;
-}
-
 int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
                                     struct intel_ringbuffer *ringbuf)
 {
@@ -1949,32 +1927,33 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
 
        if (HAS_LLC(dev_priv) && !obj->stolen) {
                ret = i915_gem_object_set_to_cpu_domain(obj, true);
-               if (ret) {
-                       i915_gem_object_ggtt_unpin(obj);
-                       return ret;
-               }
+               if (ret)
+                       goto unpin;
 
-               ringbuf->virtual_start = vmap_obj(obj);
-               if (ringbuf->virtual_start == NULL) {
-                       i915_gem_object_ggtt_unpin(obj);
-                       return -ENOMEM;
+               ringbuf->virtual_start = i915_gem_object_pin_vmap(obj);
+               if (IS_ERR(ringbuf->virtual_start)) {
+                       ret = PTR_ERR(ringbuf->virtual_start);
+                       ringbuf->virtual_start = NULL;
+                       goto unpin;
                }
        } else {
                ret = i915_gem_object_set_to_gtt_domain(obj, true);
-               if (ret) {
-                       i915_gem_object_ggtt_unpin(obj);
-                       return ret;
-               }
+               if (ret)
+                       goto unpin;
 
                ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
                                                    i915_gem_obj_ggtt_offset(obj), ringbuf->size);
                if (ringbuf->virtual_start == NULL) {
-                       i915_gem_object_ggtt_unpin(obj);
-                       return -EINVAL;
+                       ret = -ENOMEM;
+                       goto unpin;
                }
        }
 
        return 0;
+
+unpin:
+       i915_gem_object_ggtt_unpin(obj);
+       return ret;
 }
 
 void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
-- 
2.1.4

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to