Hi,

On 07/17/2015 03:31 PM, john.c.harri...@intel.com wrote:
From: John Harrison <john.c.harri...@intel.com>

The request structure is reference counted. When the count reached
zero, the request was immediately freed and all associated objects
were unreferenced/deallocated. This meant that the driver mutex lock
had to be held at the point where the count reached zero. That was
fine while all references were held internally to the driver. However,
the plan is to allow the underlying fence object (and hence the
request itself) to be returned to other drivers and to userland.
External users cannot be expected to acquire a driver-private mutex
lock.

Rather than attempt to disentangle the request structure from the
driver mutex lock, the decision was to defer the free code until a
later (safer) point. Hence this patch changes the unreference callback
to merely move the request onto a delayed free list. The driver's
retire worker thread will then process the list and actually call the
free function on the requests.

[new patch in series]

For: VIZ-5190
Signed-off-by: John Harrison <john.c.harri...@intel.com>
---
  drivers/gpu/drm/i915/i915_drv.h         | 22 +++---------------
  drivers/gpu/drm/i915/i915_gem.c         | 41 +++++++++++++++++++++++++++++----
  drivers/gpu/drm/i915/intel_display.c    |  2 +-
  drivers/gpu/drm/i915/intel_lrc.c        |  2 ++
  drivers/gpu/drm/i915/intel_pm.c         |  2 +-
  drivers/gpu/drm/i915/intel_ringbuffer.c |  2 ++
  drivers/gpu/drm/i915/intel_ringbuffer.h |  4 ++++
  7 files changed, 50 insertions(+), 25 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 88a4746..61c3db2 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2161,14 +2161,9 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
   * initial reference taken using kref_init
   */
  struct drm_i915_gem_request {
-       /**
-        * Underlying object for implementing the signal/wait stuff.
-        * NB: Never return this fence object to user land! It is unsafe to
-        * let anything outside of the i915 driver get hold of the fence
-        * object as the clean up when decrementing the reference count
-        * requires holding the driver mutex lock.
-        */
+       /** Underlying object for implementing the signal/wait stuff. */
        struct fence fence;
+       struct list_head delay_free_list;

Maybe call this delay_free_link, to continue the established convention (_link for a list node, _list for a list head).
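
For illustration, a minimal sketch of the naming I mean, with everything else as in the patch:

        /* in struct drm_i915_gem_request */
        struct list_head delay_free_link;   /* node on ring->delayed_free_list */

        /* in struct intel_engine_cs */
        struct list_head delayed_free_list; /* head of the to-be-freed queue */
        spinlock_t delayed_free_lock;       /* protects delayed_free_list */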


        /** On Which ring this request was generated */
        struct drm_i915_private *i915;
@@ -2281,21 +2276,10 @@ i915_gem_request_reference(struct drm_i915_gem_request *req)
  static inline void
  i915_gem_request_unreference(struct drm_i915_gem_request *req)
  {
-       WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
-       fence_put(&req->fence);
-}
-
-static inline void
-i915_gem_request_unreference__unlocked(struct drm_i915_gem_request *req)
-{
-       struct drm_device *dev;
-
        if (!req)
                return;

-       dev = req->ring->dev;
-       if (kref_put_mutex(&req->fence.refcount, fence_release,
-                          &dev->struct_mutex))
-               mutex_unlock(&dev->struct_mutex);
+       fence_put(&req->fence);
  }

  static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index af79716..482835a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2616,10 +2616,27 @@ static void i915_set_reset_status(struct drm_i915_private *dev_priv,
        }
  }

-static void i915_gem_request_free(struct fence *req_fence)
+static void i915_gem_request_release(struct fence *req_fence)
  {
        struct drm_i915_gem_request *req = container_of(req_fence,
                                                 typeof(*req), fence);
+       struct intel_engine_cs *ring = req->ring;
+       struct drm_i915_private *dev_priv = to_i915(ring->dev);
+       unsigned long flags;
+
+       /*
+        * Need to add the request to a deferred dereference list to be
+        * processed at a mutex lock safe time.
+        */
+       spin_lock_irqsave(&ring->delayed_free_lock, flags);

At the moment there is no request unreferencing from irq handlers, right? Unless (or until) you plan to add that, you could use a plain spin_lock here. (And in i915_gem_retire_requests_ring.)
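
I.e., assuming no unreference from hard irq context, just (untested):

        spin_lock(&ring->delayed_free_lock);
        list_add_tail(&req->delay_free_list, &ring->delayed_free_list);
        spin_unlock(&ring->delayed_free_lock);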

+       list_add_tail(&req->delay_free_list, &ring->delayed_free_list);
+       spin_unlock_irqrestore(&ring->delayed_free_lock, flags);
+
+       queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);

Have you decided to re-use the retire worker just for convenience, or for some other reason as well?

I found it a bit unexpected and thought a dedicated request free worker would be cleaner, but I don't know, it is not a strong opinion.
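
Roughly what I had in mind, as a sketch only (the worker and member names below are made up):

        /* per-ring dedicated worker, initialised at ring init time */
        INIT_WORK(&ring->request_free_work, i915_gem_request_free_worker);

        /* ...and in i915_gem_request_release(), instead of kicking the
         * retire worker: */
        queue_work(dev_priv->wq, &ring->request_free_work);

The worker body would then just drain ring->delayed_free_list, along the lines of the retire path below.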

+}
+
+static void i915_gem_request_free(struct drm_i915_gem_request *req)
+{
        struct intel_context *ctx = req->ctx;

        BUG_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
@@ -2696,7 +2713,7 @@ static const struct fence_ops i915_gem_request_fops = {
        .enable_signaling       = i915_gem_request_enable_signaling,
        .signaled               = i915_gem_request_is_completed,
        .wait                   = fence_default_wait,
-       .release                = i915_gem_request_free,
+       .release                = i915_gem_request_release,
        .fence_value_str        = i915_fence_value_str,
        .timeline_value_str     = i915_fence_timeline_value_str,
  };
@@ -2992,6 +3009,21 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
                i915_gem_request_assign(&ring->trace_irq_req, NULL);
        }

+       while (!list_empty(&ring->delayed_free_list)) {
+               struct drm_i915_gem_request *request;
+               unsigned long flags;
+
+               request = list_first_entry(&ring->delayed_free_list,
+                                          struct drm_i915_gem_request,
+                                          delay_free_list);

You need to take the spinlock to sample the list head here. Then maybe splice the requests onto a temporary list and do the freeing afterwards, outside the lock.
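
Something along these lines perhaps (untested, reusing the names and irqsave locking from this patch; a plain spin_lock would do per the comment above):

        struct drm_i915_gem_request *request, *next;
        unsigned long flags;
        LIST_HEAD(free_list);

        /* Sample the list head under the lock by splicing it out... */
        spin_lock_irqsave(&ring->delayed_free_lock, flags);
        list_splice_init(&ring->delayed_free_list, &free_list);
        spin_unlock_irqrestore(&ring->delayed_free_lock, flags);

        /* ...then do the actual freeing outside the spinlock. */
        list_for_each_entry_safe(request, next, &free_list, delay_free_list) {
                list_del(&request->delay_free_list);
                i915_gem_request_free(request);
        }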

Regards,

Tvrtko