Combine the near-identical implementations of intel_logical_ring_begin()
and intel_ring_begin() - the only difference is that the logical wait
has to check for a matching ring (which is assumed by legacy).

Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/intel_lrc.c        | 141 ++------------------------------
 drivers/gpu/drm/i915/intel_lrc.h        |   1 -
 drivers/gpu/drm/i915/intel_mocs.c       |  12 +--
 drivers/gpu/drm/i915/intel_ringbuffer.c | 111 +++++++++++++------------
 4 files changed, 69 insertions(+), 196 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index dc4fc9d8612c..3d14b69632e8 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -698,48 +698,6 @@ int intel_logical_ring_alloc_request_extras(struct 
drm_i915_gem_request *request
        return 0;
 }
 
-static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
-                                      int bytes)
-{
-       struct intel_ringbuffer *ringbuf = req->ringbuf;
-       struct intel_engine_cs *ring = req->ring;
-       struct drm_i915_gem_request *target;
-       unsigned space;
-       int ret;
-
-       if (intel_ring_space(ringbuf) >= bytes)
-               return 0;
-
-       /* The whole point of reserving space is to not wait! */
-       WARN_ON(ringbuf->reserved_in_use);
-
-       list_for_each_entry(target, &ring->request_list, list) {
-               /*
-                * The request queue is per-engine, so can contain requests
-                * from multiple ringbuffers. Here, we must ignore any that
-                * aren't from the ringbuffer we're considering.
-                */
-               if (target->ringbuf != ringbuf)
-                       continue;
-
-               /* Would completion of this request free enough space? */
-               space = __intel_ring_space(target->postfix, ringbuf->tail,
-                                          ringbuf->size);
-               if (space >= bytes)
-                       break;
-       }
-
-       if (WARN_ON(&target->list == &ring->request_list))
-               return -ENOSPC;
-
-       ret = i915_wait_request(target);
-       if (ret)
-               return ret;
-
-       ringbuf->space = space;
-       return 0;
-}
-
 /*
  * intel_logical_ring_advance_and_submit() - advance the tail and submit the 
workload
  * @request: Request to advance the logical ringbuffer of.
@@ -763,89 +721,6 @@ intel_logical_ring_advance_and_submit(struct 
drm_i915_gem_request *request)
                execlists_context_queue(request);
 }
 
-static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
-{
-       int rem = ringbuf->size - ringbuf->tail;
-       memset(ringbuf->virtual_start + ringbuf->tail, 0, rem);
-
-       ringbuf->tail = 0;
-       intel_ring_update_space(ringbuf);
-}
-
-static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
-{
-       struct intel_ringbuffer *ringbuf = req->ringbuf;
-       int remain_usable = ringbuf->effective_size - ringbuf->tail;
-       int remain_actual = ringbuf->size - ringbuf->tail;
-       int ret, total_bytes, wait_bytes = 0;
-       bool need_wrap = false;
-
-       if (ringbuf->reserved_in_use)
-               total_bytes = bytes;
-       else
-               total_bytes = bytes + ringbuf->reserved_size;
-
-       if (unlikely(bytes > remain_usable)) {
-               /*
-                * Not enough space for the basic request. So need to flush
-                * out the remainder and then wait for base + reserved.
-                */
-               wait_bytes = remain_actual + total_bytes;
-               need_wrap = true;
-       } else {
-               if (unlikely(total_bytes > remain_usable)) {
-                       /*
-                        * The base request will fit but the reserved space
-                        * falls off the end. So only need to to wait for the
-                        * reserved size after flushing out the remainder.
-                        */
-                       wait_bytes = remain_actual + ringbuf->reserved_size;
-                       need_wrap = true;
-               } else if (total_bytes > ringbuf->space) {
-                       /* No wrapping required, just waiting. */
-                       wait_bytes = total_bytes;
-               }
-       }
-
-       if (wait_bytes) {
-               ret = logical_ring_wait_for_space(req, wait_bytes);
-               if (unlikely(ret))
-                       return ret;
-
-               if (need_wrap)
-                       __wrap_ring_buffer(ringbuf);
-       }
-
-       return 0;
-}
-
-/**
- * intel_logical_ring_begin() - prepare the logical ringbuffer to accept some 
commands
- *
- * @req: The request to start some new work for
- * @num_dwords: number of DWORDs that we plan to write to the ringbuffer.
- *
- * The ringbuffer might not be ready to accept the commands right away (maybe 
it needs to
- * be wrapped, or wait a bit for the tail to be updated). This function takes 
care of that
- * and also preallocates a request (every workload submission is still 
mediated through
- * requests, same as it did with legacy ringbuffer submission).
- *
- * Return: non-zero if the ringbuffer is not ready to be written to.
- */
-int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
-{
-       int ret;
-
-       WARN_ON(req == NULL);
-
-       ret = logical_ring_prepare(req, num_dwords * sizeof(uint32_t));
-       if (ret)
-               return ret;
-
-       req->ringbuf->space -= num_dwords * sizeof(uint32_t);
-       return 0;
-}
-
 int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request)
 {
        /*
@@ -858,7 +733,7 @@ int intel_logical_ring_reserve_space(struct 
drm_i915_gem_request *request)
         */
        intel_ring_reserved_space_reserve(request->ringbuf, 
MIN_SPACE_FOR_ADD_REQUEST);
 
-       return intel_logical_ring_begin(request, 0);
+       return intel_ring_begin(request, 0);
 }
 
 /**
@@ -928,7 +803,7 @@ int intel_execlists_submission(struct 
i915_execbuffer_params *params,
 
        if (ring == &dev_priv->ring[RCS] &&
            instp_mode != dev_priv->relative_constants_mode) {
-               ret = intel_logical_ring_begin(params->request, 4);
+               ret = intel_ring_begin(params->request, 4);
                if (ret)
                        return ret;
 
@@ -1104,7 +979,7 @@ static int intel_logical_ring_workarounds_emit(struct 
drm_i915_gem_request *req)
        if (ret)
                return ret;
 
-       ret = intel_logical_ring_begin(req, w->count * 2 + 2);
+       ret = intel_ring_begin(req, w->count * 2 + 2);
        if (ret)
                return ret;
 
@@ -1566,7 +1441,7 @@ static int intel_logical_ring_emit_pdps(struct 
drm_i915_gem_request *req)
        const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
        int i, ret;
 
-       ret = intel_logical_ring_begin(req, num_lri_cmds * 2 + 2);
+       ret = intel_ring_begin(req, num_lri_cmds * 2 + 2);
        if (ret)
                return ret;
 
@@ -1611,7 +1486,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request 
*req,
                req->ctx->ppgtt->pd_dirty_rings &= ~intel_ring_flag(req->ring);
        }
 
-       ret = intel_logical_ring_begin(req, 4);
+       ret = intel_ring_begin(req, 4);
        if (ret)
                return ret;
 
@@ -1655,7 +1530,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request 
*request,
        uint32_t cmd;
        int ret;
 
-       ret = intel_logical_ring_begin(request, 4);
+       ret = intel_ring_begin(request, 4);
        if (ret)
                return ret;
 
@@ -1722,7 +1597,7 @@ static int gen8_emit_flush_render(struct 
drm_i915_gem_request *request,
                        vf_flush_wa = true;
        }
 
-       ret = intel_logical_ring_begin(request, vf_flush_wa ? 12 : 6);
+       ret = intel_ring_begin(request, vf_flush_wa ? 12 : 6);
        if (ret)
                return ret;
 
@@ -1779,7 +1654,7 @@ static int gen8_emit_request(struct drm_i915_gem_request 
*request)
         * used as a workaround for not being allowed to do lite
         * restore with HEAD==TAIL (WaIdleLiteRestore).
         */
-       ret = intel_logical_ring_begin(request, 8);
+       ret = intel_ring_begin(request, 8);
        if (ret)
                return ret;
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 9d4aa699e593..32401e11cebe 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -60,7 +60,6 @@ int intel_logical_ring_reserve_space(struct 
drm_i915_gem_request *request);
 void intel_logical_ring_stop(struct intel_engine_cs *ring);
 void intel_logical_ring_cleanup(struct intel_engine_cs *ring);
 int intel_logical_rings_init(struct drm_device *dev);
-int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords);
 
 int logical_ring_flush_all_caches(struct drm_i915_gem_request *req);
 
diff --git a/drivers/gpu/drm/i915/intel_mocs.c 
b/drivers/gpu/drm/i915/intel_mocs.c
index d8a7fdc7baeb..5d4f6f3b67cd 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -200,11 +200,9 @@ static int emit_mocs_control_table(struct 
drm_i915_gem_request *req,
        if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
                return -ENODEV;
 
-       ret = intel_logical_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
-       if (ret) {
-               DRM_DEBUG("intel_logical_ring_begin failed %d\n", ret);
+       ret = intel_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
+       if (ret)
                return ret;
-       }
 
        intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
 
@@ -257,11 +255,9 @@ static int emit_mocs_l3cc_table(struct 
drm_i915_gem_request *req,
        if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
                return -ENODEV;
 
-       ret = intel_logical_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
-       if (ret) {
-               DRM_DEBUG("intel_logical_ring_begin failed %d\n", ret);
+       ret = intel_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
+       if (ret)
                return ret;
-       }
 
        intel_ring_emit(ringbuf,
                        MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c 
b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 86c54584f64a..c694f602a0b8 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2062,46 +2062,6 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs 
*ring)
        ring->dev = NULL;
 }
 
-static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
-{
-       struct intel_ringbuffer *ringbuf = ring->buffer;
-       struct drm_i915_gem_request *request;
-       unsigned space;
-       int ret;
-
-       if (intel_ring_space(ringbuf) >= n)
-               return 0;
-
-       /* The whole point of reserving space is to not wait! */
-       WARN_ON(ringbuf->reserved_in_use);
-
-       list_for_each_entry(request, &ring->request_list, list) {
-               space = __intel_ring_space(request->postfix, ringbuf->tail,
-                                          ringbuf->size);
-               if (space >= n)
-                       break;
-       }
-
-       if (WARN_ON(&request->list == &ring->request_list))
-               return -ENOSPC;
-
-       ret = i915_wait_request(request);
-       if (ret)
-               return ret;
-
-       ringbuf->space = space;
-       return 0;
-}
-
-static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
-{
-       int rem = ringbuf->size - ringbuf->tail;
-       memset(ringbuf->virtual_start + ringbuf->tail, 0, rem);
-
-       ringbuf->tail = 0;
-       intel_ring_update_space(ringbuf);
-}
-
 int intel_ring_idle(struct intel_engine_cs *ring)
 {
        struct drm_i915_gem_request *req;
@@ -2188,9 +2148,59 @@ void intel_ring_reserved_space_end(struct 
intel_ringbuffer *ringbuf)
        ringbuf->reserved_in_use = false;
 }
 
-static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
+static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 {
-       struct intel_ringbuffer *ringbuf = ring->buffer;
+       struct intel_ringbuffer *ringbuf = req->ringbuf;
+       struct intel_engine_cs *ring = req->ring;
+       struct drm_i915_gem_request *target;
+       unsigned space;
+       int ret;
+
+       if (intel_ring_space(ringbuf) >= bytes)
+               return 0;
+
+       /* The whole point of reserving space is to not wait! */
+       WARN_ON(ringbuf->reserved_in_use);
+
+       list_for_each_entry(target, &ring->request_list, list) {
+               /*
+                * The request queue is per-engine, so can contain requests
+                * from multiple ringbuffers. Here, we must ignore any that
+                * aren't from the ringbuffer we're considering.
+                */
+               if (target->ringbuf != ringbuf)
+                       continue;
+
+               /* Would completion of this request free enough space? */
+               space = __intel_ring_space(target->postfix, ringbuf->tail,
+                                          ringbuf->size);
+               if (space >= bytes)
+                       break;
+       }
+
+       if (WARN_ON(&target->list == &ring->request_list))
+               return -ENOSPC;
+
+       ret = i915_wait_request(target);
+       if (ret)
+               return ret;
+
+       ringbuf->space = space;
+       return 0;
+}
+
+static void ring_wrap(struct intel_ringbuffer *ringbuf)
+{
+       int rem = ringbuf->size - ringbuf->tail;
+       memset(ringbuf->virtual_start + ringbuf->tail, 0, rem);
+
+       ringbuf->tail = 0;
+       intel_ring_update_space(ringbuf);
+}
+
+static int ring_prepare(struct drm_i915_gem_request *req, int bytes)
+{
+       struct intel_ringbuffer *ringbuf = req->ringbuf;
        int remain_usable = ringbuf->effective_size - ringbuf->tail;
        int remain_actual = ringbuf->size - ringbuf->tail;
        int ret, total_bytes, wait_bytes = 0;
@@ -2224,33 +2234,26 @@ static int __intel_ring_prepare(struct intel_engine_cs 
*ring, int bytes)
        }
 
        if (wait_bytes) {
-               ret = ring_wait_for_space(ring, wait_bytes);
+               ret = wait_for_space(req, wait_bytes);
                if (unlikely(ret))
                        return ret;
 
                if (need_wrap)
-                       __wrap_ring_buffer(ringbuf);
+                       ring_wrap(ringbuf);
        }
 
        return 0;
 }
 
-int intel_ring_begin(struct drm_i915_gem_request *req,
-                    int num_dwords)
+int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 {
-       struct intel_engine_cs *ring;
-       struct drm_i915_private *dev_priv;
        int ret;
 
-       WARN_ON(req == NULL);
-       ring = req->ring;
-       dev_priv = req->i915;
-
-       ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t));
+       ret = ring_prepare(req, num_dwords * sizeof(uint32_t));
        if (ret)
                return ret;
 
-       ring->buffer->space -= num_dwords * sizeof(uint32_t);
+       req->ringbuf->space -= num_dwords * sizeof(uint32_t);
        return 0;
 }
 
-- 
2.7.0.rc3

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to