<snip>


 static void nested_enable_signaling(struct drm_i915_gem_request *rq)
@@ -1223,6 +1155,9 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
        enum intel_engine_id id;
        int err;

+       BUILD_BUG_ON(ARRAY_SIZE(engine->execlist_port) *
+                    sizeof(struct guc_wq_item) > GUC_WQ_SIZE);
+

I believe we also need to multiply by I915_NUM_ENGINES here since we share the WQ across all engines.
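
Something along these lines is what I have in mind (just a sketch of the suggested assert, not compile-tested):

        /* Worst case for a shared WQ: every engine has all of its
         * ports' requests queued as wq items at the same time.
         */
        BUILD_BUG_ON(I915_NUM_ENGINES *
                     ARRAY_SIZE(engine->execlist_port) *
                     sizeof(struct guc_wq_item) > GUC_WQ_SIZE);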

        if (!client) {
                client = guc_client_alloc(dev_priv,
                                          INTEL_INFO(dev_priv)->ring_mask,
@@ -1250,7 +1185,6 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
        guc_interrupts_capture(dev_priv);

        for_each_engine(engine, dev_priv, id) {
-               const int wqi_size = sizeof(struct guc_wq_item);
                struct drm_i915_gem_request *rq;

                /* The tasklet was initialised by execlists, and may be in
@@ -1263,10 +1197,8 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)

                /* Replay the current set of previously submitted requests */
                spin_lock_irq(&engine->timeline->lock);
-               list_for_each_entry(rq, &engine->timeline->requests, link) {
-                       guc_client_update_wq_rsvd(client, wqi_size);
+               list_for_each_entry(rq, &engine->timeline->requests, link)
                        i915_guc_submit(rq);

Aren't we potentially sending requests here on separate wq items even though they were squashed together in i915_guc_dequeue, thus potentially (although unlikely) overflowing the wq?
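
If so, maybe a space check on the replay path would at least make the overflow visible; guc_wq_space_left() below is just a made-up name for illustration, not something in the tree:

        list_for_each_entry(rq, &engine->timeline->requests, link) {
                /* Each replayed request gets its own wq item, even if
                 * the original submission coalesced several of them.
                 */
                if (WARN_ON(guc_wq_space_left(client) <
                            sizeof(struct guc_wq_item)))
                        break;
                i915_guc_submit(rq);
        }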

Thanks,
Daniele

-               }
                spin_unlock_irq(&engine->timeline->lock);
        }
