Split the logic into two functions: one to set up the
submission state and one to use that state to emit the
ring contents.  No intended functional change.
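In outline, the split looks roughly like this (simplified sketch,
error handling and most setup elided):

  int amdgpu_ib_schedule(struct amdgpu_ring *ring, struct amdgpu_job *job,
                         struct dma_fence **f)
  {
          /* validate the job, record the fence context for reset
           * handling, decide whether a pipeline sync is needed,
           * flush the VM and initialize the hw fence, then ...
           */
          r = amdgpu_ib_emit(ring, job);  /* write the ring contents */
          ...
  }

  static int amdgpu_ib_emit(struct amdgpu_ring *ring, struct amdgpu_job *job)
  {
          /* allocate ring space and emit the frame: VM flush, the IBs,
           * the fence, cond_exec patching and the final commit.
           */
          ...
  }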

Signed-off-by: Alex Deucher <[email protected]>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 201 ++++++++++++++-----------
 1 file changed, 110 insertions(+), 91 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index f2b95ad57d97f..4126be3ed3efd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -99,40 +99,15 @@ void amdgpu_ib_free(struct amdgpu_ib *ib, struct dma_fence *f)
        amdgpu_sa_bo_free(&ib->sa_bo, f);
 }
 
-/**
- * amdgpu_ib_schedule - schedule an IB (Indirect Buffer) on the ring
- *
- * @ring: ring index the IB is associated with
- * @job: job to schedule
- * @f: fence created during this submission
- *
- * Schedule an IB on the associated ring (all asics).
- * Returns 0 on success, error on failure.
- *
- * On SI, there are two parallel engines fed from the primary ring,
- * the CE (Constant Engine) and the DE (Drawing Engine).  Since
- * resource descriptors have moved to memory, the CE allows you to
- * prime the caches while the DE is updating register state so that
- * the resource descriptors will be already in cache when the draw is
- * processed.  To accomplish this, the userspace driver submits two
- * IBs, one for the CE and one for the DE.  If there is a CE IB (called
- * a CONST_IB), it will be put on the ring prior to the DE IB.  Prior
- * to SI there was just a DE IB.
- */
-int amdgpu_ib_schedule(struct amdgpu_ring *ring, struct amdgpu_job *job,
-                      struct dma_fence **f)
+static int amdgpu_ib_emit(struct amdgpu_ring *ring, struct amdgpu_job *job)
 {
        struct amdgpu_device *adev = ring->adev;
+       int vmid = AMDGPU_JOB_GET_VMID(job);
        struct amdgpu_ib *ib;
-       struct dma_fence *tmp = NULL;
-       struct amdgpu_fence *af;
-       struct amdgpu_vm *vm;
-       uint64_t fence_ctx;
        uint32_t status = 0, alloc_size;
+       u64 shadow_va, csa_va, gds_va;
        unsigned int fence_flags = 0;
        bool secure, init_shadow;
-       u64 shadow_va, csa_va, gds_va;
-       int vmid = AMDGPU_JOB_GET_VMID(job);
        unsigned int cond_exec;
        unsigned int i;
        int r = 0;
@@ -143,61 +118,23 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, struct amdgpu_job *job,
                return -EINVAL;
 
        ib = &job->ibs[0];
-       vm = job->vm;
-       fence_ctx = job->base.s_fence ?
-               job->base.s_fence->finished.context : 0;
        shadow_va = job->shadow_va;
        csa_va = job->csa_va;
        gds_va = job->gds_va;
        init_shadow = job->init_shadow;
-       af = job->hw_fence;
-       /* Save the context of the job for reset handling.
-        * The driver needs this so it can skip the ring
-        * contents for guilty contexts.
-        */
-       af->context = fence_ctx;
-       /* the vm fence is also part of the job's context */
-       job->hw_vm_fence->context = fence_ctx;
-
-       if (!ring->sched.ready) {
-               dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", 
ring->name);
-               return -EINVAL;
-       }
 
-       if (vm && !job->vmid) {
-               dev_err(adev->dev, "VM IB without ID\n");
-               return -EINVAL;
-       }
-
-       if ((ib->flags & AMDGPU_IB_FLAGS_SECURE) &&
-           (!ring->funcs->secure_submission_supported)) {
-               dev_err(adev->dev, "secure submissions not supported on ring 
<%s>\n", ring->name);
-               return -EINVAL;
-       }
+       if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE)
+               fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY;
 
        alloc_size = ring->funcs->emit_frame_size + job->num_ibs *
                ring->funcs->emit_ib_size;
 
        r = amdgpu_ring_alloc(ring, alloc_size);
        if (r) {
-               dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
+               dev_err(adev->dev, "Ring allocation for IB failed (%d).\n", r);
                return r;
        }
 
-       job->need_ctx_switch = ring->current_ctx != fence_ctx;
-       if (ring->funcs->emit_pipeline_sync && job &&
-           ((tmp = amdgpu_sync_get_fence(&job->explicit_sync)) ||
-            job->need_ctx_switch || amdgpu_vm_need_pipeline_sync(ring, job))) {
-
-               job->need_pipe_sync = true;
-               job->pipe_sync_seq = ring->fence_drv.sync_seq;
-
-               if (tmp)
-                       trace_amdgpu_ib_pipe_sync(job, tmp);
-
-               dma_fence_put(tmp);
-       }
-
        if ((ib->flags & AMDGPU_IB_FLAG_EMIT_MEM_SYNC) && ring->funcs->emit_mem_sync)
                ring->funcs->emit_mem_sync(ring);
 
@@ -208,11 +145,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, struct amdgpu_job *job,
        if (ring->funcs->insert_start)
                ring->funcs->insert_start(ring);
 
-       r = amdgpu_vm_flush(ring, job);
-       if (r) {
-               amdgpu_ring_undo(ring);
-               return r;
-       }
        amdgpu_vm_emit_flush(ring, job);
 
        amdgpu_ring_ib_begin(ring);
@@ -264,9 +196,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, struct amdgpu_job *job,
 
        amdgpu_device_invalidate_hdp(adev, ring);
 
-       if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE)
-               fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY;
-
        /* wrap the last IB with fence */
        if (job->uf_addr) {
                amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
@@ -278,25 +207,13 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, struct amdgpu_job *job,
                amdgpu_ring_init_cond_exec(ring, ring->cond_exe_gpu_addr);
        }
 
-       r = amdgpu_fence_init(ring, af);
-       if (r) {
-               dev_err(adev->dev, "failed to emit fence (%d)\n", r);
-               if (job->vmid)
-                       amdgpu_vmid_reset(adev, ring->vm_hub, job->vmid);
-               amdgpu_ring_undo(ring);
-               return r;
-       }
-       amdgpu_fence_emit(ring, af, fence_flags);
-       *f = &af->base;
-       /* get a ref for the job */
-       dma_fence_get(*f);
+       amdgpu_fence_emit(ring, job->hw_fence, fence_flags);
 
        if (ring->funcs->insert_end)
                ring->funcs->insert_end(ring);
 
        amdgpu_ring_patch_cond_exec(ring, cond_exec);
 
-       ring->current_ctx = fence_ctx;
        if (ring->funcs->emit_switch_buffer)
                amdgpu_ring_emit_switch_buffer(ring);
 
@@ -310,7 +227,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, struct amdgpu_job *job,
         * fence so we know what rings contents to backup
         * after we reset the queue.
         */
-       amdgpu_fence_save_wptr(af);
+       amdgpu_fence_save_wptr(job->hw_fence);
 
        amdgpu_ring_ib_end(ring);
        amdgpu_ring_commit(ring);
@@ -318,6 +235,108 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, struct amdgpu_job *job,
        return 0;
 }
 
+/**
+ * amdgpu_ib_schedule - schedule an IB (Indirect Buffer) on the ring
+ *
+ * @ring: ring index the IB is associated with
+ * @job: job to schedule
+ * @f: fence created during this submission
+ *
+ * Schedule an IB on the associated ring (all asics).
+ * Returns 0 on success, error on failure.
+ *
+ * On SI, there are two parallel engines fed from the primary ring,
+ * the CE (Constant Engine) and the DE (Drawing Engine).  Since
+ * resource descriptors have moved to memory, the CE allows you to
+ * prime the caches while the DE is updating register state so that
+ * the resource descriptors will be already in cache when the draw is
+ * processed.  To accomplish this, the userspace driver submits two
+ * IBs, one for the CE and one for the DE.  If there is a CE IB (called
+ * a CONST_IB), it will be put on the ring prior to the DE IB.  Prior
+ * to SI there was just a DE IB.
+ */
+int amdgpu_ib_schedule(struct amdgpu_ring *ring, struct amdgpu_job *job,
+                      struct dma_fence **f)
+{
+       struct amdgpu_device *adev = ring->adev;
+       struct dma_fence *tmp = NULL;
+       struct amdgpu_fence *af;
+       struct amdgpu_ib *ib;
+       struct amdgpu_vm *vm;
+       uint64_t fence_ctx;
+       int r = 0;
+
+       if (!job)
+               return -EINVAL;
+       if (job->num_ibs == 0)
+               return -EINVAL;
+
+       ib = &job->ibs[0];
+       vm = job->vm;
+       fence_ctx = job->base.s_fence ?
+               job->base.s_fence->finished.context : 0;
+       af = job->hw_fence;
+       /* Save the context of the job for reset handling.
+        * The driver needs this so it can skip the ring
+        * contents for guilty contexts.
+        */
+       af->context = fence_ctx;
+       /* the vm fence is also part of the job's context */
+       job->hw_vm_fence->context = fence_ctx;
+
+       if (!ring->sched.ready) {
+               dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", 
ring->name);
+               return -EINVAL;
+       }
+
+       if (vm && !job->vmid) {
+               dev_err(adev->dev, "VM IB without ID\n");
+               return -EINVAL;
+       }
+
+       if ((ib->flags & AMDGPU_IB_FLAGS_SECURE) &&
+           (!ring->funcs->secure_submission_supported)) {
+               dev_err(adev->dev, "secure submissions not supported on ring 
<%s>\n", ring->name);
+               return -EINVAL;
+       }
+
+       job->need_ctx_switch = ring->current_ctx != fence_ctx;
+       if (ring->funcs->emit_pipeline_sync && job &&
+           ((tmp = amdgpu_sync_get_fence(&job->explicit_sync)) ||
+            job->need_ctx_switch || amdgpu_vm_need_pipeline_sync(ring, job))) {
+
+               job->need_pipe_sync = true;
+               job->pipe_sync_seq = ring->fence_drv.sync_seq;
+
+               if (tmp)
+                       trace_amdgpu_ib_pipe_sync(job, tmp);
+
+               dma_fence_put(tmp);
+       }
+
+       r = amdgpu_vm_flush(ring, job);
+       if (r)
+               return r;
+
+       r = amdgpu_fence_init(ring, af);
+       if (r) {
+               dev_err(adev->dev, "failed to emit fence (%d)\n", r);
+               if (job->vmid)
+                       amdgpu_vmid_reset(adev, ring->vm_hub, job->vmid);
+               return r;
+       }
+       *f = &af->base;
+       /* get a ref for the job */
+       dma_fence_get(*f);
+
+       r = amdgpu_ib_emit(ring, job);
+       if (r)
+               return r;
+       ring->current_ctx = fence_ctx;
+
+       return 0;
+}
+
 /**
  * amdgpu_ib_pool_init - Init the IB (Indirect Buffer) pool
  *
-- 
2.52.0