Rather than saving and reemitting the raw ring contents, use the state stored in the job and fence to explicitly reemit each submission. This greatly simplifies reemitting the ring state and allows it to be reemitted over and over if there are multiple ring resets.
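
Concretely, after a successful queue reset the scheduler's pending list is
replayed through the normal emit path instead of copying raw dwords back
into the ring. A condensed sketch of that flow (error handling trimmed;
reemit_pending_jobs() is a hypothetical name that mirrors
amdgpu_job_reemit_jobs() in the diff below, not the literal patch code):

  static int reemit_pending_jobs(struct amdgpu_ring *ring,
  				 struct amdgpu_job *timedout_job)
  {
  	struct drm_sched_job *s_job, *tmp;
  	int r;

  	/* rewind the ring to the state saved before the guilty job */
  	amdgpu_ring_restore_state(ring, timedout_job);

  	/* the guilty job's fence has skip_ib set, so amdgpu_ib_emit()
  	 * reemits only its VM flush and fences, not the hung IB
  	 */
  	r = amdgpu_ib_emit(ring, timedout_job);
  	if (r)
  		return r;

  	/* then replay every other pending job in submission order */
  	list_for_each_entry_safe(s_job, tmp, &ring->sched.pending_list, list) {
  		struct amdgpu_job *job = to_amdgpu_job(s_job);

  		amdgpu_ring_save_state(ring, job);
  		r = amdgpu_ib_emit(ring, job);
  		if (r)
  			return r;
  	}
  	return 0;
  }

Because the state needed for reemission lives in the job and fence rather
than in a ring-buffer snapshot, the same walk can simply be repeated if
the queue has to be reset more than once.
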
v2: save and restore ring state as well

Signed-off-by: Alex Deucher <[email protected]>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 91 ++---------------------
 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c    | 46 +++++++++---
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c   | 33 ++++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c  | 50 +------------
 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h  | 15 +---
 5 files changed, 80 insertions(+), 155 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index c1cb47e92d08c..28691f9b6e32d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -89,16 +89,6 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
 	return seq;
 }
 
-static void amdgpu_fence_save_fence_wptr_start(struct amdgpu_fence *af)
-{
-	af->fence_wptr_start = af->ring->wptr;
-}
-
-static void amdgpu_fence_save_fence_wptr_end(struct amdgpu_fence *af)
-{
-	af->fence_wptr_end = af->ring->wptr;
-}
-
 /**
  * amdgpu_fence_emit - emit a fence on the requested ring
  *
@@ -154,11 +144,8 @@ int amdgpu_fence_init(struct amdgpu_ring *ring, struct amdgpu_fence *af)
 void amdgpu_fence_emit(struct amdgpu_ring *ring, struct amdgpu_fence *af,
 		       unsigned int flags)
 {
-	amdgpu_fence_save_fence_wptr_start(af);
 	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, af->base.seqno,
 			       flags | AMDGPU_FENCE_FLAG_INT);
-	amdgpu_fence_save_fence_wptr_end(af);
-	amdgpu_fence_save_wptr(af);
 }
 
 /**
@@ -743,7 +730,6 @@ void amdgpu_fence_driver_update_timedout_fence_state(struct amdgpu_fence *af)
 	struct amdgpu_ring *ring = af->ring;
 	unsigned long flags;
 	u32 seq, last_seq;
-	bool reemitted = false;
 
 	last_seq = amdgpu_fence_read(ring) & ring->fence_drv.num_fences_mask;
 	seq = ring->fence_drv.sync_seq & ring->fence_drv.num_fences_mask;
@@ -761,84 +747,17 @@ void amdgpu_fence_driver_update_timedout_fence_state(struct amdgpu_fence *af)
 		if (unprocessed && !dma_fence_is_signaled_locked(unprocessed)) {
 			fence = container_of(unprocessed, struct amdgpu_fence, base);
 
-			if (fence->reemitted > 1)
-				reemitted = true;
-			else if (fence == af)
+			if (fence == af) {
 				dma_fence_set_error(&fence->base, -ETIME);
-			else if (fence->context == af->context)
+				fence->skip_ib = true;
+			} else if (fence->context == af->context) {
 				dma_fence_set_error(&fence->base, -ECANCELED);
-		}
-		rcu_read_unlock();
-	} while (last_seq != seq);
-	spin_unlock_irqrestore(&ring->fence_drv.lock, flags);
-
-	if (reemitted) {
-		/* if we've already reemitted once then just cancel everything */
-		amdgpu_fence_driver_force_completion(af->ring, &af->base);
-		af->ring->ring_backup_entries_to_copy = 0;
-	}
-}
-
-void amdgpu_fence_save_wptr(struct amdgpu_fence *af)
-{
-	af->wptr = af->ring->wptr;
-}
-
-static void amdgpu_ring_backup_unprocessed_command(struct amdgpu_ring *ring,
-						   u64 start_wptr, u32 end_wptr)
-{
-	unsigned int first_idx = start_wptr & ring->buf_mask;
-	unsigned int last_idx = end_wptr & ring->buf_mask;
-	unsigned int i;
-
-	/* Backup the contents of the ring buffer. */
-	for (i = first_idx; i != last_idx; ++i, i &= ring->buf_mask)
-		ring->ring_backup[ring->ring_backup_entries_to_copy++] = ring->ring[i];
-}
-
-void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring,
-					     struct amdgpu_fence *guilty_fence)
-{
-	struct dma_fence *unprocessed;
-	struct dma_fence __rcu **ptr;
-	struct amdgpu_fence *fence;
-	u64 wptr;
-	u32 seq, last_seq;
-
-	last_seq = amdgpu_fence_read(ring) & ring->fence_drv.num_fences_mask;
-	seq = ring->fence_drv.sync_seq & ring->fence_drv.num_fences_mask;
-	wptr = ring->fence_drv.signalled_wptr;
-	ring->ring_backup_entries_to_copy = 0;
-
-	do {
-		last_seq++;
-		last_seq &= ring->fence_drv.num_fences_mask;
-
-		ptr = &ring->fence_drv.fences[last_seq];
-		rcu_read_lock();
-		unprocessed = rcu_dereference(*ptr);
-
-		if (unprocessed && !dma_fence_is_signaled(unprocessed)) {
-			fence = container_of(unprocessed, struct amdgpu_fence, base);
-
-			/* save everything if the ring is not guilty, otherwise
-			 * just save the content from other contexts.
-			 */
-			if (!fence->reemitted &&
-			    (!guilty_fence || (fence->context != guilty_fence->context))) {
-				amdgpu_ring_backup_unprocessed_command(ring, wptr,
-								       fence->wptr);
-			} else if (!fence->reemitted) {
-				/* always save the fence */
-				amdgpu_ring_backup_unprocessed_command(ring,
-								       fence->fence_wptr_start,
-								       fence->fence_wptr_end);
+				fence->skip_ib = true;
 			}
-			wptr = fence->wptr;
-			fence->reemitted++;
 		}
 		rcu_read_unlock();
 	} while (last_seq != seq);
+	spin_unlock_irqrestore(&ring->fence_drv.lock, flags);
 }
 
 /*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 4126be3ed3efd..ab6e35716c266 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -99,7 +99,7 @@ void amdgpu_ib_free(struct amdgpu_ib *ib, struct dma_fence *f)
 	amdgpu_sa_bo_free(&ib->sa_bo, f);
 }
 
-static int amdgpu_ib_emit(struct amdgpu_ring *ring, struct amdgpu_job *job)
+int amdgpu_ib_emit(struct amdgpu_ring *ring, struct amdgpu_job *job)
 {
 	struct amdgpu_device *adev = ring->adev;
 	int vmid = AMDGPU_JOB_GET_VMID(job);
@@ -135,6 +135,40 @@ static int amdgpu_ib_emit(struct amdgpu_ring *ring, struct amdgpu_job *job)
 		return r;
 	}
 
+	if (job->hw_fence->skip_ib) {
+		if (ring->funcs->insert_start)
+			ring->funcs->insert_start(ring);
+		if (job->emit_vm_fence) {
+			amdgpu_ring_ib_begin(ring);
+			if (ring->funcs->init_cond_exec)
+				cond_exec = amdgpu_ring_init_cond_exec(ring,
								       ring->cond_exe_gpu_addr);
+			if (job->vm_flush_needed)
+				amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
+			amdgpu_fence_emit(ring, job->hw_vm_fence, 0);
+			amdgpu_ring_ib_end(ring);
+			amdgpu_ring_patch_cond_exec(ring, cond_exec);
+		}
+		amdgpu_ring_ib_begin(ring);
+		if (ring->funcs->emit_gfx_shadow)
+			amdgpu_ring_emit_gfx_shadow(ring, shadow_va, csa_va, gds_va,
+						    init_shadow, vmid);
+		if (ring->funcs->init_cond_exec)
+			cond_exec = amdgpu_ring_init_cond_exec(ring,
							       ring->cond_exe_gpu_addr);
+		if (ring->funcs->emit_gfx_shadow && ring->funcs->init_cond_exec) {
+			amdgpu_ring_emit_gfx_shadow(ring, 0, 0, 0, false, 0);
+			amdgpu_ring_init_cond_exec(ring, ring->cond_exe_gpu_addr);
+		}
+		amdgpu_fence_emit(ring, job->hw_fence, fence_flags);
+		if (ring->funcs->insert_end)
+			ring->funcs->insert_end(ring);
+		amdgpu_ring_patch_cond_exec(ring, cond_exec);
+		amdgpu_ring_ib_end(ring);
+		amdgpu_ring_commit(ring);
+		return 0;
+	}
+
 	if ((ib->flags & AMDGPU_IB_FLAG_EMIT_MEM_SYNC) &&
 	    ring->funcs->emit_mem_sync)
 		ring->funcs->emit_mem_sync(ring);
@@ -221,14 +255,6 @@ static int amdgpu_ib_emit(struct amdgpu_ring *ring, struct amdgpu_job *job)
 	    ring->hw_prio == AMDGPU_GFX_PIPE_PRIO_HIGH)
 		ring->funcs->emit_wave_limit(ring, false);
 
-	/* Save the wptr associated with this fence.
-	 * This must be last for resets to work properly
-	 * as we need to save the wptr associated with this
-	 * fence so we know what rings contents to backup
-	 * after we reset the queue.
-	 */
-	amdgpu_fence_save_wptr(job->hw_fence);
-
 	amdgpu_ring_ib_end(ring);
 	amdgpu_ring_commit(ring);
 
@@ -300,6 +326,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, struct amdgpu_job *job,
 		return -EINVAL;
 	}
 
+	amdgpu_ring_save_state(ring, job);
+
 	job->need_ctx_switch = ring->current_ctx != fence_ctx;
 	if (ring->funcs->emit_pipeline_sync && job &&
 	    ((tmp = amdgpu_sync_get_fence(&job->explicit_sync)) ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index af57e116126a8..887bfd40b9bfa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -85,6 +85,37 @@ static void amdgpu_job_core_dump(struct amdgpu_device *adev,
 	}
 }
 
+static int amdgpu_job_reemit_jobs(struct drm_sched_job *timedout_s_job)
+{
+	struct amdgpu_job *timedout_job = to_amdgpu_job(timedout_s_job);
+	struct amdgpu_ring *ring = to_amdgpu_ring(timedout_s_job->sched);
+	struct drm_gpu_scheduler *sched = &ring->sched;
+	struct drm_sched_job *s_job, *tmp;
+	int r;
+
+	/* skip reemit if we reset all the rings on an instance */
+	if (ring->all_instance_rings_reset)
+		return 0;
+
+	amdgpu_ring_restore_state(ring, timedout_job);
+
+	r = amdgpu_ib_emit(ring, timedout_job);
+	if (r)
+		return r;
+	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
+		struct amdgpu_job *job = to_amdgpu_job(s_job);
+
+		amdgpu_ring_save_state(ring, job);
+
+		r = amdgpu_ib_emit(ring, job);
+		if (r)
+			return r;
+	}
+
+	return 0;
+}
+
+
 static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
 {
 	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
@@ -138,6 +169,8 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
 		/* Stop the scheduler to prevent anybody else from touching the ring buffer. */
 		drm_sched_wqueue_stop(&ring->sched);
 		r = amdgpu_ring_reset(ring, job->vmid, job->hw_fence);
+		if (!r)
+			r = amdgpu_job_reemit_jobs(s_job);
 		if (!r) {
 			/* Start the scheduler again */
 			drm_sched_wqueue_start(&ring->sched);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 0065ac0fb1e52..3a031649f6c97 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -104,29 +104,6 @@ int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned int ndw)
 	return 0;
 }
 
-/**
- * amdgpu_ring_alloc_reemit - allocate space on the ring buffer for reemit
- *
- * @ring: amdgpu_ring structure holding ring information
- * @ndw: number of dwords to allocate in the ring buffer
- *
- * Allocate @ndw dwords in the ring buffer (all asics).
- * doesn't check the max_dw limit as we may be reemitting
- * several submissions.
- */
-static void amdgpu_ring_alloc_reemit(struct amdgpu_ring *ring, unsigned int ndw)
-{
-	/* Align requested size with padding so unlock_commit can
-	 * pad safely */
-	ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask;
-
-	ring->count_dw = ndw;
-	ring->wptr_old = ring->wptr;
-
-	if (ring->funcs->begin_use)
-		ring->funcs->begin_use(ring);
-}
-
 /**
  * amdgpu_ring_insert_nop - insert NOP packets
  *
@@ -370,12 +347,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
 	/* Initialize cached_rptr to 0 */
 	ring->cached_rptr = 0;
 
-	if (!ring->ring_backup) {
-		ring->ring_backup = kvzalloc(ring->ring_size, GFP_KERNEL);
-		if (!ring->ring_backup)
-			return -ENOMEM;
-	}
-
 	/* Allocate ring buffer */
 	if (ring->ring_obj == NULL) {
 		r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_bytes,
@@ -386,7 +357,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
 					    (void **)&ring->ring);
 		if (r) {
 			dev_err(adev->dev, "(%d) ring create failed\n", r);
-			kvfree(ring->ring_backup);
 			return r;
 		}
 		amdgpu_ring_clear_ring(ring);
@@ -430,8 +400,6 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
 	amdgpu_bo_free_kernel(&ring->ring_obj,
 			      &ring->gpu_addr,
 			      (void **)&ring->ring);
-	kvfree(ring->ring_backup);
-	ring->ring_backup = NULL;
 
 	dma_fence_put(ring->vmid_wait);
 	ring->vmid_wait = NULL;
@@ -868,8 +836,6 @@ bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring)
 void amdgpu_ring_reset_helper_begin(struct amdgpu_ring *ring,
 				    struct amdgpu_fence *guilty_fence)
 {
-	/* back up the non-guilty commands */
-	amdgpu_ring_backup_unprocessed_commands(ring, guilty_fence);
 	/* set an error on all fences from the context */
 	if (guilty_fence)
 		amdgpu_fence_driver_update_timedout_fence_state(guilty_fence);
@@ -878,22 +844,8 @@ void amdgpu_ring_reset_helper_begin(struct amdgpu_ring *ring,
 int amdgpu_ring_reset_helper_end(struct amdgpu_ring *ring,
 				 struct amdgpu_fence *guilty_fence)
 {
-	unsigned int i;
-	int r;
-
 	/* verify that the ring is functional */
-	r = amdgpu_ring_test_ring(ring);
-	if (r)
-		return r;
-
-	/* Re-emit the non-guilty commands */
-	if (ring->ring_backup_entries_to_copy) {
-		amdgpu_ring_alloc_reemit(ring, ring->ring_backup_entries_to_copy);
-		for (i = 0; i < ring->ring_backup_entries_to_copy; i++)
-			amdgpu_ring_write(ring, ring->ring_backup[i]);
-		amdgpu_ring_commit(ring);
-	}
-	return 0;
+	return amdgpu_ring_test_ring(ring);
 }
 
 bool amdgpu_ring_is_reset_type_supported(struct amdgpu_ring *ring,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 340d4f6b87a93..e935159a61a3f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -150,11 +150,8 @@ struct amdgpu_fence {
 	u64 wptr;
 	/* fence context for resets */
 	u64 context;
-	/* has this fence been reemitted */
-	unsigned int reemitted;
-	/* wptr for the fence for the submission */
-	u64 fence_wptr_start;
-	u64 fence_wptr_end;
+	/* fence state */
+	bool skip_ib;
 };
 
 extern const struct drm_sched_backend_ops amdgpu_sched_ops;
@@ -163,7 +160,6 @@ void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error);
 void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring,
 					  struct dma_fence *timedout_fence);
 void amdgpu_fence_driver_update_timedout_fence_state(struct amdgpu_fence *af);
-void amdgpu_fence_save_wptr(struct amdgpu_fence *af);
 
 int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
 int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
@@ -312,9 +308,6 @@ struct amdgpu_ring {
 
 	struct amdgpu_bo	*ring_obj;
 	uint32_t		*ring;
-	/* backups for resets */
-	uint32_t		*ring_backup;
-	unsigned int		ring_backup_entries_to_copy;
 	unsigned		rptr_offs;
 	u64			rptr_gpu_addr;
 	u32			*rptr_cpu_addr;
@@ -572,14 +565,14 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		  enum amdgpu_ib_pool_type pool,
 		  struct amdgpu_ib *ib);
 void amdgpu_ib_free(struct amdgpu_ib *ib, struct dma_fence *f);
+int amdgpu_ib_emit(struct amdgpu_ring *ring, struct amdgpu_job *job);
 int amdgpu_ib_schedule(struct amdgpu_ring *ring, struct amdgpu_job *job,
 		       struct dma_fence **f);
+
 int amdgpu_ib_pool_init(struct amdgpu_device *adev);
 void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
 int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
 bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring);
-void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring,
-					     struct amdgpu_fence *guilty_fence);
 void amdgpu_ring_reset_helper_begin(struct amdgpu_ring *ring,
 				    struct amdgpu_fence *guilty_fence);
 int amdgpu_ring_reset_helper_end(struct amdgpu_ring *ring,
-- 
2.52.0
