On 2023-10-11 19:58, Matthew Brost wrote:
> Also add a lockdep assert to drm_sched_start_timeout.
> 
> Signed-off-by: Matthew Brost <matthew.br...@intel.com>
> Reviewed-by: Luben Tuikov <luben.tui...@amd.com>

I don't remember sending a Reviewed-by email for this patch.

I'll add the R-b (Reviewed-by) tag to the commit when I apply and push this patch,
after replying with a Reviewed-by email.

Regards,
Luben

> ---
>  drivers/gpu/drm/scheduler/sched_main.c | 23 +++++++++++++----------
>  1 file changed, 13 insertions(+), 10 deletions(-)
> 
> diff --git a/drivers/gpu/drm/scheduler/sched_main.c 
> b/drivers/gpu/drm/scheduler/sched_main.c
> index cf4c23db7547..c4d5c3d265a8 100644
> --- a/drivers/gpu/drm/scheduler/sched_main.c
> +++ b/drivers/gpu/drm/scheduler/sched_main.c
> @@ -427,11 +427,20 @@ static void drm_sched_job_done_cb(struct dma_fence *f, 
> struct dma_fence_cb *cb)
>   */
>  static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
>  {
> +     lockdep_assert_held(&sched->job_list_lock);
> +
>       if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
>           !list_empty(&sched->pending_list))
>               queue_delayed_work(sched->timeout_wq, &sched->work_tdr, 
> sched->timeout);
>  }
>  
> +static void drm_sched_start_timeout_unlocked(struct drm_gpu_scheduler *sched)
> +{
> +     spin_lock(&sched->job_list_lock);
> +     drm_sched_start_timeout(sched);
> +     spin_unlock(&sched->job_list_lock);
> +}
> +
>  /**
>   * drm_sched_fault - immediately start timeout handler
>   *
> @@ -544,11 +553,8 @@ static void drm_sched_job_timedout(struct work_struct 
> *work)
>               spin_unlock(&sched->job_list_lock);
>       }
>  
> -     if (status != DRM_GPU_SCHED_STAT_ENODEV) {
> -             spin_lock(&sched->job_list_lock);
> -             drm_sched_start_timeout(sched);
> -             spin_unlock(&sched->job_list_lock);
> -     }
> +     if (status != DRM_GPU_SCHED_STAT_ENODEV)
> +             drm_sched_start_timeout_unlocked(sched);
>  }
>  
>  /**
> @@ -674,11 +680,8 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, 
> bool full_recovery)
>                       drm_sched_job_done(s_job, -ECANCELED);
>       }
>  
> -     if (full_recovery) {
> -             spin_lock(&sched->job_list_lock);
> -             drm_sched_start_timeout(sched);
> -             spin_unlock(&sched->job_list_lock);
> -     }
> +     if (full_recovery)
> +             drm_sched_start_timeout_unlocked(sched);
>  
>       drm_sched_wqueue_start(sched);
>  }

Reply via email to