On Thu, Aug 05, 2021 at 12:46:57PM +0200, Daniel Vetter wrote:
> drm_sched_job_init() is already called at the right place, so this
> boils down to deleting code.
> 
> Signed-off-by: Daniel Vetter <daniel.vet...@intel.com>
> Cc: Rob Clark <robdcl...@gmail.com>
> Cc: Sean Paul <s...@poorly.run>
> Cc: Sumit Semwal <sumit.sem...@linaro.org>
> Cc: "Christian König" <christian.koe...@amd.com>
> Cc: linux-arm-...@vger.kernel.org
> Cc: freedr...@lists.freedesktop.org
> Cc: linux-me...@vger.kernel.org
> Cc: linaro-mm-...@lists.linaro.org

Merged up to this patch, except for etnaviv.
-Daniel
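
For anyone reading along, here is a minimal sketch of the flow the patch
below switches to (not the actual msm code; sketch_submit, my_entity,
my_obj and in_fence are made-up placeholders). Dependencies are attached
to the drm_sched_job right after drm_sched_job_init(), so the driver no
longer needs its own xarray of fences or a ->dependency callback:

#include <drm/gpu_scheduler.h>
#include <drm/drm_gem.h>
#include <linux/dma-fence.h>

static int sketch_submit(struct drm_sched_job *job,
			 struct drm_sched_entity *my_entity,
			 struct drm_gem_object *my_obj,
			 struct dma_fence *in_fence,
			 bool write)
{
	int ret;

	/* The job must be initialized before any dependency is added. */
	ret = drm_sched_job_init(job, my_entity, NULL);
	if (ret)
		return ret;

	/* Explicit dependency (e.g. from a syncobj or an in-fence fd);
	 * the helper takes over the fence reference. */
	ret = drm_sched_job_add_dependency(job, in_fence);
	if (ret)
		goto err;

	/* Implicit dependencies pulled from the BO's dma_resv object. */
	ret = drm_sched_job_add_implicit_dependencies(job, my_obj, write);
	if (ret)
		goto err;

	/* The scheduler waits for all collected fences before calling
	 * run_job(), which is what replaces the driver-side
	 * ->dependency callback removed below. */
	return 0;

err:
	drm_sched_job_cleanup(job);
	return ret;
}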

> ---
>  drivers/gpu/drm/msm/msm_gem.h        |  5 -----
>  drivers/gpu/drm/msm/msm_gem_submit.c | 19 +++++--------------
>  drivers/gpu/drm/msm/msm_ringbuffer.c | 12 ------------
>  3 files changed, 5 insertions(+), 31 deletions(-)
> 
> diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
> index f9e3ffb2309a..8bf0ac707fd7 100644
> --- a/drivers/gpu/drm/msm/msm_gem.h
> +++ b/drivers/gpu/drm/msm/msm_gem.h
> @@ -312,11 +312,6 @@ struct msm_gem_submit {
>       struct ww_acquire_ctx ticket;
>       uint32_t seqno;         /* Sequence number of the submit on the ring */
>  
> -     /* Array of struct dma_fence * to block on before submitting this job.
> -      */
> -     struct xarray deps;
> -     unsigned long last_dep;
> -
>       /* Hw fence, which is created when the scheduler executes the job, and
>        * is signaled when the hw finishes (via seqno write from cmdstream)
>        */
> diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
> index 96cea0ba4cfd..fb5a2eab27a2 100644
> --- a/drivers/gpu/drm/msm/msm_gem_submit.c
> +++ b/drivers/gpu/drm/msm/msm_gem_submit.c
> @@ -52,8 +52,6 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
>               return ERR_PTR(ret);
>       }
>  
> -     xa_init_flags(&submit->deps, XA_FLAGS_ALLOC);
> -
>       kref_init(&submit->ref);
>       submit->dev = dev;
>       submit->aspace = queue->ctx->aspace;
> @@ -72,8 +70,6 @@ void __msm_gem_submit_destroy(struct kref *kref)
>  {
>       struct msm_gem_submit *submit =
>                       container_of(kref, struct msm_gem_submit, ref);
> -     unsigned long index;
> -     struct dma_fence *fence;
>       unsigned i;
>  
>       if (submit->fence_id) {
> @@ -82,12 +78,6 @@ void __msm_gem_submit_destroy(struct kref *kref)
>               mutex_unlock(&submit->queue->lock);
>       }
>  
> -     xa_for_each (&submit->deps, index, fence) {
> -             dma_fence_put(fence);
> -     }
> -
> -     xa_destroy(&submit->deps);
> -
>       dma_fence_put(submit->user_fence);
>       dma_fence_put(submit->hw_fence);
>  
> @@ -343,8 +333,9 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
>               if (no_implicit)
>                       continue;
>  
> -             ret = drm_gem_fence_array_add_implicit(&submit->deps, obj,
> -                     write);
> +             ret = drm_sched_job_add_implicit_dependencies(&submit->base,
> +                                                           obj,
> +                                                           write);
>               if (ret)
>                       break;
>       }
> @@ -588,7 +579,7 @@ static struct drm_syncobj **msm_parse_deps(struct msm_gem_submit *submit,
>               if (ret)
>                       break;
>  
> -             ret = drm_gem_fence_array_add(&submit->deps, fence);
> +             ret = drm_sched_job_add_dependency(&submit->base, fence);
>               if (ret)
>                       break;
>  
> @@ -798,7 +789,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
>                       goto out_unlock;
>               }
>  
> -             ret = drm_gem_fence_array_add(&submit->deps, in_fence);
> +             ret = drm_sched_job_add_dependency(&submit->base, in_fence);
>               if (ret)
>                       goto out_unlock;
>       }
> diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
> index bd54c1412649..652b1dedd7c1 100644
> --- a/drivers/gpu/drm/msm/msm_ringbuffer.c
> +++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
> @@ -11,17 +11,6 @@ static uint num_hw_submissions = 8;
>  MODULE_PARM_DESC(num_hw_submissions, "The max # of jobs to write into ringbuffer (default 8)");
>  module_param(num_hw_submissions, uint, 0600);
>  
> -static struct dma_fence *msm_job_dependency(struct drm_sched_job *job,
> -             struct drm_sched_entity *s_entity)
> -{
> -     struct msm_gem_submit *submit = to_msm_submit(job);
> -
> -     if (!xa_empty(&submit->deps))
> -             return xa_erase(&submit->deps, submit->last_dep++);
> -
> -     return NULL;
> -}
> -
>  static struct dma_fence *msm_job_run(struct drm_sched_job *job)
>  {
>       struct msm_gem_submit *submit = to_msm_submit(job);
> @@ -52,7 +41,6 @@ static void msm_job_free(struct drm_sched_job *job)
>  }
>  
>  const struct drm_sched_backend_ops msm_sched_ops = {
> -     .dependency = msm_job_dependency,
>       .run_job = msm_job_run,
>       .free_job = msm_job_free
>  };
> -- 
> 2.32.0
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch
