Re: [PATCH 1/2] drm/scheduler: modify API to avoid redundancy

2018-08-09 Thread Nayan Deshmukh
Hi Daniel,


On Thu, Aug 9, 2018 at 2:27 PM Daniel Vetter  wrote:
>
> On Fri, Jul 20, 2018 at 2:21 PM, Nayan Deshmukh
>  wrote:
> > entity has a scheduler field and we don't need the sched argument
> > in any of the functions where entity is provided.
> >
> > Signed-off-by: Nayan Deshmukh 
>
> This breaks the make htmldocs build a bit:
>
> ./drivers/gpu/drm/scheduler/gpu_scheduler.c:262: warning: Excess
> function parameter 'sched' description in 'drm_sched_entity_flush'
> ./drivers/gpu/drm/scheduler/gpu_scheduler.c:303: warning: Excess
> function parameter 'sched' description in 'drm_sched_entity_fini'
> ./drivers/gpu/drm/scheduler/gpu_scheduler.c:365: warning: Excess
> function parameter 'sched' description in 'drm_sched_entity_destroy'
> ./drivers/gpu/drm/scheduler/gpu_scheduler.c:730: warning: Excess
> function parameter 'sched' description in 'drm_sched_job_init'
> ./drivers/gpu/drm/scheduler/gpu_scheduler.c:263: warning: Excess
> function parameter 'sched' description in 'drm_sched_entity_flush'
> ./drivers/gpu/drm/scheduler/gpu_scheduler.c:304: warning: Excess
> function parameter 'sched' description in 'drm_sched_entity_fini'
> ./drivers/gpu/drm/scheduler/gpu_scheduler.c:366: warning: Excess
> function parameter 'sched' description in 'drm_sched_entity_destroy'
> ./drivers/gpu/drm/scheduler/gpu_scheduler.c:731: warning: Excess
> function parameter 'sched' description in 'drm_sched_job_init'
>
> Care to fix it?
My bad, thanks for pointing it out. I will send in a patch to fix it.
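
For reference, the warnings are just stale kernel-doc: the comment blocks above those four functions still document the @sched parameter that this patch removed, so the follow-up only needs to delete those lines. A sketch of the fix for drm_sched_entity_flush() (the exact comment wording may differ, and the other three functions are analogous):

 /**
  * drm_sched_entity_flush - Flush a context entity
  *
- * @sched: scheduler instance
  * @entity: scheduler entity
  * @timeout: time to wait in jiffies for Q to become empty
  */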

Cheers,
Nayan
>
> Thanks, Daniel
> > ---
> >  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c|  2 +-
> >  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c   | 13 +
> >  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c   |  2 +-
> >  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c   |  3 +--
> >  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c   |  3 +--
> >  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c   |  2 +-
> >  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c|  4 ++--
> >  drivers/gpu/drm/etnaviv/etnaviv_drv.c |  3 +--
> >  drivers/gpu/drm/etnaviv/etnaviv_sched.c   |  4 ++--
> >  drivers/gpu/drm/scheduler/gpu_scheduler.c | 20 +++-
> >  drivers/gpu/drm/v3d/v3d_drv.c |  4 +---
> >  drivers/gpu/drm/v3d/v3d_gem.c |  2 --
> >  include/drm/gpu_scheduler.h   | 10 +++---
> >  13 files changed, 30 insertions(+), 42 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> > index 7c5cc33d0cda..7e5ebf823309 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> > @@ -1232,7 +1232,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
> > job = p->job;
> > p->job = NULL;
> >
> > -   r = drm_sched_job_init(&job->base, &ring->sched, entity, p->filp);
> > +   r = drm_sched_job_init(&job->base, entity, p->filp);
> > if (r) {
> > amdgpu_job_free(job);
> > amdgpu_mn_unlock(p->mn);
> > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> > index 83e3b320a793..df6965761046 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> > @@ -104,8 +104,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
> >
> >  failed:
> > for (j = 0; j < i; j++)
> > -   drm_sched_entity_destroy(&adev->rings[j]->sched,
> > - &ctx->rings[j].entity);
> > +   drm_sched_entity_destroy(&ctx->rings[j].entity);
> > kfree(ctx->fences);
> > ctx->fences = NULL;
> > return r;
> > @@ -178,8 +177,7 @@ static void amdgpu_ctx_do_release(struct kref *ref)
> > if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
> > continue;
> >
> > -   drm_sched_entity_destroy(&ctx->adev->rings[i]->sched,
> > -   &ctx->rings[i].entity);
> > +   drm_sched_entity_destroy(&ctx->rings[i].entity);
> > }
> >
> > amdgpu_ctx_fini(ref);
> > @@ -466,8 +464,8 @@ void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr)
> > if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
> > continue;
> >
> > -   max_wait = drm_sched_entity_flush(&ctx->adev->rings[i]->sched,
> > - &ctx->rings[i].entity, max_wait);
> > +   max_wait = drm_sched_entity_flush(&ctx->rings[i].entity,
> > + max_wait);
> > }
> > }
> > mutex_unlock(&mgr->lock);
> > @@ -492,8 +490,7 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
> > continue;
> >
> > if (kref_read(&ctx->refcount) == 1)
> > -
> 

Re: [PATCH 1/2] drm/scheduler: modify API to avoid redundancy

2018-08-09 Thread Daniel Vetter
On Fri, Jul 20, 2018 at 2:21 PM, Nayan Deshmukh
 wrote:
> entity has a scheduler field and we don't need the sched argument
> in any of the functions where entity is provided.
>
> Signed-off-by: Nayan Deshmukh 

This breaks the make htmldocs build a bit:

./drivers/gpu/drm/scheduler/gpu_scheduler.c:262: warning: Excess
function parameter 'sched' description in 'drm_sched_entity_flush'
./drivers/gpu/drm/scheduler/gpu_scheduler.c:303: warning: Excess
function parameter 'sched' description in 'drm_sched_entity_fini'
./drivers/gpu/drm/scheduler/gpu_scheduler.c:365: warning: Excess
function parameter 'sched' description in 'drm_sched_entity_destroy'
./drivers/gpu/drm/scheduler/gpu_scheduler.c:730: warning: Excess
function parameter 'sched' description in 'drm_sched_job_init'
./drivers/gpu/drm/scheduler/gpu_scheduler.c:263: warning: Excess
function parameter 'sched' description in 'drm_sched_entity_flush'
./drivers/gpu/drm/scheduler/gpu_scheduler.c:304: warning: Excess
function parameter 'sched' description in 'drm_sched_entity_fini'
./drivers/gpu/drm/scheduler/gpu_scheduler.c:366: warning: Excess
function parameter 'sched' description in 'drm_sched_entity_destroy'
./drivers/gpu/drm/scheduler/gpu_scheduler.c:731: warning: Excess
function parameter 'sched' description in 'drm_sched_job_init'

Care to fix it?

Thanks, Daniel
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c|  2 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c   | 13 +
>  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c   |  2 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c   |  3 +--
>  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c   |  3 +--
>  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c   |  2 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c|  4 ++--
>  drivers/gpu/drm/etnaviv/etnaviv_drv.c |  3 +--
>  drivers/gpu/drm/etnaviv/etnaviv_sched.c   |  4 ++--
>  drivers/gpu/drm/scheduler/gpu_scheduler.c | 20 +++-
>  drivers/gpu/drm/v3d/v3d_drv.c |  4 +---
>  drivers/gpu/drm/v3d/v3d_gem.c |  2 --
>  include/drm/gpu_scheduler.h   | 10 +++---
>  13 files changed, 30 insertions(+), 42 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> index 7c5cc33d0cda..7e5ebf823309 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> @@ -1232,7 +1232,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
> job = p->job;
> p->job = NULL;
>
> -   r = drm_sched_job_init(&job->base, &ring->sched, entity, p->filp);
> +   r = drm_sched_job_init(&job->base, entity, p->filp);
> if (r) {
> amdgpu_job_free(job);
> amdgpu_mn_unlock(p->mn);
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> index 83e3b320a793..df6965761046 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> @@ -104,8 +104,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
>
>  failed:
> for (j = 0; j < i; j++)
> -   drm_sched_entity_destroy(&adev->rings[j]->sched,
> - &ctx->rings[j].entity);
> +   drm_sched_entity_destroy(&ctx->rings[j].entity);
> kfree(ctx->fences);
> ctx->fences = NULL;
> return r;
> @@ -178,8 +177,7 @@ static void amdgpu_ctx_do_release(struct kref *ref)
> if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
> continue;
>
> -   drm_sched_entity_destroy(&ctx->adev->rings[i]->sched,
> -   &ctx->rings[i].entity);
> +   drm_sched_entity_destroy(&ctx->rings[i].entity);
> }
>
> amdgpu_ctx_fini(ref);
> @@ -466,8 +464,8 @@ void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr)
> if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
> continue;
>
> -   max_wait = drm_sched_entity_flush(&ctx->adev->rings[i]->sched,
> - &ctx->rings[i].entity, max_wait);
> +   max_wait = drm_sched_entity_flush(&ctx->rings[i].entity,
> + max_wait);
> }
> }
> mutex_unlock(&mgr->lock);
> @@ -492,8 +490,7 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
> continue;
>
> if (kref_read(&ctx->refcount) == 1)
> -   drm_sched_entity_fini(&ctx->adev->rings[i]->sched,
> -   &ctx->rings[i].entity);
> +   drm_sched_entity_fini(&ctx->rings[i].entity);
> else
> DRM_ERROR("ctx %p is still alive\n", ctx);
> }
> diff --git a/drivers/gpu/drm/amd/a

Re: [PATCH 1/2] drm/scheduler: modify API to avoid redundancy

2018-07-20 Thread Eric Anholt
Nayan Deshmukh  writes:

> entity has a scheduler field and we don't need the sched argument
> in any of the functions where entity is provided.
>
> Signed-off-by: Nayan Deshmukh 
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c|  2 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c   | 13 +
>  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c   |  2 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c   |  3 +--
>  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c   |  3 +--
>  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c   |  2 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c|  4 ++--
>  drivers/gpu/drm/etnaviv/etnaviv_drv.c |  3 +--
>  drivers/gpu/drm/etnaviv/etnaviv_sched.c   |  4 ++--
>  drivers/gpu/drm/scheduler/gpu_scheduler.c | 20 +++-
>  drivers/gpu/drm/v3d/v3d_drv.c |  4 +---
>  drivers/gpu/drm/v3d/v3d_gem.c |  2 --
>  include/drm/gpu_scheduler.h   | 10 +++---
>  13 files changed, 30 insertions(+), 42 deletions(-)
>

> diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
> index dac71e3b4514..a3b55c542025 100644
> --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
> +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
> @@ -273,11 +273,12 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
>   *
>   * Returns the remaining time in jiffies left from the input timeout
>   */
> -long drm_sched_entity_flush(struct drm_gpu_scheduler *sched,
> -struct drm_sched_entity *entity, long timeout)
> +long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
>  {
> + struct drm_gpu_scheduler *sched;
>   long ret = timeout;
>  
> + sched = entity->sched;
>   if (!drm_sched_entity_is_initialized(sched, entity))
>   return ret;
>   /**
> @@ -312,10 +313,11 @@ EXPORT_SYMBOL(drm_sched_entity_flush);
>   * entity and signals all jobs with an error code if the process was killed.
>   *
>   */
> -void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
> -struct drm_sched_entity *entity)
> +void drm_sched_entity_fini(struct drm_sched_entity *entity)
>  {
> + struct drm_gpu_scheduler *sched;
>  
> + sched = entity->sched;

Maybe fold the initialization into the declaration above, like you did
elsewhere?
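
i.e. something like this (an untested sketch on top of this patch):

void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
	/* scheduler lookup folded into the declaration */
	struct drm_gpu_scheduler *sched = entity->sched;
	...
}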

Regardless, this is a wonderful cleanup of the API.

Reviewed-by: Eric Anholt 




Re: [PATCH 1/2] drm/scheduler: modify API to avoid redundancy

2018-07-20 Thread Christian König

On 20.07.2018 at 14:21, Nayan Deshmukh wrote:

entity has a scheduler field and we don't need the sched argument
in any of the functions where entity is provided.

Signed-off-by: Nayan Deshmukh 


Reviewed-by: Christian König  for the series.


---
  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c|  2 +-
  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c   | 13 +
  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c   |  2 +-
  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c   |  3 +--
  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c   |  3 +--
  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c   |  2 +-
  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c|  4 ++--
  drivers/gpu/drm/etnaviv/etnaviv_drv.c |  3 +--
  drivers/gpu/drm/etnaviv/etnaviv_sched.c   |  4 ++--
  drivers/gpu/drm/scheduler/gpu_scheduler.c | 20 +++-
  drivers/gpu/drm/v3d/v3d_drv.c |  4 +---
  drivers/gpu/drm/v3d/v3d_gem.c |  2 --
  include/drm/gpu_scheduler.h   | 10 +++---
  13 files changed, 30 insertions(+), 42 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 7c5cc33d0cda..7e5ebf823309 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1232,7 +1232,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
job = p->job;
p->job = NULL;

-   r = drm_sched_job_init(&job->base, &ring->sched, entity, p->filp);
+   r = drm_sched_job_init(&job->base, entity, p->filp);
if (r) {
amdgpu_job_free(job);
amdgpu_mn_unlock(p->mn);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 83e3b320a793..df6965761046 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -104,8 +104,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,

 failed:
for (j = 0; j < i; j++)
-   drm_sched_entity_destroy(&adev->rings[j]->sched,
- &ctx->rings[j].entity);
+   drm_sched_entity_destroy(&ctx->rings[j].entity);
kfree(ctx->fences);
ctx->fences = NULL;
return r;
@@ -178,8 +177,7 @@ static void amdgpu_ctx_do_release(struct kref *ref)
if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
continue;

-   drm_sched_entity_destroy(&ctx->adev->rings[i]->sched,
-   &ctx->rings[i].entity);
+   drm_sched_entity_destroy(&ctx->rings[i].entity);
}

amdgpu_ctx_fini(ref);
@@ -466,8 +464,8 @@ void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr)
if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
continue;

-   max_wait = drm_sched_entity_flush(&ctx->adev->rings[i]->sched,
- &ctx->rings[i].entity, max_wait);
+   max_wait = drm_sched_entity_flush(&ctx->rings[i].entity,
+ max_wait);
}
}
mutex_unlock(&mgr->lock);
@@ -492,8 +490,7 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
continue;

if (kref_read(&ctx->refcount) == 1)
-   drm_sched_entity_fini(&ctx->adev->rings[i]->sched,
-   &ctx->rings[i].entity);
+   drm_sched_entity_fini(&ctx->rings[i].entity);
else
DRM_ERROR("ctx %p is still alive\n", ctx);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 5a2c26a85984..631481a730e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -133,7 +133,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
if (!f)
return -EINVAL;

-   r = drm_sched_job_init(&job->base, entity->sched, entity, owner);
+   r = drm_sched_job_init(&job->base, entity, owner);
if (r)
return r;

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 13977ea6a097..913705d4dfd3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1925,8 +1925,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
return;
}
} else {
-   drm_sched_entity_destroy(adev->mman.entity.sched,
-&adev->mman.entity);
+   drm_sched_entity_destroy(&adev->mman.entity);
}

/* this just adjusts TTM size idea, which sets lpfn to the correct value */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/a

[PATCH 1/2] drm/scheduler: modify API to avoid redundancy

2018-07-20 Thread Nayan Deshmukh
entity has a scheduler field and we don't need the sched argument
in any of the functions where entity is provided.
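
For reference, these are the entity-only prototypes the series converges on (summarized from the hunks below; argument types are otherwise unchanged, e.g. owner stays a void *):

long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_sched_entity *entity);
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       void *owner);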

Signed-off-by: Nayan Deshmukh 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c|  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c   | 13 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c   |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c   |  3 +--
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c   |  3 +--
 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c   |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c|  4 ++--
 drivers/gpu/drm/etnaviv/etnaviv_drv.c |  3 +--
 drivers/gpu/drm/etnaviv/etnaviv_sched.c   |  4 ++--
 drivers/gpu/drm/scheduler/gpu_scheduler.c | 20 +++-
 drivers/gpu/drm/v3d/v3d_drv.c |  4 +---
 drivers/gpu/drm/v3d/v3d_gem.c |  2 --
 include/drm/gpu_scheduler.h   | 10 +++---
 13 files changed, 30 insertions(+), 42 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 7c5cc33d0cda..7e5ebf823309 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1232,7 +1232,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
job = p->job;
p->job = NULL;
 
-   r = drm_sched_job_init(&job->base, &ring->sched, entity, p->filp);
+   r = drm_sched_job_init(&job->base, entity, p->filp);
if (r) {
amdgpu_job_free(job);
amdgpu_mn_unlock(p->mn);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 83e3b320a793..df6965761046 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -104,8 +104,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 
 failed:
for (j = 0; j < i; j++)
-   drm_sched_entity_destroy(&adev->rings[j]->sched,
- &ctx->rings[j].entity);
+   drm_sched_entity_destroy(&ctx->rings[j].entity);
kfree(ctx->fences);
ctx->fences = NULL;
return r;
@@ -178,8 +177,7 @@ static void amdgpu_ctx_do_release(struct kref *ref)
if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
continue;
 
-   drm_sched_entity_destroy(&ctx->adev->rings[i]->sched,
-   &ctx->rings[i].entity);
+   drm_sched_entity_destroy(&ctx->rings[i].entity);
}
 
amdgpu_ctx_fini(ref);
@@ -466,8 +464,8 @@ void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr)
if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
continue;
 
-   max_wait = drm_sched_entity_flush(&ctx->adev->rings[i]->sched,
- &ctx->rings[i].entity, max_wait);
+   max_wait = drm_sched_entity_flush(&ctx->rings[i].entity,
+ max_wait);
}
}
mutex_unlock(&mgr->lock);
@@ -492,8 +490,7 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
continue;
 
if (kref_read(&ctx->refcount) == 1)
-   drm_sched_entity_fini(&ctx->adev->rings[i]->sched,
-   &ctx->rings[i].entity);
+   drm_sched_entity_fini(&ctx->rings[i].entity);
else
DRM_ERROR("ctx %p is still alive\n", ctx);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 5a2c26a85984..631481a730e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -133,7 +133,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
if (!f)
return -EINVAL;
 
-   r = drm_sched_job_init(&job->base, entity->sched, entity, owner);
+   r = drm_sched_job_init(&job->base, entity, owner);
if (r)
return r;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 13977ea6a097..913705d4dfd3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1925,8 +1925,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
return;
}
} else {
-   drm_sched_entity_destroy(adev->mman.entity.sched,
-&adev->mman.entity);
+   drm_sched_entity_destroy(&adev->mman.entity);
}
 
/* this just adjusts TTM size idea, which sets lpfn to the correct value */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 80b5c453f8c1..8e2c96da275e 100644
---