The scheduler of an entity is determined by the run queue on which
it is queued. Dropping the cached sched field saves us the effort of
keeping the rq and sched fields in sync once we start shifting
entities among different run queues.

Signed-off-by: Nayan Deshmukh <nayan26deshm...@gmail.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c    |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c   |  4 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c    |  6 +++---
 drivers/gpu/drm/scheduler/gpu_scheduler.c | 19 +++++++++----------
 drivers/gpu/drm/scheduler/sched_fence.c   |  2 +-
 include/drm/gpu_scheduler.h               |  2 --
 6 files changed, 16 insertions(+), 19 deletions(-)
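
A minimal sketch (not part of the patch; struct layouts trimmed to the
fields involved) of the access pattern this change settles on: the
entity no longer caches a scheduler pointer, it derives it from the run
queue it is currently attached to, so the rq is the single source of
truth. The drm_sched_entity_to_sched() helper below is purely
illustrative, not an API added by this patch:

    struct drm_gpu_scheduler;

    struct drm_sched_rq {
            struct drm_gpu_scheduler *sched; /* scheduler owning this rq */
            /* ... */
    };

    struct drm_sched_entity {
            struct drm_sched_rq *rq; /* current run queue, may change */
            /* no cached struct drm_gpu_scheduler *sched anymore */
    };

    /* Illustrative helper: one dereference always yields the scheduler
     * of whatever rq the entity is queued on right now, so nothing has
     * to be updated when the entity moves to another rq. */
    static inline struct drm_gpu_scheduler *
    drm_sched_entity_to_sched(struct drm_sched_entity *entity)
    {
            return entity->rq->sched;
    }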

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 7e5ebf823309..9572ca1ac15e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1262,7 +1262,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
        priority = job->base.s_priority;
        drm_sched_entity_push_job(&job->base, entity);
 
-       ring = to_amdgpu_ring(entity->sched);
+       ring = to_amdgpu_ring(entity->rq->sched);
        amdgpu_ring_priority_get(ring, priority);
 
        ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 631481a730e0..391e2f7c03aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -143,7 +143,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
        priority = job->base.s_priority;
        drm_sched_entity_push_job(&job->base, entity);
 
-       ring = to_amdgpu_ring(entity->sched);
+       ring = to_amdgpu_ring(entity->rq->sched);
        amdgpu_ring_priority_get(ring, priority);
 
        return 0;
@@ -167,7 +167,7 @@ int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
 static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
                                               struct drm_sched_entity *s_entity)
 {
-       struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->sched);
+       struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
        struct amdgpu_job *job = to_amdgpu_job(sched_job);
        struct amdgpu_vm *vm = job->vm;
        struct dma_fence *fence;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 74b4a28a41d6..5d7d7900ccab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -387,7 +387,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
                ats_entries = 0;
        }
 
-       ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+       ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
 
        r = reservation_object_reserve_shared(bo->tbo.resv);
        if (r)
@@ -1113,7 +1113,7 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
                struct amdgpu_ring *ring;
                struct dma_fence *fence;
 
-               ring = container_of(vm->entity.sched, struct amdgpu_ring,
+               ring = container_of(vm->entity.rq->sched, struct amdgpu_ring,
                                    sched);
 
                amdgpu_ring_pad_ib(ring, params.ib);
@@ -1403,7 +1403,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
                                           addr, flags);
        }
 
-       ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+       ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
 
        nptes = last - start + 1;
 
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index a3b55c542025..3f2fc5e8242a 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -185,7 +185,6 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
        memset(entity, 0, sizeof(struct drm_sched_entity));
        INIT_LIST_HEAD(&entity->list);
        entity->rq = rq_list[0];
-       entity->sched = rq_list[0]->sched;
        entity->guilty = guilty;
        entity->last_scheduled = NULL;
 
@@ -210,8 +209,8 @@ EXPORT_SYMBOL(drm_sched_entity_init);
 static bool drm_sched_entity_is_initialized(struct drm_gpu_scheduler *sched,
                                            struct drm_sched_entity *entity)
 {
-       return entity->sched == sched &&
-               entity->rq != NULL;
+       return entity->rq != NULL &&
+               entity->rq->sched == sched;
 }
 
 /**
@@ -278,7 +277,7 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
        struct drm_gpu_scheduler *sched;
        long ret = timeout;
 
-       sched = entity->sched;
+       sched = entity->rq->sched;
        if (!drm_sched_entity_is_initialized(sched, entity))
                return ret;
        /**
@@ -317,7 +316,7 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity)
 {
        struct drm_gpu_scheduler *sched;
 
-       sched = entity->sched;
+       sched = entity->rq->sched;
        drm_sched_entity_set_rq(entity, NULL);
 
        /* Consumption of existing IBs wasn't completed. Forcefully
@@ -388,7 +387,7 @@ static void drm_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb
                container_of(cb, struct drm_sched_entity, cb);
        entity->dependency = NULL;
        dma_fence_put(f);
-       drm_sched_wakeup(entity->sched);
+       drm_sched_wakeup(entity->rq->sched);
 }
 
 static void drm_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
@@ -438,7 +437,7 @@ EXPORT_SYMBOL(drm_sched_entity_set_rq);
 bool drm_sched_dependency_optimized(struct dma_fence* fence,
                                    struct drm_sched_entity *entity)
 {
-       struct drm_gpu_scheduler *sched = entity->sched;
+       struct drm_gpu_scheduler *sched = entity->rq->sched;
        struct drm_sched_fence *s_fence;
 
        if (!fence || dma_fence_is_signaled(fence))
@@ -455,7 +454,7 @@ EXPORT_SYMBOL(drm_sched_dependency_optimized);
 
 static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
 {
-       struct drm_gpu_scheduler *sched = entity->sched;
+       struct drm_gpu_scheduler *sched = entity->rq->sched;
        struct dma_fence * fence = entity->dependency;
        struct drm_sched_fence *s_fence;
 
@@ -500,7 +499,7 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
 static struct drm_sched_job *
 drm_sched_entity_pop_job(struct drm_sched_entity *entity)
 {
-       struct drm_gpu_scheduler *sched = entity->sched;
+       struct drm_gpu_scheduler *sched = entity->rq->sched;
        struct drm_sched_job *sched_job = to_drm_sched_job(
                                                spsc_queue_peek(&entity->job_queue));
 
@@ -744,7 +743,7 @@ int drm_sched_job_init(struct drm_sched_job *job,
                       struct drm_sched_entity *entity,
                       void *owner)
 {
-       struct drm_gpu_scheduler *sched = entity->sched;
+       struct drm_gpu_scheduler *sched = entity->rq->sched;
 
        job->sched = sched;
        job->entity = entity;
diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
index df4461648e3f..4029312fdd81 100644
--- a/drivers/gpu/drm/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -172,7 +172,7 @@ struct drm_sched_fence *drm_sched_fence_create(struct drm_sched_entity *entity,
                return NULL;
 
        fence->owner = owner;
-       fence->sched = entity->sched;
+       fence->sched = entity->rq->sched;
        spin_lock_init(&fence->lock);
 
        seq = atomic_inc_return(&entity->fence_seq);
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 728346abcc81..091b9afcd184 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -52,7 +52,6 @@ enum drm_sched_priority {
  *        runqueue.
  * @rq: runqueue to which this entity belongs.
  * @rq_lock: lock to modify the runqueue to which this entity belongs.
- * @sched: the scheduler instance to which this entity is enqueued.
  * @job_queue: the list of jobs of this entity.
  * @fence_seq: a linearly increasing seqno incremented with each
  *             new &drm_sched_fence which is part of the entity.
@@ -76,7 +75,6 @@ struct drm_sched_entity {
        struct list_head                list;
        struct drm_sched_rq             *rq;
        spinlock_t                      rq_lock;
-       struct drm_gpu_scheduler        *sched;
 
        struct spsc_queue               job_queue;
 
-- 
2.14.3
