On 25.11.20 at 04:17, Luben Tuikov wrote:
Rename "ring_mirror_list" to "pending_list",
to describe what something is, not what it does,
how it's used, or how the hardware implements it.

This also abstracts away the actual hardware
implementation: how the low-level driver
communicates with the device it drives (ring, CAM,
etc.) shouldn't be exposed to DRM.

The pending_list keeps jobs which have been
submitted and are thus out of our control. Usually
this means they are pending execution in hardware,
but "out of our control" is the more general
(inclusive) definition.
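
For reference, the discipline on this list, condensed here from the
hunks below (the condensation itself is only illustrative; the function
and field names are the real ones from the patch), is that a job is
appended when it is handed to the hardware and removed once its
finished fence has signaled:

        /* drm_sched_job_begin(): job pushed to the hardware */
        spin_lock(&sched->job_list_lock);
        list_add_tail(&s_job->list, &sched->pending_list);
        drm_sched_start_timeout(sched);
        spin_unlock(&sched->job_list_lock);

        /* drm_sched_get_cleanup_job(): reclaim the oldest finished job */
        spin_lock(&sched->job_list_lock);
        job = list_first_entry_or_null(&sched->pending_list,
                                       struct drm_sched_job, list);
        if (job && dma_fence_is_signaled(&job->s_fence->finished))
                list_del_init(&job->list);
        else
                job = NULL;
        spin_unlock(&sched->job_list_lock);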

Signed-off-by: Luben Tuikov <luben.tui...@amd.com>

In general the rename is a good idea, but I think we should try to remove this linked list altogether.

As the original name suggested, this is essentially a ring buffer; there is no reason I can see to use a linked list here except for the add/remove madness we currently have.
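
Since hw_submission_limit already bounds how many jobs are in flight, a
fixed-size FIFO ring could hold them instead. A minimal sketch, for
illustration only; all names below are hypothetical and not part of
this patch or of any concrete proposal in this thread:

        /* Hypothetical: a power-of-two ring sized to hw_submission_limit.
         * hw_submission_limit bounds occupancy, so push never overflows. */
        struct drm_sched_pending_ring {
                struct drm_sched_job **jobs;    /* hw_submission_limit slots */
                u32 head;                       /* next free slot */
                u32 tail;                       /* oldest pending job */
                u32 mask;                       /* ring size - 1 */
        };

        /* Submission appends at head... */
        static void pending_push(struct drm_sched_pending_ring *r,
                                 struct drm_sched_job *job)
        {
                r->jobs[r->head++ & r->mask] = job;
        }

        /* ...and jobs retire from tail, in submission order. */
        static struct drm_sched_job *
        pending_pop(struct drm_sched_pending_ring *r)
        {
                if (r->tail == r->head)
                        return NULL;            /* nothing pending */
                return r->jobs[r->tail++ & r->mask];
        }

The catch is the out-of-order removal in drm_sched_stop() and in
amdgpu_ib_preempt_mark_partial_job(), which a plain FIFO ring cannot
express; presumably that is the add/remove madness meant above.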

Anyway, the patch is Acked-by: Christian König <christian.koe...@amd.com> for now.

Regards,
Christian.

---
  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c |  4 +--
  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c  |  4 +--
  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c     |  2 +-
  drivers/gpu/drm/scheduler/sched_main.c      | 34 ++++++++++-----------
  include/drm/gpu_scheduler.h                 | 10 +++---
  5 files changed, 27 insertions(+), 27 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 8358cae0b5a4..db77a5bdfa45 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1427,7 +1427,7 @@ static void amdgpu_ib_preempt_job_recovery(struct drm_gpu_scheduler *sched)
        struct dma_fence *fence;

        spin_lock(&sched->job_list_lock);
-       list_for_each_entry(s_job, &sched->ring_mirror_list, list) {
+       list_for_each_entry(s_job, &sched->pending_list, list) {
                fence = sched->ops->run_job(s_job);
                dma_fence_put(fence);
        }
@@ -1459,7 +1459,7 @@ static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring)
no_preempt:
        spin_lock(&sched->job_list_lock);
-       list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, list) {
+       list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
                if (dma_fence_is_signaled(&s_job->s_fence->finished)) {
                        /* remove job from ring_mirror_list */
                        list_del_init(&s_job->list);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 4df6de81cd41..fbae600aa5f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4127,8 +4127,8 @@ bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
                        continue;

                spin_lock(&ring->sched.job_list_lock);
-               job = list_first_entry_or_null(&ring->sched.ring_mirror_list,
-                               struct drm_sched_job, list);
+               job = list_first_entry_or_null(&ring->sched.pending_list,
+                                              struct drm_sched_job, list);
                spin_unlock(&ring->sched.job_list_lock);
                if (job)
                        return true;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index aca52a46b93d..ff48101bab55 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -271,7 +271,7 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
        }

        /* Signal all jobs already scheduled to HW */
-       list_for_each_entry(s_job, &sched->ring_mirror_list, list) {
+       list_for_each_entry(s_job, &sched->pending_list, list) {
                struct drm_sched_fence *s_fence = s_job->s_fence;

                dma_fence_set_error(&s_fence->finished, -EHWPOISON);
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index c52eba407ebd..b694df12aaba 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -198,7 +198,7 @@ EXPORT_SYMBOL(drm_sched_dependency_optimized);
  static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
  {
        if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
-           !list_empty(&sched->ring_mirror_list))
+           !list_empty(&sched->pending_list))
                schedule_delayed_work(&sched->work_tdr, sched->timeout);
  }
@@ -258,7 +258,7 @@ void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
  {
        spin_lock(&sched->job_list_lock);
-       if (list_empty(&sched->ring_mirror_list))
+       if (list_empty(&sched->pending_list))
                cancel_delayed_work(&sched->work_tdr);
        else
                mod_delayed_work(system_wq, &sched->work_tdr, remaining);
@@ -272,7 +272,7 @@ static void drm_sched_job_begin(struct drm_sched_job *s_job)
        struct drm_gpu_scheduler *sched = s_job->sched;

        spin_lock(&sched->job_list_lock);
-       list_add_tail(&s_job->list, &sched->ring_mirror_list);
+       list_add_tail(&s_job->list, &sched->pending_list);
        drm_sched_start_timeout(sched);
        spin_unlock(&sched->job_list_lock);
  }
@@ -286,7 +286,7 @@ static void drm_sched_job_timedout(struct work_struct *work)

        /* Protects against concurrent deletion in drm_sched_get_cleanup_job */
        spin_lock(&sched->job_list_lock);
-       job = list_first_entry_or_null(&sched->ring_mirror_list,
+       job = list_first_entry_or_null(&sched->pending_list,
                                       struct drm_sched_job, list);

        if (job) {
@@ -371,7 +371,7 @@ EXPORT_SYMBOL(drm_sched_increase_karma);
   * Stop the scheduler and also removes and frees all completed jobs.
   * Note: bad job will not be freed as it might be used later and so it's
   * callers responsibility to release it manually if it's not part of the
- * mirror list any more.
+ * pending list any more.
   *
   */
  void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
@@ -392,15 +392,15 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
                 * Add at the head of the queue to reflect it was the earliest
                 * job extracted.
                 */
-               list_add(&bad->list, &sched->ring_mirror_list);
+               list_add(&bad->list, &sched->pending_list);

        /*
         * Iterate the job list from later to  earlier one and either deactive
-        * their HW callbacks or remove them from mirror list if they already
+        * their HW callbacks or remove them from pending list if they already
         * signaled.
         * This iteration is thread safe as sched thread is stopped.
         */
-       list_for_each_entry_safe_reverse(s_job, tmp, &sched->ring_mirror_list,
+       list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list,
                                         list) {
                if (s_job->s_fence->parent &&
                    dma_fence_remove_callback(s_job->s_fence->parent,
@@ -408,7 +408,7 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
                        atomic_dec(&sched->hw_rq_count);
                } else {
                        /*
-                        * remove job from ring_mirror_list.
+                        * remove job from pending_list.
                         * Locking here is for concurrent resume timeout
                         */
                        spin_lock(&sched->job_list_lock);
@@ -463,7 +463,7 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
         * so no new jobs are being inserted or removed. Also concurrent
         * GPU recovers can't run in parallel.
         */
-       list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, list) {
+       list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
                struct dma_fence *fence = s_job->s_fence->parent;

                atomic_inc(&sched->hw_rq_count);
@@ -494,7 +494,7 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
  EXPORT_SYMBOL(drm_sched_start);

  /**
- * drm_sched_resubmit_jobs - helper to relunch job from mirror ring list
+ * drm_sched_resubmit_jobs - helper to relunch job from pending ring list
   *
   * @sched: scheduler instance
   *
@@ -506,7 +506,7 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
        bool found_guilty = false;
        struct dma_fence *fence;

-       list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, list) {
+       list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
                struct drm_sched_fence *s_fence = s_job->s_fence;

                if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
@@ -665,7 +665,7 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
   *
   * @sched: scheduler instance
   *
- * Returns the next finished job from the mirror list (if there is one)
+ * Returns the next finished job from the pending list (if there is one)
   * ready for it to be destroyed.
   */
  static struct drm_sched_job *
@@ -675,7 +675,7 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)

        /*
         * Don't destroy jobs while the timeout worker is running  OR thread
-        * is being parked and hence assumed to not touch ring_mirror_list
+        * is being parked and hence assumed to not touch pending_list
         */
        if ((sched->timeout != MAX_SCHEDULE_TIMEOUT &&
            !cancel_delayed_work(&sched->work_tdr)) ||
@@ -684,11 +684,11 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)

        spin_lock(&sched->job_list_lock);
-       job = list_first_entry_or_null(&sched->ring_mirror_list,
+       job = list_first_entry_or_null(&sched->pending_list,
                                       struct drm_sched_job, list);

        if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
-               /* remove job from ring_mirror_list */
+               /* remove job from pending_list */
                list_del_init(&job->list);
        } else {
                job = NULL;
@@ -858,7 +858,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,

        init_waitqueue_head(&sched->wake_up_worker);
        init_waitqueue_head(&sched->job_scheduled);
-       INIT_LIST_HEAD(&sched->ring_mirror_list);
+       INIT_LIST_HEAD(&sched->pending_list);
        spin_lock_init(&sched->job_list_lock);
        atomic_set(&sched->hw_rq_count, 0);
        INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 3add0072bd37..2e0c368e19f6 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -174,7 +174,7 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
   * @sched: the scheduler instance on which this job is scheduled.
   * @s_fence: contains the fences for the scheduling of job.
   * @finish_cb: the callback for the finished fence.
- * @node: used to append this struct to the @drm_gpu_scheduler.ring_mirror_list.
+ * @node: used to append this struct to the @drm_gpu_scheduler.pending_list.
   * @id: a unique id assigned to each job scheduled on the scheduler.
   * @karma: increment on every hang caused by this job. If this exceeds the hang
   *         limit of the scheduler then the job is marked guilty and will not
@@ -203,7 +203,7 @@ struct drm_sched_job {
  static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
                                            int threshold)
  {
-       return (s_job && atomic_inc_return(&s_job->karma) > threshold);
+       return s_job && atomic_inc_return(&s_job->karma) > threshold;
  }

  /**
@@ -260,8 +260,8 @@ struct drm_sched_backend_ops {
   * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the
   *            timeout interval is over.
   * @thread: the kthread on which the scheduler which run.
- * @ring_mirror_list: the list of jobs which are currently in the job queue.
- * @job_list_lock: lock to protect the ring_mirror_list.
+ * @pending_list: the list of jobs which are currently in the job queue.
+ * @job_list_lock: lock to protect the pending_list.
   * @hang_limit: once the hangs by a job crosses this limit then it is marked
   *              guilty and it will be considered for scheduling further.
   * @score: score to help loadbalancer pick a idle sched
@@ -282,7 +282,7 @@ struct drm_gpu_scheduler {
        atomic64_t                      job_id_count;
        struct delayed_work             work_tdr;
        struct task_struct              *thread;
-       struct list_head                ring_mirror_list;
+       struct list_head                pending_list;
        spinlock_t                      job_list_lock;
        int                             hang_limit;
        atomic_t                        score;
