[PATCH 2/8] drm/amdgpu: add framework for HW specific priority settings v9

2017-09-21 Thread Andres Rodriguez
Add an initial framework for changing the HW priorities of rings. The
framework allows requesting priority changes for the lifetime of an
amdgpu_job. After the job completes, the priority will decay to the
next-lowest priority for which a request is still valid.

A new ring function set_priority() can now be populated to take care of
the HW specific programming sequence for priority changes.
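
For illustration, a backend opts in by filling the new set_priority() hook in
its amdgpu_ring_funcs. The sketch below only shows the likely shape of such a
hook, assuming it takes the ring and an amd_sched_priority as its callers in
the diff suggest; the example_* names and the register programming they stand
for are placeholders, not part of this series.

	/* Illustrative only: possible shape of a HW specific set_priority()
	 * hook.  The example_* names and the register sequence they stand
	 * for are placeholders. */
	static void example_program_queue_priority(struct amdgpu_device *adev,
						   struct amdgpu_ring *ring,
						   bool high)
	{
		/* e.g. write the pipe/queue priority registers for @ring */
	}

	static void example_ring_set_priority(struct amdgpu_ring *ring,
					      enum amd_sched_priority priority)
	{
		struct amdgpu_device *adev = ring->adev;

		/* anything above NORMAL raises the queue, everything else
		 * restores the default priority */
		example_program_queue_priority(adev, ring,
					       priority > AMD_SCHED_PRIORITY_NORMAL);
	}

	static const struct amdgpu_ring_funcs example_ring_funcs = {
		/* ...existing callbacks... */
		.set_priority = example_ring_set_priority,
	};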

v2: set priority before emitting IB, and take a ref on amdgpu_job
v3: use AMD_SCHED_PRIORITY_* instead of AMDGPU_CTX_PRIORITY_*
v4: plug amdgpu_ring_restore_priority_cb into amdgpu_job_free_cb
v5: use atomic for tracking job priorities instead of last_job
v6: rename amdgpu_ring_priority_[get/put]() and align parameters
v7: replace spinlocks with mutexes for KIQ compatibility
v8: raise ring priority during cs_ioctl, instead of job_run
v9: priority_get() before push_job()

Acked-by: Christian König 
Signed-off-by: Andres Rodriguez 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c|  2 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c   |  4 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c  | 76 ++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h  | 15 ++
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.h |  7 +++
 5 files changed, 103 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 5840d07..bc8a403 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1164,6 +1164,8 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
job->uf_sequence = cs->out.handle;
amdgpu_job_free_resources(job);
+   amdgpu_ring_priority_get(job->ring,
+amd_sched_get_job_priority(&job->base));
 
trace_amdgpu_cs_ioctl(job);
amd_sched_entity_push_job(&job->base);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 4510627..83d1343 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -103,6 +103,7 @@ static void amdgpu_job_free_cb(struct amd_sched_job *s_job)
 {
struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
 
+   amdgpu_ring_priority_put(job->ring, amd_sched_get_job_priority(s_job));
dma_fence_put(job->fence);
amdgpu_sync_free(&job->sync);
amdgpu_sync_free(&job->dep_sync);
@@ -139,6 +140,8 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
job->fence_ctx = entity->fence_context;
*f = dma_fence_get(&job->base.s_fence->finished);
amdgpu_job_free_resources(job);
+   amdgpu_ring_priority_get(job->ring,
+amd_sched_get_job_priority(&job->base));
amd_sched_entity_push_job(&job->base);
 
return 0;
@@ -203,6 +206,7 @@ static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
/* if gpu reset, hw fence will be replaced here */
dma_fence_put(job->fence);
job->fence = dma_fence_get(fence);
+
amdgpu_job_free_resources(job);
return fence;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 5ce6528..331ed8a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -155,6 +155,75 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
 }
 
 /**
+ * amdgpu_ring_priority_put - restore a ring's priority
+ *
+ * @ring: amdgpu_ring structure holding the information
+ * @priority: target priority
+ *
+ * Release a request for executing at @priority
+ */
+void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
+ enum amd_sched_priority priority)
+{
+   int i;
+
+   if (!ring->funcs->set_priority)
+   return;
+
+   if (atomic_dec_return(&ring->num_jobs[priority]) > 0)
+   return;
+
+   /* no need to restore if the job is already at the lowest priority */
+   if (priority == AMD_SCHED_PRIORITY_NORMAL)
+   return;
+
+   mutex_lock(&ring->priority_mutex);
+   /* something higher prio is executing, no need to decay */
+   if (ring->priority > priority)
+   goto out_unlock;
+
+   /* decay priority to the next level with a job available */
+   for (i = priority; i >= AMD_SCHED_PRIORITY_MIN; i--) {
+   if (i == AMD_SCHED_PRIORITY_NORMAL
+   || atomic_read(&ring->num_jobs[i])) {
+   ring->priority = i;
+   ring->funcs->set_priority(ring, i);
+   break;
+   }
+   }
+
+out_unlock:
+   mutex_unlock(&ring->priority_mutex);
+}
+
+/**
+ * amdgpu_ring_priority_get - change the ring's priority
+ *
+ * @ring: amdgpu_ring structure holding the information
+ * @priority: target priority
+ *
+ * Request a ring's priority to be raised to @priority.
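
The rest of the patch (the body of amdgpu_ring_priority_get() and the
amdgpu_ring.h / gpu_scheduler.h hunks listed in the diffstat) is not shown
above. As a rough sketch of what amdgpu_ring_priority_put() relies on, the
ring presumably carries a per-priority job counter, the current priority and
a mutex, with priority_get() acting as the refcounting counterpart. The code
below is inferred from those uses, not the patch text, and the
AMD_SCHED_PRIORITY_MAX array bound is an assumption.

	/* Inferred sketch, not the patch text: ring-side state used by the
	 * priority code above.  Field names follow their uses in
	 * amdgpu_ring_priority_put(); the AMD_SCHED_PRIORITY_MAX bound and
	 * the placement inside struct amdgpu_ring are assumptions. */
	struct amdgpu_ring {
		/* ...existing members... */
		atomic_t		num_jobs[AMD_SCHED_PRIORITY_MAX];
		struct mutex		priority_mutex;
		/* protected by priority_mutex */
		int			priority;
	};

	/* Likely counterpart to priority_put(): record a request for
	 * @priority and raise the ring if it outranks the current level. */
	void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
				      enum amd_sched_priority priority)
	{
		if (!ring->funcs->set_priority)
			return;

		atomic_inc(&ring->num_jobs[priority]);

		mutex_lock(&ring->priority_mutex);
		if (priority > ring->priority) {
			ring->priority = priority;
			ring->funcs->set_priority(ring, priority);
		}
		mutex_unlock(&ring->priority_mutex);
	}

With that pairing, priority_get() at submission time and priority_put() in
amdgpu_job_free_cb() keep the ring at the highest priority for which at least
one job is still outstanding; once the last request at a level is dropped,
priority_put() decays the ring to the next level that still has jobs, or back
to AMD_SCHED_PRIORITY_NORMAL.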

Re: [PATCH 2/8] drm/amdgpu: add framework for HW specific priority settings v9

2017-06-27 Thread Christian König

On 26.06.2017 at 22:36, Andres Rodriguez wrote:

Add an initial framework for changing the HW priorities of rings. The
framework allows requesting priority changes for the lifetime of an
amdgpu_job. After the job completes the priority will decay to the next
lowest priority for which a request is still valid.

A new ring function set_priority() can now be populated to take care of
the HW specific programming sequence for priority changes.

v2: set priority before emitting IB, and take a ref on amdgpu_job
v3: use AMD_SCHED_PRIORITY_* instead of AMDGPU_CTX_PRIORITY_*
v4: plug amdgpu_ring_restore_priority_cb into amdgpu_job_free_cb
v5: use atomic for tracking job priorities instead of last_job
v6: rename amdgpu_ring_priority_[get/put]() and align parameters
v7: replace spinlocks with mutexes for KIQ compatibility
v8: raise ring priority during cs_ioctl, instead of job_run
v9: priority_get() before push_job()

Signed-off-by: Andres Rodriguez 


Reviewed-by: Christian König 

