On 09.06.2017 at 00:06, Andres Rodriguez wrote:
Add an initial framework for changing the HW priorities of rings. The
framework allows requesting priority changes for the lifetime of an
amdgpu_job. After the job completes, the priority will decay to the next
lowest priority for which a request is still valid.

A new ring function, set_priority(), can now be populated to take care of
the HW-specific programming sequence for priority changes.
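
As an illustration only (not part of this patch; all names below are
placeholders), a HW backend would populate the new hook roughly like this:

	/* sketch of a backend-specific set_priority() implementation */
	static void example_ring_set_priority(struct amdgpu_ring *ring,
					      enum amd_sched_priority priority)
	{
		/* program the HW queue priority registers for @ring here */
	}

	static const struct amdgpu_ring_funcs example_ring_funcs = {
		/* ... other callbacks ... */
		.set_priority = example_ring_set_priority,
	};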

v2: set priority before emitting IB, and take a ref on amdgpu_job
v3: use AMD_SCHED_PRIORITY_* instead of AMDGPU_CTX_PRIORITY_*
v4: plug amdgpu_ring_restore_priority_cb into amdgpu_job_free_cb
v5: use atomic for tracking job priorities instead of last_job
v6: rename amdgpu_ring_priority_[get/put]() and align parameters
v7: replace spinlocks with mutexes for KIQ compatibility
v8: raise ring priority during cs_ioctl, instead of job_run

Signed-off-by: Andres Rodriguez <andre...@gmail.com>
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c        |  2 +
  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c       |  4 ++
  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c      | 76 ++++++++++++++++++++++++++-
  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h      | 15 ++++++
  drivers/gpu/drm/amd/scheduler/gpu_scheduler.h |  7 +++
  5 files changed, 103 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index b17635c..d84d026 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -996,30 +996,32 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
        if (r) {
                amdgpu_job_free(job);
                return r;
        }
job->owner = p->filp;
        job->fence_ctx = entity->fence_context;
        p->fence = dma_fence_get(&job->base.s_fence->finished);
        cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
        job->uf_sequence = cs->out.handle;
        amdgpu_job_free_resources(job);
        amdgpu_cs_parser_fini(p, 0, true);
trace_amdgpu_cs_ioctl(job);
        amd_sched_entity_push_job(&job->base);
+       amdgpu_ring_priority_get(job->ring,
+                                amd_sched_get_job_priority(&job->base));

After the call to amd_sched_entity_push_job() we no longer own the pointer, so the job could already be freed when you dereference it.

I suggest just moving the call to amdgpu_ring_priority_get() before the one to amd_sched_entity_push_job(), because the whole thing is racy in the first place.
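
I.e. roughly this ordering in amdgpu_cs_submit() (untested, just to illustrate the idea):

	trace_amdgpu_cs_ioctl(job);
	amdgpu_ring_priority_get(job->ring,
				 amd_sched_get_job_priority(&job->base));
	amd_sched_entity_push_job(&job->base);

	return 0;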

Christian.

return 0;
  }
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
  {
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_fpriv *fpriv = filp->driver_priv;
        union drm_amdgpu_cs *cs = data;
        struct amdgpu_cs_parser parser = {};
        bool reserved_buffers = false;
        int i, r;
if (!adev->accel_working)
                return -EBUSY;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 3d641e1..f10ce0d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -89,30 +89,31 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
  {
        struct dma_fence *f;
        unsigned i;
/* use sched fence if available */
        f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;
for (i = 0; i < job->num_ibs; ++i)
                amdgpu_ib_free(job->adev, &job->ibs[i], f);
  }
static void amdgpu_job_free_cb(struct amd_sched_job *s_job)
  {
        struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
+       amdgpu_ring_priority_put(job->ring, amd_sched_get_job_priority(s_job));
        dma_fence_put(job->fence);
        amdgpu_sync_free(&job->sync);
        amdgpu_sync_free(&job->dep_sync);
        amdgpu_sync_free(&job->sched_sync);
        kfree(job);
  }
void amdgpu_job_free(struct amdgpu_job *job)
  {
        amdgpu_job_free_resources(job);
dma_fence_put(job->fence);
        amdgpu_sync_free(&job->sync);
        amdgpu_sync_free(&job->dep_sync);
        amdgpu_sync_free(&job->sched_sync);
@@ -126,30 +127,32 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
        int r;
        job->ring = ring;
if (!f)
                return -EINVAL;
r = amd_sched_job_init(&job->base, &ring->sched, entity, owner);
        if (r)
                return r;
job->owner = owner;
        job->fence_ctx = entity->fence_context;
        *f = dma_fence_get(&job->base.s_fence->finished);
        amdgpu_job_free_resources(job);
        amd_sched_entity_push_job(&job->base);
+       amdgpu_ring_priority_get(job->ring,
+                                amd_sched_get_job_priority(&job->base));
return 0;
  }
static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
  {
        struct amdgpu_job *job = to_amdgpu_job(sched_job);
        struct amdgpu_vm *vm = job->vm;
struct dma_fence *fence = amdgpu_sync_get_fence(&job->dep_sync);
        int r;
if (amd_sched_dependency_optimized(fence, sched_job->s_entity)) {
                r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence);
                if (r)
@@ -189,25 +192,26 @@ static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
trace_amdgpu_sched_run_job(job);
        if (job->vm)
                fpriv = container_of(job->vm, struct amdgpu_fpriv, vm);
        /* skip ib schedule when vram is lost */
        if (fpriv && amdgpu_kms_vram_lost(job->adev, fpriv))
                DRM_ERROR("Skip scheduling IBs!\n");
        else {
        r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job,
                               &fence);
                if (r)
                        DRM_ERROR("Error scheduling IBs (%d)\n", r);
        }
        /* if gpu reset, hw fence will be replaced here */
        dma_fence_put(job->fence);
        job->fence = dma_fence_get(fence);
+
        amdgpu_job_free_resources(job);
        return fence;
  }
const struct amd_sched_backend_ops amdgpu_sched_ops = {
        .dependency = amdgpu_job_dependency,
        .run_job = amdgpu_job_run,
        .timedout_job = amdgpu_job_timedout,
        .free_job = amdgpu_job_free_cb
  };
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 75165e0..2d8b20a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -143,45 +143,114 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring)
   * amdgpu_ring_undo - reset the wptr
   *
   * @ring: amdgpu_ring structure holding ring information
   *
   * Reset the driver's copy of the wptr (all asics).
   */
  void amdgpu_ring_undo(struct amdgpu_ring *ring)
  {
        ring->wptr = ring->wptr_old;
if (ring->funcs->end_use)
                ring->funcs->end_use(ring);
  }
/**
+ * amdgpu_ring_priority_put - restore a ring's priority
+ *
+ * @ring: amdgpu_ring structure holding the information
+ * @priority: target priority
+ *
+ * Release a request for executing at @priority
+ */
+void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
+                             enum amd_sched_priority priority)
+{
+       int i;
+
+       if (!ring->funcs->set_priority)
+               return;
+
+       if (atomic_dec_return(&ring->num_jobs[priority]) > 0)
+               return;
+
+       /* no need to restore if the job is already at the lowest priority */
+       if (priority == AMD_SCHED_PRIORITY_NORMAL)
+               return;
+
+       mutex_lock(&ring->priority_mutex);
+       /* something higher prio is executing, no need to decay */
+       if (ring->priority > priority)
+               goto out_unlock;
+
+       /* decay priority to the next level with a job available */
+       for (i = priority; i >= AMD_SCHED_PRIORITY_MIN; i--) {
+               if (i == AMD_SCHED_PRIORITY_NORMAL
+                               || atomic_read(&ring->num_jobs[i])) {
+                       ring->priority = i;
+                       ring->funcs->set_priority(ring, i);
+                       break;
+               }
+       }
+
+out_unlock:
+       mutex_unlock(&ring->priority_mutex);
+}
+
+/**
+ * amdgpu_ring_priority_get - change the ring's priority
+ *
+ * @ring: amdgpu_ring structure holding the information
+ * @priority: target priority
+ *
+ * Request a ring's priority to be raised to @priority (refcounted).
+ */
+void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
+                             enum amd_sched_priority priority)
+{
+       if (!ring->funcs->set_priority)
+               return;
+
+       atomic_inc(&ring->num_jobs[priority]);
+
+       mutex_lock(&ring->priority_mutex);
+       if (priority <= ring->priority)
+               goto out_unlock;
+
+       ring->priority = priority;
+       ring->funcs->set_priority(ring, priority);
+
+out_unlock:
+       mutex_unlock(&ring->priority_mutex);
+}
+
+/**
   * amdgpu_ring_init - init driver ring struct.
   *
   * @adev: amdgpu_device pointer
   * @ring: amdgpu_ring structure holding ring information
   * @max_ndw: maximum number of dw for ring alloc
   * @nop: nop packet for this ring
   *
   * Initialize the driver information for the selected ring (all asics).
   * Returns 0 on success, error on failure.
   */
  int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
                     unsigned max_dw, struct amdgpu_irq_src *irq_src,
                     unsigned irq_type)
  {
-       int r;
+       int r, i;
if (ring->adev == NULL) {
                if (adev->num_rings >= AMDGPU_MAX_RINGS)
                        return -EINVAL;
ring->adev = adev;
                ring->idx = adev->num_rings++;
                adev->rings[ring->idx] = ring;
                r = amdgpu_fence_driver_init_ring(ring,
                        amdgpu_sched_hw_submission);
                if (r)
                        return r;
        }
if (ring->funcs->support_64bit_ptrs) {
@@ -243,33 +312,38 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
        /* Allocate ring buffer */
        if (ring->ring_obj == NULL) {
                r = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
                                            AMDGPU_GEM_DOMAIN_GTT,
                                            &ring->ring_obj,
                                            &ring->gpu_addr,
                                            (void **)&ring->ring);
                if (r) {
                        dev_err(adev->dev, "(%d) ring create failed\n", r);
                        return r;
                }
                amdgpu_ring_clear_ring(ring);
        }
ring->max_dw = max_dw;
+       ring->priority = AMD_SCHED_PRIORITY_NORMAL;
+       mutex_init(&ring->priority_mutex);
        INIT_LIST_HEAD(&ring->lru_list);
        amdgpu_ring_lru_touch(adev, ring);
+       for (i = 0; i < AMD_SCHED_PRIORITY_MAX; ++i)
+               atomic_set(&ring->num_jobs[i], 0);
+
        if (amdgpu_debugfs_ring_init(adev, ring)) {
                DRM_ERROR("Failed to register debugfs file for rings !\n");
        }
return 0;
  }
/**
   * amdgpu_ring_fini - tear down the driver ring struct.
   *
   * @adev: amdgpu_device pointer
   * @ring: amdgpu_ring structure holding ring information
   *
   * Tear down the driver information for the selected ring (all asics).
   */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index bc8dec9..7348769 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -12,30 +12,31 @@
   * all copies or substantial portions of the Software.
   *
   * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
   * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
   * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
   * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
   * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
   * OTHER DEALINGS IN THE SOFTWARE.
   *
   * Authors: Christian König
   */
  #ifndef __AMDGPU_RING_H__
  #define __AMDGPU_RING_H__
+#include <drm/amdgpu_drm.h>
  #include "gpu_scheduler.h"
/* max number of rings */
  #define AMDGPU_MAX_RINGS              18
  #define AMDGPU_MAX_GFX_RINGS          1
  #define AMDGPU_MAX_COMPUTE_RINGS      8
  #define AMDGPU_MAX_VCE_RINGS          3
  #define AMDGPU_MAX_UVD_ENC_RINGS      2
/* some special values for the owner field */
  #define AMDGPU_FENCE_OWNER_UNDEFINED  ((void*)0ul)
  #define AMDGPU_FENCE_OWNER_VM         ((void*)1ul)
#define AMDGPU_FENCE_FLAG_64BIT (1 << 0)
  #define AMDGPU_FENCE_FLAG_INT           (1 << 1)
@@ -44,30 +45,31 @@ enum amdgpu_ring_type {
        AMDGPU_RING_TYPE_GFX,
        AMDGPU_RING_TYPE_COMPUTE,
        AMDGPU_RING_TYPE_SDMA,
        AMDGPU_RING_TYPE_UVD,
        AMDGPU_RING_TYPE_VCE,
        AMDGPU_RING_TYPE_KIQ,
        AMDGPU_RING_TYPE_UVD_ENC,
        AMDGPU_RING_TYPE_VCN_DEC,
        AMDGPU_RING_TYPE_VCN_ENC
  };
struct amdgpu_device;
  struct amdgpu_ring;
  struct amdgpu_ib;
  struct amdgpu_cs_parser;
+struct amdgpu_job;
/*
   * Fences.
   */
  struct amdgpu_fence_driver {
        uint64_t                        gpu_addr;
        volatile uint32_t               *cpu_addr;
        /* sync_seq is protected by ring emission lock */
        uint32_t                        sync_seq;
        atomic_t                        last_seq;
        bool                            initialized;
        struct amdgpu_irq_src           *irq_src;
        unsigned                        irq_type;
        struct timer_list               fallback_timer;
        unsigned                        num_fences_mask;
@@ -135,30 +137,33 @@ struct amdgpu_ring_funcs {
        void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
        void (*insert_start)(struct amdgpu_ring *ring);
        void (*insert_end)(struct amdgpu_ring *ring);
        /* pad the indirect buffer to the necessary number of dw */
        void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
        unsigned (*init_cond_exec)(struct amdgpu_ring *ring);
        void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset);
        /* note usage for clock and power gating */
        void (*begin_use)(struct amdgpu_ring *ring);
        void (*end_use)(struct amdgpu_ring *ring);
        void (*emit_switch_buffer) (struct amdgpu_ring *ring);
        void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
        void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg);
        void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
        void (*emit_tmz)(struct amdgpu_ring *ring, bool start);
+       /* priority functions */
+       void (*set_priority) (struct amdgpu_ring *ring,
+                             enum amd_sched_priority priority);
  };
struct amdgpu_ring {
        struct amdgpu_device            *adev;
        const struct amdgpu_ring_funcs  *funcs;
        struct amdgpu_fence_driver      fence_drv;
        struct amd_gpu_scheduler        sched;
        struct list_head                lru_list;
struct amdgpu_bo *ring_obj;
        volatile uint32_t       *ring;
        unsigned                rptr_offs;
        u64                     wptr;
        u64                     wptr_old;
        unsigned                ring_size;
@@ -175,40 +180,50 @@ struct amdgpu_ring {
        struct amdgpu_bo        *mqd_obj;
        uint64_t                mqd_gpu_addr;
        void                    *mqd_ptr;
        uint64_t                eop_gpu_addr;
        u32                     doorbell_index;
        bool                    use_doorbell;
        unsigned                wptr_offs;
        unsigned                fence_offs;
        uint64_t                current_ctx;
        char                    name[16];
        unsigned                cond_exe_offs;
        u64                     cond_exe_gpu_addr;
        volatile u32            *cond_exe_cpu_addr;
        unsigned                vm_inv_eng;
        bool                    has_compute_vm_bug;
+
+       atomic_t                num_jobs[AMD_SCHED_PRIORITY_MAX];
+       struct mutex            priority_mutex;
+       /* protected by priority_mutex */
+       int                     priority;
+
  #if defined(CONFIG_DEBUG_FS)
        struct dentry *ent;
  #endif
  };
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
  void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
  void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
  void amdgpu_ring_commit(struct amdgpu_ring *ring);
  void amdgpu_ring_undo(struct amdgpu_ring *ring);
+void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
+                             enum amd_sched_priority priority);
+void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
+                             enum amd_sched_priority priority);
  int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
                     unsigned ring_size, struct amdgpu_irq_src *irq_src,
                     unsigned irq_type);
  void amdgpu_ring_fini(struct amdgpu_ring *ring);
  int amdgpu_ring_lru_get(struct amdgpu_device *adev, int type, int *blacklist,
                        int num_blacklist, struct amdgpu_ring **ring);
  void amdgpu_ring_lru_touch(struct amdgpu_device *adev, struct amdgpu_ring *ring);
  static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring)
  {
        int i = 0;
        while (i <= ring->buf_mask)
                ring->ring[i++] = ring->funcs->nop;
  }

diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 38e622c..dbcaa2e 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -158,16 +158,23 @@ int amd_sched_fence_slab_init(void);
  void amd_sched_fence_slab_fini(void);
struct amd_sched_fence *amd_sched_fence_create(
        struct amd_sched_entity *s_entity, void *owner);
  void amd_sched_fence_scheduled(struct amd_sched_fence *fence);
  void amd_sched_fence_finished(struct amd_sched_fence *fence);
  int amd_sched_job_init(struct amd_sched_job *job,
                       struct amd_gpu_scheduler *sched,
                       struct amd_sched_entity *entity,
                       void *owner);
  void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched);
  void amd_sched_job_recovery(struct amd_gpu_scheduler *sched);
  bool amd_sched_dependency_optimized(struct dma_fence* fence,
                                    struct amd_sched_entity *entity);
  void amd_sched_job_kickout(struct amd_sched_job *s_job);
+
+static inline enum amd_sched_priority
+amd_sched_get_job_priority(struct amd_sched_job *job)
+{
+       return (job->s_entity->rq - job->sched->sched_rq);
+}
+
  #endif

