From: Chunming Zhou <david1.z...@amd.com>

Store the owning amdgpu_device in the context and embed one scheduler
context entity (struct amd_context_entity) in each context ring. When
the GPU scheduler is enabled, the entities are initialized at context
creation time: kernel contexts (no fpriv) use the kernel run queue,
user contexts use the normal scheduler run queue. The entities are
torn down again when the context is released.

Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h     |  2 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 36 ++++++++++++++++++++++++++++++++-
 2 files changed, 37 insertions(+), 1 deletion(-)

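Not part of this patch: a minimal sketch of how a command-submission
path might look up the per-ring entity added here. The helper name
amdgpu_ctx_get_entity() is hypothetical; only the c_entity field and
the ctx->rings[] layout come from this patch, and ring->idx is assumed
to match the adev->rings[] index used below.

    static struct amd_context_entity *
    amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring)
    {
            /* one scheduler entity per context ring, set up in
             * amdgpu_ctx_alloc() and released in amdgpu_ctx_do_release() */
            return &ctx->rings[ring->idx].c_entity;
    }
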
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index a311029..12ac818 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -993,10 +993,12 @@ struct amdgpu_vm_manager {
 struct amdgpu_ctx_ring {
        uint64_t        sequence;
        struct fence    *fences[AMDGPU_CTX_MAX_CS_PENDING];
+       struct amd_context_entity c_entity;
 };

 struct amdgpu_ctx {
        struct kref             refcount;
+       struct amdgpu_device    *adev;
        unsigned                reset_counter;
        spinlock_t              ring_lock;
        struct amdgpu_ctx_ring  rings[AMDGPU_MAX_RINGS];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 144edc9..557fb60 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -28,13 +28,23 @@
 static void amdgpu_ctx_do_release(struct kref *ref)
 {
        struct amdgpu_ctx *ctx;
+       struct amdgpu_device *adev;
        unsigned i, j;

        ctx = container_of(ref, struct amdgpu_ctx, refcount);
+       adev = ctx->adev;
+

        for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
                for (j = 0; j < AMDGPU_CTX_MAX_CS_PENDING; ++j)
                        fence_put(ctx->rings[i].fences[j]);
+
+       if (amdgpu_enable_scheduler) {
+               for (i = 0; i < adev->num_rings; i++)
+                       amd_context_entity_fini(adev->rings[i]->scheduler,
+                                               &ctx->rings[i].c_entity);
+       }
+
        kfree(ctx);
 }

@@ -43,7 +53,7 @@ int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
 {
        struct amdgpu_ctx *ctx;
        struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
-       int i, r;
+       int i, j, r;

        ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
@@ -59,11 +69,35 @@ int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
        *id = (uint32_t)r;

        memset(ctx, 0, sizeof(*ctx));
+       ctx->adev = adev;
        kref_init(&ctx->refcount);
        spin_lock_init(&ctx->ring_lock);
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
                ctx->rings[i].sequence = 1;
        mutex_unlock(&mgr->lock);
+       if (amdgpu_enable_scheduler) {
+               /* create context entity for each ring */
+               for (i = 0; i < adev->num_rings; i++) {
+                       struct amd_run_queue *rq;
+                       if (fpriv)
+                               rq = &adev->rings[i]->scheduler->sched_rq;
+                       else
+                               rq = &adev->rings[i]->scheduler->kernel_rq;
+                       r = amd_context_entity_init(adev->rings[i]->scheduler,
+                                                   &ctx->rings[i].c_entity,
+                                                   NULL, rq, *id);
+                       if (r)
+                               break;
+               }
+
+               if (i < adev->num_rings) {
+                       for (j = 0; j < i; j++)
+                               amd_context_entity_fini(adev->rings[j]->scheduler,
+                                                        &ctx->rings[j].c_entity);
+                       kfree(ctx);
+                       return -EINVAL;
+               }
+       }

        return 0;
 }
-- 
1.8.3.1
