Userspace relies on the ring field of gpu_scheduler tracepoints to
identify a drm_gpu_scheduler.  The value of the ring field is taken from
sched->name.

Because we typically have multiple schedulers running in parallel in
each process, assign unique names to schedulers so that userspace can
distinguish them.

The name of a queue's scheduler embeds the group id, so group id
allocation (xa_alloc) is moved before queue creation; a new
err_erase_gid label unwinds the allocation if queue creation fails.

Signed-off-by: Chia-I Wu <olva...@gmail.com>
---
 drivers/gpu/drm/panthor/panthor_sched.c | 32 ++++++++++++++++++-------
 1 file changed, 23 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index ba5dc3e443d9c..26616b6cb110d 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -360,6 +360,9 @@ struct panthor_queue {
        /** @entity: DRM scheduling entity used for this queue. */
        struct drm_sched_entity entity;
 
+       /** @name: DRM scheduler name for this queue. */
+       char name[32];
+
        /**
         * @remaining_time: Time remaining before the job timeout expires.
         *
@@ -3308,9 +3311,10 @@ static u32 calc_profiling_ringbuf_num_slots(struct panthor_device *ptdev,
 
 static struct panthor_queue *
 group_create_queue(struct panthor_group *group,
-                  const struct drm_panthor_queue_create *args)
+                  const struct drm_panthor_queue_create *args, u32 gid,
+                  u32 qid)
 {
-       const struct drm_sched_init_args sched_args = {
+       struct drm_sched_init_args sched_args = {
                .ops = &panthor_queue_sched_ops,
                .submit_wq = group->ptdev->scheduler->wq,
                .num_rqs = 1,
@@ -3323,7 +3327,7 @@ group_create_queue(struct panthor_group *group,
                .credit_limit = args->ringbuf_size / sizeof(u64),
                .timeout = msecs_to_jiffies(JOB_TIMEOUT_MS),
                .timeout_wq = group->ptdev->reset.wq,
-               .name = "panthor-queue",
+               .name = NULL, /* will point to queue->name */
                .dev = group->ptdev->base.dev,
        };
        struct drm_gpu_scheduler *drm_sched;
@@ -3398,6 +3402,11 @@ group_create_queue(struct panthor_group *group,
        if (ret)
                goto err_free_queue;
 
+       /* assign a unique name */
+       snprintf(queue->name, sizeof(queue->name), "panthor-queue-%d-%d", gid,
+                qid);
+       sched_args.name = queue->name;
+
        ret = drm_sched_init(&queue->scheduler, &sched_args);
        if (ret)
                goto err_free_queue;
@@ -3540,12 +3549,18 @@ int panthor_group_create(struct panthor_file *pfile,
        memset(group->syncobjs->kmap, 0,
               group_args->queues.count * sizeof(struct panthor_syncobj_64b));
 
+       ret = xa_alloc(&gpool->xa, &gid, group,
+                      XA_LIMIT(1, MAX_GROUPS_PER_POOL), GFP_KERNEL);
+       if (ret)
+               goto err_put_group;
+
        for (i = 0; i < group_args->queues.count; i++) {
-               group->queues[i] = group_create_queue(group, &queue_args[i]);
+               group->queues[i] =
+                       group_create_queue(group, &queue_args[i], gid, i);
                if (IS_ERR(group->queues[i])) {
                        ret = PTR_ERR(group->queues[i]);
                        group->queues[i] = NULL;
-                       goto err_put_group;
+                       goto err_erase_gid;
                }
 
                group->queue_count++;
@@ -3553,10 +3568,6 @@ int panthor_group_create(struct panthor_file *pfile,
 
        group->idle_queues = GENMASK(group->queue_count - 1, 0);
 
-       ret = xa_alloc(&gpool->xa, &gid, group, XA_LIMIT(1, MAX_GROUPS_PER_POOL), GFP_KERNEL);
-       if (ret)
-               goto err_put_group;
-
        mutex_lock(&sched->reset.lock);
        if (atomic_read(&sched->reset.in_progress)) {
                panthor_group_stop(group);
@@ -3575,6 +3586,9 @@ int panthor_group_create(struct panthor_file *pfile,
 
        return gid;
 
+err_erase_gid:
+       xa_erase(&gpool->xa, gid);
+
 err_put_group:
        group_put(group);
        return ret;
-- 
2.51.0.318.gd7df087d1a-goog

Reply via email to