On Fri, Jan 23, 2026 at 5:11 AM Jesse.Zhang <[email protected]> wrote:
>
> Extend the AMDGPU_USERQ ioctl to support dynamic modification of
> existing user mode queues after creation. This lets userspace update
> queue attributes without destroying and recreating the queue.
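>
> For example, userspace could modify a queue roughly like this (an
> illustrative sketch, not part of this patch; queue_id, queue_va,
> queue_size, and fd are assumed to come from a prior
> AMDGPU_USERQ_OP_CREATE call and device setup, drmIoctl() from libdrm):
>
>     union drm_amdgpu_userq args = {};
>
>     args.in.op = AMDGPU_USERQ_OP_MODIFY;
>     args.in.queue_id = queue_id;          /* from OP_CREATE */
>     args.in.queue_va = queue_va;          /* ring base, unchanged here */
>     args.in.queue_size = queue_size;      /* ring size, unchanged here */
>     args.in.queue_percentage = 100;       /* full resource allocation */
>     args.in.hqd_queue_priority = 11;      /* maps to AMDGPU_RING_PRIO_2 */
>     args.in.pm4_target_xcc = 0;           /* used on gfx9/gfx12.1 only */
>     drmIoctl(fd, DRM_IOCTL_AMDGPU_USERQ, &args);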
>
> v2: add a new AMDGPU_USERQ op, AMDGPU_USERQ_OP_MODIFY. (Alex)
>    make the target xcc an explicit parameter. (Alex)
>
> Suggested-by: Alex Deucher <[email protected]>
> Signed-off-by: Jesse Zhang <[email protected]>
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu.h        |  3 +
>  drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c  | 96 +++++++++++++++++++++-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h  |  3 +
>  drivers/gpu/drm/amd/amdgpu/mes_userqueue.c |  1 +
>  include/uapi/drm/amdgpu_drm.h              | 17 ++++
>  5 files changed, 116 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> index a8f4f73fa0ce..ad136145316b 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> @@ -819,6 +819,9 @@ struct amdgpu_mqd_prop {
>         uint32_t cu_mask_count;
>         uint32_t cu_flags;
>         bool is_user_cu_masked;
> +       uint32_t queue_percentage;
> +       /* used in gfx9 and gfx12.1 */
> +       uint32_t pm4_target_xcc;
>  };
>
>  struct amdgpu_mqd {
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
> index 4d7841f47dd3..de267135af69 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
> @@ -34,6 +34,26 @@
>  #include "amdgpu_hmm.h"
>  #include "amdgpu_userq_fence.h"
>
> +/* Mapping queue priority to pipe priority, indexed by queue priority */
> +int amdgpu_userq_pipe_priority_map[] = {
> +       AMDGPU_RING_PRIO_0,
> +       AMDGPU_RING_PRIO_0,
> +       AMDGPU_RING_PRIO_0,
> +       AMDGPU_RING_PRIO_0,
> +       AMDGPU_RING_PRIO_0,
> +       AMDGPU_RING_PRIO_0,
> +       AMDGPU_RING_PRIO_0,
> +       AMDGPU_RING_PRIO_1,
> +       AMDGPU_RING_PRIO_1,
> +       AMDGPU_RING_PRIO_1,
> +       AMDGPU_RING_PRIO_1,
> +       AMDGPU_RING_PRIO_2,
> +       AMDGPU_RING_PRIO_2,
> +       AMDGPU_RING_PRIO_2,
> +       AMDGPU_RING_PRIO_2,
> +       AMDGPU_RING_PRIO_2
> +};
> +
>  u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev)
>  {
>         int i;
> @@ -906,7 +926,6 @@ static int amdgpu_userq_update_queue(struct amdgpu_usermode_queue *queue)
>         struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
>         struct amdgpu_device *adev = uq_mgr->adev;
>         const struct amdgpu_userq_funcs *uq_funcs;
> -       bool unmap_queue = false;
>         int r;
>
>         uq_funcs = adev->userq_funcs[queue->queue_type];
> @@ -922,12 +941,17 @@ static int amdgpu_userq_update_queue(struct amdgpu_usermode_queue *queue)
>                 r = amdgpu_userq_unmap_helper(queue);
>                 if (r)
>                         return r;
> -               unmap_queue = true;
>         }
>
>         r = uq_funcs->mqd_update(queue);
> -
> -       if (unmap_queue) {
> +       if (r)
> +               return r;
> +       /*
> +        * If the queue is considered active (has a valid size, address, and
> +        * percentage), we attempt to map it. This effectively starts the
> +        * queue, or restarts it if it was previously running.
> +        */
> +       if (AMDGPU_USERQ_IS_ACTIVE(queue)) {
>                 r = amdgpu_userq_map_helper(queue);
>                 if (r)
>                         drm_file_err(uq_mgr->file, "Failed to remap queue %llu after update\n",
> @@ -937,6 +961,65 @@ static int amdgpu_userq_update_queue(struct amdgpu_usermode_queue *queue)
>         return r;
>  }
>
> +static int amdgpu_modify_queue(struct drm_file *filp, union drm_amdgpu_userq *args)
> +{
> +       struct amdgpu_fpriv *fpriv = filp->driver_priv;
> +       struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
> +       struct amdgpu_usermode_queue *queue;
> +       struct amdgpu_mqd_prop *props;
> +       int r;
> +
> +       if (args->in.queue_percentage > AMDGPU_USERQ_MAX_QUEUE_PERCENTAGE) {
> +               drm_file_err(uq_mgr->file, "Queue percentage must be between 0 and AMDGPU_USERQ_MAX_QUEUE_PERCENTAGE\n");
> +               return -EINVAL;
> +       }
> +
> +       /* Validate priority */
> +       if (args->in.hqd_queue_priority > AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM) {
> +               drm_file_err(uq_mgr->file, "Queue priority must be between 0 and AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM\n");
> +               return -EINVAL;
> +       }
> +
> +       /* Validate ring size */
> +       if (!is_power_of_2(args->in.queue_size) && (args->in.queue_size != 0)) {
> +               drm_file_err(uq_mgr->file, "Ring size must be a power of 2 or 0\n");
> +               return -EINVAL;
> +       }
> +
> +       if (args->in.queue_size > 0 && args->in.queue_size < AMDGPU_GPU_PAGE_SIZE) {
> +               args->in.queue_size = AMDGPU_GPU_PAGE_SIZE;
> +               drm_file_err(uq_mgr->file, "Size clamped to AMDGPU_GPU_PAGE_SIZE\n");
> +       }
> +
> +       if ((args->in.queue_va) &&
> +               (!access_ok((const void __user *) args->in.queue_va,
> +                       sizeof(uint64_t)))) {
> +               drm_file_err(uq_mgr->file, "Can't access ring base address\n");
> +               return -EFAULT;
> +       }
> +
> +       mutex_lock(&uq_mgr->userq_mutex);
> +       queue = amdgpu_userq_find(uq_mgr, args->in.queue_id);
> +       if (!queue) {
> +               mutex_unlock(&uq_mgr->userq_mutex);
> +               return -EINVAL;
> +       }
> +
> +       props = queue->userq_prop;
> +       props->queue_size = args->in.queue_size;
> +       props->hqd_base_gpu_addr = args->in.queue_va;
> +       props->queue_percentage = args->in.queue_percentage;
> +       props->pm4_target_xcc = args->in.pm4_target_xcc;
> +       props->hqd_pipe_priority = amdgpu_userq_pipe_priority_map[args->in.hqd_queue_priority];
> +       props->hqd_queue_priority = args->in.hqd_queue_priority;
> +
> +       r = amdgpu_userq_update_queue(queue);
> +
> +       mutex_unlock(&uq_mgr->userq_mutex);
> +
> +       return r;
> +}
> +
>  static int amdgpu_userq_set_cu_mask(struct drm_file *filp,  union drm_amdgpu_userq *args)
>  {
>         struct amdgpu_fpriv *fpriv = filp->driver_priv;
> @@ -1031,6 +1114,11 @@ int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
>                 amdgpu_userq_set_cu_mask(filp, args);
>                 break;
>
> +       case AMDGPU_USERQ_OP_MODIFY:
> +               r = amdgpu_modify_queue(filp, args);
> +               if (r)
> +                       drm_file_err(filp, "Failed to modify usermode queue\n");
> +               break;
>         case AMDGPU_USERQ_OP_FREE:
>                 r = amdgpu_userq_destroy(filp, args->in.queue_id);
>                 if (r)
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
> index c80d69368196..078f424e9415 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
> @@ -31,6 +31,9 @@
>  #define to_ev_fence(f) container_of(f, struct amdgpu_eviction_fence, base)
>  #define uq_mgr_to_fpriv(u) container_of(u, struct amdgpu_fpriv, userq_mgr)
>  #define work_to_uq_mgr(w, name) container_of(w, struct amdgpu_userq_mgr, name)
> +#define AMDGPU_USERQ_IS_ACTIVE(q) ((q)->userq_prop->queue_size > 0 &&  \
> +                           (q)->userq_prop->hqd_base_gpu_addr != 0 &&  \
> +                           (q)->userq_prop->queue_percentage > 0)
>
>  enum amdgpu_userq_state {
>         AMDGPU_USERQ_STATE_UNMAPPED = 0,
> diff --git a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
> index 0565986949bd..1a0079a2b47c 100644
> --- a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
> +++ b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
> @@ -304,6 +304,7 @@ static int mes_userq_mqd_create(struct amdgpu_usermode_queue *queue,
>         userq_props->use_doorbell = true;
>         userq_props->doorbell_index = queue->doorbell_index;
>         userq_props->fence_address = queue->fence_drv->gpu_addr;
> +       userq_props->queue_percentage = AMDGPU_USERQ_MAX_QUEUE_PERCENTAGE;
>
>         if (queue->queue_type == AMDGPU_HW_IP_COMPUTE) {
>                 struct drm_amdgpu_userq_mqd_compute_gfx11 *compute_mqd;
> diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
> index cfc3a9313229..fab2914ac25c 100644
> --- a/include/uapi/drm/amdgpu_drm.h
> +++ b/include/uapi/drm/amdgpu_drm.h
> @@ -331,6 +331,7 @@ union drm_amdgpu_ctx {
>  #define AMDGPU_USERQ_OP_CREATE 1
>  #define AMDGPU_USERQ_OP_FREE   2
>  #define AMDGPU_USERQ_OP_MODIFY_CU_MASK 3
> +#define AMDGPU_USERQ_OP_MODIFY 4
>
>  /* queue priority levels */
>  /* low < normal low < normal high < high */
> @@ -342,6 +343,7 @@ union drm_amdgpu_ctx {
>  #define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_HIGH 3 /* admin only */
>  /* for queues that need access to protected content */
>  #define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE  (1 << 2)
> +#define AMDGPU_USERQ_MAX_QUEUE_PERCENTAGE      100
>
>  /*
>   * This structure is a container to pass input configuration
> @@ -423,6 +425,21 @@ struct drm_amdgpu_userq_in {
>          * represents 32 CUs/WGPs)
>          */
>         __u32 cu_mask_count;
> +       /**
> +        * @queue_percentage: Queue resource allocation percentage (0-100)
> +        * Defines the percentage of GPU resources allocated to this queue
> +        */
> +       __u32 queue_percentage;
> +       /**
> +        * @hqd_queue_priority: HQD queue priority (0-15)
> +        * Higher values indicate higher scheduling priority for the queue
> +        */
> +       __u32 hqd_queue_priority;
> +       /**
> +        * @pm4_target_xcc: PM4 target XCC identifier (for gfx9/gfx12.1)
> +        * Specifies the target XCC (Cross Compute Complex) for PM4 commands
> +        */
> +       __u32 pm4_target_xcc;

Same comment as patch 6.

Alex

>  };
>
>  /* The structure to carry output of userqueue ops */
> --
> 2.49.0
>
