On Tue, Sep 27, 2016 at 3:47 PM, Edward O'Callaghan <funfunc...@folklore1984.net> wrote: > Use a struct to carry the calculated const state inside the > main kfd_dev state to use where we need it. Minor cleanups > while we are here. > > Signed-off-by: Edward O'Callaghan <funfunc...@folklore1984.net> > --- > drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c | 9 ++------- > drivers/gpu/drm/amd/amdkfd/kfd_device.c | 7 +++++++ > drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 13 ++++++------- > drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 3 --- > drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 7 +++++++ > drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 2 +- > 6 files changed, 23 insertions(+), 18 deletions(-) > > diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c > b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c > index d5e19b5..2114c66 100644 > --- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c > +++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c > @@ -800,13 +800,8 @@ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, > struct kfd_process *p) > union GRBM_GFX_INDEX_BITS reg_gfx_index; > struct kfd_process_device *pdd; > struct dbg_wave_control_info wac_info; > - int temp; > - int first_vmid_to_scan = 8; > - int last_vmid_to_scan = 15; > - > - first_vmid_to_scan = ffs(dev->shared_resources.compute_vmid_bitmap) - > 1; > - temp = dev->shared_resources.compute_vmid_bitmap >> > first_vmid_to_scan; > - last_vmid_to_scan = first_vmid_to_scan + ffz(temp); > + int first_vmid_to_scan = dev->vm_info.first_vmid_kfd; > + int last_vmid_to_scan = dev->vm_info.last_vmid_kfd; > > reg_sq_cmd.u32All = 0; > status = 0; > diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c > b/drivers/gpu/drm/amd/amdkfd/kfd_device.c > index 3f95f7c..2417b44 100644 > --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c > +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c > @@ -223,9 +223,16 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, > const struct kgd2kfd_shared_resources *gpu_resources) > { > unsigned int 
size; > + unsigned int vmid_bitmap_kfd; > > kfd->shared_resources = *gpu_resources; > > + vmid_bitmap_kfd = kfd->shared_resources.compute_vmid_bitmap; > + kfd->vm_info.first_vmid_kfd = ffs(vmid_bitmap_kfd) - 1; > + kfd->vm_info.last_vmid_kfd = fls(vmid_bitmap_kfd) - 1; > + kfd->vm_info.vmid_num_kfd = 1 + kfd->vm_info.last_vmid_kfd > + - kfd->vm_info.first_vmid_kfd; > + > /* calculate max size of mqds needed for queues */ > size = max_num_of_queues_per_device * > kfd->device_info->mqd_size_aligned; > diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c > b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c > index f49c551..f13058c 100644 > --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c > +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c > @@ -100,11 +100,11 @@ static int allocate_vmid(struct device_queue_manager > *dqm, > if (dqm->vmid_bitmap == 0) > return -ENOMEM; > > - bit = find_first_bit((unsigned long *)&dqm->vmid_bitmap, > CIK_VMID_NUM); > + bit = find_first_bit((unsigned long *)&dqm->vmid_bitmap, > + dqm->dev->vm_info.vmid_num_kfd); > clear_bit(bit, (unsigned long *)&dqm->vmid_bitmap); > > - /* Kaveri kfd vmid's starts from vmid 8 */ > - allocated_vmid = bit + KFD_VMID_START_OFFSET; > + allocated_vmid = bit + dqm->dev->vm_info.first_vmid_kfd; > pr_debug("kfd: vmid allocation %d\n", allocated_vmid); > qpd->vmid = allocated_vmid; > q->properties.vmid = allocated_vmid; > @@ -119,7 +119,7 @@ static void deallocate_vmid(struct device_queue_manager > *dqm, > struct qcm_process_device *qpd, > struct queue *q) > { > - int bit = qpd->vmid - KFD_VMID_START_OFFSET; > + int bit = qpd->vmid - dqm->dev->vm_info.first_vmid_kfd; > > /* Release the vmid mapping */ > set_pasid_vmid_mapping(dqm, 0, qpd->vmid); > @@ -570,7 +570,7 @@ static int initialize_nocpsch(struct device_queue_manager > *dqm) > for (i = 0; i < get_pipes_num(dqm); i++) > dqm->allocated_queues[i] = (1 << QUEUES_PER_PIPE) - 1; > > - dqm->vmid_bitmap = (1 << 
VMID_PER_DEVICE) - 1; > + dqm->vmid_bitmap = (1 << dqm->dev->vm_info.vmid_num_kfd) - 1; > dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1; > > init_scheduler(dqm); > @@ -684,8 +684,7 @@ static int set_sched_resources(struct > device_queue_manager *dqm) > > queue_num = get_pipes_num_cpsch() * QUEUES_PER_PIPE; > queue_mask = (1 << queue_num) - 1; > - res.vmid_mask = (1 << VMID_PER_DEVICE) - 1; > - res.vmid_mask <<= KFD_VMID_START_OFFSET; > + res.vmid_mask = dqm->dev->shared_resources.compute_vmid_bitmap; > res.queue_mask = queue_mask << (get_first_pipe(dqm) * > QUEUES_PER_PIPE); > res.gws_mask = res.oac_mask = res.gds_heap_base = > res.gds_heap_size = 0; > diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h > b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h > index a625b91..bdf9541 100644 > --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h > +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h > @@ -32,9 +32,6 @@ > #define QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS (500) > #define QUEUES_PER_PIPE (8) > #define PIPE_PER_ME_CP_SCHEDULING (3) > -#define CIK_VMID_NUM (8) > -#define KFD_VMID_START_OFFSET (8) > -#define VMID_PER_DEVICE CIK_VMID_NUM > #define KFD_DQM_FIRST_PIPE (0) > #define CIK_SDMA_QUEUES (4) > #define CIK_SDMA_QUEUES_PER_ENGINE (2) > diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h > b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h > index 4750cab..e824922 100644 > --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h > +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h > @@ -141,6 +141,12 @@ struct kfd_mem_obj { > uint32_t *cpu_ptr; > }; > > +struct kfd_vmid_info { > + uint32_t first_vmid_kfd; > + uint32_t last_vmid_kfd; > + uint32_t vmid_num_kfd;
Could you please drop the "_kfd" suffix from the field names? It is redundant, as the structure's name implies it's KFD. > +}; > + > struct kfd_dev { > struct kgd_dev *kgd; > > @@ -165,6 +171,7 @@ struct kfd_dev { > */ > > struct kgd2kfd_shared_resources shared_resources; > + struct kfd_vmid_info vm_info; > > const struct kfd2kgd_calls *kfd2kgd; > struct mutex doorbell_mutex; > diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c > b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c > index e1fb40b..766312b 100644 > --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c > +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c > @@ -208,7 +208,7 @@ int pqm_create_queue(struct process_queue_manager *pqm, > case KFD_QUEUE_TYPE_COMPUTE: > /* check if there is over subscription */ > if ((sched_policy == > KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) && > - ((dev->dqm->processes_count >= VMID_PER_DEVICE) || > + ((dev->dqm->processes_count >= dev->vm_info.vmid_num_kfd) || > (dev->dqm->queue_count >= PIPE_PER_ME_CP_SCHEDULING * > QUEUES_PER_PIPE))) { > pr_err("kfd: over-subscription is not allowed in > radeon_kfd.sched_policy == 1\n"); > retval = -EPERM; > -- > 2.7.4 > > _______________________________________________ > amd-gfx mailing list > amd-gfx@lists.freedesktop.org > https://lists.freedesktop.org/mailman/listinfo/amd-gfx With the above comment fixed, this patch is: Reviewed-by: Oded Gabbay <oded.gab...@gmail.com> _______________________________________________ amd-gfx mailing list amd-gfx@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/amd-gfx