On 2021-10-26 at 4:31 p.m., Sider, Graham wrote:
>
>> -----Original Message-----
>> From: Kuehling, Felix <felix.kuehl...@amd.com>
>> Sent: Tuesday, October 26, 2021 4:07 PM
>> To: Sider, Graham <graham.si...@amd.com>; amd-g...@lists.freedesktop.org
>> Cc: Joshi, Mukul <mukul.jo...@amd.com>
>> Subject: Re: [PATCH 02/13] drm/amdkfd: replace kgd_dev in static gfx v7
>> funcs
>>
>> On 2021-10-19 at 5:13 p.m., Graham Sider wrote:
>>> Static funcs in amdgpu_amdkfd_gfx_v7.c now use amdgpu_device.
>> Doesn't this cause pointer type mismatch errors when assigning the function
>> pointers in gfx_v7_kfd2kgd? Those only get updated in patch 7.
>>
>> Regards,
>>   Felix
>>
> The function definitions changed in patches 2 through 6 are only used
> internally within these files and aren't assigned to any of the
> gfx_v*_kfd2kgd entries. Patches 7 through 11 deal with the kfd2kgd
> functions that would cause a mismatch if done file-by-file.

I see. You're changing e.g. kgd_hqd_load here, but not its signature,
so no problem.
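
For anyone skimming the series, here's a minimal compile-time sketch of
the constraint being discussed. The names (kfd2kgd_sketch,
hqd_load_sketch, acquire_queue_sketch) are made up and the struct is
heavily simplified compared to the real kfd2kgd_calls/gfx_v7_kfd2kgd,
but it shows why purely internal static helpers can switch to
amdgpu_device right away while the table entries have to change
together with the table itself:

#include <stdint.h>

struct kgd_dev;		/* opaque handle used by the current interface */
struct amdgpu_device;	/* what the static helpers switch to */

/* ops table: pointer types stay kgd_dev-based until the later patches */
struct kfd2kgd_sketch {
	int (*hqd_load)(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id);
};

/* internal helper, never placed in the table: free to take amdgpu_device */
static void acquire_queue_sketch(struct amdgpu_device *adev, uint32_t pipe_id)
{
	/* ... lock SRBM and select the pipe/queue ... */
}

/* table entry: keeps the kgd_dev signature for now, converts internally */
static int hqd_load_sketch(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	acquire_queue_sketch(adev, pipe_id);	/* only the call site changes */
	return 0;
}

static const struct kfd2kgd_sketch sketch_ops = {
	.hqd_load = hqd_load_sketch,	/* types match, builds cleanly */
	/*
	 * Assigning a function that already takes struct amdgpu_device *
	 * here would be an incompatible-pointer-type error, which is why
	 * the table entries and their signatures flip together later.
	 */
};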


>
> Best,
> Graham
>
>>> Signed-off-by: Graham Sider <graham.si...@amd.com>
>>> ---
>>>  .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 51 +++++++++----------
>>>  1 file changed, 23 insertions(+), 28 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
>>> index b91d27e39bad..d00ba8d65a6d 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
>>> @@ -87,38 +87,33 @@ static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
>>>     return (struct amdgpu_device *)kgd;
>>>  }
>>>
>>> -static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
>>> +static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
>>>                     uint32_t queue, uint32_t vmid)
>>>  {
>>> -   struct amdgpu_device *adev = get_amdgpu_device(kgd);
>>>     uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);
>>>
>>>     mutex_lock(&adev->srbm_mutex);
>>>     WREG32(mmSRBM_GFX_CNTL, value);
>>>  }
>>>
>>> -static void unlock_srbm(struct kgd_dev *kgd)
>>> +static void unlock_srbm(struct amdgpu_device *adev)
>>>  {
>>> -   struct amdgpu_device *adev = get_amdgpu_device(kgd);
>>> -
>>>     WREG32(mmSRBM_GFX_CNTL, 0);
>>>     mutex_unlock(&adev->srbm_mutex);
>>>  }
>>>
>>> -static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
>>> +static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id,
>>>                             uint32_t queue_id)
>>>  {
>>> -   struct amdgpu_device *adev = get_amdgpu_device(kgd);
>>> -
>>>     uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
>>>     uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
>>>
>>> -   lock_srbm(kgd, mec, pipe, queue_id, 0);
>>> +   lock_srbm(adev, mec, pipe, queue_id, 0);
>>>  }
>>>
>>> -static void release_queue(struct kgd_dev *kgd)
>>> +static void release_queue(struct amdgpu_device *adev)
>>>  {
>>> -   unlock_srbm(kgd);
>>> +   unlock_srbm(adev);
>>>  }
>>>
>>>  static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
>>> @@ -129,14 +124,14 @@ static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
>>>  {
>>>     struct amdgpu_device *adev = get_amdgpu_device(kgd);
>>>
>>> -   lock_srbm(kgd, 0, 0, 0, vmid);
>>> +   lock_srbm(adev, 0, 0, 0, vmid);
>>>
>>>     WREG32(mmSH_MEM_CONFIG, sh_mem_config);
>>>     WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
>>>     WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
>>>     WREG32(mmSH_MEM_BASES, sh_mem_bases);
>>>
>>> -   unlock_srbm(kgd);
>>> +   unlock_srbm(adev);
>>>  }
>>>
>>>  static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid,
>>> @@ -174,12 +169,12 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
>>>     mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
>>>     pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
>>>
>>> -   lock_srbm(kgd, mec, pipe, 0, 0);
>>> +   lock_srbm(adev, mec, pipe, 0, 0);
>>>
>>>     WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
>>>                     CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
>>> -   unlock_srbm(kgd);
>>> +   unlock_srbm(adev);
>>>
>>>     return 0;
>>>  }
>>> @@ -220,7 +215,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
>>>
>>>     m = get_mqd(mqd);
>>>
>>> -   acquire_queue(kgd, pipe_id, queue_id);
>>> +   acquire_queue(adev, pipe_id, queue_id);
>>>
>>>     /* HQD registers extend from CP_MQD_BASE_ADDR to CP_MQD_CONTROL. */
>>>     mqd_hqd = &m->cp_mqd_base_addr_lo;
>>> @@ -239,16 +234,16 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
>>>      * release srbm_mutex to avoid circular dependency between
>>>      * srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
>>>      */
>>> -   release_queue(kgd);
>>> +   release_queue(adev);
>>>     valid_wptr = read_user_wptr(mm, wptr, wptr_val);
>>> -   acquire_queue(kgd, pipe_id, queue_id);
>>> +   acquire_queue(adev, pipe_id, queue_id);
>>>     if (valid_wptr)
>>>             WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);
>>>     data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
>>>     WREG32(mmCP_HQD_ACTIVE, data);
>>>
>>> -   release_queue(kgd);
>>> +   release_queue(adev);
>>>
>>>     return 0;
>>>  }
>>> @@ -271,7 +266,7 @@ static int kgd_hqd_dump(struct kgd_dev *kgd,
>>>     if (*dump == NULL)
>>>             return -ENOMEM;
>>>
>>> -   acquire_queue(kgd, pipe_id, queue_id);
>>> +   acquire_queue(adev, pipe_id, queue_id);
>>>
>>>     DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE0);
>>>     DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE1);
>>> @@ -281,7 +276,7 @@ static int kgd_hqd_dump(struct kgd_dev *kgd,
>>>     for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_MQD_CONTROL; reg++)
>>>             DUMP_REG(reg);
>>>
>>> -   release_queue(kgd);
>>> +   release_queue(adev);
>>>
>>>     WARN_ON_ONCE(i != HQD_N_REGS);
>>>     *n_regs = i;
>>> @@ -380,7 +375,7 @@ static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
>>>     bool retval = false;
>>>     uint32_t low, high;
>>>
>>> -   acquire_queue(kgd, pipe_id, queue_id);
>>> +   acquire_queue(adev, pipe_id, queue_id);
>>>     act = RREG32(mmCP_HQD_ACTIVE);
>>>     if (act) {
>>>             low = lower_32_bits(queue_address >> 8);
>>> @@ -390,7 +385,7 @@ static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
>>>                             high == RREG32(mmCP_HQD_PQ_BASE_HI))
>>>                     retval = true;
>>>     }
>>> -   release_queue(kgd);
>>> +   release_queue(adev);
>>>     return retval;
>>>  }
>>>
>>> @@ -426,7 +421,7 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
>>>     if (amdgpu_in_reset(adev))
>>>             return -EIO;
>>>
>>> -   acquire_queue(kgd, pipe_id, queue_id);
>>> +   acquire_queue(adev, pipe_id, queue_id);
>>>     WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
>>>
>>>     switch (reset_type) {
>>> @@ -504,13 +499,13 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
>>>                     break;
>>>             if (time_after(jiffies, end_jiffies)) {
>>>                     pr_err("cp queue preemption time out\n");
>>> -                   release_queue(kgd);
>>> +                   release_queue(adev);
>>>                     return -ETIME;
>>>             }
>>>             usleep_range(500, 1000);
>>>     }
>>>
>>> -   release_queue(kgd);
>>> +   release_queue(adev);
>>>     return 0;
>>>  }
>>>
>>> @@ -651,9 +646,9 @@ static void set_scratch_backing_va(struct kgd_dev *kgd,
>>>  {
>>>     struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
>>>
>>> -   lock_srbm(kgd, 0, 0, 0, vmid);
>>> +   lock_srbm(adev, 0, 0, 0, vmid);
>>>     WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va);
>>> -   unlock_srbm(kgd);
>>> +   unlock_srbm(adev);
>>>  }
>>>
>>>  static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
