From: Mukul Joshi <[email protected]>

We would need to reserve SDMA queues per KFD node. As a result, rework
the SDMA reserved queue handling to make it per KFD node.
Signed-off-by: Mukul Joshi <[email protected]>
Reviewed-by: Harish Kasiviswanathan <[email protected]>
Signed-off-by: Alex Deucher <[email protected]>
---
 drivers/gpu/drm/amd/amdkfd/kfd_device.c       | 28 +++----------------
 .../drm/amd/amdkfd/kfd_device_queue_manager.c |  7 +++--
 drivers/gpu/drm/amd/amdkfd/kfd_priv.h         |  1 -
 3 files changed, 8 insertions(+), 28 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index e9cfb80bd4366..e3da2f149ae6f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -94,6 +94,8 @@ static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
 	case IP_VERSION(5, 2, 2):/* NAVY_FLOUNDER */
 	case IP_VERSION(5, 2, 4):/* DIMGREY_CAVEFISH */
 	case IP_VERSION(5, 2, 5):/* BEIGE_GOBY */
+		kfd->device_info.num_sdma_queues_per_engine = 8;
+		break;
 	case IP_VERSION(6, 0, 0):
 	case IP_VERSION(6, 0, 1):
 	case IP_VERSION(6, 0, 2):
@@ -105,6 +107,8 @@ static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
 	case IP_VERSION(7, 0, 0):
 	case IP_VERSION(7, 0, 1):
 		kfd->device_info.num_sdma_queues_per_engine = 8;
+		/* Reserve 1 for paging and 1 for gfx */
+		kfd->device_info.num_reserved_sdma_queues_per_engine = 2;
 		break;
 	default:
 		dev_warn(kfd_device,
@@ -112,30 +116,6 @@ static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
 			sdma_version);
 		kfd->device_info.num_sdma_queues_per_engine = 8;
 	}
-
-	bitmap_zero(kfd->device_info.reserved_sdma_queues_bitmap, KFD_MAX_SDMA_QUEUES);
-
-	switch (sdma_version) {
-	case IP_VERSION(6, 0, 0):
-	case IP_VERSION(6, 0, 1):
-	case IP_VERSION(6, 0, 2):
-	case IP_VERSION(6, 0, 3):
-	case IP_VERSION(6, 1, 0):
-	case IP_VERSION(6, 1, 1):
-	case IP_VERSION(6, 1, 2):
-	case IP_VERSION(6, 1, 3):
-	case IP_VERSION(7, 0, 0):
-	case IP_VERSION(7, 0, 1):
-		/* Reserve 1 for paging and 1 for gfx */
-		kfd->device_info.num_reserved_sdma_queues_per_engine = 2;
-		/* BIT(0)=engine-0 queue-0; BIT(1)=engine-1 queue-0; BIT(2)=engine-0 queue-1; ... */
-		bitmap_set(kfd->device_info.reserved_sdma_queues_bitmap, 0,
-			   kfd->adev->sdma.num_instances *
-			   kfd->device_info.num_reserved_sdma_queues_per_engine);
-		break;
-	default:
-		break;
-	}
 }
 
 static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 4fbe865ff2796..5d3d10b9b4b7f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -137,9 +137,10 @@ static void init_sdma_bitmaps(struct device_queue_manager *dqm)
 	bitmap_set(dqm->xgmi_sdma_bitmap, 0, get_num_xgmi_sdma_queues(dqm));
 
 	/* Mask out the reserved queues */
-	bitmap_andnot(dqm->sdma_bitmap, dqm->sdma_bitmap,
-		      dqm->dev->kfd->device_info.reserved_sdma_queues_bitmap,
-		      KFD_MAX_SDMA_QUEUES);
+	bitmap_clear(dqm->sdma_bitmap, 0, kfd_get_num_sdma_engines(dqm->dev) *
+		     dqm->dev->kfd->device_info.num_reserved_sdma_queues_per_engine);
+	bitmap_clear(dqm->xgmi_sdma_bitmap, 0, kfd_get_num_xgmi_sdma_engines(dqm->dev) *
+		     dqm->dev->kfd->device_info.num_reserved_sdma_queues_per_engine);
 }
 
 void program_sh_mem_settings(struct device_queue_manager *dqm,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 70ef051511bb1..6c05d7f57196e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -241,7 +241,6 @@ struct kfd_device_info {
 	uint32_t no_atomic_fw_version;
 	unsigned int num_sdma_queues_per_engine;
 	unsigned int num_reserved_sdma_queues_per_engine;
-	DECLARE_BITMAP(reserved_sdma_queues_bitmap, KFD_MAX_SDMA_QUEUES);
 };
 
 unsigned int kfd_get_num_sdma_engines(struct kfd_node *kdev);
-- 
2.51.0
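
[Editor's note] For readers unfamiliar with the reservation scheme, the snippet below is a minimal user-space sketch (not kernel code) of the masking arithmetic the patch introduces in init_sdma_bitmaps(): instead of AND-NOT'ing a device-wide reserved bitmap, each KFD node clears the first num_engines * num_reserved_sdma_queues_per_engine bits of its own SDMA bitmap. The mask_reserved() helper, the single 64-bit bitmap, and the engine counts used in main() are assumptions made purely for illustration.

/*
 * Standalone illustration of the per-node reservation arithmetic.
 * Queue layout (as in the removed kernel comment): bit 0 = engine-0
 * queue-0, bit 1 = engine-1 queue-0, bit 2 = engine-0 queue-1, ...
 * so clearing the first num_engines * reserved_per_engine bits
 * reserves the first reserved_per_engine queues of every engine.
 */
#include <stdint.h>
#include <stdio.h>

static void mask_reserved(uint64_t *bitmap, unsigned int num_engines,
			  unsigned int reserved_per_engine)
{
	unsigned int nbits = num_engines * reserved_per_engine;

	/* Clear bits [0, nbits) of the queue-availability bitmap */
	if (nbits >= 64)
		*bitmap = 0;
	else
		*bitmap &= ~((1ULL << nbits) - 1);
}

int main(void)
{
	uint64_t sdma_bitmap = ~0ULL;	/* all 64 queues initially usable */

	/* e.g. 2 SDMA engines, 2 reserved queues each (paging + gfx) */
	mask_reserved(&sdma_bitmap, 2, 2);
	printf("usable queue mask: 0x%016llx\n",
	       (unsigned long long)sdma_bitmap);
	return 0;
}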
