On Fri, Oct 10, 2025 at 2:49 PM Ellen Pan <[email protected]> wrote:
>
> 1. Added VF logic to init data exchange region using the offsets from
> dynamic(v2) critical regions;
>
> Signed-off-by: Ellen Pan <[email protected]>
> ---
> drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 88 ++++++++++++++++++++----
> drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 1 +
> 2 files changed, 77 insertions(+), 12 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
> index 4a7125122ae7..d99120b98188 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
> @@ -670,6 +670,8 @@ void amdgpu_virt_fini_data_exchange(struct amdgpu_device
> *adev)
>
> void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
> {
> + uint32_t *pfvf_data = NULL;
> +
> adev->virt.fw_reserve.p_pf2vf = NULL;
> adev->virt.fw_reserve.p_vf2pf = NULL;
> adev->virt.vf2pf_update_interval_ms = 0;
> @@ -685,11 +687,34 @@ void amdgpu_virt_init_data_exchange(struct
> amdgpu_device *adev)
> schedule_delayed_work(&(adev->virt.vf2pf_work),
> msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
> } else if (adev->bios != NULL) {
> /* go through this logic in the early init stage to get
> necessary flags, e.g. rlcg_acc related*/
> - adev->virt.fw_reserve.p_pf2vf =
> - (struct amd_sriov_msg_pf2vf_info_header *)
> - (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 <<
> 10));
> + if (adev->virt.req_init_data_ver == GPU_CRIT_REGION_V2) {
> + pfvf_data =
> +
> kzalloc(adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].size_kb
> << 10,
> + GFP_KERNEL);
> + if (!pfvf_data) {
> + DRM_ERROR("Failed to allocate memory for
> pfvf_data\n");
> + return;
> + }
>
> - amdgpu_virt_read_pf2vf_data(adev);
> + if (amdgpu_virt_read_exchange_data_from_mem(adev,
> pfvf_data))
> + goto free_pfvf_data;
> +
> + adev->virt.fw_reserve.p_pf2vf =
> + (struct amd_sriov_msg_pf2vf_info_header
> *)pfvf_data;
> +
> + amdgpu_virt_read_pf2vf_data(adev);
> +
> +free_pfvf_data:
> + kfree(pfvf_data);
> + pfvf_data = NULL;
> + adev->virt.fw_reserve.p_pf2vf = NULL;
> + } else {
> + adev->virt.fw_reserve.p_pf2vf =
> + (struct amd_sriov_msg_pf2vf_info_header *)
> + (adev->bios +
> (AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 << 10));
> +
> + amdgpu_virt_read_pf2vf_data(adev);
> + }
> }
> }
>
> @@ -702,14 +727,29 @@ void amdgpu_virt_exchange_data(struct amdgpu_device
> *adev)
>
> if (adev->mman.fw_vram_usage_va || adev->mman.drv_vram_usage_va) {
> if (adev->mman.fw_vram_usage_va) {
> - adev->virt.fw_reserve.p_pf2vf =
> - (struct amd_sriov_msg_pf2vf_info_header *)
> - (adev->mman.fw_vram_usage_va +
> (AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 << 10));
> - adev->virt.fw_reserve.p_vf2pf =
> - (struct amd_sriov_msg_vf2pf_info_header *)
> - (adev->mman.fw_vram_usage_va +
> (AMD_SRIOV_MSG_VF2PF_OFFSET_KB_V1 << 10));
> - adev->virt.fw_reserve.ras_telemetry =
> - (adev->mman.fw_vram_usage_va +
> (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB_V1 << 10));
> + if (adev->virt.req_init_data_ver ==
> GPU_CRIT_REGION_V2) {
> + adev->virt.fw_reserve.p_pf2vf =
> + (struct
> amd_sriov_msg_pf2vf_info_header *)
> + (adev->mman.fw_vram_usage_va +
> +
> adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].offset);
> + adev->virt.fw_reserve.p_vf2pf =
> + (struct
> amd_sriov_msg_vf2pf_info_header *)
> + (adev->mman.fw_vram_usage_va +
> +
> adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].offset +
> + (AMD_SRIOV_MSG_SIZE_KB_V1 << 10));
> + adev->virt.fw_reserve.ras_telemetry =
> + (adev->mman.fw_vram_usage_va +
> +
> adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID].offset);
> + } else {
> + adev->virt.fw_reserve.p_pf2vf =
> + (struct
> amd_sriov_msg_pf2vf_info_header *)
> + (adev->mman.fw_vram_usage_va +
> (AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 << 10));
> + adev->virt.fw_reserve.p_vf2pf =
> + (struct
> amd_sriov_msg_vf2pf_info_header *)
> + (adev->mman.fw_vram_usage_va +
> (AMD_SRIOV_MSG_VF2PF_OFFSET_KB_V1 << 10));
> + adev->virt.fw_reserve.ras_telemetry =
> + (adev->mman.fw_vram_usage_va +
> (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB_V1 << 10));
> + }
> } else if (adev->mman.drv_vram_usage_va) {
> adev->virt.fw_reserve.p_pf2vf =
> (struct amd_sriov_msg_pf2vf_info_header *)
> @@ -1018,6 +1058,30 @@ int amdgpu_virt_get_dynamic_data_info(struct
> amdgpu_device *adev,
> return 0;
> }
>
> +int amdgpu_virt_read_exchange_data_from_mem(struct amdgpu_device *adev,
> uint32_t *pfvf_data)
This function can be static as it's only used in this file.
Alex
> +{
> + uint32_t dataexchange_offset =
> +
> adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].offset;
> + uint32_t dataexchange_size =
> +
> adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].size_kb << 10;
> + uint64_t pos = 0;
> +
> + dev_info(adev->dev,
> + "Got data exchange info from dynamic
> crit_region_table at offset 0x%x with size of 0x%x bytes.\n",
> + dataexchange_offset, dataexchange_size);
> +
> + if (!IS_ALIGNED(dataexchange_offset, 4) ||
> !IS_ALIGNED(dataexchange_size, 4)) {
> + DRM_ERROR("Data exchange data not aligned to 4 bytes\n");
> + return -EINVAL;
> + }
> +
> + pos = (uint64_t)dataexchange_offset;
> + amdgpu_device_vram_access(adev, pos, pfvf_data,
> + dataexchange_size, false);
> +
> + return 0;
> +}
> +
> void amdgpu_virt_init(struct amdgpu_device *adev)
> {
> bool is_sriov = false;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
> index 5d8e3260f677..4e9489ff295c 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
> @@ -440,6 +440,7 @@ void amdgpu_virt_init(struct amdgpu_device *adev);
> int amdgpu_virt_init_critical_region(struct amdgpu_device *adev);
> int amdgpu_virt_get_dynamic_data_info(struct amdgpu_device *adev,
> int data_id, uint8_t *binary, uint64_t *size);
> +int amdgpu_virt_read_exchange_data_from_mem(struct amdgpu_device *adev,
> uint32_t *pfvf_data);
>
> bool amdgpu_virt_can_access_debugfs(struct amdgpu_device *adev);
> int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev);
> --
> 2.34.1
>