Switch to using a job structure for IBs.

Signed-off-by: Alex Deucher <[email protected]>
---
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 138 +++++++++++++-------------
 1 file changed, 71 insertions(+), 67 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 7e9d753f4a808..36f0300a21bfa 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1224,9 +1224,9 @@ static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
 static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
        struct amdgpu_device *adev = ring->adev;
-       struct amdgpu_ib ib;
+       struct amdgpu_job *job;
+       struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
-
        unsigned index;
        uint64_t gpu_addr;
        uint32_t tmp;
@@ -1238,22 +1238,26 @@ static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
        gpu_addr = adev->wb.gpu_addr + (index * 4);
        adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
-       memset(&ib, 0, sizeof(ib));
 
-       r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
+       r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, 20,
+                                    AMDGPU_IB_POOL_DIRECT, &job,
+                                    AMDGPU_KERNEL_JOB_ID_GFX_RING_TEST);
        if (r)
                goto err1;
 
-       ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
-       ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
-       ib.ptr[2] = lower_32_bits(gpu_addr);
-       ib.ptr[3] = upper_32_bits(gpu_addr);
-       ib.ptr[4] = 0xDEADBEEF;
-       ib.length_dw = 5;
+       ib = &job->ibs[0];
+       ib->ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
+       ib->ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
+       ib->ptr[2] = lower_32_bits(gpu_addr);
+       ib->ptr[3] = upper_32_bits(gpu_addr);
+       ib->ptr[4] = 0xDEADBEEF;
+       ib->length_dw = 5;
 
-       r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
-       if (r)
+       r = amdgpu_job_submit_direct(job, ring, &f);
+       if (r) {
+               amdgpu_job_free(job);
                goto err2;
+       }
 
        r = dma_fence_wait_timeout(f, false, timeout);
        if (r == 0) {
@@ -1270,7 +1274,6 @@ static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
                r = -EINVAL;
 
 err2:
-       amdgpu_ib_free(&ib, NULL);
        dma_fence_put(f);
 err1:
        amdgpu_device_wb_free(adev, index);
@@ -4624,7 +4627,8 @@ static int gfx_v9_0_do_edc_gds_workarounds(struct amdgpu_device *adev)
 static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 {
        struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
-       struct amdgpu_ib ib;
+       struct amdgpu_job *job;
+       struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
        int r, i;
        unsigned total_size, vgpr_offset, sgpr_offset;
@@ -4670,9 +4674,9 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
        total_size += sizeof(sgpr_init_compute_shader);
 
        /* allocate an indirect buffer to put the commands in */
-       memset(&ib, 0, sizeof(ib));
-       r = amdgpu_ib_get(adev, NULL, total_size,
-                                       AMDGPU_IB_POOL_DIRECT, &ib);
+       r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, total_size,
+                                    AMDGPU_IB_POOL_DIRECT, &job,
+                                    AMDGPU_KERNEL_JOB_ID_RUN_SHADER);
        if (r) {
                drm_err(adev_to_drm(adev), "failed to get ib (%d).\n", r);
                return r;
@@ -4680,102 +4684,103 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 
        /* load the compute shaders */
        for (i = 0; i < vgpr_init_shader_size/sizeof(u32); i++)
-               ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_shader_ptr[i];
+               ib->ptr[i + (vgpr_offset / 4)] = vgpr_init_shader_ptr[i];
 
        for (i = 0; i < ARRAY_SIZE(sgpr_init_compute_shader); i++)
-               ib.ptr[i + (sgpr_offset / 4)] = sgpr_init_compute_shader[i];
+               ib->ptr[i + (sgpr_offset / 4)] = sgpr_init_compute_shader[i];
 
        /* init the ib length to 0 */
-       ib.length_dw = 0;
+       ib->length_dw = 0;
 
        /* VGPR */
        /* write the register state for the compute dispatch */
        for (i = 0; i < gpr_reg_size; i++) {
-               ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
-               ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(vgpr_init_regs_ptr[i])
+               ib->ptr[ib->length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
+               ib->ptr[ib->length_dw++] = SOC15_REG_ENTRY_OFFSET(vgpr_init_regs_ptr[i])
                                                                - PACKET3_SET_SH_REG_START;
-               ib.ptr[ib.length_dw++] = vgpr_init_regs_ptr[i].reg_value;
+               ib->ptr[ib->length_dw++] = vgpr_init_regs_ptr[i].reg_value;
        }
        /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
-       gpu_addr = (ib.gpu_addr + (u64)vgpr_offset) >> 8;
-       ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
-       ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
+       gpu_addr = (ib->gpu_addr + (u64)vgpr_offset) >> 8;
+       ib->ptr[ib->length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
+       ib->ptr[ib->length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
                                                        - PACKET3_SET_SH_REG_START;
-       ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
-       ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
+       ib->ptr[ib->length_dw++] = lower_32_bits(gpu_addr);
+       ib->ptr[ib->length_dw++] = upper_32_bits(gpu_addr);
 
        /* write dispatch packet */
-       ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
-       ib.ptr[ib.length_dw++] = compute_dim_x * 2; /* x */
-       ib.ptr[ib.length_dw++] = 1; /* y */
-       ib.ptr[ib.length_dw++] = 1; /* z */
-       ib.ptr[ib.length_dw++] =
+       ib->ptr[ib->length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
+       ib->ptr[ib->length_dw++] = compute_dim_x * 2; /* x */
+       ib->ptr[ib->length_dw++] = 1; /* y */
+       ib->ptr[ib->length_dw++] = 1; /* z */
+       ib->ptr[ib->length_dw++] =
                REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
 
        /* write CS partial flush packet */
-       ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
-       ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
+       ib->ptr[ib->length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
+       ib->ptr[ib->length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
 
        /* SGPR1 */
        /* write the register state for the compute dispatch */
        for (i = 0; i < gpr_reg_size; i++) {
-               ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
-               ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr1_init_regs[i])
+               ib->ptr[ib->length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
+               ib->ptr[ib->length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr1_init_regs[i])
                                                                - PACKET3_SET_SH_REG_START;
-               ib.ptr[ib.length_dw++] = sgpr1_init_regs[i].reg_value;
+               ib->ptr[ib->length_dw++] = sgpr1_init_regs[i].reg_value;
        }
        /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
-       gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
-       ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
-       ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
+       gpu_addr = (ib->gpu_addr + (u64)sgpr_offset) >> 8;
+       ib->ptr[ib->length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
+       ib->ptr[ib->length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
                                                        - PACKET3_SET_SH_REG_START;
-       ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
-       ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
+       ib->ptr[ib->length_dw++] = lower_32_bits(gpu_addr);
+       ib->ptr[ib->length_dw++] = upper_32_bits(gpu_addr);
 
        /* write dispatch packet */
-       ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
-       ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */
-       ib.ptr[ib.length_dw++] = 1; /* y */
-       ib.ptr[ib.length_dw++] = 1; /* z */
-       ib.ptr[ib.length_dw++] =
+       ib->ptr[ib->length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
+       ib->ptr[ib->length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */
+       ib->ptr[ib->length_dw++] = 1; /* y */
+       ib->ptr[ib->length_dw++] = 1; /* z */
+       ib->ptr[ib->length_dw++] =
                REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
 
        /* write CS partial flush packet */
-       ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
-       ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
+       ib->ptr[ib->length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
+       ib->ptr[ib->length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
 
        /* SGPR2 */
        /* write the register state for the compute dispatch */
        for (i = 0; i < gpr_reg_size; i++) {
-               ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
-               ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr2_init_regs[i])
+               ib->ptr[ib->length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
+               ib->ptr[ib->length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr2_init_regs[i])
                                                                - PACKET3_SET_SH_REG_START;
-               ib.ptr[ib.length_dw++] = sgpr2_init_regs[i].reg_value;
+               ib->ptr[ib->length_dw++] = sgpr2_init_regs[i].reg_value;
        }
        /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
-       gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
-       ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
-       ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
+       gpu_addr = (ib->gpu_addr + (u64)sgpr_offset) >> 8;
+       ib->ptr[ib->length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
+       ib->ptr[ib->length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
                                                        - PACKET3_SET_SH_REG_START;
-       ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
-       ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
+       ib->ptr[ib->length_dw++] = lower_32_bits(gpu_addr);
+       ib->ptr[ib->length_dw++] = upper_32_bits(gpu_addr);
 
        /* write dispatch packet */
-       ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
-       ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */
-       ib.ptr[ib.length_dw++] = 1; /* y */
-       ib.ptr[ib.length_dw++] = 1; /* z */
-       ib.ptr[ib.length_dw++] =
+       ib->ptr[ib->length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
+       ib->ptr[ib->length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */
+       ib->ptr[ib->length_dw++] = 1; /* y */
+       ib->ptr[ib->length_dw++] = 1; /* z */
+       ib->ptr[ib->length_dw++] =
                REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
 
        /* write CS partial flush packet */
-       ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
-       ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
+       ib->ptr[ib->length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
+       ib->ptr[ib->length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
 
        /* shedule the ib on the ring */
-       r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
+       r = amdgpu_job_submit_direct(job, ring, &f);
        if (r) {
                drm_err(adev_to_drm(adev), "ib schedule failed (%d).\n", r);
+               amdgpu_job_free(job);
                goto fail;
        }
 
@@ -4787,7 +4792,6 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
        }
 
 fail:
-       amdgpu_ib_free(&ib, NULL);
        dma_fence_put(f);
 
        return r;
-- 
2.52.0

Reply via email to