Switch to using a job structure for IBs.

Convert gfx_v8_0_ring_test_ib() and gfx_v8_0_do_edc_gpr_workarounds() from
amdgpu_ib_get()/amdgpu_ib_schedule() to amdgpu_job_alloc_with_ib()/
amdgpu_job_submit_direct(). The job owns its IB, so the explicit
amdgpu_ib_free() calls on the cleanup paths go away; only a failed direct
submit still needs an explicit amdgpu_job_free().

Signed-off-by: Alex Deucher <[email protected]>
---
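For reference, a condensed sketch of the submission flow this patch moves to,
mirroring the gfx_v8_0_ring_test_ib() hunk below. The helper name and the
gpu_addr parameter are illustrative only; everything else follows the hunk.

/* sketch only, not part of the change */
static int gfx_v8_0_ib_test_sketch(struct amdgpu_ring *ring, long timeout,
				   uint64_t gpu_addr)
{
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	long r;

	/* one call allocates the job and its first IB (20 bytes here) */
	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, 20,
				     AMDGPU_IB_POOL_DIRECT, &job,
				     AMDGPU_KERNEL_JOB_ID_GFX_RING_TEST);
	if (r)
		return r;

	/* the IB now lives in the job; build the packets in ibs[0] */
	ib = &job->ibs[0];
	ib->ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
	ib->ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
	ib->ptr[2] = lower_32_bits(gpu_addr);
	ib->ptr[3] = upper_32_bits(gpu_addr);
	ib->ptr[4] = 0xDEADBEEF;
	ib->length_dw = 5;

	/* direct submission; the caller still owns the job on failure */
	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r) {
		amdgpu_job_free(job);
		return r;
	}

	/* no amdgpu_ib_free(): the IB is released along with the job */
	r = dma_fence_wait_timeout(f, false, timeout);
	dma_fence_put(f);
	if (r == 0)
		return -ETIMEDOUT;
	return r < 0 ? r : 0;
}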
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 139 +++++++++++++-------------
1 file changed, 72 insertions(+), 67 deletions(-)
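The gfx_v8_0_do_edc_gpr_workarounds() hunk repeats one packet sequence three
times (VGPR, SGPR1, SGPR2), now built through job->ibs[0]. A hypothetical
helper showing the shape of a single iteration; the register tables and the
already-shifted shader address are the ones the hunk uses.

/* sketch only; shader_gpu_addr is already >> 8 as in the hunk */
static void gfx_v8_0_emit_edc_dispatch_sketch(struct amdgpu_ib *ib,
					      const u32 *init_regs,
					      unsigned int num_regs,
					      u64 shader_gpu_addr)
{
	unsigned int i;

	/* register state for the compute dispatch */
	for (i = 0; i < num_regs; i += 2) {
		ib->ptr[ib->length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
		ib->ptr[ib->length_dw++] = init_regs[i] - PACKET3_SET_SH_REG_START;
		ib->ptr[ib->length_dw++] = init_regs[i + 1];
	}

	/* shader start address: mmCOMPUTE_PGM_LO / mmCOMPUTE_PGM_HI */
	ib->ptr[ib->length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
	ib->ptr[ib->length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START;
	ib->ptr[ib->length_dw++] = lower_32_bits(shader_gpu_addr);
	ib->ptr[ib->length_dw++] = upper_32_bits(shader_gpu_addr);

	/* dispatch packet: 8x1x1 threadgroups, compute shader enabled */
	ib->ptr[ib->length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
	ib->ptr[ib->length_dw++] = 8;	/* x */
	ib->ptr[ib->length_dw++] = 1;	/* y */
	ib->ptr[ib->length_dw++] = 1;	/* z */
	ib->ptr[ib->length_dw++] =
		REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR,
			      COMPUTE_SHADER_EN, 1);

	/* CS partial flush */
	ib->ptr[ib->length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
	ib->ptr[ib->length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
}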
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index a6b4c8f41dc11..4736216cd0211 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -868,9 +868,9 @@ static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring)
static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
- struct amdgpu_ib ib;
+ struct amdgpu_job *job;
+ struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
-
unsigned int index;
uint64_t gpu_addr;
uint32_t tmp;
@@ -882,22 +882,26 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
gpu_addr = adev->wb.gpu_addr + (index * 4);
adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
- memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
+ r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, 20,
+ AMDGPU_IB_POOL_DIRECT, &job,
+ AMDGPU_KERNEL_JOB_ID_GFX_RING_TEST);
if (r)
goto err1;
- ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
- ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
- ib.ptr[2] = lower_32_bits(gpu_addr);
- ib.ptr[3] = upper_32_bits(gpu_addr);
- ib.ptr[4] = 0xDEADBEEF;
- ib.length_dw = 5;
+ ib = &job->ibs[0];
+ ib->ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
+ ib->ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
+ ib->ptr[2] = lower_32_bits(gpu_addr);
+ ib->ptr[3] = upper_32_bits(gpu_addr);
+ ib->ptr[4] = 0xDEADBEEF;
+ ib->length_dw = 5;
- r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
- if (r)
+ r = amdgpu_job_submit_direct(job, ring, &f);
+ if (r) {
+ amdgpu_job_free(job);
goto err2;
+ }
r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
@@ -914,7 +918,6 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = -EINVAL;
err2:
- amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err1:
amdgpu_device_wb_free(adev, index);
@@ -1474,7 +1477,8 @@ static const u32 sec_ded_counter_registers[] =
static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
- struct amdgpu_ib ib;
+ struct amdgpu_job *job;
+ struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
int r, i;
u32 tmp;
@@ -1505,106 +1509,108 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
total_size += sizeof(sgpr_init_compute_shader);
/* allocate an indirect buffer to put the commands in */
- memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, total_size,
- AMDGPU_IB_POOL_DIRECT, &ib);
+ r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, total_size,
+ AMDGPU_IB_POOL_DIRECT, &job,
+ AMDGPU_KERNEL_JOB_ID_RUN_SHADER);
if (r) {
drm_err(adev_to_drm(adev), "failed to get ib (%d).\n", r);
return r;
}
+ ib = &job->ibs[0];
/* load the compute shaders */
for (i = 0; i < ARRAY_SIZE(vgpr_init_compute_shader); i++)
- ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_compute_shader[i];
+ ib->ptr[i + (vgpr_offset / 4)] = vgpr_init_compute_shader[i];
for (i = 0; i < ARRAY_SIZE(sgpr_init_compute_shader); i++)
- ib.ptr[i + (sgpr_offset / 4)] = sgpr_init_compute_shader[i];
+ ib->ptr[i + (sgpr_offset / 4)] = sgpr_init_compute_shader[i];
/* init the ib length to 0 */
- ib.length_dw = 0;
+ ib->length_dw = 0;
/* VGPR */
/* write the register state for the compute dispatch */
for (i = 0; i < ARRAY_SIZE(vgpr_init_regs); i += 2) {
- ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
- ib.ptr[ib.length_dw++] = vgpr_init_regs[i] - PACKET3_SET_SH_REG_START;
- ib.ptr[ib.length_dw++] = vgpr_init_regs[i + 1];
+ ib->ptr[ib->length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
+ ib->ptr[ib->length_dw++] = vgpr_init_regs[i] - PACKET3_SET_SH_REG_START;
+ ib->ptr[ib->length_dw++] = vgpr_init_regs[i + 1];
}
/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
- gpu_addr = (ib.gpu_addr + (u64)vgpr_offset) >> 8;
- ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
- ib.ptr[ib.length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START;
- ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
- ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
+ gpu_addr = (ib->gpu_addr + (u64)vgpr_offset) >> 8;
+ ib->ptr[ib->length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
+ ib->ptr[ib->length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START;
+ ib->ptr[ib->length_dw++] = lower_32_bits(gpu_addr);
+ ib->ptr[ib->length_dw++] = upper_32_bits(gpu_addr);
/* write dispatch packet */
- ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
- ib.ptr[ib.length_dw++] = 8; /* x */
- ib.ptr[ib.length_dw++] = 1; /* y */
- ib.ptr[ib.length_dw++] = 1; /* z */
- ib.ptr[ib.length_dw++] =
+ ib->ptr[ib->length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
+ ib->ptr[ib->length_dw++] = 8; /* x */
+ ib->ptr[ib->length_dw++] = 1; /* y */
+ ib->ptr[ib->length_dw++] = 1; /* z */
+ ib->ptr[ib->length_dw++] =
REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
/* write CS partial flush packet */
- ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
- ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
+ ib->ptr[ib->length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
+ ib->ptr[ib->length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
/* SGPR1 */
/* write the register state for the compute dispatch */
for (i = 0; i < ARRAY_SIZE(sgpr1_init_regs); i += 2) {
- ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
- ib.ptr[ib.length_dw++] = sgpr1_init_regs[i] - PACKET3_SET_SH_REG_START;
- ib.ptr[ib.length_dw++] = sgpr1_init_regs[i + 1];
+ ib->ptr[ib->length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
+ ib->ptr[ib->length_dw++] = sgpr1_init_regs[i] - PACKET3_SET_SH_REG_START;
+ ib->ptr[ib->length_dw++] = sgpr1_init_regs[i + 1];
}
/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
- gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
- ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
- ib.ptr[ib.length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START;
- ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
- ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
+ gpu_addr = (ib->gpu_addr + (u64)sgpr_offset) >> 8;
+ ib->ptr[ib->length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
+ ib->ptr[ib->length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START;
+ ib->ptr[ib->length_dw++] = lower_32_bits(gpu_addr);
+ ib->ptr[ib->length_dw++] = upper_32_bits(gpu_addr);
/* write dispatch packet */
- ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
- ib.ptr[ib.length_dw++] = 8; /* x */
- ib.ptr[ib.length_dw++] = 1; /* y */
- ib.ptr[ib.length_dw++] = 1; /* z */
- ib.ptr[ib.length_dw++] =
+ ib->ptr[ib->length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
+ ib->ptr[ib->length_dw++] = 8; /* x */
+ ib->ptr[ib->length_dw++] = 1; /* y */
+ ib->ptr[ib->length_dw++] = 1; /* z */
+ ib->ptr[ib->length_dw++] =
REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
/* write CS partial flush packet */
- ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
- ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
+ ib->ptr[ib->length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
+ ib->ptr[ib->length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
/* SGPR2 */
/* write the register state for the compute dispatch */
for (i = 0; i < ARRAY_SIZE(sgpr2_init_regs); i += 2) {
- ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
- ib.ptr[ib.length_dw++] = sgpr2_init_regs[i] - PACKET3_SET_SH_REG_START;
- ib.ptr[ib.length_dw++] = sgpr2_init_regs[i + 1];
+ ib->ptr[ib->length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
+ ib->ptr[ib->length_dw++] = sgpr2_init_regs[i] - PACKET3_SET_SH_REG_START;
+ ib->ptr[ib->length_dw++] = sgpr2_init_regs[i + 1];
}
/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
- gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
- ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
- ib.ptr[ib.length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START;
- ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
- ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
+ gpu_addr = (ib->gpu_addr + (u64)sgpr_offset) >> 8;
+ ib->ptr[ib->length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
+ ib->ptr[ib->length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START;
+ ib->ptr[ib->length_dw++] = lower_32_bits(gpu_addr);
+ ib->ptr[ib->length_dw++] = upper_32_bits(gpu_addr);
/* write dispatch packet */
- ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
- ib.ptr[ib.length_dw++] = 8; /* x */
- ib.ptr[ib.length_dw++] = 1; /* y */
- ib.ptr[ib.length_dw++] = 1; /* z */
- ib.ptr[ib.length_dw++] =
+ ib->ptr[ib->length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
+ ib->ptr[ib->length_dw++] = 8; /* x */
+ ib->ptr[ib->length_dw++] = 1; /* y */
+ ib->ptr[ib->length_dw++] = 1; /* z */
+ ib->ptr[ib->length_dw++] =
REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
/* write CS partial flush packet */
- ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
- ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
+ ib->ptr[ib->length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
+ ib->ptr[ib->length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
/* shedule the ib on the ring */
- r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
+ r = amdgpu_job_submit_direct(job, ring, &f);
if (r) {
drm_err(adev_to_drm(adev), "ib submit failed (%d).\n", r);
+ amdgpu_job_free(job);
goto fail;
}
@@ -1629,7 +1635,6 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
RREG32(sec_ded_counter_registers[i]);
fail:
- amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
return r;
--
2.52.0