Both vcn_v3_0 and vcn_v4_0 use the same interface, so unify the code.
Signed-off-by: Benjamin Cheng <[email protected]>
---
v2: Moved RDECODE_* defines to header in patch #1.
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 100 +++++++++++++++++++++++
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 5 ++
drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | 102 +-----------------------
drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c | 101 +----------------------
4 files changed, 109 insertions(+), 199 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 03d95dca93d7..10aff7da52b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -1641,3 +1641,103 @@ void amdgpu_vcn_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_prin
}
}
}
+
+int amdgpu_vcn_dec_msg_limit_sched(struct amdgpu_cs_parser *p,
+ struct amdgpu_job *job, uint64_t addr,
+ int (*limit_sched)(struct amdgpu_cs_parser *,
+ struct amdgpu_job *))
+{
+ struct ttm_operation_ctx ctx = { false, false };
+ struct amdgpu_device *adev = p->adev;
+ struct amdgpu_bo_va_mapping *map;
+ uint32_t *msg, num_buffers, len_dw;
+ struct amdgpu_bo *bo;
+ uint64_t start, end;
+ unsigned int i;
+ void *ptr;
+ int r;
+
+ addr &= AMDGPU_GMC_HOLE_MASK;
+ r = amdgpu_cs_find_mapping(p, addr, &bo, &map);
+ if (r) {
+ DRM_ERROR("Can't find BO for addr 0x%08llx\n", addr);
+ return r;
+ }
+
+ start = map->start * AMDGPU_GPU_PAGE_SIZE;
+ end = (map->last + 1) * AMDGPU_GPU_PAGE_SIZE;
+ if (addr & 0x7) {
+ DRM_ERROR("VCN messages must be 8 byte aligned!\n");
+ return -EINVAL;
+ }
+
+ if (end - addr < 16) {
+ DRM_ERROR("VCN messages must be at least 4 DWORDs!\n");
+ return -EINVAL;
+ }
+
+ bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (r) {
+ DRM_ERROR("Failed validating the VCN message BO (%d)!\n", r);
+ return r;
+ }
+
+ r = amdgpu_bo_kmap(bo, &ptr);
+ if (r) {
+ DRM_ERROR("Failed mapping the VCN message (%d)!\n", r);
+ return r;
+ }
+
+ msg = ptr + addr - start;
+
+ if (msg[1] > end - addr) {
+ DRM_ERROR("VCN message header does not fit in BO!\n");
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (msg[3] != VCN_DEC_MSG_CREATE)
+ goto out;
+
+ len_dw = msg[1] / 4;
+ num_buffers = msg[2];
+
+ /* Verify that all indices fit within the claimed length. Each index is 4 DWORDs */
+ if (num_buffers > len_dw || 6 + num_buffers * 4 > len_dw) {
+ DRM_ERROR("VCN message has too many buffers!\n");
+ r = -EINVAL;
+ goto out;
+ }
+
+ for (i = 0, msg = &msg[6]; i < num_buffers; ++i, msg += 4) {
+ uint32_t offset, size, *create;
+
+ if (msg[0] != VCN_DEC_MESSAGE_CREATE)
+ continue;
+
+ offset = msg[1];
+ size = msg[2];
+
+ if (size < 4 || offset + size > end - addr) {
+ DRM_ERROR("VCN message buffer exceeds BO bounds!\n");
+ r = -EINVAL;
+ goto out;
+ }
+
+ create = ptr + addr + offset - start;
+
+ /* H264, HEVC and VP9 can run on any instance */
+ if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11)
+ continue;
+
+ r = limit_sched(p, job);
+ if (r)
+ goto out;
+ }
+
+out:
+ amdgpu_bo_kunmap(bo);
+ return r;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index e72687246235..ad6ca7aa74bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -573,4 +573,9 @@ void amdgpu_vcn_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_prin
void amdgpu_vcn_get_profile(struct amdgpu_device *adev);
void amdgpu_vcn_put_profile(struct amdgpu_device *adev);
+int amdgpu_vcn_dec_msg_limit_sched(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
+ uint64_t addr,
+ int (*limit_sched)(struct amdgpu_cs_parser *,
+ struct amdgpu_job *));
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 64531ad56c48..38a4fcf5872e 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -1900,104 +1900,6 @@ static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p,
return 0;
}
-static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
- uint64_t addr)
-{
- struct ttm_operation_ctx ctx = { false, false };
- struct amdgpu_device *adev = p->adev;
- struct amdgpu_bo_va_mapping *map;
- uint32_t *msg, num_buffers, len_dw;
- struct amdgpu_bo *bo;
- uint64_t start, end;
- unsigned int i;
- void *ptr;
- int r;
-
- addr &= AMDGPU_GMC_HOLE_MASK;
- r = amdgpu_cs_find_mapping(p, addr, &bo, &map);
- if (r) {
- DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
- return r;
- }
-
- start = map->start * AMDGPU_GPU_PAGE_SIZE;
- end = (map->last + 1) * AMDGPU_GPU_PAGE_SIZE;
- if (addr & 0x7) {
- DRM_ERROR("VCN messages must be 8 byte aligned!\n");
- return -EINVAL;
- }
-
- if (end - addr < 16) {
- DRM_ERROR("VCN messages must be at least 4 DWORDs!\n");
- return -EINVAL;
- }
-
- bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
- amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
- if (r) {
- DRM_ERROR("Failed validating the VCN message BO (%d)!\n", r);
- return r;
- }
-
- r = amdgpu_bo_kmap(bo, &ptr);
- if (r) {
- DRM_ERROR("Failed mapping the VCN message (%d)!\n", r);
- return r;
- }
-
- msg = ptr + addr - start;
-
- if (msg[1] > end - addr) {
- DRM_ERROR("VCN message header does not fit in BO!\n");
- r = -EINVAL;
- goto out;
- }
-
- if (msg[3] != VCN_DEC_MSG_CREATE)
- goto out;
-
- len_dw = msg[1] / 4;
- num_buffers = msg[2];
-
- /* Verify that all indices fit within the claimed length. Each index is 4 DWORDs */
- if (num_buffers > len_dw || 6 + num_buffers * 4 > len_dw) {
- DRM_ERROR("VCN message has too many buffers!\n");
- r = -EINVAL;
- goto out;
- }
-
- for (i = 0, msg = &msg[6]; i < num_buffers; ++i, msg += 4) {
- uint32_t offset, size, *create;
-
- if (msg[0] != VCN_DEC_MESSAGE_CREATE)
- continue;
-
- offset = msg[1];
- size = msg[2];
-
- if (size < 4 || offset + size > end - addr) {
- DRM_ERROR("VCN message buffer exceeds BO bounds!\n");
- r = -EINVAL;
- goto out;
- }
-
- create = ptr + addr + offset - start;
-
- /* H264, HEVC and VP9 can run on any instance */
- if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11)
- continue;
-
- r = vcn_v3_0_limit_sched(p, job);
- if (r)
- goto out;
- }
-
-out:
- amdgpu_bo_kunmap(bo);
- return r;
-}
-
static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
struct amdgpu_job *job,
struct amdgpu_ib *ib)
@@ -2021,8 +1923,8 @@ static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
msg_hi = val;
} else if (reg == PACKET0(p->adev->vcn.inst[ring->me].internal.cmd, 0) &&
val == 0) {
- r = vcn_v3_0_dec_msg(p, job,
- ((u64)msg_hi) << 32 | msg_lo);
+ r = amdgpu_vcn_dec_msg_limit_sched(p, job, ((u64)msg_hi) << 32 | msg_lo,
+ vcn_v3_0_limit_sched);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index a89e316a4add..41215ad7dfac 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -1817,104 +1817,6 @@ static int vcn_v4_0_limit_sched(struct amdgpu_cs_parser *p,
return 0;
}
-static int vcn_v4_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
- uint64_t addr)
-{
- struct ttm_operation_ctx ctx = { false, false };
- struct amdgpu_device *adev = p->adev;
- struct amdgpu_bo_va_mapping *map;
- uint32_t *msg, num_buffers, len_dw;
- struct amdgpu_bo *bo;
- uint64_t start, end;
- unsigned int i;
- void *ptr;
- int r;
-
- addr &= AMDGPU_GMC_HOLE_MASK;
- r = amdgpu_cs_find_mapping(p, addr, &bo, &map);
- if (r) {
- DRM_ERROR("Can't find BO for addr 0x%08llx\n", addr);
- return r;
- }
-
- start = map->start * AMDGPU_GPU_PAGE_SIZE;
- end = (map->last + 1) * AMDGPU_GPU_PAGE_SIZE;
- if (addr & 0x7) {
- DRM_ERROR("VCN messages must be 8 byte aligned!\n");
- return -EINVAL;
- }
-
- if (end - addr < 16) {
- DRM_ERROR("VCN messages must be at least 4 DWORDs!\n");
- return -EINVAL;
- }
-
- bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
- amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
- if (r) {
- DRM_ERROR("Failed validating the VCN message BO (%d)!\n", r);
- return r;
- }
-
- r = amdgpu_bo_kmap(bo, &ptr);
- if (r) {
- DRM_ERROR("Failed mapping the VCN message (%d)!\n", r);
- return r;
- }
-
- msg = ptr + addr - start;
-
- if (msg[1] > end - addr) {
- DRM_ERROR("VCN message header does not fit in BO!\n");
- r = -EINVAL;
- goto out;
- }
-
- if (msg[3] != VCN_DEC_MSG_CREATE)
- goto out;
-
- len_dw = msg[1] / 4;
- num_buffers = msg[2];
-
- /* Verify that all indices fit within the claimed length. Each index is 4 DWORDs */
- if (num_buffers > len_dw || 6 + num_buffers * 4 > len_dw) {
- DRM_ERROR("VCN message has too many buffers!\n");
- r = -EINVAL;
- goto out;
- }
-
- for (i = 0, msg = &msg[6]; i < num_buffers; ++i, msg += 4) {
- uint32_t offset, size, *create;
-
- if (msg[0] != VCN_DEC_MESSAGE_CREATE)
- continue;
-
- offset = msg[1];
- size = msg[2];
-
- if (size < 4 || offset + size > end - addr) {
- DRM_ERROR("VCN message buffer exceeds BO bounds!\n");
- r = -EINVAL;
- goto out;
- }
-
- create = ptr + addr + offset - start;
-
- /* H264, HEVC and VP9 can run on any instance */
- if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11)
- continue;
-
- r = vcn_v4_0_limit_sched(p, job);
- if (r)
- goto out;
- }
-
-out:
- amdgpu_bo_kunmap(bo);
- return r;
-}
-
#define RADEON_VCN_ENGINE_TYPE_ENCODE (0x00000002)
#define RADEON_VCN_ENGINE_TYPE_DECODE (0x00000003)
#define RADEON_VCN_ENGINE_INFO (0x30000001)
@@ -1957,7 +1859,8 @@ static int vcn_v4_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
msg_buffer_addr = ((u64)amdgpu_ib_get_value(ib, idx + 7)) << 32 |
amdgpu_ib_get_value(ib, idx + 8);
- return vcn_v4_0_dec_msg(p, job, msg_buffer_addr);
+ return amdgpu_vcn_dec_msg_limit_sched(p, job, msg_buffer_addr,
+ vcn_v4_0_limit_sched);
} else if (val == RADEON_VCN_ENGINE_TYPE_ENCODE) {
sidx = vcn_v4_0_enc_find_ib_param(ib,
RENCODE_IB_PARAM_SESSION_INIT, idx);
if (sidx >= 0 &&
--
2.53.0