+{
+	const struct nbio_hdp_flush_reg *nbio_hf_reg;
+
+	if (!ring || !ref_and_mask || !reg_mem_engine) {
+		DRM_INFO("%s: invalid params\n", __func__);
+		return;
+	}
+
+	nbio_hf_reg = ring->adev->nbio.hdp_flush_reg;
+
+	switch (ring->funcs->type) {
+	case AMDGPU_RING_TYPE_GFX:
+		*ref_and_mask = nbio_hf_reg->ref_and_mask_cp0 << ring->pipe;
+		*reg_mem_engine = 1; /* pfp */
+		break;
+	case AMDGPU_RING_TYPE_COMPUTE:
+		*ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
+		*reg_mem_engine = 0;
+		break;
+	case AMDGPU_RING_TYPE_MES:
+		*ref_and_mask = nbio_hf_reg->ref_and_mask_cp8;
+		*reg_mem_engine = 0;
+		break;
+	case AMDGPU_RING_TYPE_KIQ:
+		*ref_and_mask = nbio_hf_reg->ref_and_mask_cp9;
+		*reg_mem_engine = 0;
+		break;
+	default:
+		DRM_ERROR("%s: unsupported ring type %d\n", __func__,
+			  ring->funcs->type);
+		return;
+	}
+}
+
int amdgpu_kiq_hdp_flush(struct amdgpu_device *adev)
{
signed long r, cnt = 0;
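
Aside (illustration, not part of the patch): ref_and_mask selects which client bit a WAIT_REG_MEM packet polls in the NBIO GPU_HDP_FLUSH_REQ/DONE register pair, and reg_mem_engine selects the CP engine that performs the wait (1 = PFP, 0 = ME/MEC). Shifting ref_and_mask_cp2 by ring->pipe makes pipe N of a compute ring wait on the CP(2+N) done bit, which is exactly what the per-IP switch statements removed below used to compute. A minimal caller sketch, assuming a ring whose gfx.funcs table has the new hook wired up:

	u32 ref_and_mask = 0, reg_mem_engine = 0;

	/* probe before use: older IP blocks may not set the callback */
	if (ring->adev->gfx.funcs->get_ref_and_mask)
		ring->adev->gfx.funcs->get_ref_and_mask(ring, &ref_and_mask,
							&reg_mem_engine);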
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index efd61a1ccc66..e7718485eae3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -356,6 +356,8 @@ struct amdgpu_gfx_funcs {
int num_xccs_per_xcp);
int (*ih_node_to_logical_xcc)(struct amdgpu_device *adev, int ih_node);
int (*get_xccs_per_xcp)(struct amdgpu_device *adev);
+	void (*get_ref_and_mask)(struct amdgpu_ring *ring,
+				 uint32_t *ref_and_mask,
+				 uint32_t *reg_mem_engine);
};
struct sq_work {
@@ -615,6 +617,8 @@ int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry);
uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_id);
void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t xcc_id);
+void amdgpu_gfx_get_ref_mask(struct amdgpu_ring *ring,
+			     uint32_t *ref_and_mask, uint32_t *reg_mem_engine);
int amdgpu_kiq_hdp_flush(struct amdgpu_device *adev);
int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev);
void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev, uint32_t ucode_id);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index 895b841b9626..77d25317973e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -556,11 +556,20 @@ int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
int amdgpu_mes_hdp_flush(struct amdgpu_device *adev)
{
-	uint32_t hdp_flush_req_offset, hdp_flush_done_offset, ref_and_mask;
+	uint32_t hdp_flush_req_offset, hdp_flush_done_offset;
+	struct amdgpu_ring *mes_ring;
+	uint32_t ref_and_mask = 0, reg_mem_engine = 0;
+
+	if (!adev->gfx.funcs->get_ref_and_mask) {
+		dev_err(adev->dev, "mes hdp flush is not supported.\n");
+		return -EINVAL;
+	}
+
+	mes_ring = &adev->mes.ring[0];
 	hdp_flush_req_offset = adev->nbio.funcs->get_hdp_flush_req_offset(adev);
 	hdp_flush_done_offset =
 		adev->nbio.funcs->get_hdp_flush_done_offset(adev);
-	ref_and_mask = adev->nbio.hdp_flush_reg->ref_and_mask_cp0;
+
+	adev->gfx.funcs->get_ref_and_mask(mes_ring, &ref_and_mask,
+					  &reg_mem_engine);
return amdgpu_mes_reg_write_reg_wait(adev, hdp_flush_req_offset, hdp_flush_done_offset,
ref_and_mask, ref_and_mask, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index aaed24f7e716..57ed8bf6b78a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -4575,6 +4575,7 @@ static const struct amdgpu_gfx_funcs gfx_v10_0_gfx_funcs = {
.select_me_pipe_q = &gfx_v10_0_select_me_pipe_q,
.init_spm_golden = &gfx_v10_0_init_spm_golden_registers,
.update_perfmon_mgcg = &gfx_v10_0_update_perfmon_mgcg,
+	.get_ref_and_mask = &amdgpu_gfx_get_ref_mask,
};
static void gfx_v10_0_gpu_early_init(struct amdgpu_device *adev)
@@ -8614,25 +8615,13 @@ static void gfx_v10_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 ref_and_mask, reg_mem_engine;
-	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
 
-	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
-		switch (ring->me) {
-		case 1:
-			ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
-			break;
-		case 2:
-			ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
-			break;
-		default:
-			return;
-		}
-		reg_mem_engine = 0;
-	} else {
-		ref_and_mask = nbio_hf_reg->ref_and_mask_cp0 << ring->pipe;
-		reg_mem_engine = 1; /* pfp */
+	if (!adev->gfx.funcs->get_ref_and_mask) {
+		dev_err(adev->dev, "%s: gfx hdp flush is not supported.\n",
+			__func__);
+		return;
 	}
+	adev->gfx.funcs->get_ref_and_mask(ring, &ref_and_mask, &reg_mem_engine);
gfx_v10_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
adev->nbio.funcs->get_hdp_flush_req_offset(adev),
adev->nbio.funcs->get_hdp_flush_done_offset(adev),
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index f4d4dd5dd07b..11c866d7a49b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -1081,6 +1081,7 @@ static const struct amdgpu_gfx_funcs gfx_v11_0_gfx_funcs = {
.select_me_pipe_q = &gfx_v11_0_select_me_pipe_q,
.update_perfmon_mgcg = &gfx_v11_0_update_perf_clk,
.get_gfx_shadow_info = &gfx_v11_0_get_gfx_shadow_info,
+	.get_ref_and_mask = &amdgpu_gfx_get_ref_mask,
};
static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev)
@@ -5833,25 +5834,13 @@ static void gfx_v11_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 ref_and_mask, reg_mem_engine;
-	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
 
-	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
-		switch (ring->me) {
-		case 1:
-			ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
-			break;
-		case 2:
-			ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
-			break;
-		default:
-			return;
-		}
-		reg_mem_engine = 0;
-	} else {
-		ref_and_mask = nbio_hf_reg->ref_and_mask_cp0 << ring->pipe;
-		reg_mem_engine = 1; /* pfp */
+	if (!adev->gfx.funcs->get_ref_and_mask) {
+		dev_err(adev->dev, "%s: gfx hdp flush is not supported.\n",
+			__func__);
+		return;
 	}
+	adev->gfx.funcs->get_ref_and_mask(ring, &ref_and_mask, &reg_mem_engine);
gfx_v11_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
adev->nbio.funcs->get_hdp_flush_req_offset(adev),
adev->nbio.funcs->get_hdp_flush_done_offset(adev),
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index f9cae6666697..fc8e28d7921c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -938,6 +938,7 @@ static const struct amdgpu_gfx_funcs gfx_v12_0_gfx_funcs = {
.select_me_pipe_q = &gfx_v12_0_select_me_pipe_q,
.update_perfmon_mgcg = &gfx_v12_0_update_perf_clk,
.get_gfx_shadow_info = &gfx_v12_0_get_gfx_shadow_info,
+	.get_ref_and_mask = &amdgpu_gfx_get_ref_mask,
};
static int gfx_v12_0_gpu_early_init(struct amdgpu_device *adev)
@@ -4389,25 +4390,13 @@ static void gfx_v12_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 ref_and_mask, reg_mem_engine;
-	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
 
-	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
-		switch (ring->me) {
-		case 1:
-			ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
-			break;
-		case 2:
-			ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
-			break;
-		default:
-			return;
-		}
-		reg_mem_engine = 0;
-	} else {
-		ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
-		reg_mem_engine = 1; /* pfp */
+	if (!adev->gfx.funcs->get_ref_and_mask) {
+		dev_err(adev->dev, "%s: gfx hdp flush is not supported.\n",
+			__func__);
+		return;
 	}
+	adev->gfx.funcs->get_ref_and_mask(ring, &ref_and_mask, &reg_mem_engine);
gfx_v12_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
adev->nbio.funcs->get_hdp_flush_req_offset(adev),
adev->nbio.funcs->get_hdp_flush_done_offset(adev),
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 66a4e4998106..29a6378cbf04 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2068,23 +2068,15 @@ static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring)
static void gfx_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
 	u32 ref_and_mask;
-	int usepfp = ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ? 0 : 1;
+	u32 usepfp;
+	struct amdgpu_device *adev = ring->adev;
 
-	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
-		switch (ring->me) {
-		case 1:
-			ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
-			break;
-		case 2:
-			ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe;
-			break;
-		default:
-			return;
-		}
-	} else {
-		ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK;
+	if (!adev->gfx.funcs->get_ref_and_mask) {
+		dev_err(adev->dev, "%s: gfx hdp flush is not supported.\n",
+			__func__);
+		return;
 	}
+	adev->gfx.funcs->get_ref_and_mask(ring, &ref_and_mask, &usepfp);
 	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
 	amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
 				 WAIT_REG_MEM_FUNCTION(3) |  /* == */
@@ -4075,12 +4067,49 @@ static void gfx_v7_0_select_me_pipe_q(struct amdgpu_device *adev,
cik_srbm_select(adev, me, pipe, q, vm);
}
+/**
+ * gfx_v7_0_get_ref_and_mask - get the reference and mask for HDP flush
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @ref_and_mask: pointer to store the reference and mask
+ * @reg_mem_engine: pointer to store the register memory engine
+ *
+ * Calculates the reference and mask for HDP flush based on the ring type
+ * and me.
+ */
+static void gfx_v7_0_get_ref_and_mask(struct amdgpu_ring *ring,
+				      uint32_t *ref_and_mask,
+				      uint32_t *reg_mem_engine)
+{
+	if (!ring || !ref_and_mask || !reg_mem_engine) {
+		DRM_INFO("%s: invalid params\n", __func__);
+		return;
+	}
+
+	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
+	    ring->funcs->type == AMDGPU_RING_TYPE_KIQ) {
+		switch (ring->me) {
+		case 1:
+			*ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
+			break;
+		case 2:
+			*ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe;
+			break;
+		default:
+			return;
+		}
+		*reg_mem_engine = 0;
+	} else {
+		*ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK;
+		*reg_mem_engine = 1; /* pfp */
+	}
+}
+
 static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
 	.get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
 	.select_se_sh = &gfx_v7_0_select_se_sh,
 	.read_wave_data = &gfx_v7_0_read_wave_data,
 	.read_wave_sgprs = &gfx_v7_0_read_wave_sgprs,
-	.select_me_pipe_q = &gfx_v7_0_select_me_pipe_q
+	.select_me_pipe_q = &gfx_v7_0_select_me_pipe_q,
+	.get_ref_and_mask = &gfx_v7_0_get_ref_and_mask,
 };
static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 5d6e8e0601cb..0fd17fdf2cb3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -5211,13 +5211,49 @@ static void gfx_v8_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id
start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}
+/**
+ * gfx_v8_0_get_ref_and_mask - get the reference and mask for HDP flush
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @ref_and_mask: pointer to store the reference and mask
+ * @reg_mem_engine: pointer to store the register memory engine
+ *
+ * Calculates the reference and mask for HDP flush based on the ring type
+ * and me.
+ */
+static void gfx_v8_0_get_ref_and_mask(struct amdgpu_ring *ring,
+				      uint32_t *ref_and_mask,
+				      uint32_t *reg_mem_engine)
+{
+	if (!ring || !ref_and_mask || !reg_mem_engine) {
+		DRM_INFO("%s: invalid params\n", __func__);
+		return;
+	}
+
+	if ((ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) ||
+	    (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)) {
+		switch (ring->me) {
+		case 1:
+			*ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
+			break;
+		case 2:
+			*ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe;
+			break;
+		default:
+			return;
+		}
+		*reg_mem_engine = 0;
+	} else {
+		*ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK;
+		*reg_mem_engine = WAIT_REG_MEM_ENGINE(1); /* pfp */
+	}
+}
+
 static const struct amdgpu_gfx_funcs gfx_v8_0_gfx_funcs = {
 	.get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
 	.select_se_sh = &gfx_v8_0_select_se_sh,
 	.read_wave_data = &gfx_v8_0_read_wave_data,
 	.read_wave_sgprs = &gfx_v8_0_read_wave_sgprs,
-	.select_me_pipe_q = &gfx_v8_0_select_me_pipe_q
+	.select_me_pipe_q = &gfx_v8_0_select_me_pipe_q,
+	.get_ref_and_mask = &gfx_v8_0_get_ref_and_mask,
 };
static int gfx_v8_0_early_init(struct amdgpu_ip_block *ip_block)
@@ -6000,25 +6036,14 @@ static void gfx_v8_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
 	u32 ref_and_mask, reg_mem_engine;
+	struct amdgpu_device *adev = ring->adev;
 
-	if ((ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) ||
-	    (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)) {
-		switch (ring->me) {
-		case 1:
-			ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
-			break;
-		case 2:
-			ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe;
-			break;
-		default:
-			return;
-		}
-		reg_mem_engine = 0;
-	} else {
-		ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK;
-		reg_mem_engine = WAIT_REG_MEM_ENGINE(1); /* pfp */
+	if (!adev->gfx.funcs->get_ref_and_mask) {
+		dev_err(adev->dev, "%s: gfx hdp flush is not supported.\n",
+			__func__);
+		return;
 	}
+	adev->gfx.funcs->get_ref_and_mask(ring, &ref_and_mask, &reg_mem_engine);
 	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
 	amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
 				 WAIT_REG_MEM_FUNCTION(3) |  /* == */
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index e6187be27385..9e31a27a1a7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -2004,6 +2004,7 @@ static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
.read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
.read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
.select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
+	.get_ref_and_mask = &amdgpu_gfx_get_ref_mask,
};
const struct amdgpu_ras_block_hw_ops gfx_v9_0_ras_ops = {
@@ -5380,25 +5381,13 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 ref_and_mask, reg_mem_engine;
-	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
 
-	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
-		switch (ring->me) {
-		case 1:
-			ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
-			break;
-		case 2:
-			ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
-			break;
-		default:
-			return;
-		}
-		reg_mem_engine = 0;
-	} else {
-		ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
-		reg_mem_engine = 1; /* pfp */
+	if (!adev->gfx.funcs->get_ref_and_mask) {
+		dev_err(adev->dev, "%s: gfx hdp flush is not supported.\n",
+			__func__);
+		return;
 	}
+	adev->gfx.funcs->get_ref_and_mask(ring, &ref_and_mask, &reg_mem_engine);
gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
adev->nbio.funcs->get_hdp_flush_req_offset(adev),
adev->nbio.funcs->get_hdp_flush_done_offset(adev),
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index 89253df5ffc8..d428ab2a4313 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -848,6 +848,7 @@ static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = {
.switch_partition_mode = &gfx_v9_4_3_switch_compute_partition,
.ih_node_to_logical_xcc = &gfx_v9_4_3_ih_to_xcc_inst,
.get_xccs_per_xcp = &gfx_v9_4_3_get_xccs_per_xcp,
+	.get_ref_and_mask = &amdgpu_gfx_get_ref_mask,
};
static int gfx_v9_4_3_aca_bank_parser(struct aca_handle *handle,
@@ -2818,25 +2819,13 @@ static void gfx_v9_4_3_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 ref_and_mask, reg_mem_engine;
-	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
 
-	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
-		switch (ring->me) {
-		case 1:
-			ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
-			break;
-		case 2:
-			ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
-			break;
-		default:
-			return;
-		}
-		reg_mem_engine = 0;
-	} else {
-		ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
-		reg_mem_engine = 1; /* pfp */
+	if (!adev->gfx.funcs->get_ref_and_mask) {
+		dev_err(adev->dev, "%s: gfx hdp flush is not supported.\n",
+			__func__);
+		return;
 	}
+	adev->gfx.funcs->get_ref_and_mask(ring, &ref_and_mask, &reg_mem_engine);
gfx_v9_4_3_wait_reg_mem(ring, reg_mem_engine, 0, 1,
adev->nbio.funcs->get_hdp_flush_req_offset(adev),
adev->nbio.funcs->get_hdp_flush_done_offset(adev),
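
Aside (illustrative, not part of the patch): for a future gfx IP that uses the standard NBIO hdp_flush_reg layout, adopting the new hook is a single table entry pointing at the common helper; the gfx_vNN name below is hypothetical:

	static const struct amdgpu_gfx_funcs gfx_vNN_0_gfx_funcs = {
		/* ... other per-IP callbacks ... */
		.get_ref_and_mask = &amdgpu_gfx_get_ref_mask,	/* common helper */
	};

IPs whose HDP-flush done bits are not exposed through nbio.hdp_flush_reg (gfx_v7/v8 above use the CIK/VI GPU_HDP_FLUSH_DONE masks directly) keep a private callback instead.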