From: James Zhu <james....@amd.com>

Arcturus has dual-VCN hardware, so restructure amdgpu_device::vcn to
support multiple VCN instances. There are no logical changes here.

Signed-off-by: James Zhu <james....@amd.com>
Reviewed-by: Leo Liu <leo....@amd.com>
Signed-off-by: Alex Deucher <alexander.deuc...@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c |   6 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c |   6 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c |  68 +++++++--------
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h |  24 ++++--
 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c   | 110 ++++++++++++------------
 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c   | 106 +++++++++++------------
 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c   |  87 ++++++++++---------
 7 files changed, 210 insertions(+), 197 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index a28a3d722ba2..86cc3092a5e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -155,15 +155,15 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
                        num_rings = 1;
                        break;
                case AMDGPU_HW_IP_VCN_DEC:
-                       rings[0] = &adev->vcn.ring_dec;
+                       rings[0] = &adev->vcn.inst[0].ring_dec;
                        num_rings = 1;
                        break;
                case AMDGPU_HW_IP_VCN_ENC:
-                       rings[0] = &adev->vcn.ring_enc[0];
+                       rings[0] = &adev->vcn.inst[0].ring_enc[0];
                        num_rings = 1;
                        break;
                case AMDGPU_HW_IP_VCN_JPEG:
-                       rings[0] = &adev->vcn.ring_jpeg;
+                       rings[0] = &adev->vcn.inst[0].ring_jpeg;
                        num_rings = 1;
                        break;
                }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index e2c9d8d31ed8..b56d0f15e457 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -405,7 +405,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
                break;
        case AMDGPU_HW_IP_VCN_DEC:
                type = AMD_IP_BLOCK_TYPE_VCN;
-               if (adev->vcn.ring_dec.sched.ready)
+               if (adev->vcn.inst[0].ring_dec.sched.ready)
                        ++num_rings;
                ib_start_alignment = 16;
                ib_size_alignment = 16;
@@ -413,14 +413,14 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
        case AMDGPU_HW_IP_VCN_ENC:
                type = AMD_IP_BLOCK_TYPE_VCN;
                for (i = 0; i < adev->vcn.num_enc_rings; i++)
-                       if (adev->vcn.ring_enc[i].sched.ready)
+                       if (adev->vcn.inst[0].ring_enc[i].sched.ready)
                                ++num_rings;
                ib_start_alignment = 64;
                ib_size_alignment = 1;
                break;
        case AMDGPU_HW_IP_VCN_JPEG:
                type = AMD_IP_BLOCK_TYPE_VCN;
-               if (adev->vcn.ring_jpeg.sched.ready)
+               if (adev->vcn.inst[0].ring_jpeg.sched.ready)
                        ++num_rings;
                ib_start_alignment = 16;
                ib_size_alignment = 16;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index bb0d1ef50c9c..330f355b93a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -146,8 +146,8 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                bo_size += 
AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
        r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
-                                   AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.vcpu_bo,
-                                   &adev->vcn.gpu_addr, &adev->vcn.cpu_addr);
+                                   AMDGPU_GEM_DOMAIN_VRAM, 
&adev->vcn.inst[0].vcpu_bo,
+                                   &adev->vcn.inst[0].gpu_addr, 
&adev->vcn.inst[0].cpu_addr);
        if (r) {
                dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
                return r;
@@ -170,7 +170,7 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
 {
        int i;
 
-       kvfree(adev->vcn.saved_bo);
+       kvfree(adev->vcn.inst[0].saved_bo);
 
        if (adev->vcn.indirect_sram) {
                amdgpu_bo_free_kernel(&adev->vcn.dpg_sram_bo,
@@ -178,16 +178,16 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
                              (void **)&adev->vcn.dpg_sram_cpu_addr);
        }
 
-       amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
-                             &adev->vcn.gpu_addr,
-                             (void **)&adev->vcn.cpu_addr);
+       amdgpu_bo_free_kernel(&adev->vcn.inst[0].vcpu_bo,
+                             &adev->vcn.inst[0].gpu_addr,
+                             (void **)&adev->vcn.inst[0].cpu_addr);
 
-       amdgpu_ring_fini(&adev->vcn.ring_dec);
+       amdgpu_ring_fini(&adev->vcn.inst[0].ring_dec);
 
        for (i = 0; i < adev->vcn.num_enc_rings; ++i)
-               amdgpu_ring_fini(&adev->vcn.ring_enc[i]);
+               amdgpu_ring_fini(&adev->vcn.inst[0].ring_enc[i]);
 
-       amdgpu_ring_fini(&adev->vcn.ring_jpeg);
+       amdgpu_ring_fini(&adev->vcn.inst[0].ring_jpeg);
 
        release_firmware(adev->vcn.fw);
 
@@ -201,17 +201,17 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
 
        cancel_delayed_work_sync(&adev->vcn.idle_work);
 
-       if (adev->vcn.vcpu_bo == NULL)
+       if (adev->vcn.inst[0].vcpu_bo == NULL)
                return 0;
 
-       size = amdgpu_bo_size(adev->vcn.vcpu_bo);
-       ptr = adev->vcn.cpu_addr;
+       size = amdgpu_bo_size(adev->vcn.inst[0].vcpu_bo);
+       ptr = adev->vcn.inst[0].cpu_addr;
 
-       adev->vcn.saved_bo = kvmalloc(size, GFP_KERNEL);
-       if (!adev->vcn.saved_bo)
+       adev->vcn.inst[0].saved_bo = kvmalloc(size, GFP_KERNEL);
+       if (!adev->vcn.inst[0].saved_bo)
                return -ENOMEM;
 
-       memcpy_fromio(adev->vcn.saved_bo, ptr, size);
+       memcpy_fromio(adev->vcn.inst[0].saved_bo, ptr, size);
 
        return 0;
 }
@@ -221,16 +221,16 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
        unsigned size;
        void *ptr;
 
-       if (adev->vcn.vcpu_bo == NULL)
+       if (adev->vcn.inst[0].vcpu_bo == NULL)
                return -EINVAL;
 
-       size = amdgpu_bo_size(adev->vcn.vcpu_bo);
-       ptr = adev->vcn.cpu_addr;
+       size = amdgpu_bo_size(adev->vcn.inst[0].vcpu_bo);
+       ptr = adev->vcn.inst[0].cpu_addr;
 
-       if (adev->vcn.saved_bo != NULL) {
-               memcpy_toio(ptr, adev->vcn.saved_bo, size);
-               kvfree(adev->vcn.saved_bo);
-               adev->vcn.saved_bo = NULL;
+       if (adev->vcn.inst[0].saved_bo != NULL) {
+               memcpy_toio(ptr, adev->vcn.inst[0].saved_bo, size);
+               kvfree(adev->vcn.inst[0].saved_bo);
+               adev->vcn.inst[0].saved_bo = NULL;
        } else {
                const struct common_firmware_header *hdr;
                unsigned offset;
@@ -238,7 +238,7 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
                hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
                if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                        offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
-                       memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + 
offset,
+                       memcpy_toio(adev->vcn.inst[0].cpu_addr, 
adev->vcn.fw->data + offset,
                                    le32_to_cpu(hdr->ucode_size_bytes));
                        size -= le32_to_cpu(hdr->ucode_size_bytes);
                        ptr += le32_to_cpu(hdr->ucode_size_bytes);
@@ -257,7 +257,7 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct 
*work)
        unsigned int i;
 
        for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
-               fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
+               fences += 
amdgpu_fence_count_emitted(&adev->vcn.inst[0].ring_enc[i]);
        }
 
        if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)    {
@@ -268,7 +268,7 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct 
*work)
                else
                        new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
 
-               if (amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg))
+               if (amdgpu_fence_count_emitted(&adev->vcn.inst[0].ring_jpeg))
                        new_state.jpeg = VCN_DPG_STATE__PAUSE;
                else
                        new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
@@ -276,8 +276,8 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct 
*work)
                adev->vcn.pause_dpg_mode(adev, &new_state);
        }
 
-       fences += amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg);
-       fences += amdgpu_fence_count_emitted(&adev->vcn.ring_dec);
+       fences += amdgpu_fence_count_emitted(&adev->vcn.inst[0].ring_jpeg);
+       fences += amdgpu_fence_count_emitted(&adev->vcn.inst[0].ring_dec);
 
        if (fences == 0) {
                amdgpu_gfx_off_ctrl(adev, true);
@@ -311,14 +311,14 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
                unsigned int i;
 
                for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
-                       fences += 
amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
+                       fences += 
amdgpu_fence_count_emitted(&adev->vcn.inst[0].ring_enc[i]);
                }
                if (fences)
                        new_state.fw_based = VCN_DPG_STATE__PAUSE;
                else
                        new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
 
-               if (amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg))
+               if (amdgpu_fence_count_emitted(&adev->vcn.inst[0].ring_jpeg))
                        new_state.jpeg = VCN_DPG_STATE__PAUSE;
                else
                        new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
@@ -344,7 +344,7 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
        unsigned i;
        int r;
 
-       WREG32(adev->vcn.external.scratch9, 0xCAFEDEAD);
+       WREG32(adev->vcn.inst[0].external.scratch9, 0xCAFEDEAD);
        r = amdgpu_ring_alloc(ring, 3);
        if (r)
                return r;
@@ -352,7 +352,7 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);
        for (i = 0; i < adev->usec_timeout; i++) {
-               tmp = RREG32(adev->vcn.external.scratch9);
+               tmp = RREG32(adev->vcn.inst[0].external.scratch9);
                if (tmp == 0xDEADBEEF)
                        break;
                DRM_UDELAY(1);
@@ -663,7 +663,7 @@ int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
        unsigned i;
        int r;
 
-       WREG32(adev->vcn.external.jpeg_pitch, 0xCAFEDEAD);
+       WREG32(adev->vcn.inst[0].external.jpeg_pitch, 0xCAFEDEAD);
        r = amdgpu_ring_alloc(ring, 3);
        if (r)
                return r;
@@ -673,7 +673,7 @@ int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
        amdgpu_ring_commit(ring);
 
        for (i = 0; i < adev->usec_timeout; i++) {
-               tmp = RREG32(adev->vcn.external.jpeg_pitch);
+               tmp = RREG32(adev->vcn.inst[0].external.jpeg_pitch);
                if (tmp == 0xDEADBEEF)
                        break;
                DRM_UDELAY(1);
@@ -747,7 +747,7 @@ int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, 
long timeout)
        }
 
        for (i = 0; i < adev->usec_timeout; i++) {
-               tmp = RREG32(adev->vcn.external.jpeg_pitch);
+               tmp = RREG32(adev->vcn.inst[0].external.jpeg_pitch);
                if (tmp == 0xDEADBEEF)
                        break;
                DRM_UDELAY(1);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index bfd8c3cea13a..d2fc47a954ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -30,6 +30,8 @@
 #define AMDGPU_VCN_FIRMWARE_OFFSET     256
 #define AMDGPU_VCN_MAX_ENC_RINGS       3
 
+#define AMDGPU_MAX_VCN_INSTANCES       2
+
 #define VCN_DEC_CMD_FENCE              0x00000000
 #define VCN_DEC_CMD_TRAP               0x00000001
 #define VCN_DEC_CMD_WRITE_REG          0x00000004
@@ -155,30 +157,38 @@ struct amdgpu_vcn_reg{
        unsigned        jpeg_pitch;
 };
 
-struct amdgpu_vcn {
+struct amdgpu_vcn_inst {
        struct amdgpu_bo        *vcpu_bo;
        void                    *cpu_addr;
        uint64_t                gpu_addr;
-       unsigned                fw_version;
        void                    *saved_bo;
-       struct delayed_work     idle_work;
-       const struct firmware   *fw;    /* VCN firmware */
        struct amdgpu_ring      ring_dec;
        struct amdgpu_ring      ring_enc[AMDGPU_VCN_MAX_ENC_RINGS];
        struct amdgpu_ring      ring_jpeg;
        struct amdgpu_irq_src   irq;
+       struct amdgpu_vcn_reg   external;
+};
+
+struct amdgpu_vcn {
+       unsigned                fw_version;
+       struct delayed_work     idle_work;
+       const struct firmware   *fw;    /* VCN firmware */
        unsigned                num_enc_rings;
        enum amd_powergating_state cur_state;
        struct dpg_pause_state pause_state;
-       struct amdgpu_vcn_reg   internal, external;
-       int (*pause_dpg_mode)(struct amdgpu_device *adev,
-               struct dpg_pause_state *new_state);
 
        bool                    indirect_sram;
        struct amdgpu_bo        *dpg_sram_bo;
        void                    *dpg_sram_cpu_addr;
        uint64_t                dpg_sram_gpu_addr;
        uint32_t                *dpg_sram_curr_addr;
+
+       uint8_t num_vcn_inst;
+       struct amdgpu_vcn_inst  inst[AMDGPU_MAX_VCN_INSTANCES];
+       struct amdgpu_vcn_reg   internal;
+
+       int (*pause_dpg_mode)(struct amdgpu_device *adev,
+               struct dpg_pause_state *new_state);
 };
 
 int amdgpu_vcn_sw_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 09dc9c87ebd1..07a2f36ea7ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -63,6 +63,7 @@ static int vcn_v1_0_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       adev->vcn.num_vcn_inst = 1;
        adev->vcn.num_enc_rings = 2;
 
        vcn_v1_0_set_dec_ring_funcs(adev);
@@ -87,20 +88,21 @@ static int vcn_v1_0_sw_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        /* VCN DEC TRAP */
-       r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 
VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.irq);
+       r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
+                       VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, 
&adev->vcn.inst->irq);
        if (r)
                return r;
 
        /* VCN ENC TRAP */
        for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
                r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, i + 
VCN_1_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
-                                       &adev->vcn.irq);
+                                       &adev->vcn.inst->irq);
                if (r)
                        return r;
        }
 
        /* VCN JPEG TRAP */
-       r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 126, &adev->vcn.irq);
+       r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 126, 
&adev->vcn.inst->irq);
        if (r)
                return r;
 
@@ -122,39 +124,39 @@ static int vcn_v1_0_sw_init(void *handle)
        if (r)
                return r;
 
-       ring = &adev->vcn.ring_dec;
+       ring = &adev->vcn.inst->ring_dec;
        sprintf(ring->name, "vcn_dec");
-       r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
+       r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
        if (r)
                return r;
 
-       adev->vcn.internal.scratch9 = adev->vcn.external.scratch9 =
+       adev->vcn.internal.scratch9 = adev->vcn.inst->external.scratch9 =
                SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9);
-       adev->vcn.internal.data0 = adev->vcn.external.data0 =
+       adev->vcn.internal.data0 = adev->vcn.inst->external.data0 =
                SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0);
-       adev->vcn.internal.data1 = adev->vcn.external.data1 =
+       adev->vcn.internal.data1 = adev->vcn.inst->external.data1 =
                SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1);
-       adev->vcn.internal.cmd = adev->vcn.external.cmd =
+       adev->vcn.internal.cmd = adev->vcn.inst->external.cmd =
                SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD);
-       adev->vcn.internal.nop = adev->vcn.external.nop =
+       adev->vcn.internal.nop = adev->vcn.inst->external.nop =
                SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);
 
        for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
-               ring = &adev->vcn.ring_enc[i];
+               ring = &adev->vcn.inst->ring_enc[i];
                sprintf(ring->name, "vcn_enc%d", i);
-               r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
+               r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
                if (r)
                        return r;
        }
 
-       ring = &adev->vcn.ring_jpeg;
+       ring = &adev->vcn.inst->ring_jpeg;
        sprintf(ring->name, "vcn_jpeg");
-       r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
+       r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
        if (r)
                return r;
 
        adev->vcn.pause_dpg_mode = vcn_v1_0_pause_dpg_mode;
-       adev->vcn.internal.jpeg_pitch = adev->vcn.external.jpeg_pitch =
+       adev->vcn.internal.jpeg_pitch = adev->vcn.inst->external.jpeg_pitch =
                SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH);
 
        return 0;
@@ -191,7 +193,7 @@ static int vcn_v1_0_sw_fini(void *handle)
 static int vcn_v1_0_hw_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       struct amdgpu_ring *ring = &adev->vcn.ring_dec;
+       struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
        int i, r;
 
        r = amdgpu_ring_test_helper(ring);
@@ -199,14 +201,14 @@ static int vcn_v1_0_hw_init(void *handle)
                goto done;
 
        for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
-               ring = &adev->vcn.ring_enc[i];
+               ring = &adev->vcn.inst->ring_enc[i];
                ring->sched.ready = true;
                r = amdgpu_ring_test_helper(ring);
                if (r)
                        goto done;
        }
 
-       ring = &adev->vcn.ring_jpeg;
+       ring = &adev->vcn.inst->ring_jpeg;
        r = amdgpu_ring_test_helper(ring);
        if (r)
                goto done;
@@ -229,7 +231,7 @@ static int vcn_v1_0_hw_init(void *handle)
 static int vcn_v1_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       struct amdgpu_ring *ring = &adev->vcn.ring_dec;
+       struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
 
        if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
                RREG32_SOC15(VCN, 0, mmUVD_STATUS))
@@ -304,9 +306,9 @@ static void vcn_v1_0_mc_resume_spg_mode(struct 
amdgpu_device *adev)
                offset = 0;
        } else {
                WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
-                       lower_32_bits(adev->vcn.gpu_addr));
+                       lower_32_bits(adev->vcn.inst->gpu_addr));
                WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
-                       upper_32_bits(adev->vcn.gpu_addr));
+                       upper_32_bits(adev->vcn.inst->gpu_addr));
                offset = size;
                WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
                             AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
@@ -316,17 +318,17 @@ static void vcn_v1_0_mc_resume_spg_mode(struct 
amdgpu_device *adev)
 
        /* cache window 1: stack */
        WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
-                    lower_32_bits(adev->vcn.gpu_addr + offset));
+                    lower_32_bits(adev->vcn.inst->gpu_addr + offset));
        WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
-                    upper_32_bits(adev->vcn.gpu_addr + offset));
+                    upper_32_bits(adev->vcn.inst->gpu_addr + offset));
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
 
        /* cache window 2: context */
        WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
-                    lower_32_bits(adev->vcn.gpu_addr + offset + 
AMDGPU_VCN_STACK_SIZE));
+                    lower_32_bits(adev->vcn.inst->gpu_addr + offset + 
AMDGPU_VCN_STACK_SIZE));
        WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
-                    upper_32_bits(adev->vcn.gpu_addr + offset + 
AMDGPU_VCN_STACK_SIZE));
+                    upper_32_bits(adev->vcn.inst->gpu_addr + offset + 
AMDGPU_VCN_STACK_SIZE));
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
 
@@ -374,9 +376,9 @@ static void vcn_v1_0_mc_resume_dpg_mode(struct 
amdgpu_device *adev)
                offset = 0;
        } else {
                WREG32_SOC15_DPG_MODE(UVD, 0, 
mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
-                       lower_32_bits(adev->vcn.gpu_addr), 0xFFFFFFFF, 0);
+                       lower_32_bits(adev->vcn.inst->gpu_addr), 0xFFFFFFFF, 0);
                WREG32_SOC15_DPG_MODE(UVD, 0, 
mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
-                       upper_32_bits(adev->vcn.gpu_addr), 0xFFFFFFFF, 0);
+                       upper_32_bits(adev->vcn.inst->gpu_addr), 0xFFFFFFFF, 0);
                offset = size;
                WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
                             AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0xFFFFFFFF, 0);
@@ -386,9 +388,9 @@ static void vcn_v1_0_mc_resume_dpg_mode(struct 
amdgpu_device *adev)
 
        /* cache window 1: stack */
        WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
-                    lower_32_bits(adev->vcn.gpu_addr + offset), 0xFFFFFFFF, 0);
+                    lower_32_bits(adev->vcn.inst->gpu_addr + offset), 
0xFFFFFFFF, 0);
        WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
-                    upper_32_bits(adev->vcn.gpu_addr + offset), 0xFFFFFFFF, 0);
+                    upper_32_bits(adev->vcn.inst->gpu_addr + offset), 
0xFFFFFFFF, 0);
        WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0,
                             0xFFFFFFFF, 0);
        WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, 
AMDGPU_VCN_STACK_SIZE,
@@ -396,10 +398,10 @@ static void vcn_v1_0_mc_resume_dpg_mode(struct 
amdgpu_device *adev)
 
        /* cache window 2: context */
        WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
-                    lower_32_bits(adev->vcn.gpu_addr + offset + 
AMDGPU_VCN_STACK_SIZE),
+                    lower_32_bits(adev->vcn.inst->gpu_addr + offset + 
AMDGPU_VCN_STACK_SIZE),
                             0xFFFFFFFF, 0);
        WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
-                    upper_32_bits(adev->vcn.gpu_addr + offset + 
AMDGPU_VCN_STACK_SIZE),
+                    upper_32_bits(adev->vcn.inst->gpu_addr + offset + 
AMDGPU_VCN_STACK_SIZE),
                             0xFFFFFFFF, 0);
        WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0, 0xFFFFFFFF, 
0);
        WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, 
AMDGPU_VCN_CONTEXT_SIZE,
@@ -779,7 +781,7 @@ static void vcn_1_0_enable_static_power_gating(struct 
amdgpu_device *adev)
  */
 static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
 {
-       struct amdgpu_ring *ring = &adev->vcn.ring_dec;
+       struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
        uint32_t rb_bufsz, tmp;
        uint32_t lmi_swap_cntl;
        int i, j, r;
@@ -932,21 +934,21 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device 
*adev)
        WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
                        ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
 
-       ring = &adev->vcn.ring_enc[0];
+       ring = &adev->vcn.inst->ring_enc[0];
        WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
        WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
 
-       ring = &adev->vcn.ring_enc[1];
+       ring = &adev->vcn.inst->ring_enc[1];
        WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
        WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
 
-       ring = &adev->vcn.ring_jpeg;
+       ring = &adev->vcn.inst->ring_jpeg;
        WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
        WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, 
UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK |
                        UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
@@ -968,7 +970,7 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device 
*adev)
 
 static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
 {
-       struct amdgpu_ring *ring = &adev->vcn.ring_dec;
+       struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
        uint32_t rb_bufsz, tmp;
        uint32_t lmi_swap_cntl;
 
@@ -1106,7 +1108,7 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device 
*adev)
                        ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
 
        /* initialize JPEG wptr */
-       ring = &adev->vcn.ring_jpeg;
+       ring = &adev->vcn.inst->ring_jpeg;
        ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
 
        /* copy patch commands to the jpeg ring */
@@ -1255,21 +1257,21 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device 
*adev,
                                                   
UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);
 
                                /* Restore */
-                               ring = &adev->vcn.ring_enc[0];
+                               ring = &adev->vcn.inst->ring_enc[0];
                                WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, 
ring->gpu_addr);
                                WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, 
upper_32_bits(ring->gpu_addr));
                                WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, 
ring->ring_size / 4);
                                WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, 
lower_32_bits(ring->wptr));
                                WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, 
lower_32_bits(ring->wptr));
 
-                               ring = &adev->vcn.ring_enc[1];
+                               ring = &adev->vcn.inst->ring_enc[1];
                                WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, 
ring->gpu_addr);
                                WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, 
upper_32_bits(ring->gpu_addr));
                                WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, 
ring->ring_size / 4);
                                WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, 
lower_32_bits(ring->wptr));
                                WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, 
lower_32_bits(ring->wptr));
 
-                               ring = &adev->vcn.ring_dec;
+                               ring = &adev->vcn.inst->ring_dec;
                                WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
                                                   RREG32_SOC15(UVD, 0, 
mmUVD_SCRATCH2) & 0x7FFFFFFF);
                                SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
@@ -1315,7 +1317,7 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device 
*adev,
                                                        
UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK, ret_code);
 
                                /* Restore */
-                               ring = &adev->vcn.ring_jpeg;
+                               ring = &adev->vcn.inst->ring_jpeg;
                                WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
                                WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
                                                        
UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK |
@@ -1329,7 +1331,7 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device 
*adev,
                                WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
                                                        
UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
 
-                               ring = &adev->vcn.ring_dec;
+                               ring = &adev->vcn.inst->ring_dec;
                                WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
                                                   RREG32_SOC15(UVD, 0, 
mmUVD_SCRATCH2) & 0x7FFFFFFF);
                                SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
@@ -1596,7 +1598,7 @@ static uint64_t vcn_v1_0_enc_ring_get_rptr(struct 
amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
 
-       if (ring == &adev->vcn.ring_enc[0])
+       if (ring == &adev->vcn.inst->ring_enc[0])
                return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
        else
                return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
@@ -1613,7 +1615,7 @@ static uint64_t vcn_v1_0_enc_ring_get_wptr(struct 
amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
 
-       if (ring == &adev->vcn.ring_enc[0])
+       if (ring == &adev->vcn.inst->ring_enc[0])
                return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
        else
                return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
@@ -1630,7 +1632,7 @@ static void vcn_v1_0_enc_ring_set_wptr(struct amdgpu_ring 
*ring)
 {
        struct amdgpu_device *adev = ring->adev;
 
-       if (ring == &adev->vcn.ring_enc[0])
+       if (ring == &adev->vcn.inst->ring_enc[0])
                WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR,
                        lower_32_bits(ring->wptr));
        else
@@ -2114,16 +2116,16 @@ static int vcn_v1_0_process_interrupt(struct 
amdgpu_device *adev,
 
        switch (entry->src_id) {
        case 124:
-               amdgpu_fence_process(&adev->vcn.ring_dec);
+               amdgpu_fence_process(&adev->vcn.inst->ring_dec);
                break;
        case 119:
-               amdgpu_fence_process(&adev->vcn.ring_enc[0]);
+               amdgpu_fence_process(&adev->vcn.inst->ring_enc[0]);
                break;
        case 120:
-               amdgpu_fence_process(&adev->vcn.ring_enc[1]);
+               amdgpu_fence_process(&adev->vcn.inst->ring_enc[1]);
                break;
        case 126:
-               amdgpu_fence_process(&adev->vcn.ring_jpeg);
+               amdgpu_fence_process(&adev->vcn.inst->ring_jpeg);
                break;
        default:
                DRM_ERROR("Unhandled interrupt: %d %d\n",
@@ -2295,7 +2297,7 @@ static const struct amdgpu_ring_funcs 
vcn_v1_0_jpeg_ring_vm_funcs = {
 
 static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
 {
-       adev->vcn.ring_dec.funcs = &vcn_v1_0_dec_ring_vm_funcs;
+       adev->vcn.inst->ring_dec.funcs = &vcn_v1_0_dec_ring_vm_funcs;
        DRM_INFO("VCN decode is enabled in VM mode\n");
 }
 
@@ -2304,14 +2306,14 @@ static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev)
        int i;
 
        for (i = 0; i < adev->vcn.num_enc_rings; ++i)
-               adev->vcn.ring_enc[i].funcs = &vcn_v1_0_enc_ring_vm_funcs;
+               adev->vcn.inst->ring_enc[i].funcs = &vcn_v1_0_enc_ring_vm_funcs;
 
        DRM_INFO("VCN encode is enabled in VM mode\n");
 }
 
 static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev)
 {
-       adev->vcn.ring_jpeg.funcs = &vcn_v1_0_jpeg_ring_vm_funcs;
+       adev->vcn.inst->ring_jpeg.funcs = &vcn_v1_0_jpeg_ring_vm_funcs;
        DRM_INFO("VCN jpeg decode is enabled in VM mode\n");
 }
 
@@ -2322,8 +2324,8 @@ static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
 
 static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev)
 {
-       adev->vcn.irq.num_types = adev->vcn.num_enc_rings + 2;
-       adev->vcn.irq.funcs = &vcn_v1_0_irq_funcs;
+       adev->vcn.inst->irq.num_types = adev->vcn.num_enc_rings + 2;
+       adev->vcn.inst->irq.funcs = &vcn_v1_0_irq_funcs;
 }
 
 const struct amdgpu_ip_block_version vcn_v1_0_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index 2b7bb6364e5a..3cb62e448a37 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -92,6 +92,7 @@ static int vcn_v2_0_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       adev->vcn.num_vcn_inst = 1;
        adev->vcn.num_enc_rings = 2;
 
        vcn_v2_0_set_dec_ring_funcs(adev);
@@ -118,7 +119,7 @@ static int vcn_v2_0_sw_init(void *handle)
        /* VCN DEC TRAP */
        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
                              VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT,
-                             &adev->vcn.irq);
+                             &adev->vcn.inst->irq);
        if (r)
                return r;
 
@@ -126,15 +127,14 @@ static int vcn_v2_0_sw_init(void *handle)
        for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
                r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
                                      i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
-                                     &adev->vcn.irq);
+                                     &adev->vcn.inst->irq);
                if (r)
                        return r;
        }
 
        /* VCN JPEG TRAP */
        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
-                             VCN_2_0__SRCID__JPEG_DECODE,
-                             &adev->vcn.irq);
+                             VCN_2_0__SRCID__JPEG_DECODE, &adev->vcn.inst->irq);
        if (r)
                return r;
 
@@ -156,13 +156,13 @@ static int vcn_v2_0_sw_init(void *handle)
        if (r)
                return r;
 
-       ring = &adev->vcn.ring_dec;
+       ring = &adev->vcn.inst->ring_dec;
 
        ring->use_doorbell = true;
        ring->doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1 << 1;
 
        sprintf(ring->name, "vcn_dec");
-       r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
+       r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
        if (r)
                return r;
 
@@ -174,38 +174,38 @@ static int vcn_v2_0_sw_init(void *handle)
        adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;
 
        adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
-       adev->vcn.external.scratch9 = SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9);
+       adev->vcn.inst->external.scratch9 = SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9);
        adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
-       adev->vcn.external.data0 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0);
+       adev->vcn.inst->external.data0 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0);
        adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
-       adev->vcn.external.data1 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1);
+       adev->vcn.inst->external.data1 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1);
        adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
-       adev->vcn.external.cmd = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD);
+       adev->vcn.inst->external.cmd = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD);
        adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
-       adev->vcn.external.nop = SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);
+       adev->vcn.inst->external.nop = SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);
 
        for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
-               ring = &adev->vcn.ring_enc[i];
+               ring = &adev->vcn.inst->ring_enc[i];
                ring->use_doorbell = true;
                ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i;
                sprintf(ring->name, "vcn_enc%d", i);
-               r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
+               r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
                if (r)
                        return r;
        }
 
-       ring = &adev->vcn.ring_jpeg;
+       ring = &adev->vcn.inst->ring_jpeg;
        ring->use_doorbell = true;
        ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
        sprintf(ring->name, "vcn_jpeg");
-       r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
+       r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
        if (r)
                return r;
 
        adev->vcn.pause_dpg_mode = vcn_v2_0_pause_dpg_mode;
 
        adev->vcn.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
-       adev->vcn.external.jpeg_pitch = SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH);
+       adev->vcn.inst->external.jpeg_pitch = SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH);
 
        return 0;
 }
@@ -241,7 +241,7 @@ static int vcn_v2_0_sw_fini(void *handle)
 static int vcn_v2_0_hw_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       struct amdgpu_ring *ring = &adev->vcn.ring_dec;
+       struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
        int i, r;
 
        adev->nbio_funcs->vcn_doorbell_range(adev, ring->use_doorbell,
@@ -255,7 +255,7 @@ static int vcn_v2_0_hw_init(void *handle)
        }
 
        for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
-               ring = &adev->vcn.ring_enc[i];
+               ring = &adev->vcn.inst->ring_enc[i];
                ring->sched.ready = true;
                r = amdgpu_ring_test_ring(ring);
                if (r) {
@@ -264,7 +264,7 @@ static int vcn_v2_0_hw_init(void *handle)
                }
        }
 
-       ring = &adev->vcn.ring_jpeg;
+       ring = &adev->vcn.inst->ring_jpeg;
        ring->sched.ready = true;
        r = amdgpu_ring_test_ring(ring);
        if (r) {
@@ -290,7 +290,7 @@ static int vcn_v2_0_hw_init(void *handle)
 static int vcn_v2_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       struct amdgpu_ring *ring = &adev->vcn.ring_dec;
+       struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
        int i;
 
        if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
@@ -301,11 +301,11 @@ static int vcn_v2_0_hw_fini(void *handle)
        ring->sched.ready = false;
 
        for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
-               ring = &adev->vcn.ring_enc[i];
+               ring = &adev->vcn.inst->ring_enc[i];
                ring->sched.ready = false;
        }
 
-       ring = &adev->vcn.ring_jpeg;
+       ring = &adev->vcn.inst->ring_jpeg;
        ring->sched.ready = false;
 
        return 0;
@@ -375,9 +375,9 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
                offset = 0;
        } else {
                WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
-                       lower_32_bits(adev->vcn.gpu_addr));
+                       lower_32_bits(adev->vcn.inst->gpu_addr));
                WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
-                       upper_32_bits(adev->vcn.gpu_addr));
+                       upper_32_bits(adev->vcn.inst->gpu_addr));
                offset = size;
                /* No signed header for now from firmware
                WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
@@ -390,17 +390,17 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
 
        /* cache window 1: stack */
        WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
-               lower_32_bits(adev->vcn.gpu_addr + offset));
+               lower_32_bits(adev->vcn.inst->gpu_addr + offset));
        WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
-               upper_32_bits(adev->vcn.gpu_addr + offset));
+               upper_32_bits(adev->vcn.inst->gpu_addr + offset));
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
 
        /* cache window 2: context */
        WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
-               lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
+               lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
        WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
-               upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
+               upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
 
@@ -436,10 +436,10 @@ static void vcn_v2_0_mc_resume_dpg_mode(struct 
amdgpu_device *adev, bool indirec
        } else {
                WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
                        UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
-                       lower_32_bits(adev->vcn.gpu_addr), 0, indirect);
+                       lower_32_bits(adev->vcn.inst->gpu_addr), 0, indirect);
                WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
                        UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
-                       upper_32_bits(adev->vcn.gpu_addr), 0, indirect);
+                       upper_32_bits(adev->vcn.inst->gpu_addr), 0, indirect);
                offset = size;
                WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
                        UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
@@ -457,10 +457,10 @@ static void vcn_v2_0_mc_resume_dpg_mode(struct 
amdgpu_device *adev, bool indirec
        if (!indirect) {
                WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
                        UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
-                       lower_32_bits(adev->vcn.gpu_addr + offset), 0, indirect);
+                       lower_32_bits(adev->vcn.inst->gpu_addr + offset), 0, indirect);
                WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
                        UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
-                       upper_32_bits(adev->vcn.gpu_addr + offset), 0, indirect);
+                       upper_32_bits(adev->vcn.inst->gpu_addr + offset), 0, indirect);
                WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
                        UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
        } else {
@@ -477,10 +477,10 @@ static void vcn_v2_0_mc_resume_dpg_mode(struct 
amdgpu_device *adev, bool indirec
        /* cache window 2: context */
        WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
                UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
-               lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
+               lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
        WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
                UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
-               upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
+               upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
        WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
                UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
        WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
@@ -668,7 +668,7 @@ static void vcn_v2_0_clock_gating_dpg_mode(struct 
amdgpu_device *adev,
  */
 static int jpeg_v2_0_start(struct amdgpu_device *adev)
 {
-       struct amdgpu_ring *ring = &adev->vcn.ring_jpeg;
+       struct amdgpu_ring *ring = &adev->vcn.inst->ring_jpeg;
        uint32_t tmp;
        int r = 0;
 
@@ -930,7 +930,7 @@ static void vcn_v2_0_enable_static_power_gating(struct 
amdgpu_device *adev)
 
 static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
 {
-       struct amdgpu_ring *ring = &adev->vcn.ring_dec;
+       struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
        uint32_t rb_bufsz, tmp;
 
        vcn_v2_0_enable_static_power_gating(adev);
@@ -1056,7 +1056,7 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device 
*adev, bool indirect)
 
 static int vcn_v2_0_start(struct amdgpu_device *adev)
 {
-       struct amdgpu_ring *ring = &adev->vcn.ring_dec;
+       struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
        uint32_t rb_bufsz, tmp;
        uint32_t lmi_swap_cntl;
        int i, j, r;
@@ -1207,14 +1207,14 @@ static int vcn_v2_0_start(struct amdgpu_device *adev)
        WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
                        lower_32_bits(ring->wptr));
 
-       ring = &adev->vcn.ring_enc[0];
+       ring = &adev->vcn.inst->ring_enc[0];
        WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
        WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
 
-       ring = &adev->vcn.ring_enc[1];
+       ring = &adev->vcn.inst->ring_enc[1];
        WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
@@ -1361,14 +1361,14 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device 
*adev,
                                           
UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);
 
                                /* Restore */
-                               ring = &adev->vcn.ring_enc[0];
+                               ring = &adev->vcn.inst->ring_enc[0];
                                WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
                                WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
                                WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
                                WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
                                WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
 
-                               ring = &adev->vcn.ring_enc[1];
+                               ring = &adev->vcn.inst->ring_enc[1];
                                WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
                                WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
                                WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
@@ -1660,7 +1660,7 @@ static uint64_t vcn_v2_0_enc_ring_get_rptr(struct 
amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
 
-       if (ring == &adev->vcn.ring_enc[0])
+       if (ring == &adev->vcn.inst->ring_enc[0])
                return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
        else
                return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
@@ -1677,7 +1677,7 @@ static uint64_t vcn_v2_0_enc_ring_get_wptr(struct 
amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
 
-       if (ring == &adev->vcn.ring_enc[0]) {
+       if (ring == &adev->vcn.inst->ring_enc[0]) {
                if (ring->use_doorbell)
                        return adev->wb.wb[ring->wptr_offs];
                else
@@ -1701,7 +1701,7 @@ static void vcn_v2_0_enc_ring_set_wptr(struct amdgpu_ring 
*ring)
 {
        struct amdgpu_device *adev = ring->adev;
 
-       if (ring == &adev->vcn.ring_enc[0]) {
+       if (ring == &adev->vcn.inst->ring_enc[0]) {
                if (ring->use_doorbell) {
                        adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
                        WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
@@ -2075,16 +2075,16 @@ static int vcn_v2_0_process_interrupt(struct 
amdgpu_device *adev,
 
        switch (entry->src_id) {
        case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
-               amdgpu_fence_process(&adev->vcn.ring_dec);
+               amdgpu_fence_process(&adev->vcn.inst->ring_dec);
                break;
        case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
-               amdgpu_fence_process(&adev->vcn.ring_enc[0]);
+               amdgpu_fence_process(&adev->vcn.inst->ring_enc[0]);
                break;
        case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
-               amdgpu_fence_process(&adev->vcn.ring_enc[1]);
+               amdgpu_fence_process(&adev->vcn.inst->ring_enc[1]);
                break;
        case VCN_2_0__SRCID__JPEG_DECODE:
-               amdgpu_fence_process(&adev->vcn.ring_jpeg);
+               amdgpu_fence_process(&adev->vcn.inst->ring_jpeg);
                break;
        default:
                DRM_ERROR("Unhandled interrupt: %d %d\n",
@@ -2233,7 +2233,7 @@ static const struct amdgpu_ring_funcs 
vcn_v2_0_jpeg_ring_vm_funcs = {
 
 static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev)
 {
-       adev->vcn.ring_dec.funcs = &vcn_v2_0_dec_ring_vm_funcs;
+       adev->vcn.inst->ring_dec.funcs = &vcn_v2_0_dec_ring_vm_funcs;
        DRM_INFO("VCN decode is enabled in VM mode\n");
 }
 
@@ -2242,14 +2242,14 @@ static void vcn_v2_0_set_enc_ring_funcs(struct 
amdgpu_device *adev)
        int i;
 
        for (i = 0; i < adev->vcn.num_enc_rings; ++i)
-               adev->vcn.ring_enc[i].funcs = &vcn_v2_0_enc_ring_vm_funcs;
+               adev->vcn.inst->ring_enc[i].funcs = &vcn_v2_0_enc_ring_vm_funcs;
 
        DRM_INFO("VCN encode is enabled in VM mode\n");
 }
 
 static void vcn_v2_0_set_jpeg_ring_funcs(struct amdgpu_device *adev)
 {
-       adev->vcn.ring_jpeg.funcs = &vcn_v2_0_jpeg_ring_vm_funcs;
+       adev->vcn.inst->ring_jpeg.funcs = &vcn_v2_0_jpeg_ring_vm_funcs;
        DRM_INFO("VCN jpeg decode is enabled in VM mode\n");
 }
 
@@ -2260,8 +2260,8 @@ static const struct amdgpu_irq_src_funcs 
vcn_v2_0_irq_funcs = {
 
 static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev)
 {
-       adev->vcn.irq.num_types = adev->vcn.num_enc_rings + 2;
-       adev->vcn.irq.funcs = &vcn_v2_0_irq_funcs;
+       adev->vcn.inst->irq.num_types = adev->vcn.num_enc_rings + 2;
+       adev->vcn.inst->irq.funcs = &vcn_v2_0_irq_funcs;
 }
 
 const struct amdgpu_ip_block_version vcn_v2_0_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 75fdb6881ac0..e27351267c9e 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -66,6 +66,7 @@ static int vcn_v2_5_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       adev->vcn.num_vcn_inst = 1;
        adev->vcn.num_enc_rings = 2;
 
        vcn_v2_5_set_dec_ring_funcs(adev);
@@ -91,21 +92,21 @@ static int vcn_v2_5_sw_init(void *handle)
 
        /* VCN DEC TRAP */
        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
-                       VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.irq);
+                       VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst[0].irq);
        if (r)
                return r;
 
        /* VCN ENC TRAP */
        for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
                r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
-                       i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.irq);
+                       i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[0].irq);
                if (r)
                        return r;
        }
 
        /* VCN JPEG TRAP */
        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
-                       VCN_2_0__SRCID__JPEG_DECODE, &adev->vcn.irq);
+                       VCN_2_0__SRCID__JPEG_DECODE, &adev->vcn.inst[0].irq);
        if (r)
                return r;
 
@@ -127,11 +128,11 @@ static int vcn_v2_5_sw_init(void *handle)
        if (r)
                return r;
 
-       ring = &adev->vcn.ring_dec;
+       ring = &adev->vcn.inst[0].ring_dec;
        ring->use_doorbell = true;
        ring->doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1 << 1;
        sprintf(ring->name, "vcn_dec");
-       r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
+       r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[0].irq, 0);
        if (r)
                return r;
 
@@ -143,36 +144,36 @@ static int vcn_v2_5_sw_init(void *handle)
        adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;
 
        adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
-       adev->vcn.external.scratch9 = SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9);
+       adev->vcn.inst[0].external.scratch9 = SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9);
        adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
-       adev->vcn.external.data0 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0);
+       adev->vcn.inst[0].external.data0 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0);
        adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
-       adev->vcn.external.data1 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1);
+       adev->vcn.inst[0].external.data1 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1);
        adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
-       adev->vcn.external.cmd = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD);
+       adev->vcn.inst[0].external.cmd = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD);
        adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
-       adev->vcn.external.nop = SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);
+       adev->vcn.inst[0].external.nop = SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);
 
        for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
-               ring = &adev->vcn.ring_enc[i];
+               ring = &adev->vcn.inst[0].ring_enc[i];
                ring->use_doorbell = true;
                ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i;
                sprintf(ring->name, "vcn_enc%d", i);
-               r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
+               r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[0].irq, 0);
                if (r)
                        return r;
        }
 
-       ring = &adev->vcn.ring_jpeg;
+       ring = &adev->vcn.inst[0].ring_jpeg;
        ring->use_doorbell = true;
        ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
        sprintf(ring->name, "vcn_jpeg");
-       r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
+       r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[0].irq, 0);
        if (r)
                return r;
 
        adev->vcn.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
-       adev->vcn.external.jpeg_pitch = SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH);
+       adev->vcn.inst[0].external.jpeg_pitch = SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH);
 
        return 0;
 }
@@ -208,7 +209,7 @@ static int vcn_v2_5_sw_fini(void *handle)
 static int vcn_v2_5_hw_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       struct amdgpu_ring *ring = &adev->vcn.ring_dec;
+       struct amdgpu_ring *ring = &adev->vcn.inst[0].ring_dec;
        int i, r;
 
        adev->nbio_funcs->vcn_doorbell_range(adev, ring->use_doorbell,
@@ -221,7 +222,7 @@ static int vcn_v2_5_hw_init(void *handle)
        }
 
        for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
-               ring = &adev->vcn.ring_enc[i];
+               ring = &adev->vcn.inst[0].ring_enc[i];
                ring->sched.ready = false;
                continue;
                r = amdgpu_ring_test_ring(ring);
@@ -231,7 +232,7 @@ static int vcn_v2_5_hw_init(void *handle)
                }
        }
 
-       ring = &adev->vcn.ring_jpeg;
+       ring = &adev->vcn.inst[0].ring_jpeg;
        r = amdgpu_ring_test_ring(ring);
        if (r) {
                ring->sched.ready = false;
@@ -255,7 +256,7 @@ static int vcn_v2_5_hw_init(void *handle)
 static int vcn_v2_5_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       struct amdgpu_ring *ring = &adev->vcn.ring_dec;
+       struct amdgpu_ring *ring = &adev->vcn.inst[0].ring_dec;
        int i;
 
        if (RREG32_SOC15(VCN, 0, mmUVD_STATUS))
@@ -264,11 +265,11 @@ static int vcn_v2_5_hw_fini(void *handle)
        ring->sched.ready = false;
 
        for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
-               ring = &adev->vcn.ring_enc[i];
+               ring = &adev->vcn.inst[0].ring_enc[i];
                ring->sched.ready = false;
        }
 
-       ring = &adev->vcn.ring_jpeg;
+       ring = &adev->vcn.inst[0].ring_jpeg;
        ring->sched.ready = false;
 
        return 0;
@@ -338,9 +339,9 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
                offset = 0;
        } else {
                WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
-                       lower_32_bits(adev->vcn.gpu_addr));
+                       lower_32_bits(adev->vcn.inst[0].gpu_addr));
                WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
-                       upper_32_bits(adev->vcn.gpu_addr));
+                       upper_32_bits(adev->vcn.inst[0].gpu_addr));
                offset = size;
                /* No signed header for now from firmware
                WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
@@ -352,17 +353,17 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
 
        /* cache window 1: stack */
        WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
-               lower_32_bits(adev->vcn.gpu_addr + offset));
+               lower_32_bits(adev->vcn.inst[0].gpu_addr + offset));
        WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
-               upper_32_bits(adev->vcn.gpu_addr + offset));
+               upper_32_bits(adev->vcn.inst[0].gpu_addr + offset));
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
 
        /* cache window 2: context */
        WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
-               lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
+               lower_32_bits(adev->vcn.inst[0].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
        WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
-               upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
+               upper_32_bits(adev->vcn.inst[0].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
 }
@@ -548,7 +549,7 @@ static void vcn_v2_5_enable_clock_gating(struct 
amdgpu_device *adev)
  */
 static int jpeg_v2_5_start(struct amdgpu_device *adev)
 {
-       struct amdgpu_ring *ring = &adev->vcn.ring_jpeg;
+       struct amdgpu_ring *ring = &adev->vcn.inst[0].ring_jpeg;
        uint32_t tmp;
 
        /* disable anti hang mechanism */
@@ -639,7 +640,7 @@ static int jpeg_v2_5_stop(struct amdgpu_device *adev)
 
 static int vcn_v2_5_start(struct amdgpu_device *adev)
 {
-       struct amdgpu_ring *ring = &adev->vcn.ring_dec;
+       struct amdgpu_ring *ring = &adev->vcn.inst[0].ring_dec;
        uint32_t rb_bufsz, tmp;
        int i, j, r;
 
@@ -781,14 +782,14 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
        ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
        WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
                        lower_32_bits(ring->wptr));
-       ring = &adev->vcn.ring_enc[0];
+       ring = &adev->vcn.inst[0].ring_enc[0];
        WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
        WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
 
-       ring = &adev->vcn.ring_enc[1];
+       ring = &adev->vcn.inst[0].ring_enc[1];
        WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
@@ -951,7 +952,7 @@ static uint64_t vcn_v2_5_enc_ring_get_rptr(struct 
amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
 
-       if (ring == &adev->vcn.ring_enc[0])
+       if (ring == &adev->vcn.inst[0].ring_enc[0])
                return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
        else
                return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
@@ -968,7 +969,7 @@ static uint64_t vcn_v2_5_enc_ring_get_wptr(struct 
amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
 
-       if (ring == &adev->vcn.ring_enc[0]) {
+       if (ring == &adev->vcn.inst[0].ring_enc[0]) {
                if (ring->use_doorbell)
                        return adev->wb.wb[ring->wptr_offs];
                else
@@ -992,7 +993,7 @@ static void vcn_v2_5_enc_ring_set_wptr(struct amdgpu_ring 
*ring)
 {
        struct amdgpu_device *adev = ring->adev;
 
-       if (ring == &adev->vcn.ring_enc[0]) {
+       if (ring == &adev->vcn.inst[0].ring_enc[0]) {
                if (ring->use_doorbell) {
                        adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
                        WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
@@ -1121,7 +1122,7 @@ static const struct amdgpu_ring_funcs 
vcn_v2_5_jpeg_ring_vm_funcs = {
 
 static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
 {
-       adev->vcn.ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs;
+       adev->vcn.inst[0].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs;
        DRM_INFO("VCN decode is enabled in VM mode\n");
 }
 
@@ -1130,14 +1131,14 @@ static void vcn_v2_5_set_enc_ring_funcs(struct 
amdgpu_device *adev)
        int i;
 
        for (i = 0; i < adev->vcn.num_enc_rings; ++i)
-               adev->vcn.ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs;
+               adev->vcn.inst[0].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs;
 
        DRM_INFO("VCN encode is enabled in VM mode\n");
 }
 
 static void vcn_v2_5_set_jpeg_ring_funcs(struct amdgpu_device *adev)
 {
-       adev->vcn.ring_jpeg.funcs = &vcn_v2_5_jpeg_ring_vm_funcs;
+       adev->vcn.inst[0].ring_jpeg.funcs = &vcn_v2_5_jpeg_ring_vm_funcs;
        DRM_INFO("VCN jpeg decode is enabled in VM mode\n");
 }
 
@@ -1212,16 +1213,16 @@ static int vcn_v2_5_process_interrupt(struct 
amdgpu_device *adev,
 
        switch (entry->src_id) {
        case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
-               amdgpu_fence_process(&adev->vcn.ring_dec);
+               amdgpu_fence_process(&adev->vcn.inst[0].ring_dec);
                break;
        case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
-               amdgpu_fence_process(&adev->vcn.ring_enc[0]);
+               amdgpu_fence_process(&adev->vcn.inst[0].ring_enc[0]);
                break;
        case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
-               amdgpu_fence_process(&adev->vcn.ring_enc[1]);
+               amdgpu_fence_process(&adev->vcn.inst[0].ring_enc[1]);
                break;
        case VCN_2_0__SRCID__JPEG_DECODE:
-               amdgpu_fence_process(&adev->vcn.ring_jpeg);
+               amdgpu_fence_process(&adev->vcn.inst[0].ring_jpeg);
                break;
        default:
                DRM_ERROR("Unhandled interrupt: %d %d\n",
@@ -1239,8 +1240,8 @@ static const struct amdgpu_irq_src_funcs 
vcn_v2_5_irq_funcs = {
 
 static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
 {
-       adev->vcn.irq.num_types = adev->vcn.num_enc_rings + 2;
-       adev->vcn.irq.funcs = &vcn_v2_5_irq_funcs;
+       adev->vcn.inst[0].irq.num_types = adev->vcn.num_enc_rings + 2;
+       adev->vcn.inst[0].irq.funcs = &vcn_v2_5_irq_funcs;
 }
 
 static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
-- 
2.20.1

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Reply via email to