On 09.10.2015 00:15, Alex Deucher wrote:
> Rework the sdma structures in the driver to
> consolidate all of the sdma info into a single
> structure and allow for asics that may have
> different numbers of sdma instances.
>
> Signed-off-by: Alex Deucher <alexander.deucher at amd.com>

Reviewed-by: Christian König <christian.koenig at amd.com>
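
For anyone skimming the diff, the core of the change is in the amdgpu.h hunk below: the
per-engine state keeps its own struct (renamed to amdgpu_sdma_instance) and a new wrapper
struct amdgpu_sdma gathers the instance array, the shared interrupt sources and the
instance count. A condensed sketch of the resulting layout (field details trimmed, see
the hunk for the authoritative version):

    /* per-engine state, formerly "struct amdgpu_sdma" */
    struct amdgpu_sdma_instance {
            const struct firmware   *fw;
            uint32_t                fw_version;
            struct amdgpu_ring      ring;
            bool                    burst_nop;
    };

    /* container consolidating all SDMA state in amdgpu_device */
    struct amdgpu_sdma {
            struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
            struct amdgpu_irq_src   trap_irq;
            struct amdgpu_irq_src   illegal_inst_irq;
            int                     num_instances;  /* set per ASIC in early_init */
    };

Callers then loop over the real instance count instead of assuming two engines, e.g.:

    for (i = 0; i < adev->sdma.num_instances; i++)
            amdgpu_ring_fini(&adev->sdma.instance[i].ring);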

> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu.h               |  22 +--
>   drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c |   4 +-
>   drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c |   4 +-
>   drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c            |   7 +-
>   drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c           |  10 +-
>   drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c          |   4 +-
>   drivers/gpu/drm/amd/amdgpu/cik_sdma.c             | 130 ++++++++---------
>   drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c            | 156 ++++++++++----------
>   drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c            | 166 +++++++++++-----------
>   9 files changed, 245 insertions(+), 258 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> index 6647fb2..afc9848 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> @@ -1708,7 +1708,7 @@ struct amdgpu_vce {
>   /*
>    * SDMA
>    */
> -struct amdgpu_sdma {
> +struct amdgpu_sdma_instance {
>       /* SDMA firmware */
>       const struct firmware   *fw;
>       uint32_t                fw_version;
> @@ -1718,6 +1718,13 @@ struct amdgpu_sdma {
>       bool                    burst_nop;
>   };
>   
> +struct amdgpu_sdma {
> +     struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
> +     struct amdgpu_irq_src   trap_irq;
> +     struct amdgpu_irq_src   illegal_inst_irq;
> +     int                     num_instances;
> +};
> +
>   /*
>    * Firmware
>    */
> @@ -2064,9 +2071,7 @@ struct amdgpu_device {
>       struct amdgpu_gfx               gfx;
>   
>       /* sdma */
> -     struct amdgpu_sdma              sdma[AMDGPU_MAX_SDMA_INSTANCES];
> -     struct amdgpu_irq_src           sdma_trap_irq;
> -     struct amdgpu_irq_src           sdma_illegal_inst_irq;
> +     struct amdgpu_sdma              sdma;
>   
>       /* uvd */
>       bool                            has_uvd;
> @@ -2203,17 +2208,18 @@ static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
>       ring->ring_free_dw--;
>   }
>   
> -static inline struct amdgpu_sdma * amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
> +static inline struct amdgpu_sdma_instance *
> +amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
>   {
>       struct amdgpu_device *adev = ring->adev;
>       int i;
>   
> -     for (i = 0; i < AMDGPU_MAX_SDMA_INSTANCES; i++)
> -             if (&adev->sdma[i].ring == ring)
> +     for (i = 0; i < adev->sdma.num_instances; i++)
> +             if (&adev->sdma.instance[i].ring == ring)
>                       break;
>   
>       if (i < AMDGPU_MAX_SDMA_INSTANCES)
> -             return &adev->sdma[i];
> +             return &adev->sdma.instance[i];
>       else
>               return NULL;
>   }
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
> index dd2037b..0e13763 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
> @@ -649,12 +649,12 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
>   
>       case KGD_ENGINE_SDMA1:
>               hdr = (const union amdgpu_firmware_header *)
> -                                                     adev->sdma[0].fw->data;
> +                                                     adev->sdma.instance[0].fw->data;
>               break;
>   
>       case KGD_ENGINE_SDMA2:
>               hdr = (const union amdgpu_firmware_header *)
> -                                                     adev->sdma[1].fw->data;
> +                                                     adev->sdma.instance[1].fw->data;
>               break;
>   
>       default:
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
> index dfd1d50..79fa5c7 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
> @@ -523,12 +523,12 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
>   
>       case KGD_ENGINE_SDMA1:
>               hdr = (const union amdgpu_firmware_header *)
> -                                                     adev->sdma[0].fw->data;
> +                                                     adev->sdma.instance[0].fw->data;
>               break;
>   
>       case KGD_ENGINE_SDMA2:
>               hdr = (const union amdgpu_firmware_header *)
> -                                                     adev->sdma[1].fw->data;
> +                                                     adev->sdma.instance[1].fw->data;
>               break;
>   
>       default:
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> index 749420f..29fc45c 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> @@ -104,10 +104,11 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
>               }
>               break;
>       case AMDGPU_HW_IP_DMA:
> -             if (ring < 2) {
> -                     *out_ring = &adev->sdma[ring].ring;
> +             if (ring < adev->sdma.num_instances) {
> +                     *out_ring = &adev->sdma.instance[ring].ring;
>               } else {
> -                     DRM_ERROR("only two SDMA rings are supported\n");
> +                     DRM_ERROR("only %d SDMA rings are supported\n",
> +                               adev->sdma.num_instances);
>                       return -EINVAL;
>               }
>               break;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> index 7823322..3f5f2d5 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> @@ -218,8 +218,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
>                       break;
>               case AMDGPU_HW_IP_DMA:
>                       type = AMD_IP_BLOCK_TYPE_SDMA;
> -                     ring_mask = adev->sdma[0].ring.ready ? 1 : 0;
> -                     ring_mask |= ((adev->sdma[1].ring.ready ? 1 : 0) << 1);
> +                     for (i = 0; i < adev->sdma.num_instances; i++)
> +                             ring_mask |= ((adev->sdma.instance[i].ring.ready ? 1 : 0) << i);
>                       ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
>                       ib_size_alignment = 1;
>                       break;
> @@ -341,10 +341,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
>                       fw_info.feature = 0;
>                       break;
>               case AMDGPU_INFO_FW_SDMA:
> -                     if (info->query_fw.index >= 2)
> +                     if (info->query_fw.index >= adev->sdma.num_instances)
>                               return -EINVAL;
> -                     fw_info.ver = adev->sdma[info->query_fw.index].fw_version;
> -                     fw_info.feature = adev->sdma[info->query_fw.index].feature_version;
> +                     fw_info.ver = adev->sdma.instance[info->query_fw.index].fw_version;
> +                     fw_info.feature = adev->sdma.instance[info->query_fw.index].feature_version;
>                       break;
>               default:
>                       return -EINVAL;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
> index 30dce23..b13a74b 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
> @@ -540,8 +540,8 @@ static int amdgpu_debugfs_ring_info(struct seq_file *m, void *data)
>   static int amdgpu_gfx_index = offsetof(struct amdgpu_device, gfx.gfx_ring[0]);
>   static int cayman_cp1_index = offsetof(struct amdgpu_device, gfx.compute_ring[0]);
>   static int cayman_cp2_index = offsetof(struct amdgpu_device, gfx.compute_ring[1]);
> -static int amdgpu_dma1_index = offsetof(struct amdgpu_device, sdma[0].ring);
> -static int amdgpu_dma2_index = offsetof(struct amdgpu_device, sdma[1].ring);
> +static int amdgpu_dma1_index = offsetof(struct amdgpu_device, sdma.instance[0].ring);
> +static int amdgpu_dma2_index = offsetof(struct amdgpu_device, sdma.instance[1].ring);
>   static int r600_uvd_index = offsetof(struct amdgpu_device, uvd.ring);
>   static int si_vce1_index = offsetof(struct amdgpu_device, vce.ring[0]);
>   static int si_vce2_index = offsetof(struct amdgpu_device, vce.ring[1]);
> diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
> index 9ea9de4..814598e 100644
> --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
> +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
> @@ -96,7 +96,7 @@ static int cik_sdma_init_microcode(struct amdgpu_device *adev)
>   {
>       const char *chip_name;
>       char fw_name[30];
> -     int err, i;
> +     int err = 0, i;
>   
>       DRM_DEBUG("\n");
>   
> @@ -119,24 +119,24 @@ static int cik_sdma_init_microcode(struct amdgpu_device *adev)
>       default: BUG();
>       }
>   
> -     for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
> +     for (i = 0; i < adev->sdma.num_instances; i++) {
>               if (i == 0)
>                       snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
>               else
>                       snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma1.bin", chip_name);
> -             err = request_firmware(&adev->sdma[i].fw, fw_name, adev->dev);
> +             err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
>               if (err)
>                       goto out;
> -             err = amdgpu_ucode_validate(adev->sdma[i].fw);
> +             err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
>       }
>   out:
>       if (err) {
>               printk(KERN_ERR
>                      "cik_sdma: Failed to load firmware \"%s\"\n",
>                      fw_name);
> -             for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
> -                     release_firmware(adev->sdma[i].fw);
> -                     adev->sdma[i].fw = NULL;
> +             for (i = 0; i < adev->sdma.num_instances; i++) {
> +                     release_firmware(adev->sdma.instance[i].fw);
> +                     adev->sdma.instance[i].fw = NULL;
>               }
>       }
>       return err;
> @@ -168,7 +168,7 @@ static uint32_t cik_sdma_ring_get_rptr(struct amdgpu_ring *ring)
>   static uint32_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
>   {
>       struct amdgpu_device *adev = ring->adev;
> -     u32 me = (ring == &adev->sdma[0].ring) ? 0 : 1;
> +     u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
>   
>       return (RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
>   }
> @@ -183,14 +183,14 @@ static uint32_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
>   static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring)
>   {
>       struct amdgpu_device *adev = ring->adev;
> -     u32 me = (ring == &adev->sdma[0].ring) ? 0 : 1;
> +     u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
>   
>       WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc);
>   }
>   
>   static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
>   {
> -     struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ring);
> +     struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
>       int i;
>   
>       for (i = 0; i < count; i++)
> @@ -248,7 +248,7 @@ static void cik_sdma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
>                         SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
>       u32 ref_and_mask;
>   
> -     if (ring == &ring->adev->sdma[0].ring)
> +     if (ring == &ring->adev->sdma.instance[0].ring)
>               ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA0_MASK;
>       else
>               ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA1_MASK;
> @@ -327,8 +327,8 @@ static bool cik_sdma_ring_emit_semaphore(struct amdgpu_ring *ring,
>    */
>   static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
>   {
> -     struct amdgpu_ring *sdma0 = &adev->sdma[0].ring;
> -     struct amdgpu_ring *sdma1 = &adev->sdma[1].ring;
> +     struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
> +     struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
>       u32 rb_cntl;
>       int i;
>   
> @@ -336,7 +336,7 @@ static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
>           (adev->mman.buffer_funcs_ring == sdma1))
>               amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
>   
> -     for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
> +     for (i = 0; i < adev->sdma.num_instances; i++) {
>               rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
>               rb_cntl &= ~SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK;
>               WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
> @@ -376,7 +376,7 @@ static void cik_sdma_enable(struct amdgpu_device *adev, bool enable)
>               cik_sdma_rlc_stop(adev);
>       }
>   
> -     for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
> +     for (i = 0; i < adev->sdma.num_instances; i++) {
>               me_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
>               if (enable)
>                       me_cntl &= ~SDMA0_F32_CNTL__HALT_MASK;
> @@ -402,8 +402,8 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
>       u32 wb_offset;
>       int i, j, r;
>   
> -     for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
> -             ring = &adev->sdma[i].ring;
> +     for (i = 0; i < adev->sdma.num_instances; i++) {
> +             ring = &adev->sdma.instance[i].ring;
>               wb_offset = (ring->rptr_offs * 4);
>   
>               mutex_lock(&adev->srbm_mutex);
> @@ -502,26 +502,25 @@ static int cik_sdma_load_microcode(struct amdgpu_device *adev)
>       u32 fw_size;
>       int i, j;
>   
> -     if (!adev->sdma[0].fw || !adev->sdma[1].fw)
> -             return -EINVAL;
> -
>       /* halt the MEs */
>       cik_sdma_enable(adev, false);
>   
> -     for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
> -             hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
> +     for (i = 0; i < adev->sdma.num_instances; i++) {
> +             if (!adev->sdma.instance[i].fw)
> +                     return -EINVAL;
> +             hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
>               amdgpu_ucode_print_sdma_hdr(&hdr->header);
>               fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
> -             adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
> -             adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
> -             if (adev->sdma[i].feature_version >= 20)
> -                     adev->sdma[i].burst_nop = true;
> +             adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
> +             adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
> +             if (adev->sdma.instance[i].feature_version >= 20)
> +                     adev->sdma.instance[i].burst_nop = true;
>               fw_data = (const __le32 *)
> -                     (adev->sdma[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
> +                     (adev->sdma.instance[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
>               WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
>               for (j = 0; j < fw_size; j++)
>                       WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
> -             WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma[i].fw_version);
> +             WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
>       }
>   
>       return 0;
> @@ -830,7 +829,7 @@ static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib,
>    */
>   static void cik_sdma_vm_pad_ib(struct amdgpu_ib *ib)
>   {
> -     struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ib->ring);
> +     struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ib->ring);
>       u32 pad_count;
>       int i;
>   
> @@ -934,6 +933,8 @@ static int cik_sdma_early_init(void *handle)
>   {
>       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
>   
> +     adev->sdma.num_instances = SDMA_MAX_INSTANCE;
> +
>       cik_sdma_set_ring_funcs(adev);
>       cik_sdma_set_irq_funcs(adev);
>       cik_sdma_set_buffer_funcs(adev);
> @@ -946,7 +947,7 @@ static int cik_sdma_sw_init(void *handle)
>   {
>       struct amdgpu_ring *ring;
>       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
> -     int r;
> +     int r, i;
>   
>       r = cik_sdma_init_microcode(adev);
>       if (r) {
> @@ -955,43 +956,33 @@ static int cik_sdma_sw_init(void *handle)
>       }
>   
>       /* SDMA trap event */
> -     r = amdgpu_irq_add_id(adev, 224, &adev->sdma_trap_irq);
> +     r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
>       if (r)
>               return r;
>   
>       /* SDMA Privileged inst */
> -     r = amdgpu_irq_add_id(adev, 241, &adev->sdma_illegal_inst_irq);
> +     r = amdgpu_irq_add_id(adev, 241, &adev->sdma.illegal_inst_irq);
>       if (r)
>               return r;
>   
>       /* SDMA Privileged inst */
> -     r = amdgpu_irq_add_id(adev, 247, &adev->sdma_illegal_inst_irq);
> -     if (r)
> -             return r;
> -
> -     ring = &adev->sdma[0].ring;
> -     ring->ring_obj = NULL;
> -
> -     ring = &adev->sdma[1].ring;
> -     ring->ring_obj = NULL;
> -
> -     ring = &adev->sdma[0].ring;
> -     sprintf(ring->name, "sdma0");
> -     r = amdgpu_ring_init(adev, ring, 256 * 1024,
> -                          SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
> -                          &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP0,
> -                          AMDGPU_RING_TYPE_SDMA);
> +     r = amdgpu_irq_add_id(adev, 247, &adev->sdma.illegal_inst_irq);
>       if (r)
>               return r;
>   
> -     ring = &adev->sdma[1].ring;
> -     sprintf(ring->name, "sdma1");
> -     r = amdgpu_ring_init(adev, ring, 256 * 1024,
> -                          SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
> -                          &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP1,
> -                          AMDGPU_RING_TYPE_SDMA);
> -     if (r)
> -             return r;
> +     for (i = 0; i < adev->sdma.num_instances; i++) {
> +             ring = &adev->sdma.instance[i].ring;
> +             ring->ring_obj = NULL;
> +             sprintf(ring->name, "sdma%d", i);
> +             r = amdgpu_ring_init(adev, ring, 256 * 1024,
> +                                  SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
> +                                  &adev->sdma.trap_irq,
> +                                  (i == 0) ?
> +                                  AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
> +                                  AMDGPU_RING_TYPE_SDMA);
> +             if (r)
> +                     return r;
> +     }
>   
>       return r;
>   }
> @@ -999,9 +990,10 @@ static int cik_sdma_sw_init(void *handle)
>   static int cik_sdma_sw_fini(void *handle)
>   {
>       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
> +     int i;
>   
> -     amdgpu_ring_fini(&adev->sdma[0].ring);
> -     amdgpu_ring_fini(&adev->sdma[1].ring);
> +     for (i = 0; i < adev->sdma.num_instances; i++)
> +             amdgpu_ring_fini(&adev->sdma.instance[i].ring);
>   
>       return 0;
>   }
> @@ -1078,7 +1070,7 @@ static void cik_sdma_print_status(void *handle)
>       dev_info(adev->dev, "CIK SDMA registers\n");
>       dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
>                RREG32(mmSRBM_STATUS2));
> -     for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
> +     for (i = 0; i < adev->sdma.num_instances; i++) {
>               dev_info(adev->dev, "  SDMA%d_STATUS_REG=0x%08X\n",
>                        i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
>               dev_info(adev->dev, "  SDMA%d_ME_CNTL=0x%08X\n",
> @@ -1223,7 +1215,7 @@ static int cik_sdma_process_trap_irq(struct amdgpu_device *adev,
>       case 0:
>               switch (queue_id) {
>               case 0:
> -                     amdgpu_fence_process(&adev->sdma[0].ring);
> +                     amdgpu_fence_process(&adev->sdma.instance[0].ring);
>                       break;
>               case 1:
>                       /* XXX compute */
> @@ -1236,7 +1228,7 @@ static int cik_sdma_process_trap_irq(struct amdgpu_device *adev,
>       case 1:
>               switch (queue_id) {
>               case 0:
> -                     amdgpu_fence_process(&adev->sdma[1].ring);
> +                     amdgpu_fence_process(&adev->sdma.instance[1].ring);
>                       break;
>               case 1:
>                       /* XXX compute */
> @@ -1334,8 +1326,10 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
>   
>   static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
>   {
> -     adev->sdma[0].ring.funcs = &cik_sdma_ring_funcs;
> -     adev->sdma[1].ring.funcs = &cik_sdma_ring_funcs;
> +     int i;
> +
> +     for (i = 0; i < adev->sdma.num_instances; i++)
> +             adev->sdma.instance[i].ring.funcs = &cik_sdma_ring_funcs;
>   }
>   
>   static const struct amdgpu_irq_src_funcs cik_sdma_trap_irq_funcs = {
> @@ -1349,9 +1343,9 @@ static const struct amdgpu_irq_src_funcs cik_sdma_illegal_inst_irq_funcs = {
>   
>   static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev)
>   {
> -     adev->sdma_trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
> -     adev->sdma_trap_irq.funcs = &cik_sdma_trap_irq_funcs;
> -     adev->sdma_illegal_inst_irq.funcs = &cik_sdma_illegal_inst_irq_funcs;
> +     adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
> +     adev->sdma.trap_irq.funcs = &cik_sdma_trap_irq_funcs;
> +     adev->sdma.illegal_inst_irq.funcs = &cik_sdma_illegal_inst_irq_funcs;
>   }
>   
>   /**
> @@ -1416,7 +1410,7 @@ static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev)
>   {
>       if (adev->mman.buffer_funcs == NULL) {
>               adev->mman.buffer_funcs = &cik_sdma_buffer_funcs;
> -             adev->mman.buffer_funcs_ring = &adev->sdma[0].ring;
> +             adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
>       }
>   }
>   
> @@ -1431,7 +1425,7 @@ static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
>   {
>       if (adev->vm_manager.vm_pte_funcs == NULL) {
>               adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
> -             adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring;
> +             adev->vm_manager.vm_pte_funcs_ring = &adev->sdma.instance[0].ring;
>               adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
>       }
>   }
> diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
> index 14e8723..f8b868c 100644
> --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
> +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
> @@ -118,7 +118,7 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
>   {
>       const char *chip_name;
>       char fw_name[30];
> -     int err, i;
> +     int err = 0, i;
>       struct amdgpu_firmware_info *info = NULL;
>       const struct common_firmware_header *header = NULL;
>       const struct sdma_firmware_header_v1_0 *hdr;
> @@ -132,27 +132,27 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
>       default: BUG();
>       }
>   
> -     for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
> +     for (i = 0; i < adev->sdma.num_instances; i++) {
>               if (i == 0)
>                       snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
>               else
>                       snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
> -             err = request_firmware(&adev->sdma[i].fw, fw_name, adev->dev);
> +             err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
>               if (err)
>                       goto out;
> -             err = amdgpu_ucode_validate(adev->sdma[i].fw);
> +             err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
>               if (err)
>                       goto out;
> -             hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
> -             adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
> -             adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
> -             if (adev->sdma[i].feature_version >= 20)
> -                     adev->sdma[i].burst_nop = true;
> +             hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
> +             adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
> +             adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
> +             if (adev->sdma.instance[i].feature_version >= 20)
> +                     adev->sdma.instance[i].burst_nop = true;
>   
>               if (adev->firmware.smu_load) {
>                       info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
>                       info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
> -                     info->fw = adev->sdma[i].fw;
> +                     info->fw = adev->sdma.instance[i].fw;
>                       header = (const struct common_firmware_header *)info->fw->data;
>                       adev->firmware.fw_size +=
>                               ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
> @@ -164,9 +164,9 @@ out:
>               printk(KERN_ERR
>                      "sdma_v2_4: Failed to load firmware \"%s\"\n",
>                      fw_name);
> -             for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
> -                     release_firmware(adev->sdma[i].fw);
> -                     adev->sdma[i].fw = NULL;
> +             for (i = 0; i < adev->sdma.num_instances; i++) {
> +                     release_firmware(adev->sdma.instance[i].fw);
> +                     adev->sdma.instance[i].fw = NULL;
>               }
>       }
>       return err;
> @@ -199,7 +199,7 @@ static uint32_t sdma_v2_4_ring_get_rptr(struct amdgpu_ring *ring)
>   static uint32_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring)
>   {
>       struct amdgpu_device *adev = ring->adev;
> -     int me = (ring == &ring->adev->sdma[0].ring) ? 0 : 1;
> +     int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
>       u32 wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2;
>   
>       return wptr;
> @@ -215,14 +215,14 @@ static uint32_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring)
>   static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring)
>   {
>       struct amdgpu_device *adev = ring->adev;
> -     int me = (ring == &ring->adev->sdma[0].ring) ? 0 : 1;
> +     int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
>   
>       WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], ring->wptr << 2);
>   }
>   
>   static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
>   {
> -     struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ring);
> +     struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
>       int i;
>   
>       for (i = 0; i < count; i++)
> @@ -284,7 +284,7 @@ static void sdma_v2_4_ring_emit_hdp_flush(struct amdgpu_ring *ring)
>   {
>       u32 ref_and_mask = 0;
>   
> -     if (ring == &ring->adev->sdma[0].ring)
> +     if (ring == &ring->adev->sdma.instance[0].ring)
>               ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
>       else
>               ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);
> @@ -368,8 +368,8 @@ static bool sdma_v2_4_ring_emit_semaphore(struct amdgpu_ring *ring,
>    */
>   static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
>   {
> -     struct amdgpu_ring *sdma0 = &adev->sdma[0].ring;
> -     struct amdgpu_ring *sdma1 = &adev->sdma[1].ring;
> +     struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
> +     struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
>       u32 rb_cntl, ib_cntl;
>       int i;
>   
> @@ -377,7 +377,7 @@ static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
>           (adev->mman.buffer_funcs_ring == sdma1))
>               amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
>   
> -     for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
> +     for (i = 0; i < adev->sdma.num_instances; i++) {
>               rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
>               rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
>               WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
> @@ -419,7 +419,7 @@ static void sdma_v2_4_enable(struct amdgpu_device *adev, bool enable)
>               sdma_v2_4_rlc_stop(adev);
>       }
>   
> -     for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
> +     for (i = 0; i < adev->sdma.num_instances; i++) {
>               f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
>               if (enable)
>                       f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0);
> @@ -445,8 +445,8 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
>       u32 wb_offset;
>       int i, j, r;
>   
> -     for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
> -             ring = &adev->sdma[i].ring;
> +     for (i = 0; i < adev->sdma.num_instances; i++) {
> +             ring = &adev->sdma.instance[i].ring;
>               wb_offset = (ring->rptr_offs * 4);
>   
>               mutex_lock(&adev->srbm_mutex);
> @@ -545,29 +545,23 @@ static int sdma_v2_4_load_microcode(struct amdgpu_device *adev)
>       const __le32 *fw_data;
>       u32 fw_size;
>       int i, j;
> -     bool smc_loads_fw = false; /* XXX fix me */
> -
> -     if (!adev->sdma[0].fw || !adev->sdma[1].fw)
> -             return -EINVAL;
>   
>       /* halt the MEs */
>       sdma_v2_4_enable(adev, false);
>   
> -     if (smc_loads_fw) {
> -             /* XXX query SMC for fw load complete */
> -     } else {
> -             for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
> -                     hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
> -                     amdgpu_ucode_print_sdma_hdr(&hdr->header);
> -                     fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
> -                     fw_data = (const __le32 *)
> -                             (adev->sdma[i].fw->data +
> -                              le32_to_cpu(hdr->header.ucode_array_offset_bytes));
> -                     WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
> -                     for (j = 0; j < fw_size; j++)
> -                             WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
> -                     WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma[i].fw_version);
> -             }
> +     for (i = 0; i < adev->sdma.num_instances; i++) {
> +             if (!adev->sdma.instance[i].fw)
> +                     return -EINVAL;
> +             hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
> +             amdgpu_ucode_print_sdma_hdr(&hdr->header);
> +             fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
> +             fw_data = (const __le32 *)
> +                     (adev->sdma.instance[i].fw->data +
> +                      le32_to_cpu(hdr->header.ucode_array_offset_bytes));
> +             WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
> +             for (j = 0; j < fw_size; j++)
> +                     WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
> +             WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
>       }
>   
>       return 0;
> @@ -894,7 +888,7 @@ static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib,
>    */
>   static void sdma_v2_4_vm_pad_ib(struct amdgpu_ib *ib)
>   {
> -     struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ib->ring);
> +     struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ib->ring);
>       u32 pad_count;
>       int i;
>   
> @@ -952,6 +946,8 @@ static int sdma_v2_4_early_init(void *handle)
>   {
>       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
>   
> +     adev->sdma.num_instances = SDMA_MAX_INSTANCE;
> +
>       sdma_v2_4_set_ring_funcs(adev);
>       sdma_v2_4_set_buffer_funcs(adev);
>       sdma_v2_4_set_vm_pte_funcs(adev);
> @@ -963,21 +959,21 @@ static int sdma_v2_4_early_init(void *handle)
>   static int sdma_v2_4_sw_init(void *handle)
>   {
>       struct amdgpu_ring *ring;
> -     int r;
> +     int r, i;
>       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
>   
>       /* SDMA trap event */
> -     r = amdgpu_irq_add_id(adev, 224, &adev->sdma_trap_irq);
> +     r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
>       if (r)
>               return r;
>   
>       /* SDMA Privileged inst */
> -     r = amdgpu_irq_add_id(adev, 241, &adev->sdma_illegal_inst_irq);
> +     r = amdgpu_irq_add_id(adev, 241, &adev->sdma.illegal_inst_irq);
>       if (r)
>               return r;
>   
>       /* SDMA Privileged inst */
> -     r = amdgpu_irq_add_id(adev, 247, &adev->sdma_illegal_inst_irq);
> +     r = amdgpu_irq_add_id(adev, 247, &adev->sdma.illegal_inst_irq);
>       if (r)
>               return r;
>   
> @@ -987,31 +983,20 @@ static int sdma_v2_4_sw_init(void *handle)
>               return r;
>       }
>   
> -     ring = &adev->sdma[0].ring;
> -     ring->ring_obj = NULL;
> -     ring->use_doorbell = false;
> -
> -     ring = &adev->sdma[1].ring;
> -     ring->ring_obj = NULL;
> -     ring->use_doorbell = false;
> -
> -     ring = &adev->sdma[0].ring;
> -     sprintf(ring->name, "sdma0");
> -     r = amdgpu_ring_init(adev, ring, 256 * 1024,
> -                          SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
> -                          &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP0,
> -                          AMDGPU_RING_TYPE_SDMA);
> -     if (r)
> -             return r;
> -
> -     ring = &adev->sdma[1].ring;
> -     sprintf(ring->name, "sdma1");
> -     r = amdgpu_ring_init(adev, ring, 256 * 1024,
> -                          SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
> -                          &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP1,
> -                          AMDGPU_RING_TYPE_SDMA);
> -     if (r)
> -             return r;
> +     for (i = 0; i < adev->sdma.num_instances; i++) {
> +             ring = &adev->sdma.instance[i].ring;
> +             ring->ring_obj = NULL;
> +             ring->use_doorbell = false;
> +             sprintf(ring->name, "sdma%d", i);
> +             r = amdgpu_ring_init(adev, ring, 256 * 1024,
> +                                  SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
> +                                  &adev->sdma.trap_irq,
> +                                  (i == 0) ?
> +                                  AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
> +                                  AMDGPU_RING_TYPE_SDMA);
> +             if (r)
> +                     return r;
> +     }
>   
>       return r;
>   }
> @@ -1019,9 +1004,10 @@ static int sdma_v2_4_sw_init(void *handle)
>   static int sdma_v2_4_sw_fini(void *handle)
>   {
>       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
> +     int i;
>   
> -     amdgpu_ring_fini(&adev->sdma[0].ring);
> -     amdgpu_ring_fini(&adev->sdma[1].ring);
> +     for (i = 0; i < adev->sdma.num_instances; i++)
> +             amdgpu_ring_fini(&adev->sdma.instance[i].ring);
>   
>       return 0;
>   }
> @@ -1100,7 +1086,7 @@ static void sdma_v2_4_print_status(void *handle)
>       dev_info(adev->dev, "VI SDMA registers\n");
>       dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
>                RREG32(mmSRBM_STATUS2));
> -     for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
> +     for (i = 0; i < adev->sdma.num_instances; i++) {
>               dev_info(adev->dev, "  SDMA%d_STATUS_REG=0x%08X\n",
>                        i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
>               dev_info(adev->dev, "  SDMA%d_F32_CNTL=0x%08X\n",
> @@ -1243,7 +1229,7 @@ static int sdma_v2_4_process_trap_irq(struct amdgpu_device *adev,
>       case 0:
>               switch (queue_id) {
>               case 0:
> -                     amdgpu_fence_process(&adev->sdma[0].ring);
> +                     amdgpu_fence_process(&adev->sdma.instance[0].ring);
>                       break;
>               case 1:
>                       /* XXX compute */
> @@ -1256,7 +1242,7 @@ static int sdma_v2_4_process_trap_irq(struct amdgpu_device *adev,
>       case 1:
>               switch (queue_id) {
>               case 0:
> -                     amdgpu_fence_process(&adev->sdma[1].ring);
> +                     amdgpu_fence_process(&adev->sdma.instance[1].ring);
>                       break;
>               case 1:
>                       /* XXX compute */
> @@ -1345,8 +1331,10 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
>   
>   static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
>   {
> -     adev->sdma[0].ring.funcs = &sdma_v2_4_ring_funcs;
> -     adev->sdma[1].ring.funcs = &sdma_v2_4_ring_funcs;
> +     int i;
> +
> +     for (i = 0; i < adev->sdma.num_instances; i++)
> +             adev->sdma.instance[i].ring.funcs = &sdma_v2_4_ring_funcs;
>   }
>   
>   static const struct amdgpu_irq_src_funcs sdma_v2_4_trap_irq_funcs = {
> @@ -1360,9 +1348,9 @@ static const struct amdgpu_irq_src_funcs sdma_v2_4_illegal_inst_irq_funcs = {
>   
>   static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev)
>   {
> -     adev->sdma_trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
> -     adev->sdma_trap_irq.funcs = &sdma_v2_4_trap_irq_funcs;
> -     adev->sdma_illegal_inst_irq.funcs = &sdma_v2_4_illegal_inst_irq_funcs;
> +     adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
> +     adev->sdma.trap_irq.funcs = &sdma_v2_4_trap_irq_funcs;
> +     adev->sdma.illegal_inst_irq.funcs = &sdma_v2_4_illegal_inst_irq_funcs;
>   }
>   
>   /**
> @@ -1428,7 +1416,7 @@ static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev)
>   {
>       if (adev->mman.buffer_funcs == NULL) {
>               adev->mman.buffer_funcs = &sdma_v2_4_buffer_funcs;
> -             adev->mman.buffer_funcs_ring = &adev->sdma[0].ring;
> +             adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
>       }
>   }
>   
> @@ -1443,7 +1431,7 @@ static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
>   {
>       if (adev->vm_manager.vm_pte_funcs == NULL) {
>               adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
> -             adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring;
> +             adev->vm_manager.vm_pte_funcs_ring = &adev->sdma.instance[0].ring;
>               adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
>       }
>   }
> diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
> index 9bfe92d..670555a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
> @@ -184,7 +184,7 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
>   {
>       const char *chip_name;
>       char fw_name[30];
> -     int err, i;
> +     int err = 0, i;
>       struct amdgpu_firmware_info *info = NULL;
>       const struct common_firmware_header *header = NULL;
>       const struct sdma_firmware_header_v1_0 *hdr;
> @@ -204,27 +204,27 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
>       default: BUG();
>       }
>   
> -     for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
> +     for (i = 0; i < adev->sdma.num_instances; i++) {
>               if (i == 0)
>                       snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
>               else
>                       snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
> -             err = request_firmware(&adev->sdma[i].fw, fw_name, adev->dev);
> +             err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
>               if (err)
>                       goto out;
> -             err = amdgpu_ucode_validate(adev->sdma[i].fw);
> +             err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
>               if (err)
>                       goto out;
> -             hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
> -             adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
> -             adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
> -             if (adev->sdma[i].feature_version >= 20)
> -                     adev->sdma[i].burst_nop = true;
> +             hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
> +             adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
> +             adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
> +             if (adev->sdma.instance[i].feature_version >= 20)
> +                     adev->sdma.instance[i].burst_nop = true;
>   
>               if (adev->firmware.smu_load) {
>                       info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
>                       info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
> -                     info->fw = adev->sdma[i].fw;
> +                     info->fw = adev->sdma.instance[i].fw;
>                       header = (const struct common_firmware_header *)info->fw->data;
>                       adev->firmware.fw_size +=
>                               ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
> @@ -235,9 +235,9 @@ out:
>               printk(KERN_ERR
>                      "sdma_v3_0: Failed to load firmware \"%s\"\n",
>                      fw_name);
> -             for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
> -                     release_firmware(adev->sdma[i].fw);
> -                     adev->sdma[i].fw = NULL;
> +             for (i = 0; i < adev->sdma.num_instances; i++) {
> +                     release_firmware(adev->sdma.instance[i].fw);
> +                     adev->sdma.instance[i].fw = NULL;
>               }
>       }
>       return err;
> @@ -276,7 +276,7 @@ static uint32_t sdma_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
>               /* XXX check if swapping is necessary on BE */
>               wptr = ring->adev->wb.wb[ring->wptr_offs] >> 2;
>       } else {
> -             int me = (ring == &ring->adev->sdma[0].ring) ? 0 : 1;
> +             int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
>   
>               wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2;
>       }
> @@ -300,7 +300,7 @@ static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
>               adev->wb.wb[ring->wptr_offs] = ring->wptr << 2;
>               WDOORBELL32(ring->doorbell_index, ring->wptr << 2);
>       } else {
> -             int me = (ring == &ring->adev->sdma[0].ring) ? 0 : 1;
> +             int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
>   
>               WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], ring->wptr << 2);
>       }
> @@ -308,7 +308,7 @@ static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
>   
>   static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
>   {
> -     struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ring);
> +     struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
>       int i;
>   
>       for (i = 0; i < count; i++)
> @@ -369,7 +369,7 @@ static void sdma_v3_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
>   {
>       u32 ref_and_mask = 0;
>   
> -     if (ring == &ring->adev->sdma[0].ring)
> +     if (ring == &ring->adev->sdma.instance[0].ring)
>               ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
>       else
>               ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);
> @@ -454,8 +454,8 @@ static bool sdma_v3_0_ring_emit_semaphore(struct amdgpu_ring *ring,
>    */
>   static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
>   {
> -     struct amdgpu_ring *sdma0 = &adev->sdma[0].ring;
> -     struct amdgpu_ring *sdma1 = &adev->sdma[1].ring;
> +     struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
> +     struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
>       u32 rb_cntl, ib_cntl;
>       int i;
>   
> @@ -463,7 +463,7 @@ static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
>           (adev->mman.buffer_funcs_ring == sdma1))
>               amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
>   
> -     for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
> +     for (i = 0; i < adev->sdma.num_instances; i++) {
>               rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
>               rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
>               WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
> @@ -500,7 +500,7 @@ static void sdma_v3_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
>       u32 f32_cntl;
>       int i;
>   
> -     for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
> +     for (i = 0; i < adev->sdma.num_instances; i++) {
>               f32_cntl = RREG32(mmSDMA0_CNTL + sdma_offsets[i]);
>               if (enable)
>                       f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
> @@ -530,7 +530,7 @@ static void sdma_v3_0_enable(struct amdgpu_device *adev, bool enable)
>               sdma_v3_0_rlc_stop(adev);
>       }
>   
> -     for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
> +     for (i = 0; i < adev->sdma.num_instances; i++) {
>               f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
>               if (enable)
>                       f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0);
> @@ -557,8 +557,8 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
>       u32 doorbell;
>       int i, j, r;
>   
> -     for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
> -             ring = &adev->sdma[i].ring;
> +     for (i = 0; i < adev->sdma.num_instances; i++) {
> +             ring = &adev->sdma.instance[i].ring;
>               wb_offset = (ring->rptr_offs * 4);
>   
>               mutex_lock(&adev->srbm_mutex);
> @@ -669,23 +669,22 @@ static int sdma_v3_0_load_microcode(struct amdgpu_device *adev)
>       u32 fw_size;
>       int i, j;
>   
> -     if (!adev->sdma[0].fw || !adev->sdma[1].fw)
> -             return -EINVAL;
> -
>       /* halt the MEs */
>       sdma_v3_0_enable(adev, false);
>   
> -     for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
> -             hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
> +     for (i = 0; i < adev->sdma.num_instances; i++) {
> +             if (!adev->sdma.instance[i].fw)
> +                     return -EINVAL;
> +             hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
>               amdgpu_ucode_print_sdma_hdr(&hdr->header);
>               fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
>               fw_data = (const __le32 *)
> -                     (adev->sdma[i].fw->data +
> +                     (adev->sdma.instance[i].fw->data +
>                               le32_to_cpu(hdr->header.ucode_array_offset_bytes));
>               WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
>               for (j = 0; j < fw_size; j++)
>                       WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
> -             WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma[i].fw_version);
> +             WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
>       }
>   
>       return 0;
> @@ -701,21 +700,21 @@ static int sdma_v3_0_load_microcode(struct amdgpu_device *adev)
>    */
>   static int sdma_v3_0_start(struct amdgpu_device *adev)
>   {
> -     int r;
> +     int r, i;
>   
>       if (!adev->firmware.smu_load) {
>               r = sdma_v3_0_load_microcode(adev);
>               if (r)
>                       return r;
>       } else {
> -             r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
> -                                             AMDGPU_UCODE_ID_SDMA0);
> -             if (r)
> -                     return -EINVAL;
> -             r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
> -                                             AMDGPU_UCODE_ID_SDMA1);
> -             if (r)
> -                     return -EINVAL;
> +             for (i = 0; i < adev->sdma.num_instances; i++) {
> +                     r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
> +                                                                      (i == 0) ?
> +                                                                      AMDGPU_UCODE_ID_SDMA0 :
> +                                                                      AMDGPU_UCODE_ID_SDMA1);
> +                     if (r)
> +                             return -EINVAL;
> +             }
>       }
>   
>       /* unhalt the MEs */
> @@ -1013,7 +1012,7 @@ static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib,
>    */
>   static void sdma_v3_0_vm_pad_ib(struct amdgpu_ib *ib)
>   {
> -     struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ib->ring);
> +     struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ib->ring);
>       u32 pad_count;
>       int i;
>   
> @@ -1071,6 +1070,12 @@ static int sdma_v3_0_early_init(void *handle)
>   {
>       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
>   
> +     switch (adev->asic_type) {
> +     default:
> +             adev->sdma.num_instances = SDMA_MAX_INSTANCE;
> +             break;
> +     }
> +
>       sdma_v3_0_set_ring_funcs(adev);
>       sdma_v3_0_set_buffer_funcs(adev);
>       sdma_v3_0_set_vm_pte_funcs(adev);
> @@ -1082,21 +1087,21 @@ static int sdma_v3_0_early_init(void *handle)
>   static int sdma_v3_0_sw_init(void *handle)
>   {
>       struct amdgpu_ring *ring;
> -     int r;
> +     int r, i;
>       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
>   
>       /* SDMA trap event */
> -     r = amdgpu_irq_add_id(adev, 224, &adev->sdma_trap_irq);
> +     r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
>       if (r)
>               return r;
>   
>       /* SDMA Privileged inst */
> -     r = amdgpu_irq_add_id(adev, 241, &adev->sdma_illegal_inst_irq);
> +     r = amdgpu_irq_add_id(adev, 241, &adev->sdma.illegal_inst_irq);
>       if (r)
>               return r;
>   
>       /* SDMA Privileged inst */
> -     r = amdgpu_irq_add_id(adev, 247, &adev->sdma_illegal_inst_irq);
> +     r = amdgpu_irq_add_id(adev, 247, &adev->sdma.illegal_inst_irq);
>       if (r)
>               return r;
>   
> @@ -1106,33 +1111,23 @@ static int sdma_v3_0_sw_init(void *handle)
>               return r;
>       }
>   
> -     ring = &adev->sdma[0].ring;
> -     ring->ring_obj = NULL;
> -     ring->use_doorbell = true;
> -     ring->doorbell_index = AMDGPU_DOORBELL_sDMA_ENGINE0;
> -
> -     ring = &adev->sdma[1].ring;
> -     ring->ring_obj = NULL;
> -     ring->use_doorbell = true;
> -     ring->doorbell_index = AMDGPU_DOORBELL_sDMA_ENGINE1;
> -
> -     ring = &adev->sdma[0].ring;
> -     sprintf(ring->name, "sdma0");
> -     r = amdgpu_ring_init(adev, ring, 256 * 1024,
> -                          SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
> -                          &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP0,
> -                          AMDGPU_RING_TYPE_SDMA);
> -     if (r)
> -             return r;
> -
> -     ring = &adev->sdma[1].ring;
> -     sprintf(ring->name, "sdma1");
> -     r = amdgpu_ring_init(adev, ring, 256 * 1024,
> -                          SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
> -                          &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP1,
> -                          AMDGPU_RING_TYPE_SDMA);
> -     if (r)
> -             return r;
> +     for (i = 0; i < adev->sdma.num_instances; i++) {
> +             ring = &adev->sdma.instance[i].ring;
> +             ring->ring_obj = NULL;
> +             ring->use_doorbell = true;
> +             ring->doorbell_index = (i == 0) ?
> +                     AMDGPU_DOORBELL_sDMA_ENGINE0 : AMDGPU_DOORBELL_sDMA_ENGINE1;
> +
> +             sprintf(ring->name, "sdma%d", i);
> +             r = amdgpu_ring_init(adev, ring, 256 * 1024,
> +                                  SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
> +                                  &adev->sdma.trap_irq,
> +                                  (i == 0) ?
> +                                  AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
> +                                  AMDGPU_RING_TYPE_SDMA);
> +             if (r)
> +                     return r;
> +     }
>   
>       return r;
>   }
> @@ -1140,9 +1135,10 @@ static int sdma_v3_0_sw_init(void *handle)
>   static int sdma_v3_0_sw_fini(void *handle)
>   {
>       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
> +     int i;
>   
> -     amdgpu_ring_fini(&adev->sdma[0].ring);
> -     amdgpu_ring_fini(&adev->sdma[1].ring);
> +     for (i = 0; i < adev->sdma.num_instances; i++)
> +             amdgpu_ring_fini(&adev->sdma.instance[i].ring);
>   
>       return 0;
>   }
> @@ -1222,7 +1218,7 @@ static void sdma_v3_0_print_status(void *handle)
>       dev_info(adev->dev, "VI SDMA registers\n");
>       dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
>                RREG32(mmSRBM_STATUS2));
> -     for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
> +     for (i = 0; i < adev->sdma.num_instances; i++) {
>               dev_info(adev->dev, "  SDMA%d_STATUS_REG=0x%08X\n",
>                        i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
>               dev_info(adev->dev, "  SDMA%d_F32_CNTL=0x%08X\n",
> @@ -1367,7 +1363,7 @@ static int sdma_v3_0_process_trap_irq(struct amdgpu_device *adev,
>       case 0:
>               switch (queue_id) {
>               case 0:
> -                     amdgpu_fence_process(&adev->sdma[0].ring);
> +                     amdgpu_fence_process(&adev->sdma.instance[0].ring);
>                       break;
>               case 1:
>                       /* XXX compute */
> @@ -1380,7 +1376,7 @@ static int sdma_v3_0_process_trap_irq(struct amdgpu_device *adev,
>       case 1:
>               switch (queue_id) {
>               case 0:
> -                     amdgpu_fence_process(&adev->sdma[1].ring);
> +                     amdgpu_fence_process(&adev->sdma.instance[1].ring);
>                       break;
>               case 1:
>                       /* XXX compute */
> @@ -1468,8 +1464,10 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
>   
>   static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
>   {
> -     adev->sdma[0].ring.funcs = &sdma_v3_0_ring_funcs;
> -     adev->sdma[1].ring.funcs = &sdma_v3_0_ring_funcs;
> +     int i;
> +
> +     for (i = 0; i < adev->sdma.num_instances; i++)
> +             adev->sdma.instance[i].ring.funcs = &sdma_v3_0_ring_funcs;
>   }
>   
>   static const struct amdgpu_irq_src_funcs sdma_v3_0_trap_irq_funcs = {
> @@ -1483,9 +1481,9 @@ static const struct amdgpu_irq_src_funcs sdma_v3_0_illegal_inst_irq_funcs = {
>   
>   static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev)
>   {
> -     adev->sdma_trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
> -     adev->sdma_trap_irq.funcs = &sdma_v3_0_trap_irq_funcs;
> -     adev->sdma_illegal_inst_irq.funcs = &sdma_v3_0_illegal_inst_irq_funcs;
> +     adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
> +     adev->sdma.trap_irq.funcs = &sdma_v3_0_trap_irq_funcs;
> +     adev->sdma.illegal_inst_irq.funcs = &sdma_v3_0_illegal_inst_irq_funcs;
>   }
>   
>   /**
> @@ -1551,7 +1549,7 @@ static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev)
>   {
>       if (adev->mman.buffer_funcs == NULL) {
>               adev->mman.buffer_funcs = &sdma_v3_0_buffer_funcs;
> -             adev->mman.buffer_funcs_ring = &adev->sdma[0].ring;
> +             adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
>       }
>   }
>   
> @@ -1566,7 +1564,7 @@ static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
>   {
>       if (adev->vm_manager.vm_pte_funcs == NULL) {
>               adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
> -             adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring;
> +             adev->vm_manager.vm_pte_funcs_ring = &adev->sdma.instance[0].ring;
>               adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
>       }
>   }
