From: Jun Yang <jun.y...@nxp.com>

Return complete index instead of total complete counter
in complete callback.

Signed-off-by: Jun Yang <jun.y...@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 534 ++++++++++++++++++-----------------
 drivers/dma/dpaa/dpaa_qdma.h | 109 +++----
 2 files changed, 330 insertions(+), 313 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 4022ad6469..dc17aa4520 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -15,19 +15,6 @@ qdma_desc_addr_set64(struct fsl_qdma_format *ccdf, u64 addr)
        ccdf->addr_lo = rte_cpu_to_le_32(lower_32_bits(addr));
 }
 
-static inline u64
-qdma_ccdf_get_queue(const struct fsl_qdma_format *ccdf)
-{
-       return ccdf->cfg8b_w1 & 0xff;
-}
-
-static inline int
-qdma_ccdf_get_offset(const struct fsl_qdma_format *ccdf)
-{
-       return (rte_le_to_cpu_32(ccdf->cfg) & QDMA_CCDF_MASK)
-               >> QDMA_CCDF_OFFSET;
-}
-
 static inline void
 qdma_ccdf_set_format(struct fsl_qdma_format *ccdf, int offset)
 {
@@ -59,8 +46,7 @@ qdma_csgf_set_f(struct fsl_qdma_format *csgf, int len)
        csgf->cfg = rte_cpu_to_le_32(QDMA_SG_FIN | (len & QDMA_SG_LEN_MASK));
 }
 
-static inline int
-ilog2(int x)
+static inline int ilog2(int x)
 {
        int log = 0;
 
@@ -73,32 +59,50 @@ ilog2(int x)
        return log;
 }
 
-static u32
+static inline int ilog2_qsize(uint32_t q_size)
+{
+       return (ilog2(q_size) - ilog2(64));
+}
+
+static inline int ilog2_qthld(uint32_t q_thld)
+{
+       return (ilog2(q_thld) - ilog2(16));
+}
+
+static inline int
+fsl_qdma_queue_bd_in_hw(struct fsl_qdma_queue *fsl_queue)
+{
+       struct rte_dma_stats *stats = &fsl_queue->stats;
+
+       return (stats->submitted - stats->completed);
+}
+
+static uint32_t
 qdma_readl(void *addr)
 {
        return QDMA_IN(addr);
 }
 
 static void
-qdma_writel(u32 val, void *addr)
+qdma_writel(uint32_t val, void *addr)
 {
        QDMA_OUT(addr, val);
 }
 
-static u32
+static uint32_t
 qdma_readl_be(void *addr)
 {
        return QDMA_IN_BE(addr);
 }
 
 static void
-qdma_writel_be(u32 val, void *addr)
+qdma_writel_be(uint32_t val, void *addr)
 {
        QDMA_OUT_BE(addr, val);
 }
 
-static void
-*dma_pool_alloc(int size, int aligned, dma_addr_t *phy_addr)
+static void *
+dma_pool_alloc(int size, int aligned, dma_addr_t *phy_addr)
 {
        void *virt_addr;
 
@@ -115,47 +119,48 @@ static void
  * Pre-request command descriptor and compound S/G for enqueue.
  */
 static int
-fsl_qdma_pre_request_enqueue_comp_sd_desc(
-                                       struct fsl_qdma_queue *queue,
-                                       int size, int aligned)
+fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 {
        struct fsl_qdma_sdf *sdf;
        struct fsl_qdma_ddf *ddf;
-       struct fsl_qdma_format *csgf_desc;
        struct fsl_qdma_format *ccdf;
-       int i, j;
+       uint16_t i, j;
        struct fsl_qdma_format *head;
+       struct fsl_qdma_cmpd_ft *ft;
+       struct fsl_qdma_df *df;
 
-       head = queue->virt_head;
+       head = queue->cmd_desc;
 
-       for (i = 0; i < (int)(queue->n_cq); i++) {
-               dma_addr_t bus_addr = 0, desc_bus_addr = 0;
+       for (i = 0; i < queue->n_cq; i++) {
+               dma_addr_t phy_ft = 0, phy_df = 0;
 
-               queue->virt_addr[i] =
-               dma_pool_alloc(size, aligned, &bus_addr);
-               if (!queue->virt_addr[i])
+               queue->ft[i] =
+                       dma_pool_alloc(sizeof(struct fsl_qdma_cmpd_ft),
+                               RTE_CACHE_LINE_SIZE, &phy_ft);
+               if (!queue->ft[i])
                        goto fail;
 
-               queue->desc_virt_addr[i] =
-               dma_pool_alloc(size, aligned, &desc_bus_addr);
-               if (!queue->desc_virt_addr[i]) {
-                       rte_free(queue->virt_addr[i]);
+               queue->df[i] =
+                       dma_pool_alloc(sizeof(struct fsl_qdma_df),
+                               RTE_CACHE_LINE_SIZE, &phy_df);
+               if (!queue->df[i]) {
+                       rte_free(queue->ft[i]);
                        goto fail;
                }
 
-               memset(queue->virt_addr[i], 0, FSL_QDMA_COMMAND_BUFFER_SIZE);
-               memset(queue->desc_virt_addr[i], 0,
-                      FSL_QDMA_DESCRIPTOR_BUFFER_SIZE);
+               memset(queue->ft[i], 0, sizeof(struct fsl_qdma_cmpd_ft));
+               memset(queue->df[i], 0, sizeof(struct fsl_qdma_df));
 
-               csgf_desc = (struct fsl_qdma_format *)queue->virt_addr[i] +
-                           QDMA_DESC_OFF;
-               sdf = (struct fsl_qdma_sdf *)queue->desc_virt_addr[i];
-               ddf = (struct fsl_qdma_ddf *)sdf + QDMA_DESC_OFF;
+               ft = queue->ft[i];
+               df = queue->df[i];
+               sdf = &df->sdf;
+               ddf = &df->ddf;
                /* Compound Command Descriptor(Frame List Table) */
-               qdma_desc_addr_set64(csgf_desc, desc_bus_addr);
+               qdma_desc_addr_set64(&ft->desc_buf, phy_df);
 
                /* It must be 32 as Compound S/G Descriptor */
-               qdma_csgf_set_len(csgf_desc, 32);
+               qdma_csgf_set_len(&ft->desc_buf,
+                       sizeof(struct fsl_qdma_df));
                /* Descriptor Buffer */
                sdf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
                               FSL_QDMA_CMD_RWTTYPE_OFFSET);
@@ -167,73 +172,72 @@ fsl_qdma_pre_request_enqueue_comp_sd_desc(
                ddf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_LWC <<
                                FSL_QDMA_CMD_LWC_OFFSET);
 
-               ccdf = (struct fsl_qdma_format *)queue->virt_head;
-               qdma_desc_addr_set64(ccdf, bus_addr + 16);
-               qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(queue->virt_addr[i]));
-               qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(queue->virt_addr[i]));
-               queue->virt_head++;
+               ccdf = queue->cmd_desc;
+               qdma_desc_addr_set64(ccdf, phy_ft);
+               qdma_ccdf_set_format(ccdf, 0);
+               qdma_ccdf_set_ser(ccdf, 0);
+               queue->cmd_desc++;
        }
-       queue->virt_head = head;
+       queue->cmd_desc = head;
        queue->ci = 0;
 
        return 0;
 
 fail:
        for (j = 0; j < i; j++) {
-               rte_free(queue->virt_addr[j]);
-               rte_free(queue->desc_virt_addr[j]);
+               rte_free(queue->ft[j]);
+               rte_free(queue->df[j]);
        }
 
        return -ENOMEM;
 }
 
-static struct fsl_qdma_queue
-*fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma, int k, int b)
+static struct fsl_qdma_queue *
+fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma,
+       int queue_id, int block_id)
 {
        struct fsl_qdma_queue *queue_temp;
+       uint32_t queue_size;
 
-       queue_temp = rte_zmalloc("qdma: queue head", sizeof(*queue_temp), 0);
+       queue_temp = rte_zmalloc("qdma: queue head",
+               sizeof(struct fsl_qdma_queue), 0);
        if (!queue_temp) {
-               printf("no memory to allocate queues\n");
+               DPAA_QDMA_ERR("no memory to allocate queues\n");
                return NULL;
        }
+       queue_size = sizeof(struct fsl_qdma_format) * QDMA_QUEUE_SIZE;
 
-       queue_temp->cq =
-       dma_pool_alloc(sizeof(struct fsl_qdma_format) *
-                      QDMA_QUEUE_SIZE,
-                      sizeof(struct fsl_qdma_format) *
-                      QDMA_QUEUE_SIZE, &queue_temp->bus_addr);
-
+       queue_temp->cq = dma_pool_alloc(queue_size,
+               queue_size, &queue_temp->bus_addr);
        if (!queue_temp->cq) {
                rte_free(queue_temp);
                return NULL;
        }
 
-       memset(queue_temp->cq, 0x0, QDMA_QUEUE_SIZE *
-              sizeof(struct fsl_qdma_format));
+       memset(queue_temp->cq, 0x0, queue_size);
 
-       queue_temp->queue_base = fsl_qdma->block_base +
-               FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, b);
+       queue_temp->block_vir = fsl_qdma->block_base +
+               FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, block_id);
        queue_temp->n_cq = QDMA_QUEUE_SIZE;
-       queue_temp->id = k;
+       queue_temp->queue_id = queue_id;
        queue_temp->pending = 0;
-       queue_temp->virt_head = queue_temp->cq;
-       queue_temp->virt_addr = rte_malloc("queue virt addr",
+       queue_temp->cmd_desc = queue_temp->cq;
+       queue_temp->ft = rte_malloc("Compound Frame Table",
                        sizeof(void *) * QDMA_QUEUE_SIZE, 0);
-       if (!queue_temp->virt_addr) {
+       if (!queue_temp->ft) {
                rte_free(queue_temp->cq);
                rte_free(queue_temp);
                return NULL;
        }
-       queue_temp->desc_virt_addr = rte_malloc("queue desc virt addr",
+       queue_temp->df = rte_malloc("Descriptor Buffer",
                        sizeof(void *) * QDMA_QUEUE_SIZE, 0);
-       if (!queue_temp->desc_virt_addr) {
-               rte_free(queue_temp->virt_addr);
+       if (!queue_temp->df) {
+               rte_free(queue_temp->ft);
                rte_free(queue_temp->cq);
                rte_free(queue_temp);
                return NULL;
        }
-       queue_temp->stats = (struct rte_dma_stats){0};
+       memset(&queue_temp->stats, 0, sizeof(struct rte_dma_stats));
 
        return queue_temp;
 }
@@ -241,45 +245,43 @@ static struct fsl_qdma_queue
 static void
 fsl_qdma_free_queue_resources(struct fsl_qdma_queue *queue)
 {
-       rte_free(queue->desc_virt_addr);
-       rte_free(queue->virt_addr);
+       rte_free(queue->df);
+       rte_free(queue->ft);
        rte_free(queue->cq);
        rte_free(queue);
 }
 
-static struct
-fsl_qdma_queue *fsl_qdma_prep_status_queue(struct fsl_qdma_engine *fsl_qdma,
-                                          u32 id)
+static struct fsl_qdma_queue *
+fsl_qdma_prep_status_queue(struct fsl_qdma_engine *fsl_qdma,
+       uint32_t block_id)
 {
        struct fsl_qdma_queue *status_head;
-       unsigned int status_size;
+       uint32_t status_size;
 
-       status_size = QDMA_STATUS_SIZE;
+       status_size = QDMA_STATUS_SIZE * sizeof(struct fsl_qdma_format);
 
-       status_head = rte_zmalloc("qdma: status head", sizeof(*status_head), 0);
+       status_head = rte_zmalloc("qdma: status head",
+               sizeof(*status_head), 0);
        if (!status_head)
                return NULL;
 
        /*
         * Buffer for queue command
         */
-       status_head->cq = dma_pool_alloc(sizeof(struct fsl_qdma_format) *
-                                        status_size,
-                                        sizeof(struct fsl_qdma_format) *
-                                        status_size,
-                                        &status_head->bus_addr);
+       status_head->cq = dma_pool_alloc(status_size,
+               status_size, &status_head->bus_addr);
 
        if (!status_head->cq) {
                rte_free(status_head);
                return NULL;
        }
 
-       memset(status_head->cq, 0x0, status_size *
-              sizeof(struct fsl_qdma_format));
+       memset(status_head->cq, 0x0, status_size);
        status_head->n_cq = status_size;
-       status_head->virt_head = status_head->cq;
-       status_head->queue_base = fsl_qdma->block_base +
-               FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);
+       status_head->cmd_desc = status_head->cq;
+       status_head->block_id = block_id;
+       status_head->block_vir = fsl_qdma->block_base +
+               FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, block_id);
 
        return status_head;
 }
@@ -294,11 +296,11 @@ fsl_qdma_free_status_queue(struct fsl_qdma_queue *status)
 static int
 fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
 {
-       void *ctrl = fsl_qdma->ctrl_base;
-       void *block;
+       uint8_t *ctrl = fsl_qdma->ctrl_base;
+       uint8_t *block;
        int i, count = RETRIES;
        unsigned int j;
-       u32 reg;
+       uint32_t reg;
 
        /* Disable the command queue and wait for idle state. */
        reg = qdma_readl(ctrl + FSL_QDMA_DMR);
@@ -337,11 +339,13 @@ fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
 }
 
 static int
-fsl_qdma_queue_transfer_complete(void *block, const uint16_t nb_cpls,
-                                enum rte_dma_status_code *status)
+fsl_qdma_queue_transfer_complete(struct fsl_qdma_queue *fsl_queue,
+       const uint16_t nb_cpls,
+       enum rte_dma_status_code *status)
 {
-       u32 reg;
+       uint32_t reg;
        int count = 0;
+       uint8_t *block = fsl_queue->block_vir;
 
        while (count < nb_cpls) {
                reg = qdma_readl_be(block + FSL_QDMA_BSQSR);
@@ -351,9 +355,11 @@ fsl_qdma_queue_transfer_complete(void *block, const uint16_t nb_cpls,
                qdma_writel_be(FSL_QDMA_BSQMR_DI, block + FSL_QDMA_BSQMR);
                if (status != NULL)
                        status[count] = RTE_DMA_STATUS_SUCCESSFUL;
+               fsl_queue->complete++;
+               if (unlikely(fsl_queue->complete >= fsl_queue->n_cq))
+                       fsl_queue->complete = 0;
 
                count++;
-
        }
        return count;
 }
@@ -363,9 +369,9 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 {
        struct fsl_qdma_queue *temp;
        void *ctrl = fsl_qdma->ctrl_base;
-       void *block;
-       u32 i, j;
-       u32 reg;
+       uint8_t *block;
+       uint32_t i, j, k = 0;
+       uint32_t reg;
        int ret, val;
 
        /* Try to halt the qDMA engine first. */
@@ -375,13 +381,11 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
                return ret;
        }
 
-       int k = 0;
        for (j = 0; j < fsl_qdma->num_blocks; j++) {
                block = fsl_qdma->block_base +
                        FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
-               k = 0;
-               for (i = (j * QDMA_QUEUES); i < ((j * QDMA_QUEUES) + QDMA_QUEUES); i++) {
-                       temp = fsl_qdma->queue[i];
+               for (i = 0; i < QDMA_QUEUES; i++) {
+                       temp = fsl_qdma->queue[k];
                        /*
                         * Initialize Command Queue registers to
                         * point to the first
@@ -391,19 +395,20 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
                         */
 
                        qdma_writel(lower_32_bits(temp->bus_addr),
-                                   block + FSL_QDMA_BCQDPA_SADDR(k));
+                                   block + FSL_QDMA_BCQDPA_SADDR(i));
                        qdma_writel(upper_32_bits(temp->bus_addr),
-                                   block + FSL_QDMA_BCQEDPA_SADDR(k));
+                                   block + FSL_QDMA_BCQEDPA_SADDR(i));
                        qdma_writel(lower_32_bits(temp->bus_addr),
-                                   block + FSL_QDMA_BCQEPA_SADDR(k));
+                                   block + FSL_QDMA_BCQEPA_SADDR(i));
                        qdma_writel(upper_32_bits(temp->bus_addr),
-                                   block + FSL_QDMA_BCQEEPA_SADDR(k));
+                                   block + FSL_QDMA_BCQEEPA_SADDR(i));
 
                        /* Initialize the queue mode. */
                        reg = FSL_QDMA_BCQMR_EN;
-                       reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq) - 4);
-                       reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq) - 6);
-                       qdma_writel(reg, block + FSL_QDMA_BCQMR(k));
+                       reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2_qthld(temp->n_cq));
+                       reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2_qsize(temp->n_cq));
+                       temp->le_cqmr = reg;
+                       qdma_writel(reg, block + FSL_QDMA_BCQMR(i));
                        k++;
                }
 
@@ -423,18 +428,15 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
                 * Enqueue Pointer Address Registers
                 */
 
-               qdma_writel(
-                           upper_32_bits(fsl_qdma->status[j]->bus_addr),
-                           block + FSL_QDMA_SQEEPAR);
-               qdma_writel(
-                           lower_32_bits(fsl_qdma->status[j]->bus_addr),
-                           block + FSL_QDMA_SQEPAR);
-               qdma_writel(
-                           upper_32_bits(fsl_qdma->status[j]->bus_addr),
-                           block + FSL_QDMA_SQEDPAR);
-               qdma_writel(
-                           lower_32_bits(fsl_qdma->status[j]->bus_addr),
-                           block + FSL_QDMA_SQDPAR);
+               temp = fsl_qdma->status[j];
+               qdma_writel(upper_32_bits(temp->bus_addr),
+                       block + FSL_QDMA_SQEEPAR);
+               qdma_writel(lower_32_bits(temp->bus_addr),
+                       block + FSL_QDMA_SQEPAR);
+               qdma_writel(upper_32_bits(temp->bus_addr),
+                       block + FSL_QDMA_SQEDPAR);
+               qdma_writel(lower_32_bits(temp->bus_addr),
+                       block + FSL_QDMA_SQDPAR);
                /* Desiable status queue interrupt. */
 
                qdma_writel(0x0, block + FSL_QDMA_BCQIER(0));
@@ -443,7 +445,7 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 
                /* Initialize the status queue mode. */
                reg = FSL_QDMA_BSQMR_EN;
-               val = ilog2(fsl_qdma->status[j]->n_cq) - 6;
+               val = ilog2_qsize(temp->n_cq);
                reg |= FSL_QDMA_BSQMR_CQ_SIZE(val);
                qdma_writel(reg, block + FSL_QDMA_BSQMR);
        }
@@ -455,52 +457,51 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
        return 0;
 }
 
-
 static int
 fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue,
-                                 uint64_t flags, dma_addr_t dst,
-                                 dma_addr_t src, size_t len)
+       uint64_t flags, dma_addr_t dst,
+       dma_addr_t src, size_t len)
 {
-       void *block = fsl_queue->queue_base;
+       uint8_t *block = fsl_queue->block_vir;
        struct fsl_qdma_format *csgf_src, *csgf_dest;
+       struct fsl_qdma_cmpd_ft *ft;
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050757
        struct fsl_qdma_sdf *sdf;
-       u32 cfg = 0;
+       uint32_t cfg = 0;
 #endif
 
 #ifdef CONFIG_RTE_DMA_DPAA_ERR_CHK
-       u32 reg;
+       uint32_t reg;
 
        /* retrieve and store the register value in big endian
         * to avoid bits swap
         */
        reg = qdma_readl_be(block +
-                        FSL_QDMA_BCQSR(fsl_queue->id));
+                        FSL_QDMA_BCQSR(fsl_queue->queue_id));
        if (reg & (FSL_QDMA_BCQSR_QF_XOFF_BE)) {
                DPAA_QDMA_ERR("QDMA Engine is busy\n");
-               return -1;
+               return -EBUSY;
        }
 #else
        /* check whether critical watermark level reached,
         * below check is valid for only single queue per block
         */
-       if ((fsl_queue->stats.submitted - fsl_queue->stats.completed)
-                       >= QDMA_QUEUE_CR_WM) {
+       if (fsl_qdma_queue_bd_in_hw(fsl_queue) >= QDMA_QUEUE_CR_WM) {
                DPAA_QDMA_DEBUG("Queue is full, try dequeue first\n");
-               return -1;
+               return -ENOSPC;
        }
 #endif
+
        if (unlikely(fsl_queue->pending == fsl_queue->n_cq)) {
                DPAA_QDMA_DEBUG("Queue is full, try dma submit first\n");
-               return -1;
+               return -ENOSPC;
        }
 
-       csgf_src = (struct fsl_qdma_format *)fsl_queue->virt_addr[fsl_queue->ci] +
-                  QDMA_SGF_SRC_OFF;
-       csgf_dest = (struct fsl_qdma_format *)fsl_queue->virt_addr[fsl_queue->ci] +
-                   QDMA_SGF_DST_OFF;
+       ft = fsl_queue->ft[fsl_queue->ci];
+       csgf_src = &ft->desc_sbuf;
+       csgf_dest = &ft->desc_dbuf;
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050757
-       sdf = (struct fsl_qdma_sdf *)fsl_queue->desc_virt_addr[fsl_queue->ci];
+       sdf = fsl_queue->df[fsl_queue->ci];
        sdf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
                        FSL_QDMA_CMD_RWTTYPE_OFFSET);
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050265
@@ -527,67 +528,57 @@ fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue,
                fsl_queue->ci = 0;
 
        if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
-               qdma_writel_be(FSL_QDMA_BCQMR_EI,
-                              block + FSL_QDMA_BCQMR(fsl_queue->id));
+               qdma_writel(fsl_queue->le_cqmr | FSL_QDMA_BCQMR_EI,
+                       block + FSL_QDMA_BCQMR(fsl_queue->queue_id));
                fsl_queue->stats.submitted++;
        } else {
                fsl_queue->pending++;
        }
-       return 0;
+
+       if (fsl_queue->ci)
+               return fsl_queue->ci - 1;
+       else
+               return fsl_queue->n_cq;
 }
 
 static int
 dpaa_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info,
-             uint32_t info_sz)
+       __rte_unused uint32_t info_sz)
 {
-#define DPAADMA_MAX_DESC        64
-#define DPAADMA_MIN_DESC        64
-
-       RTE_SET_USED(dev);
-       RTE_SET_USED(info_sz);
+       struct fsl_qdma_engine *fsl_qdma = dev->data->dev_private;
 
        dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
-                            RTE_DMA_CAPA_MEM_TO_DEV |
-                            RTE_DMA_CAPA_DEV_TO_DEV |
-                            RTE_DMA_CAPA_DEV_TO_MEM |
-                            RTE_DMA_CAPA_SILENT |
-                            RTE_DMA_CAPA_OPS_COPY;
-       dev_info->max_vchans = 4;
-       dev_info->max_desc = DPAADMA_MAX_DESC;
-       dev_info->min_desc = DPAADMA_MIN_DESC;
+               RTE_DMA_CAPA_SILENT | RTE_DMA_CAPA_OPS_COPY;
+       dev_info->max_vchans = fsl_qdma->n_queues;
+       dev_info->max_desc = QDMA_QUEUE_SIZE;
+       dev_info->min_desc = QDMA_QUEUE_SIZE;
 
        return 0;
 }
 
 static int
-dpaa_get_channel(struct fsl_qdma_engine *fsl_qdma,  uint16_t vchan)
+dpaa_get_channel(struct fsl_qdma_engine *fsl_qdma,
+       uint16_t vchan)
 {
-       u32 i;
        int ret;
-       struct fsl_qdma_queue *fsl_queue;
-
-       if (fsl_qdma->free_block_id == QDMA_BLOCKS) {
-               DPAA_QDMA_ERR("Maximum 4 queues can be configured\n");
-               return -1;
-       }
+       struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
 
-       i = fsl_qdma->free_block_id * QDMA_QUEUES;
+       if (fsl_queue->used)
+               return 0;
 
-       fsl_queue = fsl_qdma->queue[i];
-       ret = fsl_qdma_pre_request_enqueue_comp_sd_desc(fsl_queue,
-                       FSL_QDMA_COMMAND_BUFFER_SIZE, 64);
+       ret = fsl_qdma_pre_comp_sd_desc(fsl_queue);
        if (ret)
                return ret;
 
-       fsl_qdma->vchan_map[vchan] = i;
-       fsl_qdma->free_block_id++;
+       fsl_queue->used = 1;
+
        return 0;
 }
 
 static int
 dpaa_qdma_configure(__rte_unused struct rte_dma_dev *dmadev,
-                   __rte_unused const struct rte_dma_conf *dev_conf,
-                   __rte_unused uint32_t conf_sz)
+       __rte_unused const struct rte_dma_conf *dev_conf,
+       __rte_unused uint32_t conf_sz)
 {
        return 0;
 }
@@ -606,9 +597,9 @@ dpaa_qdma_close(__rte_unused struct rte_dma_dev *dev)
 
 static int
 dpaa_qdma_queue_setup(struct rte_dma_dev *dmadev,
-                     uint16_t vchan,
-                     __rte_unused const struct rte_dma_vchan_conf *conf,
-                     __rte_unused uint32_t conf_sz)
+       uint16_t vchan,
+       __rte_unused const struct rte_dma_vchan_conf *conf,
+       __rte_unused uint32_t conf_sz)
 {
        struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
 
@@ -618,13 +609,13 @@ dpaa_qdma_queue_setup(struct rte_dma_dev *dmadev,
 static int
 dpaa_qdma_submit(void *dev_private, uint16_t vchan)
 {
-       struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
-       struct fsl_qdma_queue *fsl_queue =
-               fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
-       void *block = fsl_queue->queue_base;
+       struct fsl_qdma_engine *fsl_qdma = dev_private;
+       struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
+       void *block = fsl_queue->block_vir;
 
        while (fsl_queue->pending) {
-               qdma_writel_be(FSL_QDMA_BCQMR_EI, block + FSL_QDMA_BCQMR(fsl_queue->id));
+               qdma_writel(fsl_queue->le_cqmr | FSL_QDMA_BCQMR_EI,
+                       block + FSL_QDMA_BCQMR(fsl_queue->queue_id));
                fsl_queue->pending--;
                fsl_queue->stats.submitted++;
        }
@@ -634,37 +625,31 @@ dpaa_qdma_submit(void *dev_private, uint16_t vchan)
 
 static int
 dpaa_qdma_enqueue(void *dev_private, uint16_t vchan,
-                 rte_iova_t src, rte_iova_t dst,
-                 uint32_t length, uint64_t flags)
+       rte_iova_t src, rte_iova_t dst,
+       uint32_t length, uint64_t flags)
 {
-       struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
-       struct fsl_qdma_queue *fsl_queue =
-               fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
-       int ret, idx;
-
-       idx = (uint16_t)(fsl_queue->stats.submitted + fsl_queue->pending);
+       struct fsl_qdma_engine *fsl_qdma = dev_private;
+       struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
+       int ret;
 
-       ret = fsl_qdma_enqueue_desc(fsl_queue, flags, (dma_addr_t)dst, (dma_addr_t)src, length);
-       if (ret < 0)
-               return ret;
+       ret = fsl_qdma_enqueue_desc(fsl_queue, flags, dst, src, length);
 
-       return idx;
+       return ret;
 }
 
 static uint16_t
 dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
-                        const uint16_t nb_cpls, uint16_t *last_idx,
-                        enum rte_dma_status_code *st)
+       const uint16_t nb_cpls, uint16_t *last_idx,
+       enum rte_dma_status_code *st)
 {
-       struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
+       struct fsl_qdma_engine *fsl_qdma = dev_private;
        int ret;
-       struct fsl_qdma_queue *fsl_queue =
-               fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
+       struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
        void *status = fsl_qdma->status_base;
        int intr;
 
-       ret = fsl_qdma_queue_transfer_complete(fsl_queue->queue_base,
-                                              nb_cpls, st);
+       ret = fsl_qdma_queue_transfer_complete(fsl_queue,
+                       nb_cpls, st);
        if (!ret) {
                intr = qdma_readl_be(status + FSL_QDMA_DEDR);
                if (intr) {
@@ -690,8 +675,12 @@ dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
        }
 
        fsl_queue->stats.completed += ret;
-       if (last_idx != NULL)
-               *last_idx = (uint16_t)(fsl_queue->stats.completed - 1);
+       if (last_idx) {
+               if (unlikely(!fsl_queue->complete))
+                       *last_idx = fsl_queue->n_cq - 1;
+               else
+                       *last_idx = fsl_queue->complete - 1;
+       }
 
        return ret;
 }
@@ -699,21 +688,20 @@ dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 
 static uint16_t
 dpaa_qdma_dequeue(void *dev_private,
-                 uint16_t vchan, const uint16_t nb_cpls,
-                 uint16_t *last_idx, bool *has_error)
+       uint16_t vchan, const uint16_t nb_cpls,
+       uint16_t *last_idx, bool *has_error)
 {
-       struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
+       struct fsl_qdma_engine *fsl_qdma = dev_private;
        int ret;
-       struct fsl_qdma_queue *fsl_queue =
-               fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
+       struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
 #ifdef CONFIG_RTE_DMA_DPAA_ERR_CHK
        void *status = fsl_qdma->status_base;
        int intr;
 #endif
 
        *has_error = false;
-       ret = fsl_qdma_queue_transfer_complete(fsl_queue->queue_base,
-                                              nb_cpls, NULL);
+       ret = fsl_qdma_queue_transfer_complete(fsl_queue,
+                       nb_cpls, NULL);
 #ifdef CONFIG_RTE_DMA_DPAA_ERR_CHK
        if (!ret) {
                intr = qdma_readl_be(status + FSL_QDMA_DEDR);
@@ -740,23 +728,27 @@ dpaa_qdma_dequeue(void *dev_private,
        }
 #endif
        fsl_queue->stats.completed += ret;
-       if (last_idx != NULL)
-               *last_idx = (uint16_t)(fsl_queue->stats.completed - 1);
+       if (last_idx) {
+               if (unlikely(!fsl_queue->complete))
+                       *last_idx = fsl_queue->n_cq - 1;
+               else
+                       *last_idx = fsl_queue->complete - 1;
+       }
+
        return ret;
 }
 
 static int
-dpaa_qdma_stats_get(const struct rte_dma_dev *dmadev, uint16_t vchan,
-                   struct rte_dma_stats *rte_stats, uint32_t size)
+dpaa_qdma_stats_get(const struct rte_dma_dev *dmadev,
+       uint16_t vchan, struct rte_dma_stats *rte_stats, uint32_t size)
 {
        struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
-       struct fsl_qdma_queue *fsl_queue =
-               fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
+       struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
        struct rte_dma_stats *stats = &fsl_queue->stats;
 
        if (size < sizeof(rte_stats))
                return -EINVAL;
-       if (rte_stats == NULL)
+       if (!rte_stats)
                return -EINVAL;
 
        *rte_stats = *stats;
@@ -768,10 +760,9 @@ static int
 dpaa_qdma_stats_reset(struct rte_dma_dev *dmadev, uint16_t vchan)
 {
        struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
-       struct fsl_qdma_queue *fsl_queue =
-               fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
+       struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
 
-       fsl_queue->stats = (struct rte_dma_stats){0};
+       memset(&fsl_queue->stats, 0, sizeof(struct rte_dma_stats));
 
        return 0;
 }
@@ -779,9 +770,8 @@ dpaa_qdma_stats_reset(struct rte_dma_dev *dmadev, uint16_t vchan)
 static uint16_t
 dpaa_qdma_burst_capacity(const void *dev_private, uint16_t vchan)
 {
-       const struct fsl_qdma_engine *fsl_qdma  = dev_private;
-       struct fsl_qdma_queue *fsl_queue =
-               fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
+       const struct fsl_qdma_engine *fsl_qdma = dev_private;
+       struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
 
        return fsl_queue->n_cq - fsl_queue->pending;
 }
@@ -804,8 +794,7 @@ dpaa_qdma_init(struct rte_dma_dev *dmadev)
        int ccsr_qdma_fd;
        int regs_size;
        int ret;
-       u32 i, k = 0;
-       int j;
+       uint32_t i, j, k;
 
        fsl_qdma->n_queues = QDMA_QUEUES * QDMA_BLOCKS;
        fsl_qdma->num_blocks = QDMA_BLOCKS;
@@ -814,47 +803,63 @@ dpaa_qdma_init(struct rte_dma_dev *dmadev)
        ccsr_qdma_fd = open("/dev/mem", O_RDWR);
        if (unlikely(ccsr_qdma_fd < 0)) {
                DPAA_QDMA_ERR("Can not open /dev/mem for qdma CCSR map");
-               return -1;
+               return ccsr_qdma_fd;
        }
 
-       regs_size = fsl_qdma->block_offset * (fsl_qdma->num_blocks + 2);
+       regs_size = fsl_qdma->block_offset * fsl_qdma->num_blocks;
+       regs_size += (QDMA_CTRL_REGION_SIZE + QDMA_STATUS_REGION_SIZE);
        phys_addr = QDMA_CCSR_BASE;
-       fsl_qdma->ctrl_base = mmap(NULL, regs_size, PROT_READ |
-                                        PROT_WRITE, MAP_SHARED,
-                                        ccsr_qdma_fd, phys_addr);
+       fsl_qdma->reg_base = mmap(NULL, regs_size,
+               PROT_READ | PROT_WRITE, MAP_SHARED,
+               ccsr_qdma_fd, phys_addr);
 
        close(ccsr_qdma_fd);
-       if (fsl_qdma->ctrl_base == MAP_FAILED) {
-               DPAA_QDMA_ERR("Can not map CCSR base qdma: Phys: %08" PRIx64
-                      "size %d\n", phys_addr, regs_size);
-               return -1;
+       if (fsl_qdma->reg_base == MAP_FAILED) {
+               DPAA_QDMA_ERR("Map qdma reg: Phys(0x%"PRIx64"), size(%d)",
+                       phys_addr, regs_size);
+               return -ENOMEM;
        }
 
-       fsl_qdma->status_base = fsl_qdma->ctrl_base + QDMA_BLOCK_OFFSET;
-       fsl_qdma->block_base = fsl_qdma->status_base + QDMA_BLOCK_OFFSET;
-
-       fsl_qdma->status = rte_malloc("status queue", sizeof(struct fsl_qdma_queue) * 4, 0);
-       if (!fsl_qdma->status)
+       fsl_qdma->ctrl_base =
+               fsl_qdma->reg_base + QDMA_CTRL_REGION_OFFSET;
+       fsl_qdma->status_base =
+               fsl_qdma->reg_base + QDMA_STATUS_REGION_OFFSET;
+       fsl_qdma->block_base =
+               fsl_qdma->status_base + QDMA_STATUS_REGION_SIZE;
+
+       fsl_qdma->status = rte_malloc("status queue",
+               sizeof(struct fsl_qdma_queue) * QDMA_BLOCKS, 0);
+       if (!fsl_qdma->status) {
+               ret = -ENOMEM;
                goto err;
+       }
 
-       fsl_qdma->queue = rte_malloc("cmd queue", sizeof(struct fsl_qdma_queue) * 32, 0);
+       fsl_qdma->queue = rte_malloc("cmd queue",
+               sizeof(struct fsl_qdma_queue) * fsl_qdma->n_queues, 0);
        if (!fsl_qdma->queue) {
                rte_free(fsl_qdma->status);
+               ret = -ENOMEM;
                goto err;
        }
 
+       k = 0;
        for (i = 0; i < fsl_qdma->num_blocks; i++) {
-               fsl_qdma->status[i] = fsl_qdma_prep_status_queue(fsl_qdma, i);
-               if (!fsl_qdma->status[i])
+               fsl_qdma->status[i] =
+                       fsl_qdma_prep_status_queue(fsl_qdma, i);
+               if (!fsl_qdma->status[i]) {
+                       ret = -ENOMEM;
                        goto mem_free;
+               }
                j = 0;
-               for (k = (i * QDMA_QUEUES); k < ((i * QDMA_QUEUES) + QDMA_QUEUES); k++) {
-                       fsl_qdma->queue[k] = fsl_qdma_alloc_queue_resources(fsl_qdma, j, i);
-                       if (!fsl_qdma->queue[k])
+               for (j = 0; j < QDMA_QUEUES; j++) {
+                       fsl_qdma->queue[k] =
+                               fsl_qdma_alloc_queue_resources(fsl_qdma, j, i);
+                       if (!fsl_qdma->queue[k]) {
+                               ret = -ENOMEM;
                                goto mem_free;
-                       j++;
+                       }
+                       k++;
                }
-
        }
 
        ret = fsl_qdma_reg_init(fsl_qdma);
@@ -867,17 +872,20 @@ dpaa_qdma_init(struct rte_dma_dev *dmadev)
        return 0;
 
 mem_free:
-       for (i = 0; i < fsl_qdma->num_blocks; i++) {
-               for (k = (i * QDMA_QUEUES); k < ((i * QDMA_QUEUES) + QDMA_QUEUES); k++)
-                       fsl_qdma_free_queue_resources(fsl_qdma->queue[k]);
-               fsl_qdma_free_status_queue(fsl_qdma->status[i]);
-       }
-       rte_free(fsl_qdma->status);
+       for (j = 0; j < k; j++)
+               fsl_qdma_free_queue_resources(fsl_qdma->queue[j]);
+
+       for (j = 0; j < i; j++)
+               fsl_qdma_free_status_queue(fsl_qdma->status[j]);
+
+       if (fsl_qdma->status)
+               rte_free(fsl_qdma->status);
 err:
-       rte_free(fsl_qdma->queue);
+       if (fsl_qdma->queue)
+               rte_free(fsl_qdma->queue);
        munmap(fsl_qdma->ctrl_base, regs_size);
 
-       return -1;
+       return ret;
 }
 
 static int
@@ -921,13 +929,13 @@ dpaa_qdma_remove(struct rte_dpaa_device *dpaa_dev)
 {
        struct rte_dma_dev *dmadev = dpaa_dev->dmadev;
        struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
-       uint32_t i, k;
+       uint32_t i;
 
-       for (i = 0; i < fsl_qdma->num_blocks; i++) {
-               for (k = (i * QDMA_QUEUES); k < ((i * QDMA_QUEUES) + QDMA_QUEUES); k++)
-                       fsl_qdma_free_queue_resources(fsl_qdma->queue[k]);
+       for (i = 0; i < fsl_qdma->num_blocks; i++)
                fsl_qdma_free_status_queue(fsl_qdma->status[i]);
-       }
+
+       for (i = 0; i < fsl_qdma->num_blocks * QDMA_QUEUES; i++)
+               fsl_qdma_free_queue_resources(fsl_qdma->queue[i]);
 
        rte_free(fsl_qdma->queue);
        rte_free(fsl_qdma->status);
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index 8a4517a70a..25954ef3a4 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -11,7 +11,6 @@
 #define BIT(nr)                (1UL << (nr))
 #endif
 
-#define CORE_NUMBER 4
 #define RETRIES        5
 
 #ifndef GENMASK
@@ -20,6 +19,12 @@
                (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
 #endif
 
+#define QDMA_CTRL_REGION_OFFSET 0
+#define QDMA_CTRL_REGION_SIZE 0x10000
+#define QDMA_STATUS_REGION_OFFSET \
+       (QDMA_CTRL_REGION_OFFSET + QDMA_CTRL_REGION_SIZE)
+#define QDMA_STATUS_REGION_SIZE 0x10000
+
 #define FSL_QDMA_DMR                   0x0
 #define FSL_QDMA_DSR                   0x4
 #define FSL_QDMA_DEDR                  0xe04
@@ -54,19 +59,18 @@
 #define FSL_QDMA_QUEUE_MAX             8
 
 #define FSL_QDMA_BCQMR_EN              0x80000000
-#define FSL_QDMA_BCQMR_EI_BE           0x40
+#define FSL_QDMA_BCQMR_EI              0x40000000
+
 #define FSL_QDMA_BCQMR_CD_THLD(x)      ((x) << 20)
 #define FSL_QDMA_BCQMR_CQ_SIZE(x)      ((x) << 16)
 
 /* Update the value appropriately whenever QDMA_QUEUE_SIZE
  * changes.
  */
-#define FSL_QDMA_BCQMR_EI              0x20c0
 
 #define FSL_QDMA_BCQSR_QF_XOFF_BE      0x1000100
 
 #define FSL_QDMA_BSQMR_EN              0x80000000
-#define FSL_QDMA_BSQMR_DI_BE           0x40
 #define FSL_QDMA_BSQMR_CQ_SIZE(x)      ((x) << 16)
 #define FSL_QDMA_BSQMR_DI              0xc0
 
@@ -75,8 +79,6 @@
 #define FSL_QDMA_DMR_DQD               0x40000000
 #define FSL_QDMA_DSR_DB                        0x80000000
 
-#define FSL_QDMA_COMMAND_BUFFER_SIZE   64
-#define FSL_QDMA_DESCRIPTOR_BUFFER_SIZE 32
 #define FSL_QDMA_CIRCULAR_DESC_SIZE_MIN        64
 #define FSL_QDMA_CIRCULAR_DESC_SIZE_MAX        16384
 #define FSL_QDMA_QUEUE_NUM_MAX         8
@@ -106,16 +108,11 @@
 
 /* qdma engine attribute */
 #define QDMA_QUEUE_SIZE                        64
-#define QDMA_STATUS_SIZE               64
+#define QDMA_STATUS_SIZE               QDMA_QUEUE_SIZE
 #define QDMA_CCSR_BASE                 0x8380000
-#define VIRT_CHANNELS                  32
 #define QDMA_BLOCK_OFFSET              0x10000
 #define QDMA_BLOCKS                    4
 #define QDMA_QUEUES                    8
-#define QDMA_DELAY                     1000
-#define QDMA_SGF_SRC_OFF               2
-#define QDMA_SGF_DST_OFF               3
-#define QDMA_DESC_OFF                  1
 #define QDMA_QUEUE_CR_WM               32
 
 #define QDMA_BIG_ENDIAN                        1
@@ -134,64 +131,76 @@
 #define FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma_engine, x)                 \
        (((fsl_qdma_engine)->block_offset) * (x))
 
-typedef void (*dma_call_back)(void *params);
-
 /* qDMA Command Descriptor Formats */
 struct fsl_qdma_format {
-       __le32 status; /* ser, status */
-       __le32 cfg;     /* format, offset */
+       uint32_t status; /* ser, status */
+       uint32_t cfg;   /* format, offset */
        union {
                struct {
-                       __le32 addr_lo; /* low 32-bits of 40-bit address */
-                       u8 addr_hi;     /* high 8-bits of 40-bit address */
-                       u8 __reserved1[2];
-                       u8 cfg8b_w1; /* dd, queue */
+                       uint32_t addr_lo; /* low 32-bits of 40-bit address */
+                       uint8_t addr_hi; /* high 8-bits of 40-bit address */
+                       uint8_t __reserved1[2];
+                       uint8_t cfg8b_w1; /* dd, queue */
                };
-               __le64 data;
+               uint64_t data;
        };
 };
 
 /* qDMA Source Descriptor Format */
 struct fsl_qdma_sdf {
-       __le32 rev3;
-       __le32 cfg; /* rev4, bit[0-11] - ssd, bit[12-23] sss */
-       __le32 rev5;
-       __le32 cmd;
+       uint32_t rev3;
+       uint32_t cfg; /* rev4, bit[0-11] - ssd, bit[12-23] sss */
+       uint32_t rev5;
+       uint32_t cmd;
 };
 
 /* qDMA Destination Descriptor Format */
 struct fsl_qdma_ddf {
-       __le32 rev1;
-       __le32 cfg; /* rev2, bit[0-11] - dsd, bit[12-23] - dss */
-       __le32 rev3;
-       __le32 cmd;
+       uint32_t rev1;
+       uint32_t cfg; /* rev2, bit[0-11] - dsd, bit[12-23] - dss */
+       uint32_t rev3;
+       uint32_t cmd;
+};
+
+struct fsl_qdma_df {
+       struct fsl_qdma_sdf sdf;
+       struct fsl_qdma_ddf ddf;
+};
+
+struct fsl_qdma_cmpd_ft {
+       struct fsl_qdma_format desc_buf;
+       struct fsl_qdma_format desc_sbuf;
+       struct fsl_qdma_format desc_dbuf;
 };
 
 struct fsl_qdma_queue {
-       struct fsl_qdma_format  *virt_head;
-       void                    **virt_addr;
-       u8                      ci;
-       u8                      n_cq;
-       u8                      id;
-       void                    *queue_base;
-       struct fsl_qdma_format  *cq;
-       struct rte_dma_stats    stats;
-       u8                      pending;
-       dma_addr_t              bus_addr;
-       void                    **desc_virt_addr;
+       struct fsl_qdma_format *cmd_desc;
+       int used;
+       struct fsl_qdma_cmpd_ft **ft;
+       uint16_t ci;
+       uint16_t complete;
+       uint16_t n_cq;
+       uint8_t block_id;
+       uint8_t queue_id;
+       void *block_vir;
+       uint32_t le_cqmr;
+       struct fsl_qdma_format *cq;
+       struct rte_dma_stats stats;
+       uint8_t pending;
+       dma_addr_t bus_addr;
+       struct fsl_qdma_df **df;
 };
 
 struct fsl_qdma_engine {
-       void                    *ctrl_base;
-       void                    *status_base;
-       void                    *block_base;
-       u32                     n_queues;
-       struct fsl_qdma_queue   **queue;
-       struct fsl_qdma_queue   **status;
-       u32                     num_blocks;
-       u8                      free_block_id;
-       u32                     vchan_map[4];
-       int                     block_offset;
+       void *reg_base;
+       void *ctrl_base;
+       void *status_base;
+       void *block_base;
+       uint32_t n_queues;
+       struct fsl_qdma_queue **queue;
+       struct fsl_qdma_queue **status;
+       uint32_t num_blocks;
+       int block_offset;
 };
 
 #endif /* _DPAA_QDMA_H_ */
-- 
2.25.1

Reply via email to