Add queue ops for IDPF PMD:
        rx_queue_start
        rx_queue_stop
        tx_queue_start
        tx_queue_stop
        rx_queue_setup
        rx_queue_release
        tx_queue_setup
        tx_queue_release

Signed-off-by: Beilei Xing <beilei.x...@intel.com>
Signed-off-by: Xiaoyun Li <xiaoyun...@intel.com>
Signed-off-by: Junfeng Guo <junfeng....@intel.com>
---
 drivers/net/idpf/idpf_ethdev.c |   85 +++
 drivers/net/idpf/idpf_ethdev.h |    5 +
 drivers/net/idpf/idpf_rxtx.c   | 1252 ++++++++++++++++++++++++++++++++
 drivers/net/idpf/idpf_rxtx.h   |  167 +++++
 drivers/net/idpf/idpf_vchnl.c  |  342 +++++++++
 drivers/net/idpf/meson.build   |    1 +
 6 files changed, 1852 insertions(+)
 create mode 100644 drivers/net/idpf/idpf_rxtx.c
 create mode 100644 drivers/net/idpf/idpf_rxtx.h
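
For context, a minimal application-side sketch of how the queue ops added by this
patch are reached through the generic ethdev API. Port ID, queue/descriptor counts
and the mempool are illustrative placeholders, not part of the patch; descriptor
counts have to satisfy the IDPF_MIN_RING_DESC/IDPF_MAX_RING_DESC and 32-descriptor
alignment checks introduced in idpf_rxtx.c:

#include <rte_ethdev.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>

/* Hypothetical helper: one Rx and one Tx queue on an already-probed port. */
static int
setup_and_start_port(uint16_t port_id, struct rte_mempool *mb_pool)
{
        struct rte_eth_conf port_conf = { 0 };
        int ret;

        ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
        if (ret != 0)
                return ret;

        /* Dispatches to idpf_rx_queue_setup()/idpf_tx_queue_setup() */
        ret = rte_eth_rx_queue_setup(port_id, 0, 1024, rte_socket_id(),
                                     NULL, mb_pool);
        if (ret != 0)
                return ret;
        ret = rte_eth_tx_queue_setup(port_id, 0, 1024, rte_socket_id(), NULL);
        if (ret != 0)
                return ret;

        /* idpf_dev_start() now configures, initializes and enables the queues */
        ret = rte_eth_dev_start(port_id);
        if (ret != 0)
                return ret;

        /* Per-queue control maps to the new rx/tx_queue_stop/start ops */
        ret = rte_eth_dev_rx_queue_stop(port_id, 0);
        if (ret != 0)
                return ret;
        return rte_eth_dev_rx_queue_start(port_id, 0);
}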

diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index e34165a87d..511770ed4f 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -12,6 +12,7 @@
 #include <rte_dev.h>
 
 #include "idpf_ethdev.h"
+#include "idpf_rxtx.h"
 
 #define VPORT_NUM              "vport_num"
 
@@ -33,6 +34,14 @@ static const struct eth_dev_ops idpf_eth_dev_ops = {
        .dev_start                      = idpf_dev_start,
        .dev_stop                       = idpf_dev_stop,
        .dev_close                      = idpf_dev_close,
+       .rx_queue_start                 = idpf_rx_queue_start,
+       .rx_queue_stop                  = idpf_rx_queue_stop,
+       .tx_queue_start                 = idpf_tx_queue_start,
+       .tx_queue_stop                  = idpf_tx_queue_stop,
+       .rx_queue_setup                 = idpf_rx_queue_setup,
+       .rx_queue_release               = idpf_dev_rx_queue_release,
+       .tx_queue_setup                 = idpf_tx_queue_setup,
+       .tx_queue_release               = idpf_dev_tx_queue_release,
 };
 
 
@@ -193,6 +202,65 @@ idpf_dev_configure(struct rte_eth_dev *dev)
        return ret;
 }
 
+static int
+idpf_config_queues(struct idpf_vport *vport)
+{
+       int err;
+
+       err = idpf_config_rxqs(vport);
+       if (err)
+               return err;
+
+       err = idpf_config_txqs(vport);
+
+       return err;
+}
+
+static int
+idpf_start_queues(struct rte_eth_dev *dev)
+{
+       struct idpf_vport *vport =
+               (struct idpf_vport *)dev->data->dev_private;
+       struct idpf_rx_queue *rxq;
+       struct idpf_tx_queue *txq;
+       int i, err = 0;
+
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               txq = dev->data->tx_queues[i];
+               if (txq->tx_deferred_start)
+                       continue;
+               if (idpf_tx_queue_init(dev, i) != 0) {
+                       PMD_DRV_LOG(ERR, "Failed to init TX queue %u", i);
+                       return -1;
+               }
+       }
+
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               rxq = dev->data->rx_queues[i];
+               if (rxq->rx_deferred_start)
+                       continue;
+               if (idpf_rx_queue_init(dev, i) != 0) {
+                       PMD_DRV_LOG(ERR, "Failed to init RX queue %u", i);
+                       return -1;
+               }
+       }
+
+       err = idpf_ena_dis_queues(vport, true);
+       if (err) {
+               PMD_DRV_LOG(ERR, "Failed to start queues");
+               return err;
+       }
+
+       for (i = 0; i < dev->data->nb_tx_queues; i++)
+               dev->data->tx_queue_state[i] =
+                       RTE_ETH_QUEUE_STATE_STARTED;
+       for (i = 0; i < dev->data->nb_rx_queues; i++)
+               dev->data->rx_queue_state[i] =
+                       RTE_ETH_QUEUE_STATE_STARTED;
+
+       return err;
+}
+
 static int
 idpf_dev_start(struct rte_eth_dev *dev)
 {
@@ -203,6 +271,19 @@ idpf_dev_start(struct rte_eth_dev *dev)
 
        vport->stopped = 0;
 
+       if (idpf_config_queues(vport)) {
+               PMD_DRV_LOG(ERR, "Failed to configure queues");
+               goto err_queue;
+       }
+
+       idpf_set_rx_function(dev);
+       idpf_set_tx_function(dev);
+
+       if (idpf_start_queues(dev)) {
+               PMD_DRV_LOG(ERR, "Failed to start queues");
+               goto err_queue;
+       }
+
        if (idpf_ena_dis_vport(vport, true)) {
                PMD_DRV_LOG(ERR, "Failed to enable vport");
                goto err_vport;
@@ -211,6 +292,8 @@ idpf_dev_start(struct rte_eth_dev *dev)
        return 0;
 
 err_vport:
+       idpf_stop_queues(dev);
+err_queue:
        return -1;
 }
 
@@ -228,6 +311,8 @@ idpf_dev_stop(struct rte_eth_dev *dev)
        if (idpf_ena_dis_vport(vport, false))
                PMD_DRV_LOG(ERR, "disable vport failed");
 
+       idpf_stop_queues(dev);
+
        vport->stopped = 1;
        dev->data->dev_started = 0;
 
diff --git a/drivers/net/idpf/idpf_ethdev.h b/drivers/net/idpf/idpf_ethdev.h
index 762d5ff66a..c5aa168d95 100644
--- a/drivers/net/idpf/idpf_ethdev.h
+++ b/drivers/net/idpf/idpf_ethdev.h
@@ -195,6 +195,11 @@ int idpf_get_caps(struct idpf_adapter *adapter);
 int idpf_create_vport(__rte_unused struct rte_eth_dev *dev);
 int idpf_destroy_vport(struct idpf_vport *vport);
 
+int idpf_config_rxqs(struct idpf_vport *vport);
+int idpf_config_txqs(struct idpf_vport *vport);
+int idpf_switch_queue(struct idpf_vport *vport, uint16_t qid,
+                     bool rx, bool on);
+int idpf_ena_dis_queues(struct idpf_vport *vport, bool enable);
 int idpf_ena_dis_vport(struct idpf_vport *vport, bool enable);
 
 #endif /* _IDPF_ETHDEV_H_ */
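
Usage note (illustrative, not part of the patch): a queue whose rxconf/txconf sets
the deferred-start flag is skipped by idpf_start_queues() in idpf_ethdev.c above and
has to be started explicitly through the per-queue start ops added by this patch.
A minimal sketch, with hypothetical port/queue IDs, descriptor count and mempool:

#include <rte_ethdev.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>

static int
deferred_rx_queue_start(uint16_t port_id, struct rte_mempool *mb_pool)
{
        struct rte_eth_rxconf rxconf = { 0 };
        int ret;

        /* Leave queue 0 stopped across rte_eth_dev_start() */
        rxconf.rx_deferred_start = 1;

        ret = rte_eth_rx_queue_setup(port_id, 0, 1024, rte_socket_id(),
                                     &rxconf, mb_pool);
        if (ret != 0)
                return ret;

        ret = rte_eth_dev_start(port_id);
        if (ret != 0)
                return ret;

        /* Calls idpf_rx_queue_start(): mbuf fill, tail init, virtchnl enable */
        return rte_eth_dev_rx_queue_start(port_id, 0);
}
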
diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
new file mode 100644
index 0000000000..770ed52281
--- /dev/null
+++ b/drivers/net/idpf/idpf_rxtx.c
@@ -0,0 +1,1252 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Intel Corporation
+ */
+
+#include <ethdev_driver.h>
+#include <rte_net.h>
+
+#include "idpf_ethdev.h"
+#include "idpf_rxtx.h"
+
+static inline int
+check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
+{
+       /* The following constraints must be satisfied:
+        *   thresh < rxq->nb_rx_desc
+        */
+       if (thresh >= nb_desc) {
+               PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be less than %u",
+                            thresh, nb_desc);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static inline int
+check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
+               uint16_t tx_free_thresh)
+{
+       /* TX descriptors will have their RS bit set after tx_rs_thresh
+        * descriptors have been used. The TX descriptor ring will be cleaned
+        * after tx_free_thresh descriptors are used or if the number of
+        * descriptors required to transmit a packet is greater than the
+        * number of free TX descriptors.
+        *
+        * The following constraints must be satisfied:
+        *  - tx_rs_thresh must be less than the size of the ring minus 2.
+        *  - tx_free_thresh must be less than the size of the ring minus 3.
+        *  - tx_rs_thresh must be less than or equal to tx_free_thresh.
+        *  - tx_rs_thresh must be a divisor of the ring size.
+        *
+        * One descriptor in the TX ring is used as a sentinel to avoid a H/W
+        * race condition, hence the maximum threshold constraints. When set
+        * to zero use default values.
+        */
+       if (tx_rs_thresh >= (nb_desc - 2)) {
+               PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than the "
+                            "number of TX descriptors (%u) minus 2",
+                            tx_rs_thresh, nb_desc);
+               return -EINVAL;
+       }
+       if (tx_free_thresh >= (nb_desc - 3)) {
+               PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be less than the "
+                            "number of TX descriptors (%u) minus 3.",
+                            tx_free_thresh, nb_desc);
+               return -EINVAL;
+       }
+       if (tx_rs_thresh > tx_free_thresh) {
+               PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than or "
+                            "equal to tx_free_thresh (%u).",
+                            tx_rs_thresh, tx_free_thresh);
+               return -EINVAL;
+       }
+       if ((nb_desc % tx_rs_thresh) != 0) {
+               PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be a divisor of the "
+                            "number of TX descriptors (%u).",
+                            tx_rs_thresh, nb_desc);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static inline void
+release_rxq_mbufs(struct idpf_rx_queue *rxq)
+{
+       uint16_t i;
+
+       if (!rxq->sw_ring)
+               return;
+
+       for (i = 0; i < rxq->nb_rx_desc; i++) {
+               if (rxq->sw_ring[i]) {
+                       rte_pktmbuf_free_seg(rxq->sw_ring[i]);
+                       rxq->sw_ring[i] = NULL;
+               }
+       }
+}
+
+static inline void
+release_txq_mbufs(struct idpf_tx_queue *txq)
+{
+       uint16_t nb_desc, i;
+
+       if (!txq || !txq->sw_ring) {
+               PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
+               return;
+       }
+
+       if (txq->sw_nb_desc) {
+               /* For split queue model, descriptor ring */
+               nb_desc = txq->sw_nb_desc;
+       } else {
+               /* For single queue model */
+               nb_desc = txq->nb_tx_desc;
+       }
+       for (i = 0; i < nb_desc; i++) {
+               if (txq->sw_ring[i].mbuf) {
+                       rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+                       txq->sw_ring[i].mbuf = NULL;
+               }
+       }
+}
+
+static const struct idpf_rxq_ops def_rxq_ops = {
+       .release_mbufs = release_rxq_mbufs,
+};
+
+static const struct idpf_txq_ops def_txq_ops = {
+       .release_mbufs = release_txq_mbufs,
+};
+
+static void
+idpf_rx_queue_release(void *rxq)
+{
+       struct idpf_rx_queue *q = (struct idpf_rx_queue *)rxq;
+
+       if (!q)
+               return;
+
+       /* Split queue */
+       if (q->bufq1 && q->bufq2) {
+               q->bufq1->ops->release_mbufs(q->bufq1);
+               rte_free(q->bufq1->sw_ring);
+               rte_memzone_free(q->bufq1->mz);
+               rte_free(q->bufq1);
+               q->bufq2->ops->release_mbufs(q->bufq2);
+               rte_free(q->bufq2->sw_ring);
+               rte_memzone_free(q->bufq2->mz);
+               rte_free(q->bufq2);
+               rte_memzone_free(q->mz);
+               rte_free(q);
+               return;
+       }
+
+       /* Single queue */
+       q->ops->release_mbufs(q);
+       rte_free(q->sw_ring);
+       rte_memzone_free(q->mz);
+       rte_free(q);
+}
+
+static void
+idpf_tx_queue_release(void *txq)
+{
+       struct idpf_tx_queue *q = (struct idpf_tx_queue *)txq;
+
+       if (!q)
+               return;
+
+       if (q->complq)
+               rte_free(q->complq);
+       q->ops->release_mbufs(q);
+       rte_free(q->sw_ring);
+       rte_memzone_free(q->mz);
+       rte_free(q);
+}
+
+static inline void
+reset_split_rx_descq(struct idpf_rx_queue *rxq)
+{
+       uint16_t len;
+       uint32_t i;
+
+       if (!rxq)
+               return;
+
+       len = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;
+
+       for (i = 0; i < len * sizeof(struct virtchnl2_rx_flex_desc_adv_nic_3);
+            i++)
+               ((volatile char *)rxq->rx_ring)[i] = 0;
+
+       rxq->rx_tail = 0;
+       rxq->expected_gen_id = 1;
+}
+
+static inline void
+reset_split_rx_bufq(struct idpf_rx_queue *rxq)
+{
+       uint16_t len;
+       uint32_t i;
+
+       if (!rxq)
+               return;
+
+       len = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;
+
+       for (i = 0; i < len * sizeof(struct virtchnl2_splitq_rx_buf_desc);
+            i++)
+               ((volatile char *)rxq->rx_ring)[i] = 0;
+
+       memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
+
+       for (i = 0; i < IDPF_RX_MAX_BURST; i++)
+               rxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;
+
+       /* The next descriptor id which can be received. */
+       rxq->rx_next_avail = 0;
+
+       /* The next descriptor id which can be refilled. */
+       rxq->rx_tail = 0;
+       /* The number of descriptors which can be refilled. */
+       rxq->nb_rx_hold = rxq->nb_rx_desc - 1;
+
+       rxq->bufq1 = NULL;
+       rxq->bufq2 = NULL;
+}
+
+static inline void
+reset_split_rx_queue(struct idpf_rx_queue *rxq)
+{
+       reset_split_rx_descq(rxq);
+       reset_split_rx_bufq(rxq->bufq1);
+       reset_split_rx_bufq(rxq->bufq2);
+}
+
+static inline void
+reset_single_rx_queue(struct idpf_rx_queue *rxq)
+{
+       uint16_t len;
+       uint32_t i;
+
+       if (!rxq)
+               return;
+
+       len = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;
+
+       for (i = 0; i < len * sizeof(struct virtchnl2_singleq_rx_buf_desc);
+            i++)
+               ((volatile char *)rxq->rx_ring)[i] = 0;
+
+       memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
+
+       for (i = 0; i < IDPF_RX_MAX_BURST; i++)
+               rxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;
+
+       rxq->rx_tail = 0;
+       rxq->nb_rx_hold = 0;
+
+       if (rxq->pkt_first_seg != NULL)
+               rte_pktmbuf_free(rxq->pkt_first_seg);
+
+       rxq->pkt_first_seg = NULL;
+       rxq->pkt_last_seg = NULL;
+}
+
+static inline void
+reset_split_tx_descq(struct idpf_tx_queue *txq)
+{
+       struct idpf_tx_entry *txe;
+       uint32_t i, size;
+       uint16_t prev;
+
+       if (!txq) {
+               PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
+               return;
+       }
+
+       size = sizeof(struct iecm_flex_tx_sched_desc) * txq->nb_tx_desc;
+       for (i = 0; i < size; i++)
+               ((volatile char *)txq->desc_ring)[i] = 0;
+
+       txe = txq->sw_ring;
+       prev = (uint16_t)(txq->sw_nb_desc - 1);
+       for (i = 0; i < txq->sw_nb_desc; i++) {
+               txe[i].mbuf = NULL;
+               txe[i].last_id = i;
+               txe[prev].next_id = i;
+               prev = i;
+       }
+
+       txq->tx_tail = 0;
+       txq->nb_used = 0;
+
+       /* Use this as next to clean for split desc queue */
+       txq->last_desc_cleaned = 0;
+       txq->sw_tail = 0;
+       txq->nb_free = txq->nb_tx_desc - 1;
+}
+
+static inline void
+reset_split_tx_complq(struct idpf_tx_queue *cq)
+{
+       uint32_t i, size;
+
+       if (!cq) {
+               PMD_DRV_LOG(DEBUG, "Pointer to complq is NULL");
+               return;
+       }
+
+       size = sizeof(struct iecm_splitq_tx_compl_desc) * cq->nb_tx_desc;
+       for (i = 0; i < size; i++)
+               ((volatile char *)cq->compl_ring)[i] = 0;
+
+       cq->tx_tail = 0;
+       cq->expected_gen_id = 1;
+}
+
+static inline void
+reset_single_tx_queue(struct idpf_tx_queue *txq)
+{
+       struct idpf_tx_entry *txe;
+       uint32_t i, size;
+       uint16_t prev;
+
+       if (!txq) {
+               PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
+               return;
+       }
+
+       txe = txq->sw_ring;
+       size = sizeof(struct iecm_base_tx_desc) * txq->nb_tx_desc;
+       for (i = 0; i < size; i++)
+               ((volatile char *)txq->tx_ring)[i] = 0;
+
+       prev = (uint16_t)(txq->nb_tx_desc - 1);
+       for (i = 0; i < txq->nb_tx_desc; i++) {
+               txq->tx_ring[i].qw1 =
+                       rte_cpu_to_le_64(IECM_TX_DESC_DTYPE_DESC_DONE);
+               txe[i].mbuf =  NULL;
+               txe[i].last_id = i;
+               txe[prev].next_id = i;
+               prev = i;
+       }
+
+       txq->tx_tail = 0;
+       txq->nb_used = 0;
+
+       txq->last_desc_cleaned = txq->nb_tx_desc - 1;
+       txq->nb_free = txq->nb_tx_desc - 1;
+
+       txq->next_dd = txq->rs_thresh - 1;
+       txq->next_rs = txq->rs_thresh - 1;
+}
+
+static int
+idpf_rx_split_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *bufq,
+                        uint16_t queue_idx, uint16_t rx_free_thresh,
+                        uint16_t nb_desc, unsigned int socket_id,
+                        const struct rte_eth_rxconf *rx_conf,
+                        struct rte_mempool *mp)
+{
+       struct idpf_vport *vport =
+               (struct idpf_vport *)dev->data->dev_private;
+       struct iecm_hw *hw = &adapter->hw;
+       const struct rte_memzone *mz;
+       uint32_t ring_size;
+       uint16_t len;
+
+       bufq->mp = mp;
+       bufq->nb_rx_desc = nb_desc;
+       bufq->rx_free_thresh = rx_free_thresh;
+       bufq->queue_id = vport->chunks_info.rx_buf_start_qid + queue_idx;
+       bufq->port_id = dev->data->port_id;
+       bufq->rx_deferred_start = rx_conf->rx_deferred_start;
+       bufq->rx_hdr_len = 0;
+       bufq->adapter = adapter;
+
+       if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
+               bufq->crc_len = RTE_ETHER_CRC_LEN;
+       else
+               bufq->crc_len = 0;
+
+       len = rte_pktmbuf_data_room_size(bufq->mp) - RTE_PKTMBUF_HEADROOM;
+       bufq->rx_buf_len = len;
+
+       /* Allocate the software ring. */
+       len = nb_desc + IDPF_RX_MAX_BURST;
+       bufq->sw_ring =
+               rte_zmalloc_socket("idpf rx bufq sw ring",
+                                  sizeof(struct rte_mbuf *) * len,
+                                  RTE_CACHE_LINE_SIZE,
+                                  socket_id);
+       if (!bufq->sw_ring) {
+               PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
+               return -ENOMEM;
+       }
+
+       /* Allocate a little more to support bulk allocation. */
+       len = nb_desc + IDPF_RX_MAX_BURST;
+       ring_size = RTE_ALIGN(len *
+                             sizeof(struct virtchnl2_splitq_rx_buf_desc),
+                             IDPF_DMA_MEM_ALIGN);
+       mz = rte_eth_dma_zone_reserve(dev, "rx_buf_ring", queue_idx,
+                                     ring_size, IDPF_RING_BASE_ALIGN,
+                                     socket_id);
+       if (!mz) {
+               PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX buffer queue.");
+               rte_free(bufq->sw_ring);
+               return -ENOMEM;
+       }
+
+       /* Zero all the descriptors in the ring. */
+       memset(mz->addr, 0, ring_size);
+       bufq->rx_ring_phys_addr = mz->iova;
+       bufq->rx_ring = mz->addr;
+
+       bufq->mz = mz;
+       reset_split_rx_bufq(bufq);
+       bufq->q_set = true;
+       bufq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_buf_qtail_start +
+                        queue_idx * vport->chunks_info.rx_buf_qtail_spacing);
+       bufq->ops = &def_rxq_ops;
+
+       /* TODO: allow bulk or vec */
+
+       return 0;
+}
+
+static int
+idpf_rx_split_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+                         uint16_t nb_desc, unsigned int socket_id,
+                         const struct rte_eth_rxconf *rx_conf,
+                         struct rte_mempool *mp)
+{
+       struct idpf_vport *vport =
+               (struct idpf_vport *)dev->data->dev_private;
+       struct idpf_rx_queue *rxq;
+       struct idpf_rx_queue *bufq1, *bufq2;
+       const struct rte_memzone *mz;
+       uint16_t rx_free_thresh;
+       uint32_t ring_size;
+       uint16_t qid;
+       uint16_t len;
+       int ret;
+
+       PMD_INIT_FUNC_TRACE();
+
+       if (nb_desc % IDPF_ALIGN_RING_DESC != 0 ||
+           nb_desc > IDPF_MAX_RING_DESC ||
+           nb_desc < IDPF_MIN_RING_DESC) {
+               PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is invalid", nb_desc);
+               return -EINVAL;
+       }
+
+       /* Check free threshold */
+       rx_free_thresh = (rx_conf->rx_free_thresh == 0) ?
+               IDPF_DEFAULT_RX_FREE_THRESH :
+               rx_conf->rx_free_thresh;
+       if (check_rx_thresh(nb_desc, rx_free_thresh))
+               return -EINVAL;
+
+       /* Free memory if needed */
+       if (dev->data->rx_queues[queue_idx]) {
+               idpf_rx_queue_release(dev->data->rx_queues[queue_idx]);
+               dev->data->rx_queues[queue_idx] = NULL;
+       }
+
+       /* Setup Rx description queue */
+       rxq = rte_zmalloc_socket("idpf rxq",
+                                sizeof(struct idpf_rx_queue),
+                                RTE_CACHE_LINE_SIZE,
+                                socket_id);
+       if (!rxq) {
+               PMD_INIT_LOG(ERR, "Failed to allocate memory for rx queue data structure");
+               return -ENOMEM;
+       }
+
+       rxq->mp = mp;
+       rxq->nb_rx_desc = nb_desc;
+       rxq->rx_free_thresh = rx_free_thresh;
+       rxq->queue_id = vport->chunks_info.rx_start_qid + queue_idx;
+       rxq->port_id = dev->data->port_id;
+       rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+       rxq->rx_hdr_len = 0;
+       rxq->adapter = adapter;
+
+       if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
+               rxq->crc_len = RTE_ETHER_CRC_LEN;
+       else
+               rxq->crc_len = 0;
+
+       len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
+       rxq->rx_buf_len = len;
+
+       len = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;
+       ring_size = RTE_ALIGN(len *
+                             sizeof(struct virtchnl2_rx_flex_desc_adv_nic_3),
+                             IDPF_DMA_MEM_ALIGN);
+       mz = rte_eth_dma_zone_reserve(dev, "rx_cpmpl_ring", queue_idx,
+                                     ring_size, IDPF_RING_BASE_ALIGN,
+                                     socket_id);
+
+       if (!mz) {
+               PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
+               ret = -ENOMEM;
+               goto free_rxq;
+       }
+
+       /* Zero all the descriptors in the ring. */
+       memset(mz->addr, 0, ring_size);
+       rxq->rx_ring_phys_addr = mz->iova;
+       rxq->rx_ring = mz->addr;
+
+       rxq->mz = mz;
+       reset_split_rx_descq(rxq);
+       rxq->q_set = true;
+       dev->data->rx_queues[queue_idx] = rxq;
+
+       /* TODO: allow bulk or vec */
+
+       /* setup Rx buffer queue */
+       bufq1 = rte_zmalloc_socket("idpf bufq1",
+                                  sizeof(struct idpf_rx_queue),
+                                  RTE_CACHE_LINE_SIZE,
+                                  socket_id);
+       if (!bufq1) {
+               PMD_INIT_LOG(ERR, "Failed to allocate memory for rx buffer queue 1.");
+               ret = -ENOMEM;
+               goto free_mz;
+       }
+       qid = 2 * queue_idx;
+       ret = idpf_rx_split_bufq_setup(dev, bufq1, qid, rx_free_thresh,
+                                      nb_desc, socket_id, rx_conf, mp);
+       if (ret) {
+               PMD_INIT_LOG(ERR, "Failed to setup buffer queue 1");
+               ret = -EINVAL;
+               goto free_bufq1;
+       }
+       rxq->bufq1 = bufq1;
+
+       bufq2 = rte_zmalloc_socket("idpf bufq2",
+                                  sizeof(struct idpf_rx_queue),
+                                  RTE_CACHE_LINE_SIZE,
+                                  socket_id);
+       if (!bufq2) {
+               PMD_INIT_LOG(ERR, "Failed to allocate memory for rx buffer queue 2.");
+               rte_free(bufq1->sw_ring);
+               rte_memzone_free(bufq1->mz);
+               ret = -ENOMEM;
+               goto free_bufq1;
+       }
+       qid = 2 * queue_idx + 1;
+       ret = idpf_rx_split_bufq_setup(dev, bufq2, qid, rx_free_thresh,
+                                      nb_desc, socket_id, rx_conf, mp);
+       if (ret) {
+               PMD_INIT_LOG(ERR, "Failed to setup buffer queue 2");
+               rte_free(bufq1->sw_ring);
+               rte_memzone_free(bufq1->mz);
+               ret = -EINVAL;
+               goto free_bufq2;
+       }
+       rxq->bufq2 = bufq2;
+
+       return 0;
+
+free_bufq2:
+       rte_free(bufq2);
+free_bufq1:
+       rte_free(bufq1);
+free_mz:
+       rte_memzone_free(mz);
+free_rxq:
+       rte_free(rxq);
+
+       return ret;
+}
+
+static int
+idpf_rx_single_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+                          uint16_t nb_desc, unsigned int socket_id,
+                          const struct rte_eth_rxconf *rx_conf,
+                          struct rte_mempool *mp)
+{
+       struct idpf_vport *vport =
+               (struct idpf_vport *)dev->data->dev_private;
+       struct iecm_hw *hw = &adapter->hw;
+       struct idpf_rx_queue *rxq;
+       const struct rte_memzone *mz;
+       uint16_t rx_free_thresh;
+       uint32_t ring_size;
+       uint16_t len;
+
+       PMD_INIT_FUNC_TRACE();
+
+       if (nb_desc % IDPF_ALIGN_RING_DESC != 0 ||
+           nb_desc > IDPF_MAX_RING_DESC ||
+           nb_desc < IDPF_MIN_RING_DESC) {
+               PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is invalid",
+                            nb_desc);
+               return -EINVAL;
+       }
+
+       /* Check free threshold */
+       rx_free_thresh = (rx_conf->rx_free_thresh == 0) ?
+               IDPF_DEFAULT_RX_FREE_THRESH :
+               rx_conf->rx_free_thresh;
+       if (check_rx_thresh(nb_desc, rx_free_thresh))
+               return -EINVAL;
+
+       /* Free memory if needed */
+       if (dev->data->rx_queues[queue_idx]) {
+               idpf_rx_queue_release(dev->data->rx_queues[queue_idx]);
+               dev->data->rx_queues[queue_idx] = NULL;
+       }
+
+       /* Setup Rx description queue */
+       rxq = rte_zmalloc_socket("idpf rxq",
+                                sizeof(struct idpf_rx_queue),
+                                RTE_CACHE_LINE_SIZE,
+                                socket_id);
+       if (!rxq) {
+               PMD_INIT_LOG(ERR, "Failed to allocate memory for rx queue data structure");
+               return -ENOMEM;
+       }
+
+       rxq->mp = mp;
+       rxq->nb_rx_desc = nb_desc;
+       rxq->rx_free_thresh = rx_free_thresh;
+       rxq->queue_id = vport->chunks_info.rx_start_qid + queue_idx;
+       rxq->port_id = dev->data->port_id;
+       rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+       rxq->rx_hdr_len = 0;
+       rxq->adapter = adapter;
+
+       if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
+               rxq->crc_len = RTE_ETHER_CRC_LEN;
+       else
+               rxq->crc_len = 0;
+
+       len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
+       rxq->rx_buf_len = len;
+
+       len = nb_desc + IDPF_RX_MAX_BURST;
+       rxq->sw_ring =
+               rte_zmalloc_socket("idpf rxq sw ring",
+                                  sizeof(struct rte_mbuf *) * len,
+                                  RTE_CACHE_LINE_SIZE,
+                                  socket_id);
+       if (!rxq->sw_ring) {
+               PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
+               rte_free(rxq);
+               return -ENOMEM;
+       }
+
+       /* Allocate a little more to support bulk allocation. */
+       len = nb_desc + IDPF_RX_MAX_BURST;
+       ring_size = RTE_ALIGN(len *
+                             sizeof(struct virtchnl2_singleq_rx_buf_desc),
+                             IDPF_DMA_MEM_ALIGN);
+       mz = rte_eth_dma_zone_reserve(dev, "rx ring", queue_idx,
+                                     ring_size, IDPF_RING_BASE_ALIGN,
+                                     socket_id);
+       if (!mz) {
+               PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX buffer queue.");
+               rte_free(rxq->sw_ring);
+               rte_free(rxq);
+               return -ENOMEM;
+       }
+
+       /* Zero all the descriptors in the ring. */
+       memset(mz->addr, 0, ring_size);
+       rxq->rx_ring_phys_addr = mz->iova;
+       rxq->rx_ring = mz->addr;
+
+       rxq->mz = mz;
+       reset_single_rx_queue(rxq);
+       rxq->q_set = true;
+       dev->data->rx_queues[queue_idx] = rxq;
+       rxq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_qtail_start +
+                       queue_idx * vport->chunks_info.rx_qtail_spacing);
+       rxq->ops = &def_rxq_ops;
+
+       return 0;
+}
+
+int
+idpf_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+                   uint16_t nb_desc, unsigned int socket_id,
+                   const struct rte_eth_rxconf *rx_conf,
+                   struct rte_mempool *mp)
+{
+       struct idpf_vport *vport =
+               (struct idpf_vport *)dev->data->dev_private;
+
+       if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)
+               return idpf_rx_single_queue_setup(dev, queue_idx, nb_desc,
+                                                 socket_id, rx_conf, mp);
+       else
+               return idpf_rx_split_queue_setup(dev, queue_idx, nb_desc,
+                                                socket_id, rx_conf, mp);
+}
+
+static int
+idpf_tx_split_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+                         uint16_t nb_desc, unsigned int socket_id,
+                         const struct rte_eth_txconf *tx_conf)
+{
+       struct idpf_vport *vport =
+               (struct idpf_vport *)dev->data->dev_private;
+       struct iecm_hw *hw = &adapter->hw;
+       struct idpf_tx_queue *txq, *cq;
+       const struct rte_memzone *mz;
+       uint32_t ring_size;
+       uint16_t tx_rs_thresh, tx_free_thresh;
+       uint64_t offloads;
+
+       PMD_INIT_FUNC_TRACE();
+
+       offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+
+       if (nb_desc % IDPF_ALIGN_RING_DESC != 0 ||
+           nb_desc > IDPF_MAX_RING_DESC ||
+           nb_desc < IDPF_MIN_RING_DESC) {
+               PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is invalid",
+                            nb_desc);
+               return -EINVAL;
+       }
+
+       tx_rs_thresh = IDPF_DEFAULT_TX_RS_THRESH;
+       tx_free_thresh = IDPF_DEFAULT_TX_FREE_THRESH;
+       if (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh))
+               return -EINVAL;
+
+       /* Free memory if needed. */
+       if (dev->data->tx_queues[queue_idx]) {
+               idpf_tx_queue_release(dev->data->tx_queues[queue_idx]);
+               dev->data->tx_queues[queue_idx] = NULL;
+       }
+
+       /* Allocate the TX queue data structure. */
+       txq = rte_zmalloc_socket("idpf split txq",
+                                sizeof(struct idpf_tx_queue),
+                                RTE_CACHE_LINE_SIZE,
+                                socket_id);
+       if (!txq) {
+               PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue structure");
+               return -ENOMEM;
+       }
+
+       txq->nb_tx_desc = nb_desc;
+       txq->rs_thresh = tx_rs_thresh;
+       txq->free_thresh = tx_free_thresh;
+       txq->queue_id = vport->chunks_info.tx_start_qid + queue_idx;
+       txq->port_id = dev->data->port_id;
+       txq->offloads = offloads;
+       txq->tx_deferred_start = tx_conf->tx_deferred_start;
+
+       /* Allocate software ring */
+       txq->sw_nb_desc = 2 * nb_desc;
+       txq->sw_ring =
+               rte_zmalloc_socket("idpf split tx sw ring",
+                                  sizeof(struct idpf_tx_entry) *
+                                  txq->sw_nb_desc,
+                                  RTE_CACHE_LINE_SIZE,
+                                  socket_id);
+       if (!txq->sw_ring) {
+               PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
+               rte_free(txq);
+               return -ENOMEM;
+       }
+
+       /* Allocate TX hardware ring descriptors. */
+       ring_size = sizeof(struct iecm_flex_tx_sched_desc) * txq->nb_tx_desc;
+       ring_size = RTE_ALIGN(ring_size, IDPF_DMA_MEM_ALIGN);
+       mz = rte_eth_dma_zone_reserve(dev, "split_tx_ring", queue_idx,
+                                     ring_size, IDPF_RING_BASE_ALIGN,
+                                     socket_id);
+       if (!mz) {
+               PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
+               rte_free(txq->sw_ring);
+               rte_free(txq);
+               return -ENOMEM;
+       }
+       txq->tx_ring_phys_addr = mz->iova;
+       txq->desc_ring = (struct iecm_flex_tx_sched_desc *)mz->addr;
+
+       txq->mz = mz;
+       reset_split_tx_descq(txq);
+       txq->q_set = true;
+       dev->data->tx_queues[queue_idx] = txq;
+       txq->qtx_tail = hw->hw_addr + (vport->chunks_info.tx_qtail_start +
+                       queue_idx * vport->chunks_info.tx_qtail_spacing);
+       txq->ops = &def_txq_ops;
+
+       /* Allocate the TX completion queue data structure. */
+       txq->complq = rte_zmalloc_socket("idpf splitq cq",
+                                        sizeof(struct idpf_tx_queue),
+                                        RTE_CACHE_LINE_SIZE,
+                                        socket_id);
+       cq = txq->complq;
+       if (!cq) {
+               PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue structure");
+               return -ENOMEM;
+       }
+       cq->nb_tx_desc = 2 * nb_desc;
+       cq->queue_id = vport->chunks_info.tx_compl_start_qid + queue_idx;
+       cq->port_id = dev->data->port_id;
+       cq->txqs = dev->data->tx_queues;
+       cq->tx_start_qid = vport->chunks_info.tx_start_qid;
+
+       ring_size = sizeof(struct iecm_splitq_tx_compl_desc) * cq->nb_tx_desc;
+       ring_size = RTE_ALIGN(ring_size, IDPF_DMA_MEM_ALIGN);
+       mz = rte_eth_dma_zone_reserve(dev, "tx_split_compl_ring", queue_idx,
+                                     ring_size, IDPF_RING_BASE_ALIGN,
+                                     socket_id);
+       if (!mz) {
+               PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX completion queue");
+               rte_free(txq->sw_ring);
+               rte_free(txq);
+               return -ENOMEM;
+       }
+       cq->tx_ring_phys_addr = mz->iova;
+       cq->compl_ring = (struct iecm_splitq_tx_compl_desc *)mz->addr;
+       cq->mz = mz;
+       reset_split_tx_complq(cq);
+
+       return 0;
+}
+
+static int
+idpf_tx_single_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+                          uint16_t nb_desc, unsigned int socket_id,
+                          const struct rte_eth_txconf *tx_conf)
+{
+       struct idpf_vport *vport =
+               (struct idpf_vport *)dev->data->dev_private;
+       struct iecm_hw *hw = &adapter->hw;
+       struct idpf_tx_queue *txq;
+       const struct rte_memzone *mz;
+       uint32_t ring_size;
+       uint16_t tx_rs_thresh, tx_free_thresh;
+       uint64_t offloads;
+
+       PMD_INIT_FUNC_TRACE();
+
+       offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+
+       if (nb_desc % IDPF_ALIGN_RING_DESC != 0 ||
+           nb_desc > IDPF_MAX_RING_DESC ||
+           nb_desc < IDPF_MIN_RING_DESC) {
+               PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is invalid",
+                            nb_desc);
+               return -EINVAL;
+       }
+
+       tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
+               tx_conf->tx_rs_thresh : IDPF_DEFAULT_TX_RS_THRESH);
+       tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
+               tx_conf->tx_free_thresh : IDPF_DEFAULT_TX_FREE_THRESH);
+       check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh);
+
+       /* Free memory if needed. */
+       if (dev->data->tx_queues[queue_idx]) {
+               idpf_tx_queue_release(dev->data->tx_queues[queue_idx]);
+               dev->data->tx_queues[queue_idx] = NULL;
+       }
+
+       /* Allocate the TX queue data structure. */
+       txq = rte_zmalloc_socket("idpf txq",
+                                sizeof(struct idpf_tx_queue),
+                                RTE_CACHE_LINE_SIZE,
+                                socket_id);
+       if (!txq) {
+               PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue structure");
+               return -ENOMEM;
+       }
+
+       /* TODO: vlan offload */
+
+       txq->nb_tx_desc = nb_desc;
+       txq->rs_thresh = tx_rs_thresh;
+       txq->free_thresh = tx_free_thresh;
+       txq->queue_id = vport->chunks_info.tx_start_qid + queue_idx;
+       txq->port_id = dev->data->port_id;
+       txq->offloads = offloads;
+       txq->tx_deferred_start = tx_conf->tx_deferred_start;
+
+       /* Allocate software ring */
+       txq->sw_ring =
+               rte_zmalloc_socket("idpf tx sw ring",
+                                  sizeof(struct idpf_tx_entry) * nb_desc,
+                                  RTE_CACHE_LINE_SIZE,
+                                  socket_id);
+       if (!txq->sw_ring) {
+               PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
+               rte_free(txq);
+               return -ENOMEM;
+       }
+
+       /* Allocate TX hardware ring descriptors. */
+       ring_size = sizeof(struct iecm_base_tx_desc) * nb_desc;
+       ring_size = RTE_ALIGN(ring_size, IDPF_DMA_MEM_ALIGN);
+       mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
+                                     ring_size, IDPF_RING_BASE_ALIGN,
+                                     socket_id);
+       if (!mz) {
+               PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
+               rte_free(txq->sw_ring);
+               rte_free(txq);
+               return -ENOMEM;
+       }
+
+       txq->tx_ring_phys_addr = mz->iova;
+       txq->tx_ring = (struct iecm_base_tx_desc *)mz->addr;
+
+       txq->mz = mz;
+       reset_single_tx_queue(txq);
+       txq->q_set = true;
+       dev->data->tx_queues[queue_idx] = txq;
+       txq->qtx_tail = hw->hw_addr + (vport->chunks_info.tx_qtail_start +
+                       queue_idx * vport->chunks_info.tx_qtail_spacing);
+       txq->ops = &def_txq_ops;
+
+       return 0;
+}
+
+int
+idpf_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+                   uint16_t nb_desc, unsigned int socket_id,
+                   const struct rte_eth_txconf *tx_conf)
+{
+       struct idpf_vport *vport =
+               (struct idpf_vport *)dev->data->dev_private;
+
+       if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)
+               return idpf_tx_single_queue_setup(dev, queue_idx, nb_desc,
+                                                 socket_id, tx_conf);
+       else
+               return idpf_tx_split_queue_setup(dev, queue_idx, nb_desc,
+                                                socket_id, tx_conf);
+}
+
+static int
+idpf_alloc_single_rxq_mbufs(struct idpf_rx_queue *rxq)
+{
+       volatile struct virtchnl2_singleq_rx_buf_desc *rxd;
+       struct rte_mbuf *mbuf = NULL;
+       uint64_t dma_addr;
+       uint16_t i;
+
+       for (i = 0; i < rxq->nb_rx_desc; i++) {
+               mbuf = rte_mbuf_raw_alloc(rxq->mp);
+               if (unlikely(!mbuf)) {
+                       PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
+                       return -ENOMEM;
+               }
+
+               rte_mbuf_refcnt_set(mbuf, 1);
+               mbuf->next = NULL;
+               mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+               mbuf->nb_segs = 1;
+               mbuf->port = rxq->port_id;
+
+               dma_addr =
+                       rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+
+               rxd = &((volatile struct virtchnl2_singleq_rx_buf_desc *)(rxq->rx_ring))[i];
+               rxd->pkt_addr = dma_addr;
+               rxd->hdr_addr = 0;
+#ifndef RTE_LIBRTE_IDPF_16BYTE_RX_DESC
+               rxd->rsvd1 = 0;
+               rxd->rsvd2 = 0;
+#endif
+
+               rxq->sw_ring[i] = mbuf;
+       }
+
+       return 0;
+}
+
+static int
+idpf_alloc_split_rxq_mbufs(struct idpf_rx_queue *rxq)
+{
+       volatile struct virtchnl2_splitq_rx_buf_desc *rxd;
+       struct rte_mbuf *mbuf = NULL;
+       uint64_t dma_addr;
+       uint16_t i;
+
+       for (i = 0; i < rxq->nb_rx_desc - 1; i++) {
+               mbuf = rte_mbuf_raw_alloc(rxq->mp);
+               if (unlikely(!mbuf)) {
+                       PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
+                       return -ENOMEM;
+               }
+
+               rte_mbuf_refcnt_set(mbuf, 1);
+               mbuf->next = NULL;
+               mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+               mbuf->nb_segs = 1;
+               mbuf->port = rxq->port_id;
+
+               dma_addr =
+                       rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+
+               rxd = &((volatile struct virtchnl2_splitq_rx_buf_desc *)(rxq->rx_ring))[i];
+               rxd->qword0.buf_id = i;
+               rxd->qword0.rsvd0 = 0;
+               rxd->qword0.rsvd1 = 0;
+               rxd->pkt_addr = dma_addr;
+               rxd->hdr_addr = 0;
+               rxd->rsvd2 = 0;
+
+               rxq->sw_ring[i] = mbuf;
+       }
+
+       rxq->nb_rx_hold = 0;
+       rxq->rx_tail = rxq->nb_rx_desc - 1;
+
+       return 0;
+}
+
+int
+idpf_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+       struct idpf_rx_queue *rxq;
+       int err;
+
+       if (rx_queue_id >= dev->data->nb_rx_queues)
+               return -EINVAL;
+
+       rxq = dev->data->rx_queues[rx_queue_id];
+
+       if (!rxq->bufq1) {
+               /* Single queue */
+               err = idpf_alloc_single_rxq_mbufs(rxq);
+               if (err) {
+                       PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
+                       return err;
+               }
+
+               rte_wmb();
+
+               /* Init the RX tail register. */
+               IECM_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+       } else {
+               /* Split queue */
+               err = idpf_alloc_split_rxq_mbufs(rxq->bufq1);
+               if (err) {
+                       PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
+                       return err;
+               }
+               err = idpf_alloc_split_rxq_mbufs(rxq->bufq2);
+               if (err) {
+                       PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
+                       return err;
+               }
+
+               rte_wmb();
+
+               /* Init the RX tail register. */
+               IECM_PCI_REG_WRITE(rxq->bufq1->qrx_tail, rxq->nb_rx_desc - 1);
+               IECM_PCI_REG_WRITE(rxq->bufq2->qrx_tail, rxq->nb_rx_desc - 1);
+       }
+
+       return err;
+}
+
+int
+idpf_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+       struct idpf_vport *vport =
+               (struct idpf_vport *)dev->data->dev_private;
+       int err = 0;
+
+       PMD_DRV_FUNC_TRACE();
+
+       err = idpf_rx_queue_init(dev, rx_queue_id);
+       if (err) {
+               PMD_DRV_LOG(ERR, "Failed to init RX queue %u",
+                           rx_queue_id);
+               return err;
+       }
+
+       /* Ready to switch the queue on */
+       err = idpf_switch_queue(vport, rx_queue_id, true, true);
+       if (err)
+               PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
+                           rx_queue_id);
+       else
+               dev->data->rx_queue_state[rx_queue_id] =
+                       RTE_ETH_QUEUE_STATE_STARTED;
+
+       return err;
+}
+
+int
+idpf_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+       struct idpf_tx_queue *txq;
+
+       if (tx_queue_id >= dev->data->nb_tx_queues)
+               return -EINVAL;
+
+       txq = dev->data->tx_queues[tx_queue_id];
+
+       /* Init the TX tail register. */
+       IECM_PCI_REG_WRITE(txq->qtx_tail, 0);
+
+       return 0;
+}
+
+int
+idpf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+       struct idpf_vport *vport =
+               (struct idpf_vport *)dev->data->dev_private;
+       int err = 0;
+
+       PMD_DRV_FUNC_TRACE();
+
+       err = idpf_tx_queue_init(dev, tx_queue_id);
+       if (err) {
+               PMD_DRV_LOG(ERR, "Failed to init TX queue %u",
+                           tx_queue_id);
+               return err;
+       }
+
+       /* Ready to switch the queue on */
+       err = idpf_switch_queue(vport, tx_queue_id, false, true);
+       if (err)
+               PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
+                           tx_queue_id);
+       else
+               dev->data->tx_queue_state[tx_queue_id] =
+                       RTE_ETH_QUEUE_STATE_STARTED;
+
+       return err;
+}
+
+int
+idpf_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+       struct idpf_vport *vport =
+               (struct idpf_vport *)dev->data->dev_private;
+       struct idpf_rx_queue *rxq;
+       int err;
+
+       PMD_DRV_FUNC_TRACE();
+
+       if (rx_queue_id >= dev->data->nb_rx_queues)
+               return -EINVAL;
+
+       err = idpf_switch_queue(vport, rx_queue_id, true, false);
+       if (err) {
+               PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
+                           rx_queue_id);
+               return err;
+       }
+
+       rxq = dev->data->rx_queues[rx_queue_id];
+       if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+               rxq->ops->release_mbufs(rxq);
+               reset_single_rx_queue(rxq);
+       } else {
+               rxq->bufq1->ops->release_mbufs(rxq->bufq1);
+               rxq->bufq2->ops->release_mbufs(rxq->bufq2);
+               reset_split_rx_queue(rxq);
+       }
+       dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+       return 0;
+}
+
+int
+idpf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+       struct idpf_vport *vport =
+               (struct idpf_vport *)dev->data->dev_private;
+       struct idpf_tx_queue *txq;
+       int err;
+
+       PMD_DRV_FUNC_TRACE();
+
+       if (tx_queue_id >= dev->data->nb_tx_queues)
+               return -EINVAL;
+
+       err = idpf_switch_queue(vport, tx_queue_id, false, false);
+       if (err) {
+               PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
+                           tx_queue_id);
+               return err;
+       }
+
+       txq = dev->data->tx_queues[tx_queue_id];
+       txq->ops->release_mbufs(txq);
+       if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+               reset_single_tx_queue(txq);
+       } else {
+               reset_split_tx_descq(txq);
+               reset_split_tx_complq(txq->complq);
+       }
+       dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+       return 0;
+}
+
+void
+idpf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+       idpf_rx_queue_release(dev->data->rx_queues[qid]);
+}
+
+void
+idpf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+       idpf_tx_queue_release(dev->data->tx_queues[qid]);
+}
+
+void
+idpf_stop_queues(struct rte_eth_dev *dev)
+{
+       struct idpf_vport *vport =
+               (struct idpf_vport *)dev->data->dev_private;
+       struct idpf_rx_queue *rxq;
+       struct idpf_tx_queue *txq;
+       int ret, i;
+
+       /* Stop All queues */
+       ret = idpf_ena_dis_queues(vport, false);
+       if (ret)
+               PMD_DRV_LOG(WARNING, "Failed to stop queues");
+
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               rxq = dev->data->rx_queues[i];
+               if (!rxq)
+                       continue;
+               if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+                       rxq->ops->release_mbufs(rxq);
+                       reset_single_rx_queue(rxq);
+               } else {
+                       rxq->bufq1->ops->release_mbufs(rxq->bufq1);
+                       rxq->bufq2->ops->release_mbufs(rxq->bufq2);
+                       reset_split_rx_queue(rxq);
+               }
+               dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+       }
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               txq = dev->data->tx_queues[i];
+               if (!txq)
+                       continue;
+               txq->ops->release_mbufs(txq);
+               if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+                       reset_split_tx_descq(txq);
+                       reset_split_tx_complq(txq->complq);
+               } else {
+                       reset_single_tx_queue(txq);
+               }
+               dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+       }
+}
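
Illustration of the Tx threshold rules enforced by check_tx_thresh() in the file
above, for the single queue model where the user-supplied tx_conf thresholds are
honoured. The values are hypothetical: with a 512-entry ring, tx_rs_thresh = 32 and
tx_free_thresh = 64 satisfy tx_rs_thresh < nb_desc - 2, tx_free_thresh < nb_desc - 3,
tx_rs_thresh <= tx_free_thresh and nb_desc % tx_rs_thresh == 0. Port and queue IDs
below are placeholders:

#include <rte_ethdev.h>
#include <rte_lcore.h>

static int
setup_txq_with_thresholds(uint16_t port_id)
{
        struct rte_eth_txconf txconf = { 0 };

        txconf.tx_rs_thresh = 32;       /* divides 512, below 512 - 2 */
        txconf.tx_free_thresh = 64;     /* below 512 - 3, >= tx_rs_thresh */

        return rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(),
                                      &txconf);
}
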
diff --git a/drivers/net/idpf/idpf_rxtx.h b/drivers/net/idpf/idpf_rxtx.h
new file mode 100644
index 0000000000..705f706890
--- /dev/null
+++ b/drivers/net/idpf/idpf_rxtx.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Intel Corporation
+ */
+
+#ifndef _IDPF_RXTX_H_
+#define _IDPF_RXTX_H_
+
+#include "base/iecm_osdep.h"
+#include "base/iecm_type.h"
+#include "base/iecm_devids.h"
+#include "base/iecm_lan_txrx.h"
+#include "base/iecm_lan_pf_regs.h"
+#include "base/virtchnl.h"
+#include "base/virtchnl2.h"
+
+/* The ring length (QLEN) must be a whole multiple of 32 descriptors. */
+#define IDPF_ALIGN_RING_DESC   32
+#define IDPF_MIN_RING_DESC     32
+#define IDPF_MAX_RING_DESC     4096
+#define IDPF_DMA_MEM_ALIGN     4096
+/* Base address of the HW descriptor ring should be 128B aligned. */
+#define IDPF_RING_BASE_ALIGN   128
+
+/* used for Rx Bulk Allocate */
+#define IDPF_RX_MAX_BURST      32
+
+#define IDPF_DEFAULT_RX_FREE_THRESH    32
+
+
+#define IDPF_DEFAULT_TX_RS_THRESH      128
+#define IDPF_DEFAULT_TX_FREE_THRESH    128
+
+#define IDPF_MIN_TSO_MSS       256
+#define IDPF_MAX_TSO_MSS       9668
+#define IDPF_TSO_MAX_SEG       UINT8_MAX
+#define IDPF_TX_MAX_MTU_SEG     8
+
+struct idpf_rx_queue {
+       struct idpf_adapter *adapter;   /* the adapter this queue belongs to */
+       struct rte_mempool *mp;         /* mbuf pool to populate Rx ring */
+       const struct rte_memzone *mz;   /* memzone for Rx ring */
+       volatile void *rx_ring;
+       struct rte_mbuf **sw_ring;      /* address of SW ring */
+       uint64_t rx_ring_phys_addr;     /* Rx ring DMA address */
+
+       uint16_t nb_rx_desc;            /* ring length */
+       uint16_t rx_tail;               /* current value of tail */
+       volatile uint8_t *qrx_tail;     /* register address of tail */
+       uint16_t rx_free_thresh;        /* max free RX desc to hold */
+       uint16_t nb_rx_hold;            /* number of held free RX desc */
+       struct rte_mbuf *pkt_first_seg; /* first segment of current packet */
+       struct rte_mbuf *pkt_last_seg;  /* last segment of current packet */
+       struct rte_mbuf fake_mbuf;      /* dummy mbuf */
+
+       /* for rx bulk */
+       uint16_t rx_nb_avail;      /* number of staged packets ready */
+       uint16_t rx_next_avail;    /* index of next staged packets */
+       uint16_t rx_free_trigger;  /* triggers rx buffer allocation */
+       struct rte_mbuf *rx_stage[IDPF_RX_MAX_BURST * 2]; /* store mbuf */
+
+       uint16_t port_id;       /* device port ID */
+       uint16_t queue_id;      /* Rx queue index */
+       uint16_t rx_buf_len;    /* The packet buffer size */
+       uint16_t rx_hdr_len;    /* The header buffer size */
+       uint16_t max_pkt_len;   /* Maximum packet length */
+       uint8_t crc_len;        /* 0 if CRC stripped, 4 otherwise */
+       uint8_t rxdid;
+
+       bool q_set;             /* if rx queue has been configured */
+       bool rx_deferred_start; /* don't start this queue in dev start */
+       const struct idpf_rxq_ops *ops;
+
+       /* only valid for split queue mode */
+       uint8_t expected_gen_id;
+       struct idpf_rx_queue *bufq1;
+       struct idpf_rx_queue *bufq2;
+};
+
+struct idpf_tx_entry {
+       struct rte_mbuf *mbuf;
+       uint16_t next_id;
+       uint16_t last_id;
+};
+
+/* Structure associated with each TX queue. */
+struct idpf_tx_queue {
+       const struct rte_memzone *mz;           /* memzone for Tx ring */
+       volatile struct iecm_base_tx_desc *tx_ring;     /* Tx ring virtual address */
+       volatile union {
+               struct iecm_flex_tx_sched_desc *desc_ring;
+               struct iecm_splitq_tx_compl_desc *compl_ring;
+       };
+       uint64_t tx_ring_phys_addr;             /* Tx ring DMA address */
+       struct idpf_tx_entry *sw_ring;          /* address array of SW ring */
+
+       uint16_t nb_tx_desc;            /* ring length */
+       uint16_t tx_tail;               /* current value of tail */
+       volatile uint8_t *qtx_tail;     /* register address of tail */
+       /* number of used desc since RS bit set */
+       uint16_t nb_used;
+       uint16_t nb_free;
+       uint16_t last_desc_cleaned;     /* last desc that has been cleaned */
+       uint16_t free_thresh;
+       uint16_t rs_thresh;
+
+       uint16_t port_id;
+       uint16_t queue_id;
+       uint64_t offloads;
+       uint16_t next_dd;       /* next desc to check DD, for VPMD */
+       uint16_t next_rs;       /* next desc to set RS, for VPMD */
+
+       bool q_set;             /* if tx queue has been configured */
+       bool tx_deferred_start; /* don't start this queue in dev start */
+       const struct idpf_txq_ops *ops;
+#define IDPF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1       BIT(0)
+#define IDPF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2       BIT(1)
+       uint8_t vlan_flag;
+
+       /* only valid for split queue mode */
+       uint16_t sw_nb_desc;
+       uint16_t sw_tail;
+       void **txqs;
+       uint32_t tx_start_qid;
+       uint8_t expected_gen_id;
+       struct idpf_tx_queue *complq;
+};
+
+/* Offload features */
+union idpf_tx_offload {
+       uint64_t data;
+       struct {
+               uint64_t l2_len:7; /* L2 (MAC) Header Length. */
+               uint64_t l3_len:9; /* L3 (IP) Header Length. */
+               uint64_t l4_len:8; /* L4 Header Length. */
+               uint64_t tso_segsz:16; /* TCP TSO segment size */
+               /* uint64_t unused : 24; */
+       };
+};
+
+struct idpf_rxq_ops {
+       void (*release_mbufs)(struct idpf_rx_queue *rxq);
+};
+
+struct idpf_txq_ops {
+       void (*release_mbufs)(struct idpf_tx_queue *txq);
+};
+
+int idpf_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+                       uint16_t nb_desc, unsigned int socket_id,
+                       const struct rte_eth_rxconf *rx_conf,
+                       struct rte_mempool *mp);
+int idpf_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int idpf_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int idpf_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+void idpf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
+
+int idpf_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+                       uint16_t nb_desc, unsigned int socket_id,
+                       const struct rte_eth_txconf *tx_conf);
+int idpf_tx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int idpf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+int idpf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+void idpf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
+
+void idpf_stop_queues(struct rte_eth_dev *dev);
+
+#endif /* _IDPF_RXTX_H_ */
diff --git a/drivers/net/idpf/idpf_vchnl.c b/drivers/net/idpf/idpf_vchnl.c
index 77d77b82d8..74ed555449 100644
--- a/drivers/net/idpf/idpf_vchnl.c
+++ b/drivers/net/idpf/idpf_vchnl.c
@@ -21,6 +21,7 @@
 #include <rte_dev.h>
 
 #include "idpf_ethdev.h"
+#include "idpf_rxtx.h"
 
 #include "base/iecm_prototype.h"
 
@@ -440,6 +441,347 @@ idpf_destroy_vport(struct idpf_vport *vport)
        return err;
 }
 
+#define IDPF_RX_BUF_STRIDE             64
+int
+idpf_config_rxqs(struct idpf_vport *vport)
+{
+       struct idpf_rx_queue **rxq =
+               (struct idpf_rx_queue **)vport->dev_data->rx_queues;
+       struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
+       struct virtchnl2_rxq_info *rxq_info;
+       struct idpf_cmd_info args;
+       uint16_t total_qs, num_qs;
+       int size, err, i, j;
+       int k = 0;
+
+       total_qs = vport->num_rx_q + vport->num_rx_bufq;
+       while (total_qs) {
+               if (total_qs > adapter->max_rxq_per_msg) {
+                       num_qs = adapter->max_rxq_per_msg;
+                       total_qs -= adapter->max_rxq_per_msg;
+               } else {
+                       num_qs = total_qs;
+                       total_qs = 0;
+               }
+
+               size = sizeof(*vc_rxqs) + (num_qs - 1) *
+                       sizeof(struct virtchnl2_rxq_info);
+               vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0);
+               if (vc_rxqs == NULL) {
+                       PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_rx_queues");
+                       err = -ENOMEM;
+                       break;
+               }
+               vc_rxqs->vport_id = vport->vport_id;
+               vc_rxqs->num_qinfo = num_qs;
+               if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+                       for (i = 0; i < num_qs; i++, k++) {
+                               rxq_info = &vc_rxqs->qinfo[i];
+                               rxq_info->dma_ring_addr = rxq[k]->rx_ring_phys_addr;
+                               rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX;
+                               rxq_info->queue_id = rxq[k]->queue_id;
+                               rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+                               rxq_info->data_buffer_size = rxq[k]->rx_buf_len;
+                               rxq_info->max_pkt_size =
+                                       vport->dev_data->dev_conf.rxmode.mtu;
+
+                               rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
+                               rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
+
+                               rxq_info->ring_len = rxq[k]->nb_rx_desc;
+                       }
+               } else {
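+                       /* Split queue model: each group is one Rx queue
+                        * followed by its two buffer queues, so qinfo is
+                        * filled in strides of three.
+                        */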
+                       for (i = 0; i < num_qs / 3; i++, k++) {
+                               /* Rx queue */
+                               rxq_info = &vc_rxqs->qinfo[i * 3];
+                               rxq_info->dma_ring_addr =
+                                       rxq[k]->rx_ring_phys_addr;
+                               rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX;
+                               rxq_info->queue_id = rxq[k]->queue_id;
+                               rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+                               rxq_info->data_buffer_size = rxq[k]->rx_buf_len;
+                               rxq_info->max_pkt_size =
+                                       vport->dev_data->dev_conf.rxmode.mtu;
+
+                               rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
+                               rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
+
+                               rxq_info->ring_len = rxq[k]->nb_rx_desc;
+                               rxq_info->rx_bufq1_id = rxq[k]->bufq1->queue_id;
+                               rxq_info->rx_bufq2_id = rxq[k]->bufq2->queue_id;
+                               rxq_info->rx_buffer_low_watermark = 64;
+
+                               /* Buffer queue */
+                               for (j = 1; j <= IDPF_RX_BUFQ_PER_GRP; j++) {
+                                       struct idpf_rx_queue *bufq = j == 1 ?
+                                               rxq[k]->bufq1 : rxq[k]->bufq2;
+                                       rxq_info = &vc_rxqs->qinfo[i * 3 + j];
+                                       rxq_info->dma_ring_addr =
+                                               bufq->rx_ring_phys_addr;
+                                       rxq_info->type =
+                                               VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+                                       rxq_info->queue_id = bufq->queue_id;
+                                       rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+                                       rxq_info->data_buffer_size = bufq->rx_buf_len;
+                                       rxq_info->desc_ids =
+                                               VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
+                                       rxq_info->ring_len = bufq->nb_rx_desc;
+
+                                       rxq_info->buffer_notif_stride =
+                                               IDPF_RX_BUF_STRIDE;
+                                       rxq_info->rx_buffer_low_watermark = 64;
+                               }
+                       }
+               }
+               memset(&args, 0, sizeof(args));
+               args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
+               args.in_args = (uint8_t *)vc_rxqs;
+               args.in_args_size = size;
+               args.out_buffer = adapter->mbx_resp;
+               args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+               err = idpf_execute_vc_cmd(adapter, &args);
+               rte_free(vc_rxqs);
+               if (err) {
+                       PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES");
+                       break;
+               }
+       }
+
+       return err;
+}
+
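+/* Configure Tx queues over the mailbox.  Queue info entries are batched
+ * into as many VIRTCHNL2_OP_CONFIG_TX_QUEUES messages as needed, with at
+ * most max_txq_per_msg entries per message.
+ */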
+int
+idpf_config_txqs(struct idpf_vport *vport)
+{
+       struct idpf_tx_queue **txq =
+               (struct idpf_tx_queue **)vport->dev_data->tx_queues;
+       struct virtchnl2_config_tx_queues *vc_txqs = NULL;
+       struct virtchnl2_txq_info *txq_info;
+       struct idpf_cmd_info args;
+       uint16_t total_qs, num_qs;
+       int size, err = 0, i;
+       int k = 0;
+
+       total_qs = vport->num_tx_q + vport->num_tx_complq;
+       while (total_qs) {
+               if (total_qs > adapter->max_txq_per_msg) {
+                       num_qs = adapter->max_txq_per_msg;
+                       total_qs -= adapter->max_txq_per_msg;
+               } else {
+                       num_qs = total_qs;
+                       total_qs = 0;
+               }
+               size = sizeof(*vc_txqs) + (num_qs - 1) *
+                       sizeof(struct virtchnl2_txq_info);
+               vc_txqs = rte_zmalloc("cfg_txqs", size, 0);
+               if (vc_txqs == NULL) {
+                       PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_tx_queues");
+                       err = -ENOMEM;
+                       break;
+               }
+               vc_txqs->vport_id = vport->vport_id;
+               vc_txqs->num_qinfo = num_qs;
+               if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+                       for (i = 0; i < num_qs; i++, k++) {
+                               txq_info = &vc_txqs->qinfo[i];
+                               txq_info->dma_ring_addr = txq[k]->tx_ring_phys_addr;
+                               txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX;
+                               txq_info->queue_id = txq[k]->queue_id;
+                               txq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+                               txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
+                               txq_info->ring_len = txq[k]->nb_tx_desc;
+                       }
+               } else {
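+                       /* Split queue model: each Tx queue is paired with
+                        * its completion queue, so qinfo is filled in
+                        * strides of two.
+                        */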
+                       for (i = 0; i < num_qs / 2; i++, k++) {
+                               /* txq info */
+                               txq_info = &vc_txqs->qinfo[2 * i];
+                               txq_info->dma_ring_addr = txq[k]->tx_ring_phys_addr;
+                               txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX;
+                               txq_info->queue_id = txq[k]->queue_id;
+                               txq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+                               txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
+                               txq_info->ring_len = txq[k]->nb_tx_desc;
+                               txq_info->tx_compl_queue_id =
+                                       txq[k]->complq->queue_id;
+
+                               /* tx completion queue info */
+                               txq_info = &vc_txqs->qinfo[2 * i + 1];
+                               txq_info->dma_ring_addr =
+                                       txq[k]->complq->tx_ring_phys_addr;
+                               txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+                               txq_info->queue_id = txq[k]->complq->queue_id;
+                               txq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+                               txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
+                               txq_info->ring_len = txq[k]->complq->nb_tx_desc;
+                       }
+               }
+
+               memset(&args, 0, sizeof(args));
+               args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
+               args.in_args = (uint8_t *)vc_txqs;
+               args.in_args_size = size;
+               args.out_buffer = adapter->mbx_resp;
+               args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+               err = idpf_execute_vc_cmd(adapter, &args);
+               rte_free(vc_txqs);
+               if (err) {
+                       PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES");
+                       break;
+               }
+       }
+
+       return err;
+}
+
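+/* Enable or disable a single queue of the given type with a one-chunk
+ * VIRTCHNL2_OP_ENABLE_QUEUES/VIRTCHNL2_OP_DISABLE_QUEUES message.
+ */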
+static int
+idpf_ena_dis_one_queue(struct idpf_vport *vport, uint16_t qid,
+                      uint32_t type, bool on)
+{
+       struct virtchnl2_del_ena_dis_queues *queue_select;
+       struct virtchnl2_queue_chunk *queue_chunk;
+       struct idpf_cmd_info args;
+       int err, len;
+
+       len = sizeof(struct virtchnl2_del_ena_dis_queues);
+       queue_select = rte_zmalloc("queue_select", len, 0);
+       if (!queue_select)
+               return -ENOMEM;
+
+       queue_chunk = queue_select->chunks.chunks;
+       queue_select->chunks.num_chunks = 1;
+       queue_select->vport_id = vport->vport_id;
+
+       queue_chunk->type = type;
+       queue_chunk->start_queue_id = qid;
+       queue_chunk->num_queues = 1;
+
+       args.ops = on ? VIRTCHNL2_OP_ENABLE_QUEUES :
+               VIRTCHNL2_OP_DISABLE_QUEUES;
+       args.in_args = (uint8_t *)queue_select;
+       args.in_args_size = len;
+       args.out_buffer = adapter->mbx_resp;
+       args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+       err = idpf_execute_vc_cmd(adapter, &args);
+       if (err)
+               PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_%s_QUEUES",
+                           on ? "ENABLE" : "DISABLE");
+
+       rte_free(queue_select);
+       return err;
+}
+
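+/* Enable or disable the data queue selected by qid, plus its associated
+ * completion queue (split Tx model) or pair of buffer queues (split Rx
+ * model).
+ */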
+int
+idpf_switch_queue(struct idpf_vport *vport, uint16_t qid,
+                bool rx, bool on)
+{
+       uint32_t type;
+       int err, queue_id;
+
+       /* switch txq/rxq */
+       type = rx ? VIRTCHNL2_QUEUE_TYPE_RX : VIRTCHNL2_QUEUE_TYPE_TX;
+
+       if (type == VIRTCHNL2_QUEUE_TYPE_RX)
+               queue_id = vport->chunks_info.rx_start_qid + qid;
+       else
+               queue_id = vport->chunks_info.tx_start_qid + qid;
+       err = idpf_ena_dis_one_queue(vport, queue_id, type, on);
+       if (err)
+               return err;
+
+       /* switch tx completion queue */
+       if (!rx && vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+               type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+               queue_id = vport->chunks_info.tx_compl_start_qid + qid;
+               err = idpf_ena_dis_one_queue(vport, queue_id, type, on);
+               if (err)
+                       return err;
+       }
+
+       /* switch rx buffer queue */
+       if (rx && vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+               type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+               queue_id = vport->chunks_info.rx_buf_start_qid + 2 * qid;
+               err = idpf_ena_dis_one_queue(vport, queue_id, type, on);
+               if (err)
+                       return err;
+               queue_id++;
+               err = idpf_ena_dis_one_queue(vport, queue_id, type, on);
+               if (err)
+                       return err;
+       }
+
+       return err;
+}
+
+#define IDPF_RXTX_QUEUE_CHUNKS_NUM     2
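+/* Enable or disable all queues of a vport in one message.  The chunk
+ * array is indexed directly by queue type value: Rx and Tx data queues
+ * always, plus buffer/completion queue chunks for split queue models.
+ */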
+int
+idpf_ena_dis_queues(struct idpf_vport *vport, bool enable)
+{
+       struct virtchnl2_del_ena_dis_queues *queue_select;
+       struct virtchnl2_queue_chunk *queue_chunk;
+       uint32_t type;
+       struct idpf_cmd_info args;
+       uint16_t num_chunks;
+       int err, len;
+
+       num_chunks = IDPF_RXTX_QUEUE_CHUNKS_NUM;
+       if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT)
+               num_chunks++;
+       if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT)
+               num_chunks++;
+
+       len = sizeof(struct virtchnl2_del_ena_dis_queues) +
+               sizeof(struct virtchnl2_queue_chunk) * (num_chunks - 1);
+       queue_select = rte_zmalloc("queue_select", len, 0);
+       if (queue_select == NULL)
+               return -ENOMEM;
+
+       queue_chunk = queue_select->chunks.chunks;
+       queue_select->chunks.num_chunks = num_chunks;
+       queue_select->vport_id = vport->vport_id;
+
+       type = VIRTCHNL2_QUEUE_TYPE_RX;
+       queue_chunk[type].type = type;
+       queue_chunk[type].start_queue_id = vport->chunks_info.rx_start_qid;
+       queue_chunk[type].num_queues = vport->num_rx_q;
+
+       type = VIRTCHNL2_QUEUE_TYPE_TX;
+       queue_chunk[type].type = type;
+       queue_chunk[type].start_queue_id = vport->chunks_info.tx_start_qid;
+       queue_chunk[type].num_queues = vport->num_tx_q;
+
+       if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+               type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+               queue_chunk[type].type = type;
+               queue_chunk[type].start_queue_id =
+                       vport->chunks_info.rx_buf_start_qid;
+               queue_chunk[type].num_queues = vport->num_rx_bufq;
+       }
+
+       if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+               type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+               queue_chunk[type].type = type;
+               queue_chunk[type].start_queue_id =
+                       vport->chunks_info.tx_compl_start_qid;
+               queue_chunk[type].num_queues = vport->num_tx_complq;
+       }
+
+       args.ops = enable ? VIRTCHNL2_OP_ENABLE_QUEUES :
+               VIRTCHNL2_OP_DISABLE_QUEUES;
+       args.in_args = (uint8_t *)queue_select;
+       args.in_args_size = len;
+       args.out_buffer = adapter->mbx_resp;
+       args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+       err = idpf_execute_vc_cmd(adapter, &args);
+       if (err)
+               PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_%s_QUEUES",
+                           enable ? "ENABLE" : "DISABLE");
+
+       rte_free(queue_select);
+       return err;
+}
+
 int
 idpf_ena_dis_vport(struct idpf_vport *vport, bool enable)
 {
diff --git a/drivers/net/idpf/meson.build b/drivers/net/idpf/meson.build
index 262a7aa8c7..9bda251ead 100644
--- a/drivers/net/idpf/meson.build
+++ b/drivers/net/idpf/meson.build
@@ -12,6 +12,7 @@ objs = [base_objs]
 
 sources = files(
         'idpf_ethdev.c',
+       'idpf_rxtx.c',
        'idpf_vchnl.c',
 )
 
-- 
2.25.1
