The simple and S/G code paths are very similar, but the simple case can
skip a few branches in the hot path. This improves PPS when 10KB mbufs
are used.

S/G is enabled on the Rx side by the RTE_ETH_RX_OFFLOAD_SCATTER offload.
S/G is enabled on the Tx side by the RTE_ETH_TX_OFFLOAD_MULTI_SEGS offload.

S/G is automatically enabled on the Rx side if the provided mbufs are
too small to hold the maximum possible frame.

To enable S/G in testpmd, add these args:
  --rx-offloads=0x2000 --tx-offloads=0x8000
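
For reference, a minimal sketch (not part of this patch) of how an
application could request the same offloads through the ethdev API
before calling rte_eth_dev_configure(); variable names are illustrative:

  struct rte_eth_conf port_conf = { 0 };

  /* Same offload bits as the testpmd args above */
  port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;    /* 0x2000 */
  port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS; /* 0x8000 */

  /* ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf); */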

Signed-off-by: Andrew Boyer <andrew.bo...@amd.com>
Signed-off-by: R Mohamed Shah <mohamedsha...@amd.com>
---
 doc/guides/rel_notes/release_22_11.rst |   1 +
 drivers/net/ionic/ionic_ethdev.c       |  25 +-
 drivers/net/ionic/ionic_lif.c          |  61 ++-
 drivers/net/ionic/ionic_lif.h          |   1 +
 drivers/net/ionic/ionic_rxtx.c         | 576 ++-----------------------
 drivers/net/ionic/ionic_rxtx.h         |  46 +-
 drivers/net/ionic/ionic_rxtx_sg.c      | 496 +++++++++++++++++++++
 drivers/net/ionic/ionic_rxtx_simple.c  | 417 ++++++++++++++++++
 drivers/net/ionic/meson.build          |   2 +
 9 files changed, 1054 insertions(+), 571 deletions(-)
 create mode 100644 drivers/net/ionic/ionic_rxtx_sg.c
 create mode 100644 drivers/net/ionic/ionic_rxtx_simple.c

diff --git a/doc/guides/rel_notes/release_22_11.rst b/doc/guides/rel_notes/release_22_11.rst
index 974400d0a6..9992236217 100644
--- a/doc/guides/rel_notes/release_22_11.rst
+++ b/doc/guides/rel_notes/release_22_11.rst
@@ -88,6 +88,7 @@ New Features
   * Added support for advertising packet types.
   * Added support for descriptor status functions.
   * Added Q-in-CMB feature controlled by devarg ionic_cmb.
+  * Added optimized handlers for non-scattered Rx and Tx.
 
 Removed Items
 -------------
diff --git a/drivers/net/ionic/ionic_ethdev.c b/drivers/net/ionic/ionic_ethdev.c
index 28297879cf..d29aa717e3 100644
--- a/drivers/net/ionic/ionic_ethdev.c
+++ b/drivers/net/ionic/ionic_ethdev.c
@@ -828,8 +828,6 @@ ionic_dev_configure(struct rte_eth_dev *eth_dev)
 
        ionic_lif_configure(lif);
 
-       ionic_lif_set_features(lif);
-
        return 0;
 }
 
@@ -883,6 +881,13 @@ ionic_dev_start(struct rte_eth_dev *eth_dev)
        if (dev_conf->lpbk_mode)
                IONIC_PRINT(WARNING, "Loopback mode not supported");
 
+       /* Re-set features in case SG flag was added in rx_queue_setup() */
+       err = ionic_lif_set_features(lif);
+       if (err) {
+               IONIC_PRINT(ERR, "Cannot set LIF features: %d", err);
+               return err;
+       }
+
        lif->frame_size = eth_dev->data->mtu + IONIC_ETH_OVERHEAD;
 
        err = ionic_lif_change_mtu(lif, eth_dev->data->mtu);
@@ -917,6 +922,18 @@ ionic_dev_start(struct rte_eth_dev *eth_dev)
                                speed);
        }
 
+       if (lif->hw_features & IONIC_ETH_HW_RX_SG)
+               eth_dev->rx_pkt_burst = &ionic_recv_pkts_sg;
+       else
+               eth_dev->rx_pkt_burst = &ionic_recv_pkts;
+
+       if (lif->hw_features & IONIC_ETH_HW_TX_SG)
+               eth_dev->tx_pkt_burst = &ionic_xmit_pkts_sg;
+       else
+               eth_dev->tx_pkt_burst = &ionic_xmit_pkts;
+
+       eth_dev->tx_pkt_prepare = &ionic_prep_pkts;
+
        ionic_dev_link_update(eth_dev, 0);
 
        return 0;
@@ -980,10 +997,6 @@ eth_ionic_dev_init(struct rte_eth_dev *eth_dev, void *init_params)
        IONIC_PRINT_CALL();
 
        eth_dev->dev_ops = &ionic_eth_dev_ops;
-       eth_dev->rx_pkt_burst = &ionic_recv_pkts;
-       eth_dev->tx_pkt_burst = &ionic_xmit_pkts;
-       eth_dev->tx_pkt_prepare = &ionic_prep_pkts;
-
        eth_dev->rx_descriptor_status = ionic_dev_rx_descriptor_status;
        eth_dev->tx_descriptor_status = ionic_dev_tx_descriptor_status;
 
diff --git a/drivers/net/ionic/ionic_lif.c b/drivers/net/ionic/ionic_lif.c
index cf9605c791..affb6a44af 100644
--- a/drivers/net/ionic/ionic_lif.c
+++ b/drivers/net/ionic/ionic_lif.c
@@ -755,11 +755,10 @@ ionic_rx_qcq_alloc(struct ionic_lif *lif, uint32_t socket_id, uint32_t index,
                struct ionic_rx_qcq **rxq_out)
 {
        struct ionic_rx_qcq *rxq;
-       uint16_t flags, seg_size, hdr_seg_size, max_segs, max_segs_fw;
+       uint16_t flags = 0, seg_size, hdr_seg_size, max_segs, max_segs_fw = 1;
        uint32_t max_mtu;
        int err;
 
-       flags = IONIC_QCQ_F_SG;
        if (lif->state & IONIC_LIF_F_Q_IN_CMB)
                flags |= IONIC_QCQ_F_CMB;
 
@@ -770,7 +769,18 @@ ionic_rx_qcq_alloc(struct ionic_lif *lif, uint32_t socket_id, uint32_t index,
 
        max_mtu = rte_le_to_cpu_32(lif->adapter->ident.lif.eth.max_mtu);
 
-       max_segs_fw = IONIC_RX_MAX_SG_ELEMS + 1;
+       /* If mbufs are too small to hold received packets, enable SG */
+       if (max_mtu > hdr_seg_size) {
+               IONIC_PRINT(NOTICE, "Enabling RX_OFFLOAD_SCATTER");
+               lif->eth_dev->data->dev_conf.rxmode.offloads |=
+                       RTE_ETH_RX_OFFLOAD_SCATTER;
+               ionic_lif_configure_rx_sg_offload(lif);
+       }
+
+       if (lif->features & IONIC_ETH_HW_RX_SG) {
+               flags |= IONIC_QCQ_F_SG;
+               max_segs_fw = IONIC_RX_MAX_SG_ELEMS + 1;
+       }
 
        /*
         * Calculate how many fragment pointers might be stored in queue.
@@ -820,14 +830,17 @@ ionic_tx_qcq_alloc(struct ionic_lif *lif, uint32_t socket_id, uint32_t index,
                uint16_t ntxq_descs, struct ionic_tx_qcq **txq_out)
 {
        struct ionic_tx_qcq *txq;
-       uint16_t flags, num_segs_fw;
+       uint16_t flags = 0, num_segs_fw = 1;
        int err;
 
-       flags = IONIC_QCQ_F_SG;
+       if (lif->features & IONIC_ETH_HW_TX_SG) {
+               flags |= IONIC_QCQ_F_SG;
+               num_segs_fw = IONIC_TX_MAX_SG_ELEMS_V1 + 1;
+       }
        if (lif->state & IONIC_LIF_F_Q_IN_CMB)
                flags |= IONIC_QCQ_F_CMB;
 
-       num_segs_fw = IONIC_TX_MAX_SG_ELEMS_V1 + 1;
+       IONIC_PRINT(DEBUG, "txq %u num_segs %u", index, num_segs_fw);
 
        err = ionic_qcq_alloc(lif,
                IONIC_QTYPE_TXQ,
@@ -1561,8 +1574,7 @@ ionic_lif_txq_init(struct ionic_tx_qcq *txq)
                        .type = q->type,
                        .ver = lif->qtype_info[q->type].version,
                        .index = rte_cpu_to_le_32(q->index),
-                       .flags = rte_cpu_to_le_16(IONIC_QINIT_F_SG |
-                                               IONIC_QINIT_F_ENA),
+                       .flags = rte_cpu_to_le_16(IONIC_QINIT_F_ENA),
                        .intr_index = rte_cpu_to_le_16(IONIC_INTR_NONE),
                        .ring_size = rte_log2_u32(q->num_descs),
                        .ring_base = rte_cpu_to_le_64(q->base_pa),
@@ -1572,6 +1584,8 @@ ionic_lif_txq_init(struct ionic_tx_qcq *txq)
        };
        int err;
 
+       if (txq->flags & IONIC_QCQ_F_SG)
+               ctx.cmd.q_init.flags |= rte_cpu_to_le_16(IONIC_QINIT_F_SG);
        if (txq->flags & IONIC_QCQ_F_CMB)
                ctx.cmd.q_init.flags |= rte_cpu_to_le_16(IONIC_QINIT_F_CMB);
 
@@ -1615,8 +1629,7 @@ ionic_lif_rxq_init(struct ionic_rx_qcq *rxq)
                        .type = q->type,
                        .ver = lif->qtype_info[q->type].version,
                        .index = rte_cpu_to_le_32(q->index),
-                       .flags = rte_cpu_to_le_16(IONIC_QINIT_F_SG |
-                                               IONIC_QINIT_F_ENA),
+                       .flags = rte_cpu_to_le_16(IONIC_QINIT_F_ENA),
                        .intr_index = rte_cpu_to_le_16(IONIC_INTR_NONE),
                        .ring_size = rte_log2_u32(q->num_descs),
                        .ring_base = rte_cpu_to_le_64(q->base_pa),
@@ -1626,6 +1639,8 @@ ionic_lif_rxq_init(struct ionic_rx_qcq *rxq)
        };
        int err;
 
+       if (rxq->flags & IONIC_QCQ_F_SG)
+               ctx.cmd.q_init.flags |= rte_cpu_to_le_16(IONIC_QINIT_F_SG);
        if (rxq->flags & IONIC_QCQ_F_CMB)
                ctx.cmd.q_init.flags |= rte_cpu_to_le_16(IONIC_QINIT_F_CMB);
 
@@ -1791,6 +1806,20 @@ ionic_lif_configure_vlan_offload(struct ionic_lif *lif, int mask)
        }
 }
 
+void
+ionic_lif_configure_rx_sg_offload(struct ionic_lif *lif)
+{
+       struct rte_eth_rxmode *rxmode = &lif->eth_dev->data->dev_conf.rxmode;
+
+       if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
+               lif->features |= IONIC_ETH_HW_RX_SG;
+               lif->eth_dev->data->scattered_rx = 1;
+       } else {
+               lif->features &= ~IONIC_ETH_HW_RX_SG;
+               lif->eth_dev->data->scattered_rx = 0;
+       }
+}
+
 void
 ionic_lif_configure(struct ionic_lif *lif)
 {
@@ -1836,13 +1865,11 @@ ionic_lif_configure(struct ionic_lif *lif)
        else
                lif->features &= ~IONIC_ETH_HW_RX_CSUM;
 
-       if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
-               lif->features |= IONIC_ETH_HW_RX_SG;
-               lif->eth_dev->data->scattered_rx = 1;
-       } else {
-               lif->features &= ~IONIC_ETH_HW_RX_SG;
-               lif->eth_dev->data->scattered_rx = 0;
-       }
+       /*
+        * NB: RX_SG may be enabled later during rx_queue_setup() if
+        * required by the mbuf/mtu configuration
+        */
+       ionic_lif_configure_rx_sg_offload(lif);
 
        /* Covers VLAN_STRIP */
        ionic_lif_configure_vlan_offload(lif, RTE_ETH_VLAN_STRIP_MASK);
diff --git a/drivers/net/ionic/ionic_lif.h b/drivers/net/ionic/ionic_lif.h
index e4af138a51..2aa9f774ff 100644
--- a/drivers/net/ionic/ionic_lif.h
+++ b/drivers/net/ionic/ionic_lif.h
@@ -188,6 +188,7 @@ void ionic_lif_stop(struct ionic_lif *lif);
 
 void ionic_lif_configure(struct ionic_lif *lif);
 void ionic_lif_configure_vlan_offload(struct ionic_lif *lif, int mask);
+void ionic_lif_configure_rx_sg_offload(struct ionic_lif *lif);
 void ionic_lif_reset(struct ionic_lif *lif);
 
 int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr);
diff --git a/drivers/net/ionic/ionic_rxtx.c b/drivers/net/ionic/ionic_rxtx.c
index 9a346f4143..64733da535 100644
--- a/drivers/net/ionic/ionic_rxtx.c
+++ b/drivers/net/ionic/ionic_rxtx.c
@@ -2,50 +2,28 @@
  * Copyright 2018-2022 Advanced Micro Devices, Inc. All Rights Reserved.
  */
 
-#include <sys/queue.h>
 #include <stdio.h>
-#include <stdlib.h>
 #include <string.h>
 #include <errno.h>
 #include <stdint.h>
-#include <stdarg.h>
-#include <unistd.h>
-#include <inttypes.h>
 
-#include <rte_byteorder.h>
 #include <rte_common.h>
-#include <rte_cycles.h>
+#include <rte_byteorder.h>
+#include <rte_errno.h>
 #include <rte_log.h>
-#include <rte_debug.h>
-#include <rte_interrupts.h>
-#include <rte_pci.h>
-#include <rte_memory.h>
-#include <rte_memzone.h>
-#include <rte_launch.h>
-#include <rte_eal.h>
-#include <rte_per_lcore.h>
-#include <rte_lcore.h>
-#include <rte_atomic.h>
-#include <rte_branch_prediction.h>
-#include <rte_mempool.h>
-#include <rte_malloc.h>
 #include <rte_mbuf.h>
 #include <rte_ether.h>
-#include <ethdev_driver.h>
-#include <rte_prefetch.h>
-#include <rte_udp.h>
-#include <rte_tcp.h>
-#include <rte_sctp.h>
-#include <rte_string_fns.h>
-#include <rte_errno.h>
 #include <rte_ip.h>
-#include <rte_net.h>
+#include <rte_tcp.h>
+#include <rte_ethdev.h>
+#include <ethdev_driver.h>
 
-#include "ionic_logs.h"
-#include "ionic_mac_api.h"
-#include "ionic_ethdev.h"
+#include "ionic.h"
+#include "ionic_dev.h"
 #include "ionic_lif.h"
+#include "ionic_ethdev.h"
 #include "ionic_rxtx.h"
+#include "ionic_logs.h"
 
 static void
 ionic_empty_array(void **array, uint32_t cnt, uint16_t idx)
@@ -103,60 +81,6 @@ ionic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
        qinfo->conf.tx_deferred_start = txq->flags & IONIC_QCQ_F_DEFERRED;
 }
 
-static __rte_always_inline void
-ionic_tx_flush(struct ionic_tx_qcq *txq)
-{
-       struct ionic_cq *cq = &txq->qcq.cq;
-       struct ionic_queue *q = &txq->qcq.q;
-       struct rte_mbuf *txm;
-       struct ionic_txq_comp *cq_desc, *cq_desc_base = cq->base;
-       void **info;
-       uint32_t i;
-
-       cq_desc = &cq_desc_base[cq->tail_idx];
-
-       while (color_match(cq_desc->color, cq->done_color)) {
-               cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);
-               if (cq->tail_idx == 0)
-                       cq->done_color = !cq->done_color;
-
-               /* Prefetch 4 x 16B comp at cq->tail_idx + 4 */
-               if ((cq->tail_idx & 0x3) == 0)
-                       rte_prefetch0(&cq_desc_base[Q_NEXT_TO_SRVC(cq, 4)]);
-
-               while (q->tail_idx != rte_le_to_cpu_16(cq_desc->comp_index)) {
-                       /* Prefetch 8 mbuf ptrs at q->tail_idx + 2 */
-                       rte_prefetch0(IONIC_INFO_PTR(q, Q_NEXT_TO_SRVC(q, 2)));
-
-                       /* Prefetch next mbuf */
-                       void **next_info =
-                               IONIC_INFO_PTR(q, Q_NEXT_TO_SRVC(q, 1));
-                       if (next_info[0])
-                               rte_mbuf_prefetch_part2(next_info[0]);
-                       if (next_info[1])
-                               rte_mbuf_prefetch_part2(next_info[1]);
-
-                       info = IONIC_INFO_PTR(q, q->tail_idx);
-                       for (i = 0; i < q->num_segs; i++) {
-                               txm = info[i];
-                               if (!txm)
-                                       break;
-
-                               if (txq->flags & IONIC_QCQ_F_FAST_FREE)
-                                       rte_mempool_put(txm->pool, txm);
-                               else
-                                       rte_pktmbuf_free_seg(txm);
-
-                               info[i] = NULL;
-                       }
-
-                       q->tail_idx = Q_NEXT_TO_SRVC(q, 1);
-               }
-
-               cq_desc = &cq_desc_base[cq->tail_idx];
-       }
-}
-
 void __rte_cold
 ionic_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
@@ -394,7 +318,7 @@ ionic_tx_tso_next(struct ionic_tx_qcq *txq, struct ionic_txq_sg_elem **elem)
        return desc;
 }
 
-static int
+int
 ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
 {
        struct ionic_queue *q = &txq->qcq.q;
@@ -405,7 +329,7 @@ ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
        rte_iova_t data_iova;
        uint64_t desc_addr = 0, next_addr;
        uint16_t desc_len = 0;
-       uint8_t desc_nsge;
+       uint8_t desc_nsge = 0;
        uint32_t hdrlen;
        uint32_t mss = txm->tso_segsz;
        uint32_t frag_left = 0;
@@ -416,6 +340,7 @@ ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
        bool start, done;
        bool encap;
        bool has_vlan = !!(txm->ol_flags & RTE_MBUF_F_TX_VLAN);
+       bool use_sgl = !!(txq->flags & IONIC_QCQ_F_SG);
        uint16_t vlan_tci = txm->vlan_tci;
        uint64_t ol_flags = txm->ol_flags;
 
@@ -438,48 +363,22 @@ ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
                hdrlen = txm->l2_len + txm->l3_len + txm->l4_len;
        }
 
-       seglen = hdrlen + mss;
-       left = txm->data_len;
-       data_iova = rte_mbuf_data_iova(txm);
-
        desc = ionic_tx_tso_next(txq, &elem);
+       txm_seg = txm;
        start = true;
+       seglen = hdrlen + mss;
 
-       /* Chop data up into desc segments */
-
-       while (left > 0) {
-               len = RTE_MIN(seglen, left);
-               frag_left = seglen - len;
-               desc_addr = rte_cpu_to_le_64(data_iova + offset);
-               desc_len = len;
-               desc_nsge = 0;
-               left -= len;
-               offset += len;
-               if (txm->nb_segs > 1 && frag_left > 0)
-                       continue;
-               done = (txm->nb_segs == 1 && left == 0);
-               ionic_tx_tso_post(q, desc, txm,
-                       desc_addr, desc_nsge, desc_len,
-                       hdrlen, mss,
-                       encap,
-                       vlan_tci, has_vlan,
-                       start, done);
-               desc = ionic_tx_tso_next(txq, &elem);
-               start = false;
-               seglen = mss;
-       }
-
-       /* Chop frags into desc segments */
-
-       txm_seg = txm->next;
+       /* Walk the chain of mbufs */
        while (txm_seg != NULL) {
                offset = 0;
                data_iova = rte_mbuf_data_iova(txm_seg);
                left = txm_seg->data_len;
 
+               /* Split the mbuf data up into multiple descriptors */
                while (left > 0) {
                        next_addr = rte_cpu_to_le_64(data_iova + offset);
-                       if (frag_left > 0) {
+                       if (frag_left > 0 && use_sgl) {
+                               /* Fill previous descriptor's SGE */
                                len = RTE_MIN(frag_left, left);
                                frag_left -= len;
                                elem->addr = next_addr;
@@ -487,16 +386,19 @@ ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
                                elem++;
                                desc_nsge++;
                        } else {
-                               len = RTE_MIN(mss, left);
-                               frag_left = mss - len;
+                               /* Fill new descriptor's data field */
+                               len = RTE_MIN(seglen, left);
+                               frag_left = seglen - len;
                                desc_addr = next_addr;
                                desc_len = len;
                                desc_nsge = 0;
                        }
                        left -= len;
                        offset += len;
-                       if (txm_seg->next != NULL && frag_left > 0)
-                               continue;
+
+                       /* Pack the next mbuf's data into the descriptor */
+                       if (txm_seg->next != NULL && frag_left > 0 && use_sgl)
+                               break;
 
                        done = (txm_seg->next == NULL && left == 0);
                        ionic_tx_tso_post(q, desc, txm_seg,
@@ -507,6 +409,7 @@ ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
                                start, done);
                        desc = ionic_tx_tso_next(txq, &elem);
                        start = false;
+                       seglen = mss;
                }
 
                txm_seg = txm_seg->next;
@@ -517,157 +420,6 @@ ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
        return 0;
 }
 
-static __rte_always_inline int
-ionic_tx(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
-{
-       struct ionic_queue *q = &txq->qcq.q;
-       struct ionic_txq_desc *desc, *desc_base = q->base;
-       struct ionic_txq_sg_desc_v1 *sg_desc_base = q->sg_base;
-       struct ionic_txq_sg_elem *elem;
-       struct ionic_tx_stats *stats = &txq->stats;
-       struct rte_mbuf *txm_seg;
-       void **info;
-       rte_iova_t data_iova;
-       uint64_t ol_flags = txm->ol_flags;
-       uint64_t addr, cmd;
-       uint8_t opcode = IONIC_TXQ_DESC_OPCODE_CSUM_NONE;
-       uint8_t flags = 0;
-
-       desc = &desc_base[q->head_idx];
-       info = IONIC_INFO_PTR(q, q->head_idx);
-
-       if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM) &&
-           (txq->flags & IONIC_QCQ_F_CSUM_L3)) {
-               opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
-               flags |= IONIC_TXQ_DESC_FLAG_CSUM_L3;
-       }
-
-       if (((ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) &&
-            (txq->flags & IONIC_QCQ_F_CSUM_TCP)) ||
-           ((ol_flags & RTE_MBUF_F_TX_UDP_CKSUM) &&
-            (txq->flags & IONIC_QCQ_F_CSUM_UDP))) {
-               opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
-               flags |= IONIC_TXQ_DESC_FLAG_CSUM_L4;
-       }
-
-       if (opcode == IONIC_TXQ_DESC_OPCODE_CSUM_NONE)
-               stats->no_csum++;
-
-       if (((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ||
-            (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) &&
-           ((ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) ||
-            (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6))) {
-               flags |= IONIC_TXQ_DESC_FLAG_ENCAP;
-       }
-
-       if (ol_flags & RTE_MBUF_F_TX_VLAN) {
-               flags |= IONIC_TXQ_DESC_FLAG_VLAN;
-               desc->vlan_tci = rte_cpu_to_le_16(txm->vlan_tci);
-       }
-
-       addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm));
-
-       cmd = encode_txq_desc_cmd(opcode, flags, txm->nb_segs - 1, addr);
-       desc->cmd = rte_cpu_to_le_64(cmd);
-       desc->len = rte_cpu_to_le_16(txm->data_len);
-
-       info[0] = txm;
-
-       if (txm->nb_segs > 1) {
-               txm_seg = txm->next;
-
-               elem = sg_desc_base[q->head_idx].elems;
-
-               while (txm_seg != NULL) {
-                       /* Stash the mbuf ptr in the array */
-                       info++;
-                       *info = txm_seg;
-
-                       /* Configure the SGE */
-                       data_iova = rte_mbuf_data_iova(txm_seg);
-                       elem->len = rte_cpu_to_le_16(txm_seg->data_len);
-                       elem->addr = rte_cpu_to_le_64(data_iova);
-                       elem++;
-
-                       txm_seg = txm_seg->next;
-               }
-       }
-
-       q->head_idx = Q_NEXT_TO_POST(q, 1);
-
-       return 0;
-}
-
-uint16_t
-ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
-               uint16_t nb_pkts)
-{
-       struct ionic_tx_qcq *txq = tx_queue;
-       struct ionic_queue *q = &txq->qcq.q;
-       struct ionic_tx_stats *stats = &txq->stats;
-       struct rte_mbuf *mbuf;
-       uint32_t bytes_tx = 0;
-       uint16_t nb_avail, nb_tx = 0;
-       int err;
-
-       struct ionic_txq_desc *desc_base = q->base;
-       if (!(txq->flags & IONIC_QCQ_F_CMB))
-               rte_prefetch0(&desc_base[q->head_idx]);
-       rte_prefetch0(IONIC_INFO_PTR(q, q->head_idx));
-
-       if (tx_pkts) {
-               rte_mbuf_prefetch_part1(tx_pkts[0]);
-               rte_mbuf_prefetch_part2(tx_pkts[0]);
-       }
-
-       if (unlikely(ionic_q_space_avail(q) < txq->free_thresh)) {
-               /* Cleaning old buffers */
-               ionic_tx_flush(txq);
-       }
-
-       nb_avail = ionic_q_space_avail(q);
-       if (unlikely(nb_avail < nb_pkts)) {
-               stats->stop += nb_pkts - nb_avail;
-               nb_pkts = nb_avail;
-       }
-
-       while (nb_tx < nb_pkts) {
-               uint16_t next_idx = Q_NEXT_TO_POST(q, 1);
-               if (!(txq->flags & IONIC_QCQ_F_CMB))
-                       rte_prefetch0(&desc_base[next_idx]);
-               rte_prefetch0(IONIC_INFO_PTR(q, next_idx));
-
-               if (nb_tx + 1 < nb_pkts) {
-                       rte_mbuf_prefetch_part1(tx_pkts[nb_tx + 1]);
-                       rte_mbuf_prefetch_part2(tx_pkts[nb_tx + 1]);
-               }
-
-               mbuf = tx_pkts[nb_tx];
-
-               if (mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
-                       err = ionic_tx_tso(txq, mbuf);
-               else
-                       err = ionic_tx(txq, mbuf);
-               if (err) {
-                       stats->drop += nb_pkts - nb_tx;
-                       break;
-               }
-
-               bytes_tx += mbuf->pkt_len;
-               nb_tx++;
-       }
-
-       if (nb_tx > 0) {
-               rte_wmb();
-               ionic_q_flush(q);
-
-               stats->packets += nb_tx;
-               stats->bytes += bytes_tx;
-       }
-
-       return nb_tx;
-}
-
 /*********************************************************************
  *
  *  TX prep functions
@@ -820,7 +572,7 @@ ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
 }
 
 #define IONIC_CSUM_FLAG_MASK (IONIC_RXQ_COMP_CSUM_F_VLAN - 1)
-static const uint64_t ionic_csum_flags[IONIC_CSUM_FLAG_MASK]
+const uint64_t ionic_csum_flags[IONIC_CSUM_FLAG_MASK]
                __rte_cache_aligned = {
        /* IP_BAD set */
        [IONIC_RXQ_COMP_CSUM_F_IP_BAD] = RTE_MBUF_F_RX_IP_CKSUM_BAD,
@@ -850,7 +602,7 @@ static const uint64_t ionic_csum_flags[IONIC_CSUM_FLAG_MASK]
 };
 
 /* RTE_PTYPE_UNKNOWN is 0x0 */
-static const uint32_t ionic_ptype_table[IONIC_RXQ_COMP_PKT_TYPE_MASK]
+const uint32_t ionic_ptype_table[IONIC_RXQ_COMP_PKT_TYPE_MASK]
                __rte_cache_aligned = {
        [IONIC_PKT_TYPE_NON_IP]   = RTE_PTYPE_UNKNOWN,
        [IONIC_PKT_TYPE_IPV4]     = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,
@@ -884,203 +636,6 @@ ionic_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
        return ptypes;
 }
 
-/*
- * Cleans one descriptor. Connects the filled mbufs into a chain.
- * Does not advance the tail index.
- */
-static __rte_always_inline void
-ionic_rx_clean_one(struct ionic_rx_qcq *rxq,
-               struct ionic_rxq_comp *cq_desc,
-               struct ionic_rx_service *rx_svc)
-{
-       struct ionic_queue *q = &rxq->qcq.q;
-       struct rte_mbuf *rxm, *rxm_seg, *prev_rxm;
-       struct ionic_rx_stats *stats = &rxq->stats;
-       uint64_t pkt_flags = 0;
-       uint32_t pkt_type;
-       uint32_t left, i;
-       uint16_t cq_desc_len;
-       uint8_t ptype, cflags;
-       void **info;
-
-       cq_desc_len = rte_le_to_cpu_16(cq_desc->len);
-
-       info = IONIC_INFO_PTR(q, q->tail_idx);
-
-       rxm = info[0];
-
-       if (cq_desc->status) {
-               stats->bad_cq_status++;
-               return;
-       }
-
-       if (cq_desc_len > rxq->frame_size || cq_desc_len == 0) {
-               stats->bad_len++;
-               return;
-       }
-
-       info[0] = NULL;
-
-       /* Set the mbuf metadata based on the cq entry */
-       rxm->rearm_data[0] = rxq->rearm_data;
-       rxm->pkt_len = cq_desc_len;
-       rxm->data_len = RTE_MIN(rxq->hdr_seg_size, cq_desc_len);
-       left = cq_desc_len - rxm->data_len;
-       rxm->nb_segs = cq_desc->num_sg_elems + 1;
-       prev_rxm = rxm;
-
-       for (i = 1; i < rxm->nb_segs && left; i++) {
-               rxm_seg = info[i];
-               info[i] = NULL;
-
-               /* Set the chained mbuf metadata */
-               rxm_seg->rearm_data[0] = rxq->rearm_seg_data;
-               rxm_seg->data_len = RTE_MIN(rxq->seg_size, left);
-               left -= rxm_seg->data_len;
-
-               /* Link the mbuf */
-               prev_rxm->next = rxm_seg;
-               prev_rxm = rxm_seg;
-       }
-
-       /* Terminate the mbuf chain */
-       prev_rxm->next = NULL;
-
-       /* RSS */
-       pkt_flags |= RTE_MBUF_F_RX_RSS_HASH;
-       rxm->hash.rss = rte_le_to_cpu_32(cq_desc->rss_hash);
-
-       /* Vlan Strip */
-       if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN) {
-               pkt_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
-               rxm->vlan_tci = rte_le_to_cpu_16(cq_desc->vlan_tci);
-       }
-
-       /* Checksum */
-       if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
-               cflags = cq_desc->csum_flags & IONIC_CSUM_FLAG_MASK;
-               pkt_flags |= ionic_csum_flags[cflags];
-       }
-
-       rxm->ol_flags = pkt_flags;
-
-       /* Packet Type */
-       ptype = cq_desc->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK;
-       pkt_type = ionic_ptype_table[ptype];
-       if (pkt_type == RTE_PTYPE_UNKNOWN) {
-               struct rte_ether_hdr *eth_h = rte_pktmbuf_mtod(rxm,
-                               struct rte_ether_hdr *);
-               uint16_t ether_type = eth_h->ether_type;
-               if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))
-                       pkt_type = RTE_PTYPE_L2_ETHER_ARP;
-               else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_LLDP))
-                       pkt_type = RTE_PTYPE_L2_ETHER_LLDP;
-               else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_1588))
-                       pkt_type = RTE_PTYPE_L2_ETHER_TIMESYNC;
-               stats->mtods++;
-       }
-
-       rxm->packet_type = pkt_type;
-
-       rx_svc->rx_pkts[rx_svc->nb_rx] = rxm;
-       rx_svc->nb_rx++;
-
-       stats->packets++;
-       stats->bytes += rxm->pkt_len;
-}
-
-/*
- * Fills one descriptor with mbufs. Does not advance the head index.
- */
-static __rte_always_inline int
-ionic_rx_fill_one(struct ionic_rx_qcq *rxq)
-{
-       struct ionic_queue *q = &rxq->qcq.q;
-       struct rte_mbuf *rxm, *rxm_seg;
-       struct ionic_rxq_desc *desc, *desc_base = q->base;
-       struct ionic_rxq_sg_desc *sg_desc, *sg_desc_base = q->sg_base;
-       rte_iova_t data_iova;
-       uint32_t i;
-       void **info;
-       int ret;
-
-       info = IONIC_INFO_PTR(q, q->head_idx);
-       desc = &desc_base[q->head_idx];
-       sg_desc = &sg_desc_base[q->head_idx];
-
-       /* mbuf is unused => whole chain is unused */
-       if (unlikely(info[0]))
-               return 0;
-
-       if (rxq->mb_idx == 0) {
-               ret = rte_mempool_get_bulk(rxq->mb_pool,
-                                       (void **)rxq->mbs,
-                                       IONIC_MBUF_BULK_ALLOC);
-               if (ret) {
-                       assert(0);
-                       return -ENOMEM;
-               }
-
-               rxq->mb_idx = IONIC_MBUF_BULK_ALLOC;
-       }
-
-       rxm = rxq->mbs[--rxq->mb_idx];
-       info[0] = rxm;
-
-       data_iova = rte_mbuf_data_iova_default(rxm);
-       desc->addr = rte_cpu_to_le_64(data_iova);
-
-       for (i = 1; i < q->num_segs; i++) {
-               /* mbuf is unused => rest of the chain is unused */
-               if (info[i])
-                       return 0;
-
-               if (rxq->mb_idx == 0) {
-                       ret = rte_mempool_get_bulk(rxq->mb_pool,
-                                       (void **)rxq->mbs,
-                                       IONIC_MBUF_BULK_ALLOC);
-                       if (ret) {
-                               assert(0);
-                               return -ENOMEM;
-                       }
-
-                       rxq->mb_idx = IONIC_MBUF_BULK_ALLOC;
-               }
-
-               rxm_seg = rxq->mbs[--rxq->mb_idx];
-               info[i] = rxm_seg;
-
-               /* The data_off does not get set to 0 until later */
-               data_iova = rxm_seg->buf_iova;
-               sg_desc->elems[i - 1].addr = rte_cpu_to_le_64(data_iova);
-       }
-
-       return 0;
-}
-
-/*
- * Fills all descriptors with mbufs.
- */
-static int __rte_cold
-ionic_rx_fill(struct ionic_rx_qcq *rxq)
-{
-       struct ionic_queue *q = &rxq->qcq.q;
-       uint32_t i;
-       int err;
-
-       for (i = 1; i < q->num_descs; i++) {
-               err = ionic_rx_fill_one(rxq);
-               if (err)
-                       return err;
-
-               q->head_idx = Q_NEXT_TO_POST(q, 1);
-       }
-
-       ionic_q_flush(q);
-
-       return 0;
-}
-
 /*
  * Perform one-time initialization of descriptor fields
  * which will not change for the life of the queue.
@@ -1148,10 +703,13 @@ ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
        if (err)
                return err;
 
-       /* Allocate buffers for descriptor rings */
-       if (ionic_rx_fill(rxq) != 0) {
-               IONIC_PRINT(ERR, "Could not alloc mbuf for queue:%d",
-                       rx_queue_id);
+       /* Allocate buffers for descriptor ring */
+       if (rxq->flags & IONIC_QCQ_F_SG)
+               err = ionic_rx_fill_sg(rxq);
+       else
+               err = ionic_rx_fill(rxq);
+       if (err != 0) {
+               IONIC_PRINT(ERR, "Could not fill queue %d", rx_queue_id);
                return -1;
        }
 
@@ -1160,55 +718,6 @@ ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
        return 0;
 }
 
-/*
- * Walk the CQ to find completed receive descriptors.
- * Any completed descriptor found is refilled.
- */
-static __rte_always_inline void
-ionic_rxq_service(struct ionic_rx_qcq *rxq, uint32_t work_to_do,
-               struct ionic_rx_service *rx_svc)
-{
-       struct ionic_cq *cq = &rxq->qcq.cq;
-       struct ionic_queue *q = &rxq->qcq.q;
-       struct ionic_rxq_desc *q_desc_base = q->base;
-       struct ionic_rxq_comp *cq_desc, *cq_desc_base = cq->base;
-       uint32_t work_done = 0;
-
-       cq_desc = &cq_desc_base[cq->tail_idx];
-
-       while (color_match(cq_desc->pkt_type_color, cq->done_color)) {
-               cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);
-
-               if (cq->tail_idx == 0)
-                       cq->done_color = !cq->done_color;
-
-               /* Prefetch 8 x 8B bufinfo */
-               rte_prefetch0(IONIC_INFO_PTR(q, Q_NEXT_TO_SRVC(q, 8)));
-               /* Prefetch 4 x 16B comp */
-               rte_prefetch0(&cq_desc_base[Q_NEXT_TO_SRVC(cq, 4)]);
-               /* Prefetch 4 x 16B descriptors */
-               if (!(rxq->flags & IONIC_QCQ_F_CMB))
-                       rte_prefetch0(&q_desc_base[Q_NEXT_TO_POST(q, 4)]);
-
-               ionic_rx_clean_one(rxq, cq_desc, rx_svc);
-
-               q->tail_idx = Q_NEXT_TO_SRVC(q, 1);
-
-               (void)ionic_rx_fill_one(rxq);
-
-               q->head_idx = Q_NEXT_TO_POST(q, 1);
-
-               if (++work_done == work_to_do)
-                       break;
-
-               cq_desc = &cq_desc_base[cq->tail_idx];
-       }
-
-       /* Update the queue indices and ring the doorbell */
-       if (work_done)
-               ionic_q_flush(q);
-}
-
 /*
  * Stop Receive Units for specified queue.
  */
@@ -1237,21 +746,6 @@ ionic_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
        return 0;
 }
 
-uint16_t
-ionic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
-               uint16_t nb_pkts)
-{
-       struct ionic_rx_qcq *rxq = rx_queue;
-       struct ionic_rx_service rx_svc;
-
-       rx_svc.rx_pkts = rx_pkts;
-       rx_svc.nb_rx = 0;
-
-       ionic_rxq_service(rxq, nb_pkts, &rx_svc);
-
-       return rx_svc.nb_rx;
-}
-
 int
 ionic_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
 {
diff --git a/drivers/net/ionic/ionic_rxtx.h b/drivers/net/ionic/ionic_rxtx.h
index f950d6472c..5939777963 100644
--- a/drivers/net/ionic/ionic_rxtx.h
+++ b/drivers/net/ionic/ionic_rxtx.h
@@ -5,7 +5,19 @@
 #ifndef _IONIC_RXTX_H_
 #define _IONIC_RXTX_H_
 
-#include <rte_mbuf.h>
+#include <stdint.h>
+
+#include "ionic_if.h"
+
+struct ionic_rx_qcq;
+struct ionic_tx_qcq;
+struct rte_eth_dev;
+struct rte_eth_rxconf;
+struct rte_eth_rxq_info;
+struct rte_eth_txconf;
+struct rte_eth_txq_info;
+struct rte_mbuf;
+struct rte_mempool;
 
 struct ionic_rx_service {
        /* cb in */
@@ -14,13 +26,12 @@ struct ionic_rx_service {
        uint16_t nb_rx;
 };
 
-uint16_t ionic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
-       uint16_t nb_pkts);
-uint16_t ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
-       uint16_t nb_pkts);
-uint16_t ionic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
-       uint16_t nb_pkts);
+#define IONIC_CSUM_FLAG_MASK   (IONIC_RXQ_COMP_CSUM_F_VLAN - 1)
+
+extern const uint64_t ionic_csum_flags[IONIC_CSUM_FLAG_MASK];
+extern const uint32_t ionic_ptype_table[IONIC_RXQ_COMP_PKT_TYPE_MASK];
 
+/* ionic_rxtx.c */
 int ionic_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
        uint16_t nb_desc, uint32_t socket_id,
        const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp);
@@ -45,4 +56,25 @@ int ionic_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
 
 const uint32_t *ionic_dev_supported_ptypes_get(struct rte_eth_dev *dev);
 
+int ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm);
+
+uint16_t ionic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+       uint16_t nb_pkts);
+
+/* ionic_rxtx_simple.c */
+uint16_t ionic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+       uint16_t nb_pkts);
+uint16_t ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+       uint16_t nb_pkts);
+
+int ionic_rx_fill(struct ionic_rx_qcq *rxq);
+
+/* ionic_rxtx_sg.c */
+uint16_t ionic_recv_pkts_sg(void *rx_queue, struct rte_mbuf **rx_pkts,
+       uint16_t nb_pkts);
+uint16_t ionic_xmit_pkts_sg(void *tx_queue, struct rte_mbuf **tx_pkts,
+       uint16_t nb_pkts);
+
+int ionic_rx_fill_sg(struct ionic_rx_qcq *rxq);
+
 #endif /* _IONIC_RXTX_H_ */
diff --git a/drivers/net/ionic/ionic_rxtx_sg.c b/drivers/net/ionic/ionic_rxtx_sg.c
new file mode 100644
index 0000000000..bdca3fa4b4
--- /dev/null
+++ b/drivers/net/ionic/ionic_rxtx_sg.c
@@ -0,0 +1,496 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018-2022 Advanced Micro Devices, Inc. All Rights Reserved.
+ */
+
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <assert.h>
+
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include <rte_atomic.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_ether.h>
+#include <rte_prefetch.h>
+
+#include "ionic.h"
+#include "ionic_if.h"
+#include "ionic_dev.h"
+#include "ionic_lif.h"
+#include "ionic_rxtx.h"
+
+static __rte_always_inline void
+ionic_tx_flush_sg(struct ionic_tx_qcq *txq)
+{
+       struct ionic_cq *cq = &txq->qcq.cq;
+       struct ionic_queue *q = &txq->qcq.q;
+       struct rte_mbuf *txm;
+       struct ionic_txq_comp *cq_desc, *cq_desc_base = cq->base;
+       void **info;
+       uint32_t i;
+
+       cq_desc = &cq_desc_base[cq->tail_idx];
+
+       while (color_match(cq_desc->color, cq->done_color)) {
+               cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);
+               if (cq->tail_idx == 0)
+                       cq->done_color = !cq->done_color;
+
+               /* Prefetch 4 x 16B comp at cq->tail_idx + 4 */
+               if ((cq->tail_idx & 0x3) == 0)
+                       rte_prefetch0(&cq_desc_base[Q_NEXT_TO_SRVC(cq, 4)]);
+
+               while (q->tail_idx != rte_le_to_cpu_16(cq_desc->comp_index)) {
+                       /* Prefetch 8 mbuf ptrs at q->tail_idx + 2 */
+                       rte_prefetch0(IONIC_INFO_PTR(q, Q_NEXT_TO_SRVC(q, 2)));
+
+                       /* Prefetch next mbuf */
+                       void **next_info =
+                               IONIC_INFO_PTR(q, Q_NEXT_TO_SRVC(q, 1));
+                       if (next_info[0])
+                               rte_mbuf_prefetch_part2(next_info[0]);
+                       if (next_info[1])
+                               rte_mbuf_prefetch_part2(next_info[1]);
+
+                       info = IONIC_INFO_PTR(q, q->tail_idx);
+                       for (i = 0; i < q->num_segs; i++) {
+                               txm = info[i];
+                               if (!txm)
+                                       break;
+
+                               if (txq->flags & IONIC_QCQ_F_FAST_FREE)
+                                       rte_mempool_put(txm->pool, txm);
+                               else
+                                       rte_pktmbuf_free_seg(txm);
+
+                               info[i] = NULL;
+                       }
+
+                       q->tail_idx = Q_NEXT_TO_SRVC(q, 1);
+               }
+
+               cq_desc = &cq_desc_base[cq->tail_idx];
+       }
+}
+
+static __rte_always_inline int
+ionic_tx_sg(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
+{
+       struct ionic_queue *q = &txq->qcq.q;
+       struct ionic_txq_desc *desc, *desc_base = q->base;
+       struct ionic_txq_sg_desc_v1 *sg_desc, *sg_desc_base = q->sg_base;
+       struct ionic_txq_sg_elem *elem;
+       struct ionic_tx_stats *stats = &txq->stats;
+       struct rte_mbuf *txm_seg;
+       rte_iova_t data_iova;
+       void **info;
+       uint64_t ol_flags = txm->ol_flags;
+       uint64_t addr, cmd;
+       uint8_t opcode = IONIC_TXQ_DESC_OPCODE_CSUM_NONE;
+       uint8_t flags = 0;
+
+       desc = &desc_base[q->head_idx];
+       sg_desc = &sg_desc_base[q->head_idx];
+       info = IONIC_INFO_PTR(q, q->head_idx);
+
+       if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM) &&
+           (txq->flags & IONIC_QCQ_F_CSUM_L3)) {
+               opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
+               flags |= IONIC_TXQ_DESC_FLAG_CSUM_L3;
+       }
+
+       if (((ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) &&
+            (txq->flags & IONIC_QCQ_F_CSUM_TCP)) ||
+           ((ol_flags & RTE_MBUF_F_TX_UDP_CKSUM) &&
+            (txq->flags & IONIC_QCQ_F_CSUM_UDP))) {
+               opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
+               flags |= IONIC_TXQ_DESC_FLAG_CSUM_L4;
+       }
+
+       if (opcode == IONIC_TXQ_DESC_OPCODE_CSUM_NONE)
+               stats->no_csum++;
+
+       if (((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ||
+            (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) &&
+           ((ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) ||
+            (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6))) {
+               flags |= IONIC_TXQ_DESC_FLAG_ENCAP;
+       }
+
+       if (ol_flags & RTE_MBUF_F_TX_VLAN) {
+               flags |= IONIC_TXQ_DESC_FLAG_VLAN;
+               desc->vlan_tci = rte_cpu_to_le_16(txm->vlan_tci);
+       }
+
+       addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm));
+
+       cmd = encode_txq_desc_cmd(opcode, flags, txm->nb_segs - 1, addr);
+       desc->cmd = rte_cpu_to_le_64(cmd);
+       desc->len = rte_cpu_to_le_16(txm->data_len);
+
+       info[0] = txm;
+
+       if (txm->nb_segs > 1) {
+               txm_seg = txm->next;
+
+               elem = sg_desc->elems;
+
+               while (txm_seg != NULL) {
+                       /* Stash the mbuf ptr in the array */
+                       info++;
+                       *info = txm_seg;
+
+                       /* Configure the SGE */
+                       data_iova = rte_mbuf_data_iova(txm_seg);
+                       elem->len = rte_cpu_to_le_16(txm_seg->data_len);
+                       elem->addr = rte_cpu_to_le_64(data_iova);
+                       elem++;
+
+                       txm_seg = txm_seg->next;
+               }
+       }
+
+       q->head_idx = Q_NEXT_TO_POST(q, 1);
+
+       return 0;
+}
+
+uint16_t
+ionic_xmit_pkts_sg(void *tx_queue, struct rte_mbuf **tx_pkts,
+               uint16_t nb_pkts)
+{
+       struct ionic_tx_qcq *txq = tx_queue;
+       struct ionic_queue *q = &txq->qcq.q;
+       struct ionic_tx_stats *stats = &txq->stats;
+       struct rte_mbuf *mbuf;
+       uint32_t bytes_tx = 0;
+       uint16_t nb_avail, nb_tx = 0;
+       int err;
+
+       struct ionic_txq_desc *desc_base = q->base;
+       if (!(txq->flags & IONIC_QCQ_F_CMB))
+               rte_prefetch0(&desc_base[q->head_idx]);
+       rte_prefetch0(IONIC_INFO_PTR(q, q->head_idx));
+
+       if (tx_pkts) {
+               rte_mbuf_prefetch_part1(tx_pkts[0]);
+               rte_mbuf_prefetch_part2(tx_pkts[0]);
+       }
+
+       if (ionic_q_space_avail(q) < txq->free_thresh) {
+               /* Cleaning old buffers */
+               ionic_tx_flush_sg(txq);
+       }
+
+       nb_avail = ionic_q_space_avail(q);
+       if (nb_avail < nb_pkts) {
+               stats->stop += nb_pkts - nb_avail;
+               nb_pkts = nb_avail;
+       }
+
+       while (nb_tx < nb_pkts) {
+               uint16_t next_idx = Q_NEXT_TO_POST(q, 1);
+               if (!(txq->flags & IONIC_QCQ_F_CMB))
+                       rte_prefetch0(&desc_base[next_idx]);
+               rte_prefetch0(IONIC_INFO_PTR(q, next_idx));
+
+               if (nb_tx + 1 < nb_pkts) {
+                       rte_mbuf_prefetch_part1(tx_pkts[nb_tx + 1]);
+                       rte_mbuf_prefetch_part2(tx_pkts[nb_tx + 1]);
+               }
+
+               mbuf = tx_pkts[nb_tx];
+
+               if (mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
+                       err = ionic_tx_tso(txq, mbuf);
+               else
+                       err = ionic_tx_sg(txq, mbuf);
+               if (err) {
+                       stats->drop += nb_pkts - nb_tx;
+                       break;
+               }
+
+               bytes_tx += mbuf->pkt_len;
+               nb_tx++;
+       }
+
+       if (nb_tx > 0) {
+               rte_wmb();
+               ionic_q_flush(q);
+
+               stats->packets += nb_tx;
+               stats->bytes += bytes_tx;
+       }
+
+       return nb_tx;
+}
+
+/*
+ * Cleans one descriptor. Connects the filled mbufs into a chain.
+ * Does not advance the tail index.
+ */
+static __rte_always_inline void
+ionic_rx_clean_one_sg(struct ionic_rx_qcq *rxq,
+               struct ionic_rxq_comp *cq_desc,
+               struct ionic_rx_service *rx_svc)
+{
+       struct ionic_queue *q = &rxq->qcq.q;
+       struct rte_mbuf *rxm;
+       struct rte_mbuf *rxm_seg, *prev_rxm;
+       struct ionic_rx_stats *stats = &rxq->stats;
+       uint64_t pkt_flags = 0;
+       uint32_t pkt_type;
+       uint32_t left, i;
+       uint16_t cq_desc_len;
+       uint8_t ptype, cflags;
+       void **info;
+
+       cq_desc_len = rte_le_to_cpu_16(cq_desc->len);
+
+       info = IONIC_INFO_PTR(q, q->tail_idx);
+
+       rxm = info[0];
+
+       if (cq_desc->status) {
+               stats->bad_cq_status++;
+               return;
+       }
+
+       if (cq_desc_len > rxq->frame_size || cq_desc_len == 0) {
+               stats->bad_len++;
+               return;
+       }
+
+       info[0] = NULL;
+
+       /* Set the mbuf metadata based on the cq entry */
+       rxm->rearm_data[0] = rxq->rearm_data;
+       rxm->pkt_len = cq_desc_len;
+       rxm->data_len = RTE_MIN(rxq->hdr_seg_size, cq_desc_len);
+       left = cq_desc_len - rxm->data_len;
+       rxm->nb_segs = cq_desc->num_sg_elems + 1;
+
+       prev_rxm = rxm;
+
+       for (i = 1; i < rxm->nb_segs && left; i++) {
+               rxm_seg = info[i];
+               info[i] = NULL;
+
+               /* Set the chained mbuf metadata */
+               rxm_seg->rearm_data[0] = rxq->rearm_seg_data;
+               rxm_seg->data_len = RTE_MIN(rxq->seg_size, left);
+               left -= rxm_seg->data_len;
+
+               /* Link the mbuf */
+               prev_rxm->next = rxm_seg;
+               prev_rxm = rxm_seg;
+       }
+
+       /* Terminate the mbuf chain */
+       prev_rxm->next = NULL;
+
+       /* RSS */
+       pkt_flags |= RTE_MBUF_F_RX_RSS_HASH;
+       rxm->hash.rss = rte_le_to_cpu_32(cq_desc->rss_hash);
+
+       /* Vlan Strip */
+       if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN) {
+               pkt_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
+               rxm->vlan_tci = rte_le_to_cpu_16(cq_desc->vlan_tci);
+       }
+
+       /* Checksum */
+       if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
+               cflags = cq_desc->csum_flags & IONIC_CSUM_FLAG_MASK;
+               pkt_flags |= ionic_csum_flags[cflags];
+       }
+
+       rxm->ol_flags = pkt_flags;
+
+       /* Packet Type */
+       ptype = cq_desc->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK;
+       pkt_type = ionic_ptype_table[ptype];
+       if (pkt_type == RTE_PTYPE_UNKNOWN) {
+               struct rte_ether_hdr *eth_h = rte_pktmbuf_mtod(rxm,
+                               struct rte_ether_hdr *);
+               uint16_t ether_type = eth_h->ether_type;
+               if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))
+                       pkt_type = RTE_PTYPE_L2_ETHER_ARP;
+               else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_LLDP))
+                       pkt_type = RTE_PTYPE_L2_ETHER_LLDP;
+               else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_1588))
+                       pkt_type = RTE_PTYPE_L2_ETHER_TIMESYNC;
+               stats->mtods++;
+       } else if (pkt_flags & RTE_MBUF_F_RX_VLAN) {
+               pkt_type |= RTE_PTYPE_L2_ETHER_VLAN;
+       } else {
+               pkt_type |= RTE_PTYPE_L2_ETHER;
+       }
+
+       rxm->packet_type = pkt_type;
+
+       rx_svc->rx_pkts[rx_svc->nb_rx] = rxm;
+       rx_svc->nb_rx++;
+
+       stats->packets++;
+       stats->bytes += rxm->pkt_len;
+}
+
+/*
+ * Fills one descriptor with mbufs. Does not advance the head index.
+ */
+static __rte_always_inline int
+ionic_rx_fill_one_sg(struct ionic_rx_qcq *rxq)
+{
+       struct ionic_queue *q = &rxq->qcq.q;
+       struct rte_mbuf *rxm;
+       struct rte_mbuf *rxm_seg;
+       struct ionic_rxq_desc *desc, *desc_base = q->base;
+       struct ionic_rxq_sg_desc *sg_desc, *sg_desc_base = q->sg_base;
+       rte_iova_t data_iova;
+       uint32_t i;
+       void **info;
+       int ret;
+
+       info = IONIC_INFO_PTR(q, q->head_idx);
+       desc = &desc_base[q->head_idx];
+       sg_desc = &sg_desc_base[q->head_idx];
+
+       /* mbuf is unused => whole chain is unused */
+       if (info[0])
+               return 0;
+
+       if (rxq->mb_idx == 0) {
+               ret = rte_mempool_get_bulk(rxq->mb_pool,
+                                       (void **)rxq->mbs,
+                                       IONIC_MBUF_BULK_ALLOC);
+               if (ret) {
+                       assert(0);
+                       return -ENOMEM;
+               }
+
+               rxq->mb_idx = IONIC_MBUF_BULK_ALLOC;
+       }
+
+       rxm = rxq->mbs[--rxq->mb_idx];
+       info[0] = rxm;
+
+       data_iova = rte_mbuf_data_iova_default(rxm);
+       desc->addr = rte_cpu_to_le_64(data_iova);
+
+       for (i = 1; i < q->num_segs; i++) {
+               /* mbuf is unused => rest of the chain is unused */
+               if (info[i])
+                       return 0;
+
+               if (rxq->mb_idx == 0) {
+                       ret = rte_mempool_get_bulk(rxq->mb_pool,
+                                       (void **)rxq->mbs,
+                                       IONIC_MBUF_BULK_ALLOC);
+                       if (ret) {
+                               assert(0);
+                               return -ENOMEM;
+                       }
+
+                       rxq->mb_idx = IONIC_MBUF_BULK_ALLOC;
+               }
+
+               rxm_seg = rxq->mbs[--rxq->mb_idx];
+               info[i] = rxm_seg;
+
+               /* The data_off does not get set to 0 until later */
+               data_iova = rxm_seg->buf_iova;
+               sg_desc->elems[i - 1].addr = rte_cpu_to_le_64(data_iova);
+       }
+
+       return 0;
+}
+
+/*
+ * Walk the CQ to find completed receive descriptors.
+ * Any completed descriptor found is refilled.
+ */
+static __rte_always_inline void
+ionic_rxq_service_sg(struct ionic_rx_qcq *rxq, uint32_t work_to_do,
+               struct ionic_rx_service *rx_svc)
+{
+       struct ionic_cq *cq = &rxq->qcq.cq;
+       struct ionic_queue *q = &rxq->qcq.q;
+       struct ionic_rxq_desc *q_desc_base = q->base;
+       struct ionic_rxq_comp *cq_desc, *cq_desc_base = cq->base;
+       uint32_t work_done = 0;
+
+       cq_desc = &cq_desc_base[cq->tail_idx];
+
+       while (color_match(cq_desc->pkt_type_color, cq->done_color)) {
+               cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);
+               if (cq->tail_idx == 0)
+                       cq->done_color = !cq->done_color;
+
+               /* Prefetch 8 x 8B bufinfo */
+               rte_prefetch0(IONIC_INFO_PTR(q, Q_NEXT_TO_SRVC(q, 8)));
+               /* Prefetch 4 x 16B comp */
+               rte_prefetch0(&cq_desc_base[Q_NEXT_TO_SRVC(cq, 4)]);
+               /* Prefetch 4 x 16B descriptors */
+               if (!(rxq->flags & IONIC_QCQ_F_CMB))
+                       rte_prefetch0(&q_desc_base[Q_NEXT_TO_POST(q, 4)]);
+
+               /* Clean one descriptor */
+               ionic_rx_clean_one_sg(rxq, cq_desc, rx_svc);
+               q->tail_idx = Q_NEXT_TO_SRVC(q, 1);
+
+               /* Fill one descriptor */
+               (void)ionic_rx_fill_one_sg(rxq);
+
+               q->head_idx = Q_NEXT_TO_POST(q, 1);
+
+               if (++work_done == work_to_do)
+                       break;
+
+               cq_desc = &cq_desc_base[cq->tail_idx];
+       }
+
+       /* Update the queue indices and ring the doorbell */
+       if (work_done)
+               ionic_q_flush(q);
+}
+
+uint16_t
+ionic_recv_pkts_sg(void *rx_queue, struct rte_mbuf **rx_pkts,
+               uint16_t nb_pkts)
+{
+       struct ionic_rx_qcq *rxq = rx_queue;
+       struct ionic_rx_service rx_svc;
+
+       rx_svc.rx_pkts = rx_pkts;
+       rx_svc.nb_rx = 0;
+
+       ionic_rxq_service_sg(rxq, nb_pkts, &rx_svc);
+
+       return rx_svc.nb_rx;
+}
+
+/*
+ * Fills all descriptors with mbufs.
+ */
+int __rte_cold
+ionic_rx_fill_sg(struct ionic_rx_qcq *rxq)
+{
+       struct ionic_queue *q = &rxq->qcq.q;
+       uint32_t i;
+       int err = 0;
+
+       for (i = 0; i < q->num_descs - 1u; i++) {
+               err = ionic_rx_fill_one_sg(rxq);
+               if (err)
+                       break;
+
+               q->head_idx = Q_NEXT_TO_POST(q, 1);
+       }
+
+       ionic_q_flush(q);
+
+       return err;
+}
diff --git a/drivers/net/ionic/ionic_rxtx_simple.c b/drivers/net/ionic/ionic_rxtx_simple.c
new file mode 100644
index 0000000000..fe10e2624e
--- /dev/null
+++ b/drivers/net/ionic/ionic_rxtx_simple.c
@@ -0,0 +1,417 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018-2022 Advanced Micro Devices, Inc. All Rights Reserved.
+ */
+
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <assert.h>
+
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include <rte_atomic.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_ether.h>
+#include <rte_prefetch.h>
+
+#include "ionic.h"
+#include "ionic_if.h"
+#include "ionic_dev.h"
+#include "ionic_lif.h"
+#include "ionic_rxtx.h"
+
+static __rte_always_inline void
+ionic_tx_flush(struct ionic_tx_qcq *txq)
+{
+       struct ionic_cq *cq = &txq->qcq.cq;
+       struct ionic_queue *q = &txq->qcq.q;
+       struct rte_mbuf *txm;
+       struct ionic_txq_comp *cq_desc, *cq_desc_base = cq->base;
+       void **info;
+
+       cq_desc = &cq_desc_base[cq->tail_idx];
+
+       while (color_match(cq_desc->color, cq->done_color)) {
+               cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);
+               if (cq->tail_idx == 0)
+                       cq->done_color = !cq->done_color;
+
+               /* Prefetch 4 x 16B comp at cq->tail_idx + 4 */
+               if ((cq->tail_idx & 0x3) == 0)
+                       rte_prefetch0(&cq_desc_base[Q_NEXT_TO_SRVC(cq, 4)]);
+
+               while (q->tail_idx != rte_le_to_cpu_16(cq_desc->comp_index)) {
+                       /* Prefetch 8 mbuf ptrs at q->tail_idx + 2 */
+                       rte_prefetch0(&q->info[Q_NEXT_TO_SRVC(q, 2)]);
+
+                       /* Prefetch next mbuf */
+                       void **next_info =
+                               &q->info[Q_NEXT_TO_SRVC(q, 1)];
+                       if (next_info[0])
+                               rte_mbuf_prefetch_part2(next_info[0]);
+
+                       info = &q->info[q->tail_idx];
+                       {
+                               txm = info[0];
+
+                               if (txq->flags & IONIC_QCQ_F_FAST_FREE)
+                                       rte_mempool_put(txm->pool, txm);
+                               else
+                                       rte_pktmbuf_free_seg(txm);
+
+                               info[0] = NULL;
+                       }
+
+                       q->tail_idx = Q_NEXT_TO_SRVC(q, 1);
+               }
+
+               cq_desc = &cq_desc_base[cq->tail_idx];
+       }
+}
+
+static __rte_always_inline int
+ionic_tx(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
+{
+       struct ionic_queue *q = &txq->qcq.q;
+       struct ionic_txq_desc *desc, *desc_base = q->base;
+       struct ionic_tx_stats *stats = &txq->stats;
+       void **info;
+       uint64_t ol_flags = txm->ol_flags;
+       uint64_t addr, cmd;
+       uint8_t opcode = IONIC_TXQ_DESC_OPCODE_CSUM_NONE;
+       uint8_t flags = 0;
+
+       if (txm->nb_segs > 1)
+               return -EINVAL;
+
+       desc = &desc_base[q->head_idx];
+       info = &q->info[q->head_idx];
+
+       if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM) &&
+           (txq->flags & IONIC_QCQ_F_CSUM_L3)) {
+               opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
+               flags |= IONIC_TXQ_DESC_FLAG_CSUM_L3;
+       }
+
+       if (((ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) &&
+            (txq->flags & IONIC_QCQ_F_CSUM_TCP)) ||
+           ((ol_flags & RTE_MBUF_F_TX_UDP_CKSUM) &&
+            (txq->flags & IONIC_QCQ_F_CSUM_UDP))) {
+               opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
+               flags |= IONIC_TXQ_DESC_FLAG_CSUM_L4;
+       }
+
+       if (opcode == IONIC_TXQ_DESC_OPCODE_CSUM_NONE)
+               stats->no_csum++;
+
+       if (((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ||
+            (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) &&
+           ((ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) ||
+            (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6))) {
+               flags |= IONIC_TXQ_DESC_FLAG_ENCAP;
+       }
+
+       if (ol_flags & RTE_MBUF_F_TX_VLAN) {
+               flags |= IONIC_TXQ_DESC_FLAG_VLAN;
+               desc->vlan_tci = rte_cpu_to_le_16(txm->vlan_tci);
+       }
+
+       addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm));
+
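+       /* Encode opcode, flags, and the buffer address into the 64-bit cmd word */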
+       cmd = encode_txq_desc_cmd(opcode, flags, 0, addr);
+       desc->cmd = rte_cpu_to_le_64(cmd);
+       desc->len = rte_cpu_to_le_16(txm->data_len);
+
+       info[0] = txm;
+
+       q->head_idx = Q_NEXT_TO_POST(q, 1);
+
+       return 0;
+}
+
+uint16_t
+ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+               uint16_t nb_pkts)
+{
+       struct ionic_tx_qcq *txq = tx_queue;
+       struct ionic_queue *q = &txq->qcq.q;
+       struct ionic_tx_stats *stats = &txq->stats;
+       struct rte_mbuf *mbuf;
+       uint32_t bytes_tx = 0;
+       uint16_t nb_avail, nb_tx = 0;
+       int err;
+
+       struct ionic_txq_desc *desc_base = q->base;
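+       /* Only prefetch descriptors that live in host memory, not in the CMB */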
+       if (!(txq->flags & IONIC_QCQ_F_CMB))
+               rte_prefetch0(&desc_base[q->head_idx]);
+       rte_prefetch0(&q->info[q->head_idx]);
+
+       if (tx_pkts) {
+               rte_mbuf_prefetch_part1(tx_pkts[0]);
+               rte_mbuf_prefetch_part2(tx_pkts[0]);
+       }
+
+       if (ionic_q_space_avail(q) < txq->free_thresh) {
+               /* Cleaning old buffers */
+               ionic_tx_flush(txq);
+       }
+
+       nb_avail = ionic_q_space_avail(q);
+       if (nb_avail < nb_pkts) {
+               stats->stop += nb_pkts - nb_avail;
+               nb_pkts = nb_avail;
+       }
+
+       while (nb_tx < nb_pkts) {
+               uint16_t next_idx = Q_NEXT_TO_POST(q, 1);
+               if (!(txq->flags & IONIC_QCQ_F_CMB))
+                       rte_prefetch0(&desc_base[next_idx]);
+               rte_prefetch0(&q->info[next_idx]);
+
+               if (nb_tx + 1 < nb_pkts) {
+                       rte_mbuf_prefetch_part1(tx_pkts[nb_tx + 1]);
+                       rte_mbuf_prefetch_part2(tx_pkts[nb_tx + 1]);
+               }
+
+               mbuf = tx_pkts[nb_tx];
+
+               if (mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
+                       err = ionic_tx_tso(txq, mbuf);
+               else
+                       err = ionic_tx(txq, mbuf);
+               if (err) {
+                       stats->drop += nb_pkts - nb_tx;
+                       break;
+               }
+
+               bytes_tx += mbuf->pkt_len;
+               nb_tx++;
+       }
+
+       if (nb_tx > 0) {
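+               /* Ensure the descriptor writes are visible before the doorbell */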
+               rte_wmb();
+               ionic_q_flush(q);
+
+               stats->packets += nb_tx;
+               stats->bytes += bytes_tx;
+       }
+
+       return nb_tx;
+}
+
+/*
+ * Cleans one descriptor and hands its mbuf to the caller.
+ * Does not advance the tail index.
+ */
+static __rte_always_inline void
+ionic_rx_clean_one(struct ionic_rx_qcq *rxq,
+               struct ionic_rxq_comp *cq_desc,
+               struct ionic_rx_service *rx_svc)
+{
+       struct ionic_queue *q = &rxq->qcq.q;
+       struct rte_mbuf *rxm;
+       struct ionic_rx_stats *stats = &rxq->stats;
+       uint64_t pkt_flags = 0;
+       uint32_t pkt_type;
+       uint16_t cq_desc_len;
+       uint8_t ptype, cflags;
+       void **info;
+
+       cq_desc_len = rte_le_to_cpu_16(cq_desc->len);
+
+       info = &q->info[q->tail_idx];
+
+       rxm = info[0];
+
+       if (cq_desc->status) {
+               stats->bad_cq_status++;
+               return;
+       }
+
+       if (cq_desc_len > rxq->frame_size || cq_desc_len == 0) {
+               stats->bad_len++;
+               return;
+       }
+
+       info[0] = NULL;
+
+       /* Set the mbuf metadata based on the cq entry */
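+       /* A single 8B write resets data_off, refcnt, nb_segs, and port */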
+       rxm->rearm_data[0] = rxq->rearm_data;
+       rxm->pkt_len = cq_desc_len;
+       rxm->data_len = cq_desc_len;
+
+       /* RSS */
+       pkt_flags |= RTE_MBUF_F_RX_RSS_HASH;
+       rxm->hash.rss = rte_le_to_cpu_32(cq_desc->rss_hash);
+
+       /* Vlan Strip */
+       if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN) {
+               pkt_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
+               rxm->vlan_tci = rte_le_to_cpu_16(cq_desc->vlan_tci);
+       }
+
+       /* Checksum */
+       if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
+               cflags = cq_desc->csum_flags & IONIC_CSUM_FLAG_MASK;
+               pkt_flags |= ionic_csum_flags[cflags];
+       }
+
+       rxm->ol_flags = pkt_flags;
+
+       /* Packet Type */
+       ptype = cq_desc->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK;
+       pkt_type = ionic_ptype_table[ptype];
+       if (pkt_type == RTE_PTYPE_UNKNOWN) {
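+               /* HW could not classify: check the ethertype for L2 control frames */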
+               struct rte_ether_hdr *eth_h = rte_pktmbuf_mtod(rxm,
+                               struct rte_ether_hdr *);
+               uint16_t ether_type = eth_h->ether_type;
+               if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))
+                       pkt_type = RTE_PTYPE_L2_ETHER_ARP;
+               else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_LLDP))
+                       pkt_type = RTE_PTYPE_L2_ETHER_LLDP;
+               else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_1588))
+                       pkt_type = RTE_PTYPE_L2_ETHER_TIMESYNC;
+               stats->mtods++;
+       } else if (pkt_flags & RTE_MBUF_F_RX_VLAN) {
+               pkt_type |= RTE_PTYPE_L2_ETHER_VLAN;
+       } else {
+               pkt_type |= RTE_PTYPE_L2_ETHER;
+       }
+
+       rxm->packet_type = pkt_type;
+
+       rx_svc->rx_pkts[rx_svc->nb_rx] = rxm;
+       rx_svc->nb_rx++;
+
+       stats->packets++;
+       stats->bytes += rxm->pkt_len;
+}
+
+/*
+ * Fills one descriptor with an mbuf.
+ */
+static __rte_always_inline int
+ionic_rx_fill_one(struct ionic_rx_qcq *rxq)
+{
+       struct ionic_queue *q = &rxq->qcq.q;
+       struct rte_mbuf *rxm;
+       struct ionic_rxq_desc *desc, *desc_base = q->base;
+       rte_iova_t data_iova;
+       void **info;
+       int ret;
+
+       info = &q->info[q->head_idx];
+       desc = &desc_base[q->head_idx];
+
+       /* The posted mbuf was never consumed, so reuse it rather than refill */
+       if (info[0])
+               return 0;
+
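+       /* Replenish the queue's local mbuf stash in bulk when it runs out */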
+       if (rxq->mb_idx == 0) {
+               ret = rte_mempool_get_bulk(rxq->mb_pool,
+                                       (void **)rxq->mbs,
+                                       IONIC_MBUF_BULK_ALLOC);
+               if (ret) {
+                       assert(0);
+                       return -ENOMEM;
+               }
+
+               rxq->mb_idx = IONIC_MBUF_BULK_ALLOC;
+       }
+
+       rxm = rxq->mbs[--rxq->mb_idx];
+       info[0] = rxm;
+
+       data_iova = rte_mbuf_data_iova_default(rxm);
+       desc->addr = rte_cpu_to_le_64(data_iova);
+
+       return 0;
+}
+
+/*
+ * Walk the CQ to find completed receive descriptors.
+ * Any completed descriptor found is refilled.
+ */
+static __rte_always_inline void
+ionic_rxq_service(struct ionic_rx_qcq *rxq, uint32_t work_to_do,
+               struct ionic_rx_service *rx_svc)
+{
+       struct ionic_cq *cq = &rxq->qcq.cq;
+       struct ionic_queue *q = &rxq->qcq.q;
+       struct ionic_rxq_desc *q_desc_base = q->base;
+       struct ionic_rxq_comp *cq_desc, *cq_desc_base = cq->base;
+       uint32_t work_done = 0;
+
+       cq_desc = &cq_desc_base[cq->tail_idx];
+
+       while (color_match(cq_desc->pkt_type_color, cq->done_color)) {
+               cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);
+               if (cq->tail_idx == 0)
+                       cq->done_color = !cq->done_color;
+
+               /* Prefetch 8 x 8B bufinfo */
+               rte_prefetch0(&q->info[Q_NEXT_TO_SRVC(q, 8)]);
+               /* Prefetch 4 x 16B comp */
+               rte_prefetch0(&cq_desc_base[Q_NEXT_TO_SRVC(cq, 4)]);
+               /* Prefetch 4 x 16B descriptors */
+               if (!(rxq->flags & IONIC_QCQ_F_CMB))
+                       rte_prefetch0(&q_desc_base[Q_NEXT_TO_POST(q, 4)]);
+
+               /* Clean one descriptor */
+               ionic_rx_clean_one(rxq, cq_desc, rx_svc);
+               q->tail_idx = Q_NEXT_TO_SRVC(q, 1);
+
+               /* Fill one descriptor */
+               (void)ionic_rx_fill_one(rxq);
+
+               q->head_idx = Q_NEXT_TO_POST(q, 1);
+
+               if (++work_done == work_to_do)
+                       break;
+
+               cq_desc = &cq_desc_base[cq->tail_idx];
+       }
+
+       /* Update the queue indices and ring the doorbell */
+       if (work_done)
+               ionic_q_flush(q);
+}
+
+uint16_t
+ionic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+               uint16_t nb_pkts)
+{
+       struct ionic_rx_qcq *rxq = rx_queue;
+       struct ionic_rx_service rx_svc;
+
+       rx_svc.rx_pkts = rx_pkts;
+       rx_svc.nb_rx = 0;
+
+       ionic_rxq_service(rxq, nb_pkts, &rx_svc);
+
+       return rx_svc.nb_rx;
+}
+
+/*
+ * Fills all descriptors with mbufs.
+ */
+int __rte_cold
+ionic_rx_fill(struct ionic_rx_qcq *rxq)
+{
+       struct ionic_queue *q = &rxq->qcq.q;
+       uint32_t i;
+       int err = 0;
+
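+       /* Post all but one descriptor; the ring is never run completely full */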
+       for (i = 0; i < q->num_descs - 1u; i++) {
+               err = ionic_rx_fill_one(rxq);
+               if (err)
+                       break;
+
+               q->head_idx = Q_NEXT_TO_POST(q, 1);
+       }
+
+       ionic_q_flush(q);
+
+       return err;
+}
diff --git a/drivers/net/ionic/meson.build b/drivers/net/ionic/meson.build
index 2869e0027c..629e6a037d 100644
--- a/drivers/net/ionic/meson.build
+++ b/drivers/net/ionic/meson.build
@@ -16,4 +16,6 @@ sources = files(
         'ionic_main.c',
         'ionic_rx_filter.c',
         'ionic_rxtx.c',
+        'ionic_rxtx_simple.c',
+        'ionic_rxtx_sg.c',
 )
-- 
2.17.1
