Define the buffer recycle mode specific function implementations for
the ixgbe driver. Currently, buffer recycle mode supports the 128-bit
vector path and can be enabled in both fast free and no fast free mode.

Suggested-by: Honnappa Nagarahalli <honnappa.nagaraha...@arm.com>
Signed-off-by: Feifei Wang <feifei.wa...@arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.w...@arm.com>
Reviewed-by: Honnappa Nagarahalli <honnappa.nagaraha...@arm.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c          |   1 +
 drivers/net/ixgbe/ixgbe_ethdev.h          |   3 +
 drivers/net/ixgbe/ixgbe_rxtx.c            |  25 +++++
 drivers/net/ixgbe/ixgbe_rxtx.h            |   4 +
 drivers/net/ixgbe/ixgbe_rxtx_vec_common.h | 127 ++++++++++++++++++++++
 5 files changed, 160 insertions(+)
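
Note: below is a minimal sketch of how the three new ixgbe hooks are
expected to be chained per recycle round. The chaining itself is
presumably done by the generic ethdev-layer wrapper introduced earlier
in this series; the rx_dev/tx_dev/rx_qid/tx_qid variables are
hypothetical and only illustrate the call order.

        struct rte_eth_rxq_buf_recycle_info info;
        uint16_t nb_stashed;

        /* One-time setup: expose the Rx queue's sw_ring, mempool and
         * refill/receive indexes to the recycle path.
         */
        ixgbe_rxq_buf_recycle_info_get(rx_dev, rx_qid, &info);

        /* Per round: move mbufs freed by Tx completion straight into
         * the Rx sw_ring at the current refill head ...
         */
        nb_stashed = ixgbe_tx_buf_stash_vec(tx_dev->data->tx_queues[tx_qid],
                        &info);

        /* ... then rearm the same number of Rx descriptors with them. */
        if (nb_stashed > 0)
                ixgbe_rx_descriptors_refill_vec(
                        rx_dev->data->rx_queues[rx_qid], nb_stashed);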

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 88118bc305..3bada9abbd 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -543,6 +543,7 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
        .set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
        .rxq_info_get         = ixgbe_rxq_info_get,
        .txq_info_get         = ixgbe_txq_info_get,
+       .rxq_buf_recycle_info_get = ixgbe_rxq_buf_recycle_info_get,
        .timesync_enable      = ixgbe_timesync_enable,
        .timesync_disable     = ixgbe_timesync_disable,
        .timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index 48290af512..ca6aa0da64 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -625,6 +625,9 @@ void ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 void ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
        struct rte_eth_txq_info *qinfo);
 
+void ixgbe_rxq_buf_recycle_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+               struct rte_eth_rxq_buf_recycle_info *rxq_buf_recycle_info);
+
 int ixgbevf_dev_rx_init(struct rte_eth_dev *dev);
 
 void ixgbevf_dev_tx_init(struct rte_eth_dev *dev);
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index c9d6ca9efe..ad276cbf33 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -2558,6 +2558,7 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
                                (rte_eal_process_type() != RTE_PROC_PRIMARY ||
                                        ixgbe_txq_vec_setup(txq) == 0)) {
                        PMD_INIT_LOG(DEBUG, "Vector tx enabled.");
+                       dev->tx_buf_stash = ixgbe_tx_buf_stash_vec;
                        dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
                } else
                dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
@@ -4852,6 +4853,7 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)
                             RTE_IXGBE_DESCS_PER_LOOP,
                             dev->data->port_id);
 
+               dev->rx_descriptors_refill = ixgbe_rx_descriptors_refill_vec;
                dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
        } else if (adapter->rx_bulk_alloc_allowed) {
                PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
@@ -5623,6 +5625,29 @@ ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
        qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
 }
 
+void
+ixgbe_rxq_buf_recycle_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+       struct rte_eth_rxq_buf_recycle_info *rxq_buf_recycle_info)
+{
+       struct ixgbe_rx_queue *rxq;
+       struct ixgbe_adapter *adapter = dev->data->dev_private;
+
+       rxq = dev->data->rx_queues[queue_id];
+
+       rxq_buf_recycle_info->buf_ring = (void *)rxq->sw_ring;
+       rxq_buf_recycle_info->mp = rxq->mb_pool;
+       rxq_buf_recycle_info->buf_ring_size = rxq->nb_rx_desc;
+       rxq_buf_recycle_info->receive_tail = &rxq->rx_tail;
+
+       if (adapter->rx_vec_allowed) {
+               rxq_buf_recycle_info->refill_request = RTE_IXGBE_RXQ_REARM_THRESH;
+               rxq_buf_recycle_info->refill_head = &rxq->rxrearm_start;
+       } else {
+               rxq_buf_recycle_info->refill_request = rxq->rx_free_thresh;
+               rxq_buf_recycle_info->refill_head = &rxq->rx_free_trigger;
+       }
+}
+
 /*
  * [VF] Initializes Receive Unit.
  */
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index 668a5b9814..18f890f91a 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -295,6 +295,10 @@ int ixgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt);
 extern const uint32_t ptype_table[IXGBE_PACKET_TYPE_MAX];
 extern const uint32_t ptype_table_tn[IXGBE_PACKET_TYPE_TN_MAX];
 
+uint16_t ixgbe_tx_buf_stash_vec(void *tx_queue,
+               struct rte_eth_rxq_buf_recycle_info *rxq_buf_recycle_info);
+uint16_t ixgbe_rx_descriptors_refill_vec(void *rx_queue, uint16_t nb);
+
 uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
                                    uint16_t nb_pkts);
 int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq);
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
index a4d9ec9b08..e66a4a2d5b 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
@@ -139,6 +139,133 @@ tx_backlog_entry(struct ixgbe_tx_entry_v *txep,
                txep[i].mbuf = tx_pkts[i];
 }
 
+uint16_t
+ixgbe_tx_buf_stash_vec(void *tx_queue,
+               struct rte_eth_rxq_buf_recycle_info *rxq_buf_recycle_info)
+{
+       struct ixgbe_tx_queue *txq = tx_queue;
+       struct ixgbe_tx_entry *txep;
+       struct rte_mbuf **rxep;
+       struct rte_mbuf *m[RTE_IXGBE_TX_MAX_FREE_BUF_SZ];
+       int i, j, n;
+       uint32_t status;
+       uint16_t avail = 0;
+       uint16_t buf_ring_size = rxq_buf_recycle_info->buf_ring_size;
+       uint16_t mask = rxq_buf_recycle_info->buf_ring_size - 1;
+       uint16_t refill_request = rxq_buf_recycle_info->refill_request;
+       uint16_t refill_head = *rxq_buf_recycle_info->refill_head;
+       uint16_t receive_tail = *rxq_buf_recycle_info->receive_tail;
+
+       /* Get available recycling Rx buffers. */
+       avail = (buf_ring_size - (refill_head - receive_tail)) & mask;
+
+       /* Check Tx free thresh and Rx available space. */
+       if (txq->nb_tx_free > txq->tx_free_thresh || avail <= txq->tx_rs_thresh)
+               return 0;
+
+       /* check DD bits on threshold descriptor */
+       status = txq->tx_ring[txq->tx_next_dd].wb.status;
+       if (!(status & IXGBE_ADVTXD_STAT_DD))
+               return 0;
+
+       n = txq->tx_rs_thresh;
+
+       /* Buffer recycling can only handle the case where the Rx buffer
+        * ring does not wrap around. There are two cases for this:
+        *
+        * case 1: The refill head of the Rx buffer ring needs to be aligned
+        * with the buffer ring size. In this case, the number of freed Tx
+        * buffers must be equal to refill_request.
+        *
+        * case 2: The refill head of the Rx buffer ring does not need to be
+        * aligned with the buffer ring size. In this case, the update of the
+        * refill head cannot exceed the Rx buffer ring size.
+        */
+       if (refill_request != n ||
+               (!refill_request && (refill_head + n > buf_ring_size)))
+               return 0;
+
+       /* First buffer to free from S/W ring is at index
+        * tx_next_dd - (tx_rs_thresh-1).
+        */
+       txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
+       rxep = rxq_buf_recycle_info->buf_ring;
+       rxep += refill_head;
+
+       if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
+               /* Directly put mbufs from Tx to Rx. */
+               for (i = 0; i < n; i++, rxep++, txep++)
+                       *rxep = txep[0].mbuf;
+       } else {
+               for (i = 0, j = 0; i < n; i++) {
+                       /* Avoid txq containing buffers from an unexpected mempool. */
+                       if (unlikely(rxq_buf_recycle_info->mp
+                                               != txep[i].mbuf->pool))
+                               return 0;
+
+                       m[j] = rte_pktmbuf_prefree_seg(txep[i].mbuf);
+
+                       /* In case 1, each Tx buffer must be the last
+                        * reference to its mbuf.
+                        */
+                       if (unlikely(m[j] == NULL && refill_request))
+                               return 0;
+                       /* In case 2, the number of valid Tx free
+                        * buffers should be recorded.
+                        */
+                       j++;
+               }
+               rte_memcpy(rxep, m, sizeof(void *) * j);
+       }
+
+       /* Update counters for Tx. */
+       txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
+       txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
+       if (txq->tx_next_dd >= txq->nb_tx_desc)
+               txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
+
+       return n;
+}
+
+uint16_t
+ixgbe_rx_descriptors_refill_vec(void *rx_queue, uint16_t nb)
+{
+       struct ixgbe_rx_queue *rxq = rx_queue;
+       struct ixgbe_rx_entry *rxep;
+       volatile union ixgbe_adv_rx_desc *rxdp;
+       uint16_t rx_id;
+       uint64_t paddr;
+       uint64_t dma_addr;
+       uint16_t i;
+
+       rxdp = rxq->rx_ring + rxq->rxrearm_start;
+       rxep = &rxq->sw_ring[rxq->rxrearm_start];
+
+       for (i = 0; i < nb; i++) {
+               /* Initialize rxdp descs. */
+               paddr = (rxep[i].mbuf)->buf_iova + RTE_PKTMBUF_HEADROOM;
+               dma_addr = rte_cpu_to_le_64(paddr);
+               /* flush desc with pa dma_addr */
+               rxdp[i].read.hdr_addr = 0;
+               rxdp[i].read.pkt_addr = dma_addr;
+       }
+
+       /* Update the descriptor initializer index */
+       rxq->rxrearm_start += nb;
+       if (rxq->rxrearm_start >= rxq->nb_rx_desc)
+               rxq->rxrearm_start = 0;
+
+       rxq->rxrearm_nb -= nb;
+
+       rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
+                       (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
+
+       /* Update the tail pointer on the NIC */
+       IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
+
+       return nb;
+}
+
 static inline void
 _ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
 {
-- 
2.25.1
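
Illustrative note on the free-slot computation in ixgbe_tx_buf_stash_vec()
above, using hypothetical values (a 512-entry Rx buffer ring, refill_head
= 40, receive_tail = 8):

        uint16_t buf_ring_size = 512, mask = 511;
        uint16_t refill_head = 40, receive_tail = 8;

        /* (512 - (40 - 8)) & 511 = 480 slots of the Rx buffer ring can
         * still accept recycled buffers; the '& mask' keeps the result
         * within the ring size when refill_head has wrapped around
         * below receive_tail.
         */
        uint16_t avail = (buf_ring_size - (refill_head - receive_tail)) & mask;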
