Add lock-protected RX/TX paths for the VF. They are used when
the VF link reset function is needed.
With the RX/TX locks in place, RX/TX can be stopped. That gives
us a chance to reset the VF link.

Please be aware there is a performance drop if the locked
path is chosen.
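
The GENERATE_RX_LOCK/GENERATE_TX_LOCK macros used below are not
defined in this patch; they are expected to come from an earlier
patch in this series. As a rough sketch only, a generated locked
TX burst function would presumably wrap the normal burst call in
the per-queue spinlock added here, along these lines (the body is
illustrative, not the macro's actual expansion):

    uint16_t
    eth_igb_xmit_pkts_lock(void *tx_queue, struct rte_mbuf **tx_pkts,
                           uint16_t nb_pkts)
    {
            struct igb_tx_queue *txq = tx_queue;
            uint16_t nb_tx;

            /* Hold the TX lock so a VF link reset can fence off
             * transmission by taking the same lock.
             */
            rte_spinlock_lock(&txq->tx_lock);
            nb_tx = eth_igb_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
            rte_spinlock_unlock(&txq->tx_lock);

            return nb_tx;
    }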

Signed-off-by: Wenzhuo Lu <wenzhuo.lu at intel.com>
---
 drivers/net/e1000/e1000_ethdev.h | 10 ++++++++++
 drivers/net/e1000/igb_ethdev.c   | 14 +++++++++++---
 drivers/net/e1000/igb_rxtx.c     | 26 +++++++++++++++++++++-----
 3 files changed, 42 insertions(+), 8 deletions(-)
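
Note: the RX_LOCK_FUNCTION/TX_LOCK_FUNCTION selectors used in the
diff are likewise defined outside this patch. Under the assumption
that some per-device option chooses the locked path, they presumably
read roughly like the following (the option check is hypothetical,
named here only for illustration):

    /* Hypothetical selector: pick the locked burst variant only when
     * the lock path is enabled, so the default path keeps full speed.
     */
    #define TX_LOCK_FUNCTION(dev, func) \
            (tx_lock_mode_enabled(dev) ? func##_lock : func)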

diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h
index e8bf8da..6a42994 100644
--- a/drivers/net/e1000/e1000_ethdev.h
+++ b/drivers/net/e1000/e1000_ethdev.h
@@ -319,6 +319,16 @@ uint16_t eth_igb_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts,
 uint16_t eth_igb_recv_scattered_pkts(void *rxq,
                struct rte_mbuf **rx_pkts, uint16_t nb_pkts);

+uint16_t eth_igb_xmit_pkts_lock(void *txq,
+                               struct rte_mbuf **tx_pkts,
+                               uint16_t nb_pkts);
+uint16_t eth_igb_recv_pkts_lock(void *rxq,
+                               struct rte_mbuf **rx_pkts,
+                               uint16_t nb_pkts);
+uint16_t eth_igb_recv_scattered_pkts_lock(void *rxq,
+                                         struct rte_mbuf **rx_pkts,
+                                         uint16_t nb_pkts);
+
 int eth_igb_rss_hash_update(struct rte_eth_dev *dev,
                            struct rte_eth_rss_conf *rss_conf);

diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index b0e5e6a..8aad741 100644
--- a/drivers/net/e1000/igb_ethdev.c
+++ b/drivers/net/e1000/igb_ethdev.c
@@ -909,15 +909,17 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
        PMD_INIT_FUNC_TRACE();

        eth_dev->dev_ops = &igbvf_eth_dev_ops;
-       eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
-       eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
+       eth_dev->rx_pkt_burst = RX_LOCK_FUNCTION(eth_dev, eth_igb_recv_pkts);
+       eth_dev->tx_pkt_burst = TX_LOCK_FUNCTION(eth_dev, eth_igb_xmit_pkts);

        /* for secondary processes, we don't initialise any further as primary
         * has already done this work. Only check we don't need a different
         * RX function */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY){
                if (eth_dev->data->scattered_rx)
-                       eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
+                       eth_dev->rx_pkt_burst =
+                               RX_LOCK_FUNCTION(eth_dev,
+                                                eth_igb_recv_scattered_pkts);
                return 0;
        }

@@ -1999,7 +2001,13 @@ eth_igb_supported_ptypes_get(struct rte_eth_dev *dev)
        };

        if (dev->rx_pkt_burst == eth_igb_recv_pkts ||
+#ifndef RTE_NEXT_ABI
            dev->rx_pkt_burst == eth_igb_recv_scattered_pkts)
+#else
+           dev->rx_pkt_burst == eth_igb_recv_scattered_pkts ||
+           dev->rx_pkt_burst == eth_igb_recv_pkts_lock ||
+           dev->rx_pkt_burst == eth_igb_recv_scattered_pkts_lock)
+#endif
                return ptypes;
        return NULL;
 }
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index 18aeead..7e97330 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -67,6 +67,7 @@
 #include <rte_tcp.h>
 #include <rte_sctp.h>
 #include <rte_string_fns.h>
+#include <rte_spinlock.h>

 #include "e1000_logs.h"
 #include "base/e1000_api.h"
@@ -107,6 +108,7 @@ struct igb_rx_queue {
        struct igb_rx_entry *sw_ring;   /**< address of RX software ring. */
        struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
        struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet. */
+       rte_spinlock_t      rx_lock; /**< Lock for packet reception. */
        uint16_t            nb_rx_desc; /**< number of RX descriptors. */
        uint16_t            rx_tail;    /**< current value of RDT register. */
        uint16_t            nb_rx_hold; /**< number of held free RX desc. */
@@ -174,6 +176,7 @@ struct igb_tx_queue {
        volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */
        uint64_t               tx_ring_phys_addr; /**< TX ring DMA address. */
        struct igb_tx_entry    *sw_ring; /**< virtual address of SW ring. */
+       rte_spinlock_t         tx_lock; /**< Lock for packet transmission. */
        volatile uint32_t      *tdt_reg_addr; /**< Address of TDT register. */
        uint32_t               txd_type;      /**< Device-specific TXD type */
        uint16_t               nb_tx_desc;    /**< number of TX descriptors. */
@@ -615,6 +618,8 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
        return nb_tx;
 }

+GENERATE_TX_LOCK(eth_igb_xmit_pkts, igb)
+
 /*********************************************************************
  *
  *  RX functions
@@ -931,6 +936,8 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
        return nb_rx;
 }

+GENERATE_RX_LOCK(eth_igb_recv_pkts, igb)
+
 uint16_t
 eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                         uint16_t nb_pkts)
@@ -1186,6 +1193,8 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
        return nb_rx;
 }

+GENERATE_RX_LOCK(eth_igb_recv_scattered_pkts, igb)
+
 /*
  * Maximum number of Ring Descriptors.
  *
@@ -1344,6 +1353,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
        txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
                queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
        txq->port_id = dev->data->port_id;
+       rte_spinlock_init(&txq->tx_lock);

        txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
        txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
@@ -1361,7 +1371,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
                     txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);

        igb_reset_tx_queue(txq, dev);
-       dev->tx_pkt_burst = eth_igb_xmit_pkts;
+       dev->tx_pkt_burst = TX_LOCK_FUNCTION(dev, eth_igb_xmit_pkts);
        dev->data->tx_queues[queue_idx] = txq;

        return 0;
@@ -1467,6 +1477,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
        rxq->port_id = dev->data->port_id;
        rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
                                  ETHER_CRC_LEN);
+       rte_spinlock_init(&rxq->rx_lock);

        /*
         *  Allocate RX ring hardware descriptors. A memzone large enough to
@@ -2323,7 +2334,7 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)

        /* Configure and enable each RX queue. */
        rctl_bsize = 0;
-       dev->rx_pkt_burst = eth_igb_recv_pkts;
+       dev->rx_pkt_burst = RX_LOCK_FUNCTION(dev, eth_igb_recv_pkts);
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                uint64_t bus_addr;
                uint32_t rxdctl;
@@ -2370,7 +2381,9 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
                                if (!dev->data->scattered_rx)
                                        PMD_INIT_LOG(DEBUG,
                                                     "forcing scatter mode");
-                               dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
+                               dev->rx_pkt_burst =
+                                       RX_LOCK_FUNCTION(dev,
+                                               eth_igb_recv_scattered_pkts);
                                dev->data->scattered_rx = 1;
                        }
                } else {
@@ -2381,7 +2394,9 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
                                rctl_bsize = buf_size;
                        if (!dev->data->scattered_rx)
                                PMD_INIT_LOG(DEBUG, "forcing scatter mode");
-                       dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
+                       dev->rx_pkt_burst =
+                               RX_LOCK_FUNCTION(dev,
+                                       eth_igb_recv_scattered_pkts);
                        dev->data->scattered_rx = 1;
                }

@@ -2414,7 +2429,8 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
        if (dev->data->dev_conf.rxmode.enable_scatter) {
                if (!dev->data->scattered_rx)
                        PMD_INIT_LOG(DEBUG, "forcing scatter mode");
-               dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
+               dev->rx_pkt_burst =
+                       RX_LOCK_FUNCTION(dev, eth_igb_recv_scattered_pkts);
                dev->data->scattered_rx = 1;
        }

-- 
2.1.4
