Add lock-protected RX/TX paths for the VF. They are used when
a link reset on the VF is needed. Once the RX/TX lock is
taken, RX/TX can be stopped. Then we have a chance to reset
the VF link.

Please be aware there is a performance drop if the lock path
is chosen.

Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
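Note: the GENERATE_RX_LOCK/GENERATE_TX_LOCK and
RX_LOCK_FUNCTION/TX_LOCK_FUNCTION macros used in this patch are not
defined in this diff, so their exact shape is an assumption here. As a
rough sketch (names and details illustrative, not the actual
definition), the RX generator could expand to a trylock wrapper around
the plain burst function, using the per-queue spinlock added below;
the TX variant would mirror it with tx_lock:

#define GENERATE_RX_LOCK(func, nic) \
uint16_t func##_lock(void *rx_queue, \
                     struct rte_mbuf **rx_pkts, \
                     uint16_t nb_pkts) \
{ \
        struct nic##_rx_queue *rxq = rx_queue; \
        uint16_t nb_rx = 0; \
 \
        /* Trylock: if the control path holds the lock (e.g. during \
         * a VF link reset), return 0 packets instead of blocking \
         * the datapath. \
         */ \
        if (rte_spinlock_trylock(&rxq->rx_lock)) { \
                nb_rx = func(rx_queue, rx_pkts, nb_pkts); \
                rte_spinlock_unlock(&rxq->rx_lock); \
        } \
 \
        return nb_rx; \
}
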
 drivers/net/ixgbe/ixgbe_ethdev.c   | 12 +++++--
 drivers/net/ixgbe/ixgbe_ethdev.h   | 20 +++++++++++
 drivers/net/ixgbe/ixgbe_rxtx.c     | 74 ++++++++++++++++++++++++++++++++------
 drivers/net/ixgbe/ixgbe_rxtx.h     | 13 +++++++
 drivers/net/ixgbe/ixgbe_rxtx_vec.c |  6 ++++
 5 files changed, 112 insertions(+), 13 deletions(-)
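
RX_LOCK_FUNCTION()/TX_LOCK_FUNCTION() likewise resolve to either the
plain burst function or its _lock variant; the selection condition is
not visible in this patch. A sketch, where rx_lock_mode is a purely
illustrative knob, not a real rte_eth_conf field:

/* Hypothetical selector; the real condition lives outside this diff. */
#define RX_LOCK_FUNCTION(dev, func) \
        ((dev)->data->dev_conf.rxmode.rx_lock_mode ? \
         func##_lock : func)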

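With trylock in the fast path, the control path can stop a queue by
simply holding its lock across the reset. A sketch of that side,
assuming the queue structures from this patch; vf_link_reset() is a
placeholder:

static void
ixgbe_vf_reset_with_rx_stopped(struct rte_eth_dev *dev, uint16_t qid)
{
        struct ixgbe_rx_queue *rxq = dev->data->rx_queues[qid];

        /* While held, concurrent ixgbe_recv_pkts_lock() calls return
         * 0 packets, so the queue is quiesced.
         */
        rte_spinlock_lock(&rxq->rx_lock);
        vf_link_reset(dev);     /* placeholder for the actual reset */
        rte_spinlock_unlock(&rxq->rx_lock);
}
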
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 05f4f29..fd2682f 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -1325,8 +1325,8 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
        PMD_INIT_FUNC_TRACE();

        eth_dev->dev_ops = &ixgbevf_eth_dev_ops;
-       eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
-       eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
+       eth_dev->rx_pkt_burst = RX_LOCK_FUNCTION(eth_dev, ixgbe_recv_pkts);
+       eth_dev->tx_pkt_burst = TX_LOCK_FUNCTION(eth_dev, ixgbe_xmit_pkts);

        /* for secondary processes, we don't initialise any further as primary
         * has already done this work. Only check we don't need a different
@@ -3012,7 +3012,15 @@ ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
        if (dev->rx_pkt_burst == ixgbe_recv_pkts ||
            dev->rx_pkt_burst == ixgbe_recv_pkts_lro_single_alloc ||
            dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc ||
+#ifndef RTE_NEXT_ABI
            dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc)
+#else
+           dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc ||
+           dev->rx_pkt_burst == ixgbe_recv_pkts_lock ||
+           dev->rx_pkt_burst == ixgbe_recv_pkts_lro_single_alloc_lock ||
+           dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc_lock ||
+           dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc_lock)
+#endif
                return ptypes;
        return NULL;
 }
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index 4ff6338..701107b 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -390,12 +390,32 @@ uint16_t ixgbe_recv_pkts_lro_single_alloc(void *rx_queue,
 uint16_t ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue,
                struct rte_mbuf **rx_pkts, uint16_t nb_pkts);

+uint16_t ixgbe_recv_pkts_lock(void *rx_queue,
+                             struct rte_mbuf **rx_pkts,
+                             uint16_t nb_pkts);
+uint16_t ixgbe_recv_pkts_bulk_alloc_lock(void *rx_queue,
+                                        struct rte_mbuf **rx_pkts,
+                                        uint16_t nb_pkts);
+uint16_t ixgbe_recv_pkts_lro_single_alloc_lock(void *rx_queue,
+                                              struct rte_mbuf **rx_pkts,
+                                              uint16_t nb_pkts);
+uint16_t ixgbe_recv_pkts_lro_bulk_alloc_lock(void *rx_queue,
+                                            struct rte_mbuf **rx_pkts,
+                                            uint16_t nb_pkts);
+
 uint16_t ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                uint16_t nb_pkts);

 uint16_t ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
                uint16_t nb_pkts);

+uint16_t ixgbe_xmit_pkts_lock(void *tx_queue,
+                             struct rte_mbuf **tx_pkts,
+                             uint16_t nb_pkts);
+uint16_t ixgbe_xmit_pkts_simple_lock(void *tx_queue,
+                                    struct rte_mbuf **tx_pkts,
+                                    uint16_t nb_pkts);
+
 int ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
                              struct rte_eth_rss_conf *rss_conf);

diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 9c6eaf2..a45d115 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -353,6 +353,8 @@ ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
        return nb_tx;
 }

+GENERATE_TX_LOCK(ixgbe_xmit_pkts_simple, ixgbe)
+
 static inline void
 ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
                volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
@@ -904,6 +906,8 @@ end_of_tx:
        return nb_tx;
 }

+GENERATE_TX_LOCK(ixgbe_xmit_pkts, ixgbe)
+
 /*********************************************************************
  *
  *  RX functions
@@ -1524,6 +1528,8 @@ ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
        return nb_rx;
 }

+GENERATE_RX_LOCK(ixgbe_recv_pkts_bulk_alloc, ixgbe)
+
 uint16_t
 ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                uint16_t nb_pkts)
@@ -1712,6 +1718,8 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
        return nb_rx;
 }

+GENERATE_RX_LOCK(ixgbe_recv_pkts, ixgbe)
+
 /**
  * Detect an RSC descriptor.
  */
@@ -2071,6 +2079,8 @@ ixgbe_recv_pkts_lro_single_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
        return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, false);
 }

+GENERATE_RX_LOCK(ixgbe_recv_pkts_lro_single_alloc, ixgbe)
+
 uint16_t
 ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
                               uint16_t nb_pkts)
@@ -2078,6 +2088,8 @@ ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
        return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, true);
 }

+GENERATE_RX_LOCK(ixgbe_recv_pkts_lro_bulk_alloc, ixgbe)
+
 /*********************************************************************
  *
  *  Queue management functions
@@ -2186,10 +2198,12 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
                                (rte_eal_process_type() != RTE_PROC_PRIMARY ||
                                        ixgbe_txq_vec_setup(txq) == 0)) {
                        PMD_INIT_LOG(DEBUG, "Vector tx enabled.");
-                       dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
+                       dev->tx_pkt_burst =
+                               TX_LOCK_FUNCTION(dev, ixgbe_xmit_pkts_vec);
                } else
 #endif
-               dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
+               dev->tx_pkt_burst =
+                       TX_LOCK_FUNCTION(dev, ixgbe_xmit_pkts_simple);
        } else {
                PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
                PMD_INIT_LOG(DEBUG,
@@ -2200,7 +2214,7 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
                                " - tx_rs_thresh = %lu " 
"[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
                                (unsigned long)txq->tx_rs_thresh,
                                (unsigned long)RTE_PMD_IXGBE_TX_MAX_BURST);
-               dev->tx_pkt_burst = ixgbe_xmit_pkts;
+               dev->tx_pkt_burst = TX_LOCK_FUNCTION(dev, ixgbe_xmit_pkts);
        }
 }

@@ -2347,6 +2361,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
        txq->txq_flags = tx_conf->txq_flags;
        txq->ops = &def_txq_ops;
        txq->tx_deferred_start = tx_conf->tx_deferred_start;
+       rte_spinlock_init(&txq->tx_lock);

        /*
         * Modification to set VFTDT for virtual function if vf is detected
@@ -2625,6 +2640,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
                                                        0 : ETHER_CRC_LEN);
        rxq->drop_en = rx_conf->rx_drop_en;
        rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+       rte_spinlock_init(&rxq->rx_lock);

        /*
         * The packet type in RX descriptor is different for different NICs.
@@ -4172,11 +4188,15 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)
                if (adapter->rx_bulk_alloc_allowed) {
                        PMD_INIT_LOG(DEBUG, "LRO is requested. Using a bulk "
                                           "allocation version");
-                       dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
+                       dev->rx_pkt_burst =
+                               RX_LOCK_FUNCTION(dev,
+                                       ixgbe_recv_pkts_lro_bulk_alloc);
                } else {
                        PMD_INIT_LOG(DEBUG, "LRO is requested. Using a single "
                                           "allocation version");
-                       dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
+                       dev->rx_pkt_burst =
+                               RX_LOCK_FUNCTION(dev,
+                                       ixgbe_recv_pkts_lro_single_alloc);
                }
        } else if (dev->data->scattered_rx) {
                /*
@@ -4188,12 +4208,16 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)
                                            "callback (port=%d).",
                                     dev->data->port_id);

-                       dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
+                       dev->rx_pkt_burst =
+                               RX_LOCK_FUNCTION(dev,
+                                       ixgbe_recv_scattered_pkts_vec);
                } else if (adapter->rx_bulk_alloc_allowed) {
                        PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk "
                                           "allocation callback (port=%d).",
                                     dev->data->port_id);
-                       dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
+                       dev->rx_pkt_burst =
+                               RX_LOCK_FUNCTION(dev,
+                                       ixgbe_recv_pkts_lro_bulk_alloc);
                } else {
                        PMD_INIT_LOG(DEBUG, "Using Regualr (non-vector, "
                                            "single allocation) "
@@ -4201,7 +4225,9 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)
                                            "(port=%d).",
                                     dev->data->port_id);

-                       dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
+                       dev->rx_pkt_burst =
+                               RX_LOCK_FUNCTION(dev,
+                                       ixgbe_recv_pkts_lro_single_alloc);
                }
        /*
         * Below we set "simple" callbacks according to port/queues parameters.
@@ -4217,28 +4243,36 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)
                             RTE_IXGBE_DESCS_PER_LOOP,
                             dev->data->port_id);

-               dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
+               dev->rx_pkt_burst = RX_LOCK_FUNCTION(dev, ixgbe_recv_pkts_vec);
        } else if (adapter->rx_bulk_alloc_allowed) {
                PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
                                    "satisfied. Rx Burst Bulk Alloc function "
                                    "will be used on port=%d.",
                             dev->data->port_id);

-               dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
+               dev->rx_pkt_burst =
+                       RX_LOCK_FUNCTION(dev,
+                               ixgbe_recv_pkts_bulk_alloc);
        } else {
                PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
                                    "satisfied, or Scattered Rx is requested "
                                    "(port=%d).",
                             dev->data->port_id);

-               dev->rx_pkt_burst = ixgbe_recv_pkts;
+               dev->rx_pkt_burst = RX_LOCK_FUNCTION(dev, ixgbe_recv_pkts);
        }

        /* Propagate information about RX function choice through all queues. */

        rx_using_sse =
                (dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec ||
+#ifndef RTE_NEXT_ABI
                dev->rx_pkt_burst == ixgbe_recv_pkts_vec);
+#else
+                dev->rx_pkt_burst == ixgbe_recv_pkts_vec ||
+                dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec_lock ||
+                dev->rx_pkt_burst == ixgbe_recv_pkts_vec_lock);
+#endif

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
@@ -5225,6 +5259,15 @@ ixgbe_recv_pkts_vec(
 }

 uint16_t __attribute__((weak))
+ixgbe_recv_pkts_vec_lock(
+       void __rte_unused *rx_queue,
+       struct rte_mbuf __rte_unused **rx_pkts,
+       uint16_t __rte_unused nb_pkts)
+{
+       return 0;
+}
+
+uint16_t __attribute__((weak))
 ixgbe_recv_scattered_pkts_vec(
        void __rte_unused *rx_queue,
        struct rte_mbuf __rte_unused **rx_pkts,
@@ -5233,6 +5276,15 @@ ixgbe_recv_scattered_pkts_vec(
        return 0;
 }

+uint16_t __attribute__((weak))
+ixgbe_recv_scattered_pkts_vec_lock(
+       void __rte_unused *rx_queue,
+       struct rte_mbuf __rte_unused **rx_pkts,
+       uint16_t __rte_unused nb_pkts)
+{
+       return 0;
+}
+
 int __attribute__((weak))
 ixgbe_rxq_vec_setup(struct ixgbe_rx_queue __rte_unused *rxq)
 {
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index 3691a19..5f0ca1f 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -34,6 +34,8 @@
 #ifndef _IXGBE_RXTX_H_
 #define _IXGBE_RXTX_H_

+#include <rte_spinlock.h>
+
 /*
  * Rings setup and release.
  *
@@ -126,6 +128,7 @@ struct ixgbe_rx_queue {
        struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
        struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
        uint64_t            mbuf_initializer; /**< value to init mbufs */
+       rte_spinlock_t      rx_lock; /**< Lock for packet reception. */
        uint16_t            nb_rx_desc; /**< number of RX descriptors. */
        uint16_t            rx_tail;  /**< current value of RDT register. */
        uint16_t            nb_rx_hold; /**< number of held free RX desc. */
@@ -212,6 +215,7 @@ struct ixgbe_tx_queue {
                struct ixgbe_tx_entry_v *sw_ring_v; /**< address of SW ring for vector PMD */
        };
        volatile uint32_t   *tdt_reg_addr; /**< Address of TDT register. */
+       rte_spinlock_t      tx_lock; /**< Lock for packet transmission. */
        uint16_t            nb_tx_desc;    /**< number of TX descriptors. */
        uint16_t            tx_tail;       /**< current value of TDT reg. */
        /**< Start freeing TX buffers if there are less free descriptors than
@@ -301,6 +305,12 @@ uint16_t ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
                uint16_t nb_pkts);
 uint16_t ixgbe_recv_scattered_pkts_vec(void *rx_queue,
                struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+uint16_t ixgbe_recv_pkts_vec_lock(void *rx_queue,
+                                 struct rte_mbuf **rx_pkts,
+                                 uint16_t nb_pkts);
+uint16_t ixgbe_recv_scattered_pkts_vec_lock(void *rx_queue,
+                                           struct rte_mbuf **rx_pkts,
+                                           uint16_t nb_pkts);
 int ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev);
 int ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq);
 void ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq);
@@ -309,6 +319,9 @@ void ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq);

 uint16_t ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
                uint16_t nb_pkts);
+uint16_t ixgbe_xmit_pkts_vec_lock(void *tx_queue,
+                                 struct rte_mbuf **tx_pkts,
+                                 uint16_t nb_pkts);
 int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq);

 #endif /* RTE_IXGBE_INC_VECTOR */
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec.c b/drivers/net/ixgbe/ixgbe_rxtx_vec.c
index e97ea82..32ecbd2 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec.c
@@ -420,6 +420,8 @@ ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
        return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
 }

+GENERATE_RX_LOCK(ixgbe_recv_pkts_vec, ixgbe)
+
 static inline uint16_t
 reassemble_packets(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_bufs,
                   uint16_t nb_bufs, uint8_t *split_flags)
@@ -526,6 +528,8 @@ ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
                &split_flags[i]);
 }

+GENERATE_RX_LOCK(ixgbe_recv_scattered_pkts_vec, ixgbe)
+
 static inline void
 vtx1(volatile union ixgbe_adv_tx_desc *txdp,
                struct rte_mbuf *pkt, uint64_t flags)
@@ -680,6 +684,8 @@ ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
        return nb_pkts;
 }

+GENERATE_TX_LOCK(ixgbe_xmit_pkts_vec, ixgbe)
+
 static void __attribute__((cold))
 ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
 {
-- 
1.9.3
