Add lock-protected RX/TX paths for the VF, for use when
a link reset on the VF is needed. With the per-queue
RX/TX locks in place, the RX/TX paths can be stopped
safely, which gives us a chance to reset the VF link.

Please be aware that there is a performance drop when
the lock path is chosen.

Signed-off-by: Zhe Tao <zhe.tao at intel.com>
---
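Note for reviewers: the RX_LOCK_FUNCTION/TX_LOCK_FUNCTION selectors and
the GENERATE_RX_LOCK/GENERATE_TX_LOCK wrapper generators used below are
not defined in this diff; they are expected to come from an earlier
patch in this series. As a rough sketch only (the predicate name and
the exact expansion are my assumptions, not the definitive
implementation), the RX-side pieces presumably amount to a trylock
wrapper around the real burst function, so the data path never blocks
while the control path holds the per-queue lock:

/* Hypothetical selector: pick the _lock variant when lock mode is on. */
#define RX_LOCK_FUNCTION(dev, func) \
        (i40e_rx_lock_enabled(dev) ? func##_lock : func)

/* Presumed wrapper generator: skip the burst while the lock is held. */
#define GENERATE_RX_LOCK(func, nic) \
uint16_t func##_lock(void *rx_queue, \
                     struct rte_mbuf **rx_pkts, \
                     uint16_t nb_pkts) \
{ \
        struct nic##_rx_queue *rxq = rx_queue; \
        uint16_t nb_rx = 0; \
 \
        if (rte_spinlock_trylock(&rxq->rx_lock)) { \
                nb_rx = func(rx_queue, rx_pkts, nb_pkts); \
                rte_spinlock_unlock(&rxq->rx_lock); \
        } \
 \
        return nb_rx; \
}

The TX side would mirror this with tx_lock and GENERATE_TX_LOCK.
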
 drivers/net/i40e/i40e_ethdev.c    |  4 ++--
 drivers/net/i40e/i40e_ethdev.h    |  4 ++++
 drivers/net/i40e/i40e_ethdev_vf.c |  4 ++--
 drivers/net/i40e/i40e_rxtx.c      | 45 +++++++++++++++++++++++++--------------
 drivers/net/i40e/i40e_rxtx.h      | 30 ++++++++++++++++++++++++++
 5 files changed, 67 insertions(+), 20 deletions(-)
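
The rx_backup/tx_backup fields added to struct i40e_adapter are meant
to let the control path park and later restore the datapath around a
VF link reset. A minimal sketch of that usage, assuming the i40e
driver headers; the helper name and call site are my assumptions and
nothing below appears in this diff:

/* Hypothetical control-path helper: quiesce RX/TX around a VF reset. */
static void
i40evf_stop_rxtx_for_reset(struct rte_eth_dev *dev)
{
        struct i40e_adapter *ad =
                I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct i40e_rx_queue *rxq;
        struct i40e_tx_queue *txq;
        uint16_t i;

        /* Back up the real burst functions so they can be restored. */
        ad->rx_backup = dev->rx_pkt_burst;
        ad->tx_backup = dev->tx_pkt_burst;
        /*
         * A later patch in this series presumably points rx/tx_pkt_burst
         * at no-op bursts here, so cores calling into the PMD during the
         * reset see an empty datapath instead of the real rings.
         */

        /*
         * Taking every queue lock fences out the trylock-based burst
         * wrappers: once each lock is held, no RX/TX burst is in flight
         * and the VF link can be reset safely.
         */
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];
                rte_spinlock_lock(&rxq->rx_lock);
        }
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                txq = dev->data->tx_queues[i];
                rte_spinlock_lock(&txq->tx_lock);
        }

        /* ... reset the VF link here ... */

        /* Restore the datapath and release the locks. */
        dev->rx_pkt_burst = ad->rx_backup;
        dev->tx_pkt_burst = ad->tx_backup;
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];
                rte_spinlock_unlock(&rxq->rx_lock);
        }
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                txq = dev->data->tx_queues[i];
                rte_spinlock_unlock(&txq->tx_lock);
        }
}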

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 24777d5..1380330 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -764,8 +764,8 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
        PMD_INIT_FUNC_TRACE();

        dev->dev_ops = &i40e_eth_dev_ops;
-       dev->rx_pkt_burst = i40e_recv_pkts;
-       dev->tx_pkt_burst = i40e_xmit_pkts;
+       dev->rx_pkt_burst = RX_LOCK_FUNCTION(dev, i40e_recv_pkts);
+       dev->tx_pkt_burst = TX_LOCK_FUNCTION(dev, i40e_xmit_pkts);

        /* for secondary processes, we don't initialise any further as primary
         * has already done this work. Only check we don't need a different
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index cfd2399..672d920 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -540,6 +540,10 @@ struct i40e_adapter {
        struct rte_timecounter systime_tc;
        struct rte_timecounter rx_tstamp_tc;
        struct rte_timecounter tx_tstamp_tc;
+
+       /* For VF reset backup */
+       eth_rx_burst_t rx_backup;
+       eth_tx_burst_t tx_backup;
 };

 int i40e_dev_switch_queues(struct i40e_pf *pf, bool on);
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index 90682ac..46d8a7c 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -1451,8 +1451,8 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev)

        /* assign ops func pointer */
        eth_dev->dev_ops = &i40evf_eth_dev_ops;
-       eth_dev->rx_pkt_burst = &i40e_recv_pkts;
-       eth_dev->tx_pkt_burst = &i40e_xmit_pkts;
+       eth_dev->rx_pkt_burst = RX_LOCK_FUNCTION(eth_dev, i40e_recv_pkts);
+       eth_dev->tx_pkt_burst = TX_LOCK_FUNCTION(eth_dev, i40e_xmit_pkts);

        /*
         * For secondary processes, we don't initialise any further as primary
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index c833aa3..0a6dcfb 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -79,10 +79,6 @@
                PKT_TX_TCP_SEG |                 \
                PKT_TX_OUTER_IP_CKSUM)

-static uint16_t i40e_xmit_pkts_simple(void *tx_queue,
-                                     struct rte_mbuf **tx_pkts,
-                                     uint16_t nb_pkts);
-
 static inline void
 i40e_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union i40e_rx_desc *rxdp)
 {
@@ -1144,7 +1140,7 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
        return 0;
 }

-static uint16_t
+uint16_t
 i40e_recv_pkts_bulk_alloc(void *rx_queue,
                          struct rte_mbuf **rx_pkts,
                          uint16_t nb_pkts)
@@ -1169,7 +1165,7 @@ i40e_recv_pkts_bulk_alloc(void *rx_queue,
        return nb_rx;
 }
 #else
-static uint16_t
+uint16_t
 i40e_recv_pkts_bulk_alloc(void __rte_unused *rx_queue,
                          struct rte_mbuf __rte_unused **rx_pkts,
                          uint16_t __rte_unused nb_pkts)
@@ -1892,7 +1888,7 @@ tx_xmit_pkts(struct i40e_tx_queue *txq,
        return nb_pkts;
 }

-static uint16_t
+uint16_t
 i40e_xmit_pkts_simple(void *tx_queue,
                      struct rte_mbuf **tx_pkts,
                      uint16_t nb_pkts)
@@ -2121,10 +2117,13 @@ i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev)
        };

        if (dev->rx_pkt_burst == i40e_recv_pkts ||
+           dev->rx_pkt_burst == i40e_recv_pkts_lock ||
 #ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
            dev->rx_pkt_burst == i40e_recv_pkts_bulk_alloc ||
+           dev->rx_pkt_burst == i40e_recv_pkts_bulk_alloc_lock ||
 #endif
-           dev->rx_pkt_burst == i40e_recv_scattered_pkts)
+           dev->rx_pkt_burst == i40e_recv_scattered_pkts ||
+           dev->rx_pkt_burst == i40e_recv_scattered_pkts_lock)
                return ptypes;
        return NULL;
 }
@@ -2648,6 +2647,7 @@ i40e_reset_rx_queue(struct i40e_rx_queue *rxq)

        rxq->rxrearm_start = 0;
        rxq->rxrearm_nb = 0;
+       rte_spinlock_init(&rxq->rx_lock);
 }

 void
@@ -2704,6 +2704,7 @@ i40e_reset_tx_queue(struct i40e_tx_queue *txq)

        txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
        txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
+       rte_spinlock_init(&txq->tx_lock);
 }

 /* Init the TX queue in hardware */
@@ -3155,12 +3156,12 @@ i40e_set_rx_function(struct rte_eth_dev *dev)
                                            "callback (port=%d).",
                                     dev->data->port_id);

-                       dev->rx_pkt_burst = i40e_recv_scattered_pkts_vec;
+                       dev->rx_pkt_burst = RX_LOCK_FUNCTION(dev, i40e_recv_scattered_pkts_vec);
                } else {
                        PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk "
                                           "allocation callback (port=%d).",
                                     dev->data->port_id);
-                       dev->rx_pkt_burst = i40e_recv_scattered_pkts;
+                       dev->rx_pkt_burst = RX_LOCK_FUNCTION(dev, i40e_recv_scattered_pkts);
                }
        /* If parameters allow we are going to choose between the following
         * callbacks:
@@ -3174,27 +3175,29 @@ i40e_set_rx_function(struct rte_eth_dev *dev)
                             RTE_I40E_DESCS_PER_LOOP,
                             dev->data->port_id);

-               dev->rx_pkt_burst = i40e_recv_pkts_vec;
+               dev->rx_pkt_burst = RX_LOCK_FUNCTION(dev, i40e_recv_pkts_vec);
        } else if (ad->rx_bulk_alloc_allowed) {
                PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
                                    "satisfied. Rx Burst Bulk Alloc function "
                                    "will be used on port=%d.",
                             dev->data->port_id);

-               dev->rx_pkt_burst = i40e_recv_pkts_bulk_alloc;
+               dev->rx_pkt_burst = RX_LOCK_FUNCTION(dev, i40e_recv_pkts_bulk_alloc);
        } else {
                PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
                                    "satisfied, or Scattered Rx is requested "
                                    "(port=%d).",
                             dev->data->port_id);

-               dev->rx_pkt_burst = i40e_recv_pkts;
+               dev->rx_pkt_burst = RX_LOCK_FUNCTION(dev, i40e_recv_pkts);
        }

        /* Propagate information about RX function choice through all queues. */
        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                rx_using_sse =
                        (dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec ||
+                        dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec_lock ||
+                        dev->rx_pkt_burst == i40e_recv_pkts_vec_lock ||
                         dev->rx_pkt_burst == i40e_recv_pkts_vec);

                for (i = 0; i < dev->data->nb_rx_queues; i++) {
@@ -3250,14 +3253,14 @@ i40e_set_tx_function(struct rte_eth_dev *dev)
        if (ad->tx_simple_allowed) {
                if (ad->tx_vec_allowed) {
                        PMD_INIT_LOG(DEBUG, "Vector tx finally be used.");
-                       dev->tx_pkt_burst = i40e_xmit_pkts_vec;
+                       dev->tx_pkt_burst = TX_LOCK_FUNCTION(dev, i40e_xmit_pkts_vec);
                } else {
                        PMD_INIT_LOG(DEBUG, "Simple tx finally be used.");
-                       dev->tx_pkt_burst = i40e_xmit_pkts_simple;
+                       dev->tx_pkt_burst = TX_LOCK_FUNCTION(dev, i40e_xmit_pkts_simple);
                }
        } else {
                PMD_INIT_LOG(DEBUG, "Xmit tx finally be used.");
-               dev->tx_pkt_burst = i40e_xmit_pkts;
+               dev->tx_pkt_burst = TX_LOCK_FUNCTION(dev, i40e_xmit_pkts);
        }
 }

@@ -3311,3 +3314,13 @@ i40e_xmit_pkts_vec(void __rte_unused *tx_queue,
 {
        return 0;
 }
+
+GENERATE_RX_LOCK(i40e_recv_pkts, i40e)
+GENERATE_RX_LOCK(i40e_recv_pkts_vec, i40e)
+GENERATE_RX_LOCK(i40e_recv_pkts_bulk_alloc, i40e)
+GENERATE_RX_LOCK(i40e_recv_scattered_pkts, i40e)
+GENERATE_RX_LOCK(i40e_recv_scattered_pkts_vec, i40e)
+
+GENERATE_TX_LOCK(i40e_xmit_pkts, i40e)
+GENERATE_TX_LOCK(i40e_xmit_pkts_vec, i40e)
+GENERATE_TX_LOCK(i40e_xmit_pkts_simple, i40e)
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 98179f0..a1c13b8 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -140,6 +140,7 @@ struct i40e_rx_queue {
        bool rx_deferred_start; /**< don't start this queue in dev start */
        uint16_t rx_using_sse; /**<flag indicate the usage of vPMD for rx */
        uint8_t dcb_tc;         /**< Traffic class of rx queue */
+       rte_spinlock_t rx_lock; /**< lock for rx path */
 };

 struct i40e_tx_entry {
@@ -181,6 +182,7 @@ struct i40e_tx_queue {
        bool q_set; /**< indicate if tx queue has been configured */
        bool tx_deferred_start; /**< don't start this queue in dev start */
        uint8_t dcb_tc;         /**< Traffic class of tx queue */
+       rte_spinlock_t tx_lock; /**< lock for tx path */
 };

 /** Offload features */
@@ -223,6 +225,27 @@ uint16_t i40e_recv_scattered_pkts(void *rx_queue,
 uint16_t i40e_xmit_pkts(void *tx_queue,
                        struct rte_mbuf **tx_pkts,
                        uint16_t nb_pkts);
+uint16_t i40e_xmit_pkts_lock(void *tx_queue,
+                       struct rte_mbuf **tx_pkts,
+                       uint16_t nb_pkts);
+uint16_t i40e_xmit_pkts_simple(void *tx_queue,
+                     struct rte_mbuf **tx_pkts,
+                     uint16_t nb_pkts);
+uint16_t i40e_xmit_pkts_simple_lock(void *tx_queue,
+                     struct rte_mbuf **tx_pkts,
+                     uint16_t nb_pkts);
+uint16_t i40e_recv_pkts_lock(void *rx_queue,
+                       struct rte_mbuf **rx_pkts,
+                       uint16_t nb_pkts);
+uint16_t i40e_recv_scattered_pkts_lock(void *rx_queue,
+                                 struct rte_mbuf **rx_pkts,
+                                 uint16_t nb_pkts);
+uint16_t i40e_recv_pkts_bulk_alloc(void *rx_queue,
+                         struct rte_mbuf **rx_pkts,
+                         uint16_t nb_pkts);
+uint16_t i40e_recv_pkts_bulk_alloc_lock(void *rx_queue,
+                         struct rte_mbuf **rx_pkts,
+                         uint16_t nb_pkts);
 int i40e_tx_queue_init(struct i40e_tx_queue *txq);
 int i40e_rx_queue_init(struct i40e_rx_queue *rxq);
 void i40e_free_tx_resources(struct i40e_tx_queue *txq);
@@ -244,12 +267,19 @@ uint16_t i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 uint16_t i40e_recv_scattered_pkts_vec(void *rx_queue,
                                      struct rte_mbuf **rx_pkts,
                                      uint16_t nb_pkts);
+uint16_t i40e_recv_pkts_vec_lock(void *rx_queue, struct rte_mbuf **rx_pkts,
+                           uint16_t nb_pkts);
+uint16_t i40e_recv_scattered_pkts_vec_lock(void *rx_queue,
+                                     struct rte_mbuf **rx_pkts,
+                                     uint16_t nb_pkts);
 int i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev);
 int i40e_rxq_vec_setup(struct i40e_rx_queue *rxq);
 int i40e_txq_vec_setup(struct i40e_tx_queue *txq);
 void i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq);
 uint16_t i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
                            uint16_t nb_pkts);
+uint16_t i40e_xmit_pkts_vec_lock(void *tx_queue, struct rte_mbuf **tx_pkts,
+                           uint16_t nb_pkts);
 void i40e_set_rx_function(struct rte_eth_dev *dev);
 void i40e_set_tx_function_flag(struct rte_eth_dev *dev,
                               struct i40e_tx_queue *txq);
-- 
2.1.4
