Implement the device reset function.
1. Add fake RX/TX functions.
2. The reset function stops RX/TX by replacing the real RX/TX
   functions with the fake ones and then taking the per-queue
   locks, which guarantees that any in-flight RX/TX has finished
   (see the sketch after this list).
3. Once RX/TX has stopped, reset the VF port, then release the
   locks and restore the original RX/TX functions.
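
The lock-mode burst wrappers come from an earlier patch of this series;
the body below is only an illustrative sketch (not part of this patch,
and not necessarily the exact code of that patch) of why swapping in the
fake functions and then taking the per-queue locks is enough to quiesce
the datapath:

uint16_t
ixgbe_xmit_pkts_lock(void *tx_queue, struct rte_mbuf **tx_pkts,
                     uint16_t nb_pkts)
{
        struct ixgbe_tx_queue *txq = tx_queue;
        uint16_t nb_tx;

        /* The datapath holds tx_lock only while inside the real burst
         * function. Once ixgbevf_dev_reset() has replaced
         * dev->tx_pkt_burst with ixgbevf_xmit_pkts_fake(), no new
         * caller reaches this point, so acquiring tx_lock in the
         * reset path guarantees the last in-flight burst is done.
         */
        rte_spinlock_lock(&txq->tx_lock);
        nb_tx = ixgbe_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
        rte_spinlock_unlock(&txq->tx_lock);

        return nb_tx;
}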

Signed-off-by: Wenzhuo Lu <wenzhuo.lu at intel.com>
---
 doc/guides/rel_notes/release_16_07.rst |   9 +++
 drivers/net/ixgbe/ixgbe_ethdev.c       | 108 ++++++++++++++++++++++++++++++++-
 drivers/net/ixgbe/ixgbe_ethdev.h       |  12 +++-
 drivers/net/ixgbe/ixgbe_rxtx.c         |  42 ++++++++++++-
 4 files changed, 168 insertions(+), 3 deletions(-)
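
The release note below says the application should call the new reset
API when it gets the reset notification. A hypothetical usage sketch
follows; RTE_ETH_EVENT_INTR_RESET and the rte_eth_dev_reset() wrapper
are assumed from the rest of this series, while this patch only adds
the ixgbe VF implementation behind .dev_reset:

#include <rte_common.h>
#include <rte_ethdev.h>

static volatile int vf_needs_reset;

/* Runs in the interrupt thread when the PMD signals the reset event
 * raised by the PF link-change mailbox message. */
static void
vf_reset_event_cb(uint8_t port_id, enum rte_eth_event_type type,
                  void *param)
{
        RTE_SET_USED(port_id);
        RTE_SET_USED(param);
        if (type == RTE_ETH_EVENT_INTR_RESET)
                vf_needs_reset = 1;
}

static void
vf_register_and_poll(uint8_t port_id)
{
        rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_RESET,
                                      vf_reset_event_cb, NULL);

        for (;;) {
                if (vf_needs_reset) {
                        vf_needs_reset = 0;
                        /* Ends up in the PMD's .dev_reset, i.e. the
                         * ixgbevf_dev_reset() added in this patch. */
                        rte_eth_dev_reset(port_id);
                }
                /* ... RX/TX keeps running on the other lcores ... */
        }
}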

diff --git a/doc/guides/rel_notes/release_16_07.rst b/doc/guides/rel_notes/release_16_07.rst
index a761e3c..d36c4b1 100644
--- a/doc/guides/rel_notes/release_16_07.rst
+++ b/doc/guides/rel_notes/release_16_07.rst
@@ -53,6 +53,15 @@ New Features
   VF. To handle this link up/down event, add the mailbox interruption
   support to receive the message.

+* **Added device reset support for ixgbe VF.**
+
+  Added the device reset API. The application can call this API to reset
+  the VF port when it is not working.
+  Based on the mailbox interruption support, when the VF receives a control
+  message from the PF, it means the PF link state has changed. The VF uses
+  the reset callback in the message handler to notify the application,
+  which then needs to call the device reset API to reset the VF port.
+

 Resolved Issues
 ---------------
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index fd2682f..1e3520b 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -381,6 +381,8 @@ static int ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
                                         struct rte_eth_udp_tunnel *udp_tunnel);

+static int ixgbevf_dev_reset(struct rte_eth_dev *dev);
+
 /*
  * Define VF Stats MACRO for Non "cleared on read" register
  */
@@ -586,6 +588,7 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
        .reta_query           = ixgbe_dev_rss_reta_query,
        .rss_hash_update      = ixgbe_dev_rss_hash_update,
        .rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
+       .dev_reset            = ixgbevf_dev_reset,
 };

 /* store statistics names and its offset in stats structure */
@@ -4060,7 +4063,8 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
                ETH_VLAN_EXTEND_MASK;
        ixgbevf_vlan_offload_set(dev, mask);

-       ixgbevf_dev_rxtx_start(dev);
+       if (ixgbevf_dev_rxtx_start(dev))
+               return -1;

        /* check and configure queue intr-vector mapping */
        if (dev->data->dev_conf.intr_conf.rxq != 0) {
@@ -7193,6 +7197,108 @@ static void ixgbevf_mbx_process(struct rte_eth_dev *dev)
 }

 static int
+ixgbevf_dev_reset(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_adapter *adapter =
+               (struct ixgbe_adapter *)dev->data->dev_private;
+       int diag = 0;
+       uint32_t vteiam;
+       uint16_t i;
+       struct ixgbe_rx_queue *rxq;
+       struct ixgbe_tx_queue *txq;
+
+       /* Nothing needs to be done if the device is not started. */
+       if (!dev->data->dev_started)
+               return 0;
+
+       PMD_DRV_LOG(DEBUG, "Link up/down event detected.");
+
+       /**
+        * Stop RX/TX with fake functions and locks. Swapping in
+        * the fake functions first makes the locks easy to acquire.
+        */
+       adapter->rx_backup = dev->rx_pkt_burst;
+       adapter->tx_backup = dev->tx_pkt_burst;
+       dev->rx_pkt_burst = ixgbevf_recv_pkts_fake;
+       dev->tx_pkt_burst = ixgbevf_xmit_pkts_fake;
+
+       if (dev->data->rx_queues)
+               for (i = 0; i < dev->data->nb_rx_queues; i++) {
+                       rxq = dev->data->rx_queues[i];
+                       rte_spinlock_lock(&rxq->rx_lock);
+               }
+
+       if (dev->data->tx_queues)
+               for (i = 0; i < dev->data->nb_tx_queues; i++) {
+                       txq = dev->data->tx_queues[i];
+                       rte_spinlock_lock(&txq->tx_lock);
+               }
+
+       /* Perform the VF reset. */
+       do {
+               dev->data->dev_started = 0;
+               ixgbevf_dev_stop(dev);
+               if (dev->data->dev_conf.intr_conf.lsc == 0)
+                       diag = ixgbe_dev_link_update(dev, 0);
+               if (diag) {
+                       PMD_INIT_LOG(INFO, "Ixgbe VF reset: "
+                                    "Failed to update link.");
+               }
+               rte_delay_ms(1000);
+
+               diag = ixgbevf_dev_start(dev);
+               /* If starting the device fails, stop/start it again. */
+               if (diag) {
+                       PMD_INIT_LOG(ERR, "Ixgbe VF reset: "
+                                    "Failed to start device.");
+                       continue;
+               }
+               dev->data->dev_started = 1;
+               ixgbevf_dev_stats_reset(dev);
+               if (dev->data->dev_conf.intr_conf.lsc == 0)
+                       diag = ixgbe_dev_link_update(dev, 0);
+               if (diag) {
+                       PMD_INIT_LOG(INFO, "Ixgbe VF reset: "
+                                    "Failed to update link.");
+                       diag = 0;
+               }
+
+               /**
+                * When the PF link is down, there is a chance
+                * that the VF cannot access its registers. Check
+                * whether the register was written
+                * successfully. If not, repeat stop/start until
+                * the PF link is up, in other words, until the
+                * registers can be written.
+                */
+               vteiam = IXGBE_READ_REG(hw, IXGBE_VTEIAM);
+       /* The mask checked below is the one ixgbevf_intr_enable() writes. */
+       } while (diag || vteiam != IXGBE_VF_IRQ_ENABLE_MASK);
+
+       /**
+        * Release the locks for queues.
+        * Restore the RX/TX functions.
+        */
+       if (dev->data->rx_queues)
+               for (i = 0; i < dev->data->nb_rx_queues; i++) {
+                       rxq = dev->data->rx_queues[i];
+                       rte_spinlock_unlock(&rxq->rx_lock);
+               }
+
+       if (dev->data->tx_queues)
+               for (i = 0; i < dev->data->nb_tx_queues; i++) {
+                       txq = dev->data->tx_queues[i];
+                       rte_spinlock_unlock(&txq->tx_lock);
+               }
+
+       dev->rx_pkt_burst = adapter->rx_backup;
+       dev->tx_pkt_burst = adapter->tx_backup;
+
+       return 0;
+}
+
+static int
 ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev)
 {
        uint32_t eicr;
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index 701107b..d50fad4 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -289,6 +289,8 @@ struct ixgbe_adapter {
        struct rte_timecounter      systime_tc;
        struct rte_timecounter      rx_tstamp_tc;
        struct rte_timecounter      tx_tstamp_tc;
+       eth_rx_burst_t              rx_backup;
+       eth_tx_burst_t              tx_backup;
 };

 #define IXGBE_DEV_PRIVATE_TO_HW(adapter)\
@@ -377,7 +379,7 @@ int ixgbevf_dev_rx_init(struct rte_eth_dev *dev);

 void ixgbevf_dev_tx_init(struct rte_eth_dev *dev);

-void ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev);
+int ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev);

 uint16_t ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                uint16_t nb_pkts);
@@ -409,6 +411,14 @@ uint16_t ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 uint16_t ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
                uint16_t nb_pkts);

+uint16_t ixgbevf_recv_pkts_fake(void *rx_queue,
+                               struct rte_mbuf **rx_pkts,
+                               uint16_t nb_pkts);
+
+uint16_t ixgbevf_xmit_pkts_fake(void *tx_queue,
+                               struct rte_mbuf **tx_pkts,
+                               uint16_t nb_pkts);
+
 uint16_t ixgbe_xmit_pkts_lock(void *tx_queue,
                              struct rte_mbuf **tx_pkts,
                              uint16_t nb_pkts);
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index a45d115..b4e7659 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -5181,7 +5181,7 @@ ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
 /*
  * [VF] Start Transmit and Receive Units.
  */
-void __attribute__((cold))
+int __attribute__((cold))
 ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw     *hw;
@@ -5218,7 +5218,15 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
                        txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
                } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
                if (!poll_ms)
+#ifndef RTE_NEXT_ABI
                        PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i);
+#else
+               {
+                       PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i);
+                       if (dev->data->dev_conf.txmode.lock_mode)
+                               return -1;
+               }
+#endif
        }
        for (i = 0; i < dev->data->nb_rx_queues; i++) {

@@ -5235,11 +5243,21 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
                        rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
                } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
                if (!poll_ms)
+#ifndef RTE_NEXT_ABI
+                       PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", i);
+#else
+               {
                        PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", i);
+                       if (dev->data->dev_conf.rxmode.lock_mode)
+                               return -1;
+               }
+#endif
                rte_wmb();
                IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), rxq->nb_rx_desc - 1);

        }
+
+       return 0;
 }

 /* Stubs needed for linkage when CONFIG_RTE_IXGBE_INC_VECTOR is set to 'n' */
@@ -5290,3 +5308,25 @@ ixgbe_rxq_vec_setup(struct ixgbe_rx_queue __rte_unused *rxq)
 {
        return -1;
 }
+
+/**
+ * A fake function to stop reception.
+ */
+uint16_t
+ixgbevf_recv_pkts_fake(void __rte_unused *rx_queue,
+                      struct rte_mbuf __rte_unused **rx_pkts,
+                      uint16_t __rte_unused nb_pkts)
+{
+       return 0;
+}
+
+/**
+ * A fake function to stop transmission.
+ */
+uint16_t
+ixgbevf_xmit_pkts_fake(void __rte_unused *tx_queue,
+                      struct rte_mbuf __rte_unused **tx_pkts,
+                      uint16_t __rte_unused nb_pkts)
+{
+       return 0;
+}
-- 
1.9.3
