Implement the device reset function.
1, Add the fake RX/TX functions.
2, The reset function tries to stop RX/TX by replacing
   the RX/TX functions with the fake ones and getting the
   locks to make sure the regular RX/TX finished.
3, After the RX/TX stopped, reset the VF port, and then
   release the locks.

Signed-off-by: Zhe Tao <zhe.tao at intel.com>
---
 doc/guides/rel_notes/release_16_07.rst |   5 ++
 drivers/net/i40e/i40e_ethdev.h         |   7 +-
 drivers/net/i40e/i40e_ethdev_vf.c      | 141 +++++++++++++++++++++++++++++++++
 drivers/net/i40e/i40e_rxtx.h           |   4 +
 4 files changed, 154 insertions(+), 3 deletions(-)

diff --git a/doc/guides/rel_notes/release_16_07.rst 
b/doc/guides/rel_notes/release_16_07.rst
index a4c0cc3..f43b867 100644
--- a/doc/guides/rel_notes/release_16_07.rst
+++ b/doc/guides/rel_notes/release_16_07.rst
@@ -62,6 +62,11 @@ New Features
   callback in the message handler to notice the APP. APP need call the device
   reset API to reset the VF port.

+* **Added VF reset support for i40e VF driver.**
+
+  Added a new implementaion to allow i40e VF driver to
+  reset the functionality and state of itself.
+

 Resolved Issues
 ---------------
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 672d920..dcd6e0f 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -541,9 +541,8 @@ struct i40e_adapter {
        struct rte_timecounter rx_tstamp_tc;
        struct rte_timecounter tx_tstamp_tc;

-       /* For VF reset backup */
-       eth_rx_burst_t rx_backup;
-       eth_tx_burst_t tx_backup;
+       /* For VF reset */
+       uint8_t reset_number;
 };

 int i40e_dev_switch_queues(struct i40e_pf *pf, bool on);
@@ -597,6 +596,8 @@ void i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t 
queue_id,
 void i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
        struct rte_eth_txq_info *qinfo);

+void i40evf_emulate_vf_reset(uint8_t port_id);
+
 /* I40E_DEV_PRIVATE_TO */
 #define I40E_DEV_PRIVATE_TO_PF(adapter) \
        (&((struct i40e_adapter *)adapter)->pf)
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c 
b/drivers/net/i40e/i40e_ethdev_vf.c
index 46d8a7c..9fc121b 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -157,6 +157,12 @@ i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, 
uint16_t queue_id);
 static void i40evf_handle_pf_event(__rte_unused struct rte_eth_dev *dev,
                                   uint8_t *msg,
                                   uint16_t msglen);
+static int i40evf_dev_uninit(struct rte_eth_dev *eth_dev);
+static int i40evf_dev_init(struct rte_eth_dev *eth_dev);
+static void i40evf_dev_close(struct rte_eth_dev *dev);
+static int i40evf_dev_start(struct rte_eth_dev *dev);
+static int i40evf_dev_configure(struct rte_eth_dev *dev);
+static int i40evf_handle_vf_reset(struct rte_eth_dev *dev);

 /* Default hash key buffer for RSS */
 static uint32_t rss_key_default[I40E_VFQF_HKEY_MAX_INDEX + 1];
@@ -223,6 +229,7 @@ static const struct eth_dev_ops i40evf_eth_dev_ops = {
        .reta_query           = i40evf_dev_rss_reta_query,
        .rss_hash_update      = i40evf_dev_rss_hash_update,
        .rss_hash_conf_get    = i40evf_dev_rss_hash_conf_get,
+       .dev_reset            = i40evf_handle_vf_reset
 };

 /*
@@ -1309,6 +1316,140 @@ i40evf_uninit_vf(struct rte_eth_dev *dev)
 }

 static void
+i40e_vf_queue_reset(struct rte_eth_dev *dev)
+{
+       uint16_t i;
+
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               struct i40e_rx_queue *rxq = dev->data->rx_queues[i];
+
+               if (rxq->q_set) {
+                       i40e_dev_rx_queue_setup(dev,
+                                               rxq->queue_id,
+                                               rxq->nb_rx_desc,
+                                               rxq->socket_id,
+                                               &rxq->rxconf,
+                                               rxq->mp);
+               }
+
+               rxq = dev->data->rx_queues[i];
+               rte_spinlock_trylock(&rxq->rx_lock);
+       }
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               struct i40e_tx_queue *txq = dev->data->tx_queues[i];
+
+               if (txq->q_set) {
+                       i40e_dev_tx_queue_setup(dev,
+                                               txq->queue_id,
+                                               txq->nb_tx_desc,
+                                               txq->socket_id,
+                                               &txq->txconf);
+               }
+
+               txq = dev->data->tx_queues[i];
+               rte_spinlock_trylock(&txq->tx_lock);
+       }
+}
+
+static void
+i40e_vf_reset_dev(struct rte_eth_dev *dev)
+{
+       struct i40e_adapter *adapter =
+               I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+       i40evf_dev_close(dev);
+       PMD_DRV_LOG(DEBUG, "i40evf dev close complete");
+       i40evf_dev_uninit(dev);
+       PMD_DRV_LOG(DEBUG, "i40evf dev detached");
+       memset(dev->data->dev_private, 0,
+              (uint64_t)&adapter->reset_number - (uint64_t)adapter);
+
+       i40evf_dev_configure(dev);
+       i40evf_dev_init(dev);
+       PMD_DRV_LOG(DEBUG, "i40evf dev attached");
+       i40e_vf_queue_reset(dev);
+       PMD_DRV_LOG(DEBUG, "i40evf queue reset");
+       i40evf_dev_start(dev);
+       PMD_DRV_LOG(DEBUG, "i40evf dev restart");
+}
+
+static uint16_t
+i40evf_recv_pkts_detach(void __rte_unused *rx_queue,
+                       struct rte_mbuf __rte_unused **rx_pkts,
+                       uint16_t __rte_unused nb_pkts)
+{
+       return 0;
+}
+
+static uint16_t
+i40evf_xmit_pkts_detach(void __rte_unused *tx_queue,
+                       struct rte_mbuf __rte_unused **tx_pkts,
+                       uint16_t __rte_unused nb_pkts)
+{
+       return 0;
+}
+
+static int
+i40evf_handle_vf_reset(struct rte_eth_dev *dev)
+{
+       struct i40e_adapter *adapter =
+               I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+       uint16_t i = 0;
+       struct i40e_rx_queue *rxq;
+       struct i40e_tx_queue *txq;
+
+       if (!dev->data->dev_started)
+               return 0;
+
+       adapter->reset_number = 1;
+
+       /**
+        * Stop RX/TX by fake functions and locks.
+        * Fake functions are used to make RX/TX lock easier.
+        */
+       dev->rx_pkt_burst = i40evf_recv_pkts_detach;
+       dev->tx_pkt_burst = i40evf_xmit_pkts_detach;
+
+       if (dev->data->rx_queues)
+               for (i = 0; i < dev->data->nb_rx_queues; i++) {
+                       rxq = dev->data->rx_queues[i];
+                       rte_spinlock_lock(&rxq->rx_lock);
+               }
+
+       if (dev->data->tx_queues)
+               for (i = 0; i < dev->data->nb_tx_queues; i++) {
+                       txq = dev->data->tx_queues[i];
+                       rte_spinlock_lock(&txq->tx_lock);
+               }
+
+       i40e_vf_reset_dev(dev);
+
+       adapter->reset_number = 0;
+
+       if (dev->data->rx_queues)
+               for (i = 0; i < dev->data->nb_rx_queues; i++) {
+                       rxq = dev->data->rx_queues[i];
+                       rte_spinlock_unlock(&rxq->rx_lock);
+               }
+
+       if (dev->data->tx_queues)
+               for (i = 0; i < dev->data->nb_tx_queues; i++) {
+                       txq = dev->data->tx_queues[i];
+                       rte_spinlock_unlock(&txq->tx_lock);
+               }
+
+       return 0;
+}
+
+void
+i40evf_emulate_vf_reset(uint8_t port_id)
+{
+       struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+
+       i40evf_handle_vf_reset(dev);
+}
+
+static void
 i40evf_handle_pf_event(__rte_unused struct rte_eth_dev *dev,
                           uint8_t *msg,
                           __rte_unused uint16_t msglen)
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index a1c13b8..7ee33dc 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -141,6 +141,8 @@ struct i40e_rx_queue {
        uint16_t rx_using_sse; /**<flag indicate the usage of vPMD for rx */
        uint8_t dcb_tc;         /**< Traffic class of rx queue */
        rte_spinlock_t rx_lock; /**< lock for rx path */
+       uint8_t socket_id;
+       struct rte_eth_rxconf rxconf;
 };

 struct i40e_tx_entry {
@@ -183,6 +185,8 @@ struct i40e_tx_queue {
        bool tx_deferred_start; /**< don't start this queue in dev start */
        uint8_t dcb_tc;         /**< Traffic class of tx queue */
        rte_spinlock_t tx_lock; /**< lock for tx path */
+       uint8_t socket_id;
+       struct rte_eth_txconf txconf;
 };

 /** Offload features */
-- 
1.9.3

Reply via email to