This patch adds dev_pause, dev_resume and inject_pkts APIs to allow the
driver to pause the worker threads and inject special packets into the
Tx queue. The next patch will be based on this.

Signed-off-by: Xiao Wang <xiao.w.w...@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coque...@redhat.com>
---
 drivers/net/virtio/virtio_ethdev.c      | 56 +++++++++++++++++++++++++++++++++
 drivers/net/virtio/virtio_ethdev.h      |  5 +++
 drivers/net/virtio/virtio_pci.h         |  7 +++++
 drivers/net/virtio/virtio_rxtx.c        |  2 +-
 drivers/net/virtio/virtio_rxtx_simple.c |  2 +-
 5 files changed, 70 insertions(+), 2 deletions(-)

diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 4e613ce30..e8ff1e449 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -26,6 +26,7 @@
 #include <rte_memory.h>
 #include <rte_eal.h>
 #include <rte_dev.h>
+#include <rte_cycles.h>
 
 #include "virtio_ethdev.h"
 #include "virtio_pci.h"
@@ -1220,6 +1221,57 @@ virtio_negotiate_features(struct virtio_hw *hw, uint64_t req_features)
        return 0;
 }
 
+int
+virtio_dev_pause(struct rte_eth_dev *dev)
+{
+       struct virtio_hw *hw = dev->data->dev_private;
+
+       rte_spinlock_lock(&hw->state_lock);
+
+       if (hw->started == 0) {
+               /* Device is just stopped. */
+               rte_spinlock_unlock(&hw->state_lock);
+               return -1;
+       }
+       hw->started = 0;
+       /*
+        * Prevent the worker threads from touching queues to avoid contention,
+        * 1 ms should be enough for the ongoing Tx function to finish.
+        */
+       rte_delay_ms(1);
+       return 0;
+}
+
+/*
+ * Recover hw state to let the worker threads continue.
+ */
+void
+virtio_dev_resume(struct rte_eth_dev *dev)
+{
+       struct virtio_hw *hw = dev->data->dev_private;
+
+       hw->started = 1;
+       rte_spinlock_unlock(&hw->state_lock);
+}
+
+/*
+ * Should be called only after device is paused.
+ */
+int
+virtio_inject_pkts(struct rte_eth_dev *dev, struct rte_mbuf **tx_pkts,
+               int nb_pkts)
+{
+       struct virtio_hw *hw = dev->data->dev_private;
+       struct virtnet_tx *txvq = dev->data->tx_queues[0];
+       int ret;
+
+       hw->inject_pkts = tx_pkts;
+       ret = dev->tx_pkt_burst(txvq, tx_pkts, nb_pkts);
+       hw->inject_pkts = NULL;
+
+       return ret;
+}
+
 /*
  * Process Virtio Config changed interrupt and call the callback
  * if link state changed.
@@ -1757,6 +1809,8 @@ virtio_dev_configure(struct rte_eth_dev *dev)
                        return -EBUSY;
                }
 
+       rte_spinlock_init(&hw->state_lock);
+
        hw->use_simple_rx = 1;
        hw->use_simple_tx = 1;
 
@@ -1923,12 +1977,14 @@ virtio_dev_stop(struct rte_eth_dev *dev)
 
        PMD_INIT_LOG(DEBUG, "stop");
 
+       rte_spinlock_lock(&hw->state_lock);
        if (intr_conf->lsc || intr_conf->rxq)
                virtio_intr_disable(dev);
 
        hw->started = 0;
        memset(&link, 0, sizeof(link));
        virtio_dev_atomic_write_link_status(dev, &link);
+       rte_spinlock_unlock(&hw->state_lock);
 }
 
 static int
diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
index 765d249e6..69b30b7e1 100644
--- a/drivers/net/virtio/virtio_ethdev.h
+++ b/drivers/net/virtio/virtio_ethdev.h
@@ -92,4 +92,9 @@ int eth_virtio_dev_init(struct rte_eth_dev *eth_dev);
 
 void virtio_interrupt_handler(void *param);
 
+int virtio_dev_pause(struct rte_eth_dev *dev);
+void virtio_dev_resume(struct rte_eth_dev *dev);
+int virtio_inject_pkts(struct rte_eth_dev *dev, struct rte_mbuf **tx_pkts,
+               int nb_pkts);
+
 #endif /* _VIRTIO_ETHDEV_H_ */
diff --git a/drivers/net/virtio/virtio_pci.h b/drivers/net/virtio/virtio_pci.h
index fb1f6a9ec..9d810a599 100644
--- a/drivers/net/virtio/virtio_pci.h
+++ b/drivers/net/virtio/virtio_pci.h
@@ -241,6 +241,13 @@ struct virtio_hw {
        struct virtio_pci_common_cfg *common_cfg;
        struct virtio_net_config *dev_cfg;
        void        *virtio_user_dev;
+       /*
+        * App management thread and virtio interrupt handler thread
+        * both can change device state, this lock is meant to avoid
+        * such a contention.
+        */
+       rte_spinlock_t state_lock;
+       struct rte_mbuf **inject_pkts;
 
        struct virtqueue **vqs;
 };
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 265debf20..80e996d06 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -988,7 +988,7 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
        uint16_t nb_used, nb_tx = 0;
        int error;
 
-       if (unlikely(hw->started == 0))
+       if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
                return nb_tx;
 
        if (unlikely(nb_pkts < 1))
diff --git a/drivers/net/virtio/virtio_rxtx_simple.c b/drivers/net/virtio/virtio_rxtx_simple.c
index 8ef3c0c04..98a9da5d8 100644
--- a/drivers/net/virtio/virtio_rxtx_simple.c
+++ b/drivers/net/virtio/virtio_rxtx_simple.c
@@ -70,7 +70,7 @@ virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
        uint16_t desc_idx_max = (vq->vq_nentries >> 1) - 1;
        uint16_t nb_tx = 0;
 
-       if (unlikely(hw->started == 0))
+       if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
                return nb_tx;
 
        nb_used = VIRTQUEUE_NUSED(vq);
-- 
2.15.1

Reply via email to