Re: [ovs-dev] [PATCH v3] netdev-dpdk: vhost: Fix txq enabling in the absence of notifications.

2016-03-29 Thread Daniele Di Proietto
Thanks!

I applied this to master and branch-2.5

On 28/03/2016 23:20, "Ilya Maximets" wrote:

>According to the QEMU documentation (docs/specs/vhost-user.txt), one queue
>should be enabled initially. More queues are enabled dynamically, by
>sending the message VHOST_USER_SET_VRING_ENABLE.
>
>Currently all queues in OVS are disabled by default, which breaks the
>above specification. Queue #0 should therefore be enabled by default, to
>support QEMU versions older than 2.5 and to avoid problems when QEMU,
>in accordance with the documentation, never sends
>VHOST_USER_SET_VRING_ENABLE for queue #0.
>This also fixes the currently broken vhost-cuse support in OVS.
>
>Fixes: 585a5beaa2a4 ("netdev-dpdk: vhost-user: Fix sending packets to
>  queues not enabled by guest.")
>Reported-by: Mauricio Vasquez B
>Signed-off-by: Ilya Maximets 
>---
>
>version 3:
>   * Fixed qid checking in __netdev_dpdk_vhost_send()
>
>version 2:
>   * Fixed initialization in netdev_dpdk_alloc_txq().
>   * Clearing moved to separate function.
>
> lib/netdev-dpdk.c | 28 ++++++++++++++++++++++++----
> 1 file changed, 24 insertions(+), 4 deletions(-)
>
>diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
>index 7c4cd07..8eea788 100644
>--- a/lib/netdev-dpdk.c
>+++ b/lib/netdev-dpdk.c
>@@ -103,6 +103,9 @@ BUILD_ASSERT_DECL((MAX_NB_MBUF / ROUND_DOWN_POW2(MAX_NB_MBUF/MIN_NB_MBUF))
> #define NIC_PORT_TX_Q_SIZE 2048  /* Size of Physical NIC TX Queue, Max (n+32<=4096)*/
> 
> #define OVS_VHOST_MAX_QUEUE_NUM 1024  /* Maximum number of vHost TX queues. */
>+#define OVS_VHOST_QUEUE_MAP_UNKNOWN (-1) /* Mapping not initialized. */
>+#define OVS_VHOST_QUEUE_DISABLED    (-2) /* Queue was disabled by guest and not
>+                                          * yet mapped to another queue. */
> 
> static char *cuse_dev_name = NULL;    /* Character device cuse_dev_name. */
> static char *vhost_sock_dir = NULL;   /* Location of vhost-user sockets */
>@@ -671,7 +674,7 @@ netdev_dpdk_alloc_txq(struct netdev_dpdk *netdev, unsigned int n_txqs)
>         }
> 
>         /* Initialize map for vhost devices. */
>-        netdev->tx_q[i].map = -1;
>+        netdev->tx_q[i].map = OVS_VHOST_QUEUE_MAP_UNKNOWN;
>         rte_spinlock_init(&netdev->tx_q[i].tx_lock);
>     }
> }
>@@ -1265,7 +1268,7 @@ __netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
> 
>     qid = vhost_dev->tx_q[qid % vhost_dev->real_n_txq].map;
> 
>-    if (OVS_UNLIKELY(!is_vhost_running(virtio_dev) || qid == -1)) {
>+    if (OVS_UNLIKELY(!is_vhost_running(virtio_dev) || qid < 0)) {
>         rte_spinlock_lock(&vhost_dev->stats_lock);
>         vhost_dev->stats.tx_dropped+= cnt;
>         rte_spinlock_unlock(&vhost_dev->stats_lock);
>@@ -2019,7 +2022,7 @@ netdev_dpdk_remap_txqs(struct netdev_dpdk *netdev)
>     }
> 
>     if (n_enabled == 0 && total_txqs != 0) {
>-        enabled_queues[0] = -1;
>+        enabled_queues[0] = OVS_VHOST_QUEUE_DISABLED;
>         n_enabled = 1;
>     }
> 
>@@ -2056,6 +2059,10 @@ netdev_dpdk_vhost_set_queues(struct netdev_dpdk *netdev, struct virtio_net *dev)
>     netdev->real_n_rxq = qp_num;
>     netdev->real_n_txq = qp_num;
>     netdev->txq_needs_locking = true;
>+    /* Enable TX queue 0 by default if it wasn't disabled. */
>+    if (netdev->tx_q[0].map == OVS_VHOST_QUEUE_MAP_UNKNOWN) {
>+        netdev->tx_q[0].map = 0;
>+    }
> 
>     netdev_dpdk_remap_txqs(netdev);
> 
>@@ -2104,6 +2111,18 @@ new_device(struct virtio_net *dev)
>     return 0;
> }
> 
>+/* Clears mapping for all available queues of vhost interface. */
>+static void
>+netdev_dpdk_txq_map_clear(struct netdev_dpdk *dev)
>+    OVS_REQUIRES(dev->mutex)
>+{
>+    int i;
>+
>+    for (i = 0; i < dev->real_n_txq; i++) {
>+        dev->tx_q[i].map = OVS_VHOST_QUEUE_MAP_UNKNOWN;
>+    }
>+}
>+
> /*
>  * Remove a virtio-net device from the specific vhost port.  Use dev->remove
>  * flag to stop any more packets from being sent or received to/from a VM and
>@@ -2123,6 +2142,7 @@ destroy_device(volatile struct virtio_net *dev)
>             ovs_mutex_lock(&vhost_dev->mutex);
>             dev->flags &= ~VIRTIO_DEV_RUNNING;
>             ovsrcu_set(&vhost_dev->virtio_dev, NULL);
>+            netdev_dpdk_txq_map_clear(vhost_dev);
>             exists = true;
>             ovs_mutex_unlock(&vhost_dev->mutex);
>             break;
>@@ -2169,7 +2189,7 @@ vring_state_changed(struct virtio_net *dev, uint16_t queue_id, int enable)
>             if (enable) {
>                 vhost_dev->tx_q[qid].map = qid;
>             } else {
>-                vhost_dev->tx_q[qid].map = -1;
>+                vhost_dev->tx_q[qid].map = OVS_VHOST_QUEUE_DISABLED;
>             }
>             netdev_dpdk_remap_txqs(vhost_dev);
>             exists = true;
>-- 
>2.5.0
>
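To make the queue-mapping convention above concrete, the following minimal C
sketch models the states a txq map entry moves through. Only the two sentinel
constants, the default enabling of queue 0, and the "qid < 0" drop test are
taken from the patch; the tx_q_map array, N_TXQ, and the helper functions are
hypothetical scaffolding for illustration, not the netdev-dpdk code.

/* Minimal sketch (not OVS code) of the txq map convention. */
#include <stdio.h>

#define OVS_VHOST_QUEUE_MAP_UNKNOWN (-1) /* Mapping not initialized. */
#define OVS_VHOST_QUEUE_DISABLED    (-2) /* Disabled by guest, not remapped. */

#define N_TXQ 4                          /* Hypothetical queue count. */
static int tx_q_map[N_TXQ];

/* On allocation every mapping is unknown (cf. netdev_dpdk_alloc_txq()). */
static void
txq_map_clear(void)
{
    int i;

    for (i = 0; i < N_TXQ; i++) {
        tx_q_map[i] = OVS_VHOST_QUEUE_MAP_UNKNOWN;
    }
}

/* Core of the fix: when the device comes up, enable queue 0 by default
 * unless the guest has already disabled it, so guests that never send
 * VHOST_USER_SET_VRING_ENABLE (QEMU < 2.5, vhost-cuse) can still
 * transmit. */
static void
device_ready(void)
{
    if (tx_q_map[0] == OVS_VHOST_QUEUE_MAP_UNKNOWN) {
        tx_q_map[0] = 0;
    }
}

/* Send path: both sentinels are negative, so the single "qid < 0" test
 * from v3 of the patch drops packets for unknown and disabled queues. */
static void
vhost_send(int qid)
{
    int mapped = tx_q_map[qid % N_TXQ];

    if (mapped < 0) {
        printf("txq %d: dropped (map = %d)\n", qid, mapped);
    } else {
        printf("txq %d: sent on enabled queue %d\n", qid, mapped);
    }
}

int
main(void)
{
    txq_map_clear();
    device_ready();
    vhost_send(0);      /* Sent: queue 0 enabled by default. */
    vhost_send(1);      /* Dropped: mapping still unknown.   */
    return 0;
}

In the real patch, vring_state_changed() later flips individual entries
between qid and OVS_VHOST_QUEUE_DISABLED, and netdev_dpdk_remap_txqs()
redistributes traffic from unmapped queues onto the enabled ones; the sketch
leaves that remapping out.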


Re: [ovs-dev] [PATCH v3] netdev-dpdk: vhost: Fix txq enabling in the absence of notifications.

2016-03-29 Thread Flavio Leitner
On Tue, Mar 29, 2016 at 09:20:41AM +0300, Ilya Maximets wrote:
> According to the QEMU documentation (docs/specs/vhost-user.txt), one queue
> should be enabled initially. More queues are enabled dynamically, by
> sending the message VHOST_USER_SET_VRING_ENABLE.
> 
> Currently all queues in OVS are disabled by default, which breaks the
> above specification. Queue #0 should therefore be enabled by default, to
> support QEMU versions older than 2.5 and to avoid problems when QEMU,
> in accordance with the documentation, never sends
> VHOST_USER_SET_VRING_ENABLE for queue #0.
> This also fixes the currently broken vhost-cuse support in OVS.
> 
> Fixes: 585a5beaa2a4 ("netdev-dpdk: vhost-user: Fix sending packets to
>   queues not enabled by guest.")
> Reported-by: Mauricio Vasquez B 
> Signed-off-by: Ilya Maximets 
> ---

Acked-by: Flavio Leitner 

