Re: [ovs-dev] [PATCH v11 2/3] netdev-dpdk : Detailed packet drop statistics

2019-11-11 Thread Sriram Vatala via dev
Thanks, Ilya and Kevin.

Regards,
Sriram.

-Original Message-
From: Ilya Maximets 
Sent: 12 November 2019 00:08
To: Ilya Maximets ; Kevin Traynor ; 
Sriram Vatala ; ovs-dev@openvswitch.org
Subject: Re: [PATCH v11 2/3] netdev-dpdk : Detailed packet drop statistics

On 11.11.2019 17:11, Ilya Maximets wrote:
>>> I'm not sure if clang annotations will work with rte_spinlock.
>>> DPDK doesn't have proper annotations for locking functions.
>>>
>>
>> Ah, good point, I didn't check the lock type. In that case nevermind,
>> patch+incremental LGTM as is.
>>
>> Acked-by: Kevin Traynor 
>
> Thanks. In this case, I think, there is no need to re-spin the series.
> I'll just squash the incremental with this patch and give it another try.
> If it'll work fine, I'll apply the series.

Thanks Sriram and Kevin! Series applied to master.

Best regards, Ilya Maximets.


Re: [ovs-dev] [PATCH v11 2/3] netdev-dpdk : Detailed packet drop statistics

2019-11-11 Thread Sriram Vatala via dev
Hi Ilya,
Thanks for the review. I agree with your proposal to move the stats update
code to the existing special functions. Thanks for the incremental patch; it looks
good to me. I will wait for Kevin's thoughts on this.

Thanks & Regards,
Sriram.

-Original Message-
From: Ilya Maximets 
Sent: 11 November 2019 04:50
To: Sriram Vatala ; ovs-dev@openvswitch.org; 
ktray...@redhat.com; i.maxim...@ovn.org
Subject: Re: [PATCH v11 2/3] netdev-dpdk : Detailed packet drop statistics

On 29.10.2019 15:50, Sriram Vatala wrote:
> @@ -2388,12 +2412,16 @@ __netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
>   }
>   } while (cnt && (retries++ < max_retries));
>
> +tx_failure = cnt;
>   rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
>
>   rte_spinlock_lock(&dev->stats_lock);
>   netdev_dpdk_vhost_update_tx_counters(&dev->stats, pkts, total_pkts,
>cnt + dropped);
> -dev->tx_retries += MIN(retries, max_retries);
> +sw_stats->tx_retries += MIN(retries, max_retries);
> +sw_stats->tx_failure_drops += tx_failure;
> +sw_stats->tx_mtu_exceeded_drops += mtu_drops;
> +sw_stats->tx_qos_drops += qos_drops;

Kevin pointed to this part of the code in his review of v9, hoping we could move
it to a separate function and reuse it.  This code caught my eye too.  I don't
think we can reuse this part; at least, it would not be very efficient in the
current situation (clearing the unused fields in the stats structure would
probably be needed before calling such a common function, but ETH tx uses only
half of the struct).

But there is another thing here.  We already have special functions for vhost
tx/rx counters, and it looks strange that we're not using these functions to
update the tx/rx failure counters.

Suggesting the following incremental.
Kevin, Sriram, please share your thoughts.

diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c index 3cb7023a8..02120a379 
100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -2169,16 +2169,18 @@ netdev_dpdk_vhost_update_rx_size_counters(struct 
netdev_stats *stats,
  }

  static inline void
-netdev_dpdk_vhost_update_rx_counters(struct netdev_stats *stats,
+netdev_dpdk_vhost_update_rx_counters(struct netdev_dpdk *dev,
   struct dp_packet **packets, int count,
- int dropped)
+ int qos_drops)
  {
-int i;
-unsigned int packet_size;
+struct netdev_dpdk_sw_stats *sw_stats = dev->sw_stats;
+struct netdev_stats *stats = &dev->stats;
  struct dp_packet *packet;
+unsigned int packet_size;
+int i;

  stats->rx_packets += count;
-stats->rx_dropped += dropped;
+stats->rx_dropped += qos_drops;
  for (i = 0; i < count; i++) {
  packet = packets[i];
  packet_size = dp_packet_size(packet); @@ -2201,6 +2203,8 @@ 
netdev_dpdk_vhost_update_rx_counters(struct netdev_stats *stats,

  stats->rx_bytes += packet_size;
  }
+
+sw_stats->rx_qos_drops += qos_drops;
  }

  /*
@@ -2213,7 +2217,7 @@ netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq,
  struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);
  struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev);
  uint16_t nb_rx = 0;
-uint16_t dropped = 0;
+uint16_t qos_drops = 0;
  int qid = rxq->queue_id * VIRTIO_QNUM + VIRTIO_TXQ;
  int vid = netdev_dpdk_get_vid(dev);

@@ -2240,17 +2244,16 @@ netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq,
  }

  if (policer) {
-dropped = nb_rx;
+qos_drops = nb_rx;
  nb_rx = ingress_policer_run(policer,
  (struct rte_mbuf **) batch->packets,
  nb_rx, true);
-dropped -= nb_rx;
+qos_drops -= nb_rx;
  }

  rte_spinlock_lock(&dev->stats_lock);
-netdev_dpdk_vhost_update_rx_counters(&dev->stats, batch->packets,
 - nb_rx, dropped);
-dev->sw_stats->rx_qos_drops += dropped;
+netdev_dpdk_vhost_update_rx_counters(dev, batch->packets,
+ nb_rx, qos_drops);
  rte_spinlock_unlock(&dev->stats_lock);

  batch->count = nb_rx;
@@ -2360,13 +2363,18 @@ netdev_dpdk_filter_packet_len(struct netdev_dpdk *dev, 
struct rte_mbuf **pkts,
  }

  static inline void
-netdev_dpdk_vhost_update_tx_counters(struct netdev_stats *stats,
+netdev_dpdk_vhost_update_tx_counters(struct netdev_dpdk *dev,
   struct dp_packet **packets,
   int attempted,
- int dropped)
+ struct netdev_dpdk_sw_stats
+ *sw_stats_add)
  {
-int i;
+struct netdev_dpdk_sw_stats *sw_stats = dev->sw

Re: [ovs-dev] [PATCH v11 1/3] netdev-dpdk: Reuse vhost function for dpdk ETH custom stats.

2019-11-05 Thread Sriram Vatala via dev
Thanks Kevin for your response.

@i.maxim...@ovn.org  : Can you please review the patch set.

Thanks & Regards,
Sriram.

-Original Message-
From: Kevin Traynor  
Sent: 05 November 2019 16:07
To: Sriram Vatala ; ovs-dev@openvswitch.org;
i.maxim...@ovn.org
Cc: 'Ilya Maximets' 
Subject: Re: [PATCH v11 1/3] netdev-dpdk: Reuse vhost function for dpdk ETH
custom stats.

On 05/11/2019 04:52, Sriram Vatala wrote:
> Hi All,
> Please consider this as a gentle reminder.
> 
> Thanks & Regards,
> Sriram.
> 
> -Original Message-
> From: Sriram Vatala 
> Sent: 30 October 2019 11:57
> To: 'ovs-dev@openvswitch.org' ; 
> 'ktray...@redhat.com' ; 'i.maxim...@ovn.org'
> 
> Cc: 'Ilya Maximets' 
> Subject: RE: [PATCH v11 1/3] netdev-dpdk: Reuse vhost function for 
> dpdk ETH custom stats.
> 
> Hi All,
> Please review the updated patch.
> 
> Changes since v10 :
> Corrected the spelling mistake in the commit message.
> 

Hi Sriram,

I've acked v10, so not planning to re-review unless there are some other
changes.

thanks,
Kevin.

p.s. In general if it is only minor changes it is usually ok to keep acks
from a previous version. If it is doubtful you can check with the reviewer.

> Thanks & Regards,
> Sriram.
> 
> -Original Message-
> From: Sriram Vatala 
> Sent: 29 October 2019 20:20
> To: ovs-dev@openvswitch.org; ktray...@redhat.com; i.maxim...@ovn.org
> Cc: Ilya Maximets ; Sriram Vatala 
> 
> Subject: [PATCH v11 1/3] netdev-dpdk: Reuse vhost function for dpdk 
> ETH custom stats.
> 
> From: Ilya Maximets 
> 
> This is yet another refactoring for upcoming detailed drop stats.
> It allows to use single function for all the software calculated 
> statistics in netdev-dpdk for both vhost and ETH ports.
> 
> UINT64_MAX used as a marker for non-supported statistics in a same way 
> as it's done in bridge.c for common netdev stats.
> 
> Co-authored-by: Sriram Vatala 
> Cc: Sriram Vatala 
> Signed-off-by: Ilya Maximets 
> Signed-off-by: Sriram Vatala 
> ---
>  lib/netdev-dpdk.c | 69 
> +++
>  1 file changed, 40 insertions(+), 29 deletions(-)
> 
> diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c index
> 04e1a2d1b..2cc2516a9 100644
> --- a/lib/netdev-dpdk.c
> +++ b/lib/netdev-dpdk.c
> @@ -474,6 +474,8 @@ struct netdev_rxq_dpdk {  static void 
> netdev_dpdk_destruct(struct netdev *netdev);  static void 
> netdev_dpdk_vhost_destruct(struct netdev *netdev);
>  
> +static int netdev_dpdk_get_sw_custom_stats(const struct netdev *,
> +   struct netdev_custom_stats 
> +*);
>  static void netdev_dpdk_clear_xstats(struct netdev_dpdk *dev);
>  
>  int netdev_dpdk_get_vid(const struct netdev_dpdk *dev); @@ -1174,7 
> +1176,7 @@ common_construct(struct netdev *netdev, dpdk_port_t port_no,
>  dev->rte_xstats_ids = NULL;
>  dev->rte_xstats_ids_size = 0;
>  
> -dev->tx_retries = 0;
> +dev->tx_retries = (dev->type == DPDK_DEV_VHOST) ? 0 : UINT64_MAX;
>  
>  return 0;
>  }
> @@ -2775,7 +2777,9 @@ netdev_dpdk_get_custom_stats(const struct netdev 
> *netdev,
>  
>  uint32_t i;
>  struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
> -int rte_xstats_ret;
> +int rte_xstats_ret, sw_stats_size;
> +
> +netdev_dpdk_get_sw_custom_stats(netdev, custom_stats);
>  
>  ovs_mutex_lock(&dev->mutex);
>  
> @@ -2790,23 +2794,22 @@ netdev_dpdk_get_custom_stats(const struct 
> netdev *netdev,
>  if (rte_xstats_ret > 0 &&
>  rte_xstats_ret <= dev->rte_xstats_ids_size) {
>  
> -custom_stats->size = rte_xstats_ret;
> -custom_stats->counters =
> -(struct netdev_custom_counter *)
> xcalloc(rte_xstats_ret,
> -sizeof(struct netdev_custom_counter));
> +sw_stats_size = custom_stats->size;
> +custom_stats->size += rte_xstats_ret;
> +custom_stats->counters = xrealloc(custom_stats->counters,
> +  custom_stats->size *
> +  sizeof 
> + *custom_stats->counters);
>  
>  for (i = 0; i < rte_xstats_ret; i++) {
> -ovs_strlcpy(custom_stats->counters[i].name,
> +ovs_strlcpy(custom_stats->counters[sw_stats_size + 
> + i].name,
>  netdev_dpdk_get_xstat_name(dev,
>  
> dev->rte_xstats_ids[i]),
>  NETDEV_CUSTOM_STATS_NAME_SIZE);
> -custom_stats->counters[i].value = values[

Re: [ovs-dev] [PATCH v11 1/3] netdev-dpdk: Reuse vhost function for dpdk ETH custom stats.

2019-11-04 Thread Sriram Vatala via dev
Hi All,
Please consider this as a gentle reminder.

Thanks & Regards,
Sriram.

-Original Message-
From: Sriram Vatala  
Sent: 30 October 2019 11:57
To: 'ovs-dev@openvswitch.org' ;
'ktray...@redhat.com' ; 'i.maxim...@ovn.org'

Cc: 'Ilya Maximets' 
Subject: RE: [PATCH v11 1/3] netdev-dpdk: Reuse vhost function for dpdk ETH
custom stats.

Hi All,
Please review the updated patch.

Changes since v10 :
Corrected the spelling mistake in the commit message.

Thanks & Regards,
Sriram.

-Original Message-
From: Sriram Vatala  
Sent: 29 October 2019 20:20
To: ovs-dev@openvswitch.org; ktray...@redhat.com; i.maxim...@ovn.org
Cc: Ilya Maximets ; Sriram Vatala

Subject: [PATCH v11 1/3] netdev-dpdk: Reuse vhost function for dpdk ETH
custom stats.

From: Ilya Maximets 

This is yet another refactoring for upcoming detailed drop stats.
It allows to use single function for all the software calculated statistics
in netdev-dpdk for both vhost and ETH ports.

UINT64_MAX used as a marker for non-supported statistics in a same way as
it's done in bridge.c for common netdev stats.

Co-authored-by: Sriram Vatala 
Cc: Sriram Vatala 
Signed-off-by: Ilya Maximets 
Signed-off-by: Sriram Vatala 
---
 lib/netdev-dpdk.c | 69 +++
 1 file changed, 40 insertions(+), 29 deletions(-)

diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c index
04e1a2d1b..2cc2516a9 100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -474,6 +474,8 @@ struct netdev_rxq_dpdk {  static void
netdev_dpdk_destruct(struct netdev *netdev);  static void
netdev_dpdk_vhost_destruct(struct netdev *netdev);
 
+static int netdev_dpdk_get_sw_custom_stats(const struct netdev *,
+   struct netdev_custom_stats 
+*);
 static void netdev_dpdk_clear_xstats(struct netdev_dpdk *dev);
 
 int netdev_dpdk_get_vid(const struct netdev_dpdk *dev); @@ -1174,7 +1176,7
@@ common_construct(struct netdev *netdev, dpdk_port_t port_no,
 dev->rte_xstats_ids = NULL;
 dev->rte_xstats_ids_size = 0;
 
-dev->tx_retries = 0;
+dev->tx_retries = (dev->type == DPDK_DEV_VHOST) ? 0 : UINT64_MAX;
 
 return 0;
 }
@@ -2775,7 +2777,9 @@ netdev_dpdk_get_custom_stats(const struct netdev
*netdev,
 
 uint32_t i;
 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
-int rte_xstats_ret;
+int rte_xstats_ret, sw_stats_size;
+
+netdev_dpdk_get_sw_custom_stats(netdev, custom_stats);
 
  ovs_mutex_lock(&dev->mutex);
 
@@ -2790,23 +2794,22 @@ netdev_dpdk_get_custom_stats(const struct netdev
*netdev,
 if (rte_xstats_ret > 0 &&
 rte_xstats_ret <= dev->rte_xstats_ids_size) {
 
-custom_stats->size = rte_xstats_ret;
-custom_stats->counters =
-(struct netdev_custom_counter *)
xcalloc(rte_xstats_ret,
-sizeof(struct netdev_custom_counter));
+sw_stats_size = custom_stats->size;
+custom_stats->size += rte_xstats_ret;
+custom_stats->counters = xrealloc(custom_stats->counters,
+  custom_stats->size *
+  sizeof 
+ *custom_stats->counters);
 
 for (i = 0; i < rte_xstats_ret; i++) {
-ovs_strlcpy(custom_stats->counters[i].name,
+ovs_strlcpy(custom_stats->counters[sw_stats_size + 
+ i].name,
 netdev_dpdk_get_xstat_name(dev,
 
dev->rte_xstats_ids[i]),
 NETDEV_CUSTOM_STATS_NAME_SIZE);
-custom_stats->counters[i].value = values[i];
+custom_stats->counters[sw_stats_size + i].value = 
+ values[i];
 }
 } else {
 VLOG_WARN("Cannot get XSTATS values for port:
"DPDK_PORT_ID_FMT,
   dev->port_id);
-custom_stats->counters = NULL;
-custom_stats->size = 0;
 /* Let's clear statistics cache, so it will be
  * reconfigured */
 netdev_dpdk_clear_xstats(dev); @@ -2821,39 +2824,47 @@
netdev_dpdk_get_custom_stats(const struct netdev *netdev,  }
 
 static int
-netdev_dpdk_vhost_get_custom_stats(const struct netdev *netdev,
-   struct netdev_custom_stats
*custom_stats)
+netdev_dpdk_get_sw_custom_stats(const struct netdev *netdev,
+struct netdev_custom_stats 
+*custom_stats)
 {
 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
-int i;
+int i, n;
 
-#define VHOST_CSTATS \
-VHOST_CSTAT(tx_retries)
+#define SW_CSTATS \
+SW_CSTAT(tx_retries)
 
-#define VHOST_CSTAT(NAME) + 1
-custom_stats->size = VHOST_CSTATS;
-#undef VHOST_CSTAT
+#define SW_CSTAT(NAME) + 1
+custom_stats->size = SW_CSTATS;
+#undef SW_CSTAT
 custom_stats->counters = xcalloc(custom_stats-&g

Re: [ovs-dev] [PATCH v11 1/3] netdev-dpdk: Reuse vhost function for dpdk ETH custom stats.

2019-10-31 Thread Sriram Vatala via dev
Hi,

Please consider this as a gentle reminder.

Thanks & Regards,
Sriram.

-Original Message-
From: Sriram Vatala  
Sent: 30 October 2019 11:57
To: 'ovs-dev@openvswitch.org' ;
'ktray...@redhat.com' ; 'i.maxim...@ovn.org'

Cc: 'Ilya Maximets' 
Subject: RE: [PATCH v11 1/3] netdev-dpdk: Reuse vhost function for dpdk ETH
custom stats.

Hi All,
Please review the updated patch.

Changes since v10 :
Corrected the spelling mistake in the commit message.

Thanks & Regards,
Sriram.

-Original Message-
From: Sriram Vatala  
Sent: 29 October 2019 20:20
To: ovs-dev@openvswitch.org; ktray...@redhat.com; i.maxim...@ovn.org
Cc: Ilya Maximets ; Sriram Vatala

Subject: [PATCH v11 1/3] netdev-dpdk: Reuse vhost function for dpdk ETH
custom stats.

From: Ilya Maximets 

This is yet another refactoring for upcoming detailed drop stats.
It allows to use single function for all the software calculated statistics
in netdev-dpdk for both vhost and ETH ports.

UINT64_MAX used as a marker for non-supported statistics in a same way as
it's done in bridge.c for common netdev stats.

Co-authored-by: Sriram Vatala 
Cc: Sriram Vatala 
Signed-off-by: Ilya Maximets 
Signed-off-by: Sriram Vatala 
---
 lib/netdev-dpdk.c | 69 +++
 1 file changed, 40 insertions(+), 29 deletions(-)

diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c index
04e1a2d1b..2cc2516a9 100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -474,6 +474,8 @@ struct netdev_rxq_dpdk {  static void
netdev_dpdk_destruct(struct netdev *netdev);  static void
netdev_dpdk_vhost_destruct(struct netdev *netdev);
 
+static int netdev_dpdk_get_sw_custom_stats(const struct netdev *,
+   struct netdev_custom_stats 
+*);
 static void netdev_dpdk_clear_xstats(struct netdev_dpdk *dev);
 
 int netdev_dpdk_get_vid(const struct netdev_dpdk *dev); @@ -1174,7 +1176,7
@@ common_construct(struct netdev *netdev, dpdk_port_t port_no,
 dev->rte_xstats_ids = NULL;
 dev->rte_xstats_ids_size = 0;
 
-dev->tx_retries = 0;
+dev->tx_retries = (dev->type == DPDK_DEV_VHOST) ? 0 : UINT64_MAX;
 
 return 0;
 }
@@ -2775,7 +2777,9 @@ netdev_dpdk_get_custom_stats(const struct netdev
*netdev,
 
 uint32_t i;
 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
-int rte_xstats_ret;
+int rte_xstats_ret, sw_stats_size;
+
+netdev_dpdk_get_sw_custom_stats(netdev, custom_stats);
 
  ovs_mutex_lock(&dev->mutex);
 
@@ -2790,23 +2794,22 @@ netdev_dpdk_get_custom_stats(const struct netdev
*netdev,
 if (rte_xstats_ret > 0 &&
 rte_xstats_ret <= dev->rte_xstats_ids_size) {
 
-custom_stats->size = rte_xstats_ret;
-custom_stats->counters =
-(struct netdev_custom_counter *)
xcalloc(rte_xstats_ret,
-sizeof(struct netdev_custom_counter));
+sw_stats_size = custom_stats->size;
+custom_stats->size += rte_xstats_ret;
+custom_stats->counters = xrealloc(custom_stats->counters,
+  custom_stats->size *
+  sizeof 
+ *custom_stats->counters);
 
 for (i = 0; i < rte_xstats_ret; i++) {
-ovs_strlcpy(custom_stats->counters[i].name,
+ovs_strlcpy(custom_stats->counters[sw_stats_size + 
+ i].name,
 netdev_dpdk_get_xstat_name(dev,
 
dev->rte_xstats_ids[i]),
 NETDEV_CUSTOM_STATS_NAME_SIZE);
-custom_stats->counters[i].value = values[i];
+custom_stats->counters[sw_stats_size + i].value = 
+ values[i];
 }
 } else {
 VLOG_WARN("Cannot get XSTATS values for port:
"DPDK_PORT_ID_FMT,
   dev->port_id);
-custom_stats->counters = NULL;
-custom_stats->size = 0;
 /* Let's clear statistics cache, so it will be
  * reconfigured */
 netdev_dpdk_clear_xstats(dev); @@ -2821,39 +2824,47 @@
netdev_dpdk_get_custom_stats(const struct netdev *netdev,  }
 
 static int
-netdev_dpdk_vhost_get_custom_stats(const struct netdev *netdev,
-   struct netdev_custom_stats
*custom_stats)
+netdev_dpdk_get_sw_custom_stats(const struct netdev *netdev,
+struct netdev_custom_stats 
+*custom_stats)
 {
 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
-int i;
+int i, n;
 
-#define VHOST_CSTATS \
-VHOST_CSTAT(tx_retries)
+#define SW_CSTATS \
+SW_CSTAT(tx_retries)
 
-#define VHOST_CSTAT(NAME) + 1
-custom_stats->size = VHOST_CSTATS;
-#undef VHOST_CSTAT
+#define SW_CSTAT(NAME) + 1
+custom_stats->size = SW_CSTATS;
+#undef SW_CSTAT
 custom_stats->counters = xcalloc(custom_stats->size,

Re: [ovs-dev] [PATCH v11 1/3] netdev-dpdk: Reuse vhost function for dpdk ETH custom stats.

2019-10-30 Thread Sriram Vatala via dev
Hi All,
Please review the updated patch.

Changes since v10 :
Corrected the spelling mistake in the commit message.

Thanks & Regards,
Sriram.

-Original Message-
From: Sriram Vatala  
Sent: 29 October 2019 20:20
To: ovs-dev@openvswitch.org; ktray...@redhat.com; i.maxim...@ovn.org
Cc: Ilya Maximets ; Sriram Vatala

Subject: [PATCH v11 1/3] netdev-dpdk: Reuse vhost function for dpdk ETH
custom stats.

From: Ilya Maximets 

This is yet another refactoring for upcoming detailed drop stats.
It allows to use single function for all the software calculated statistics
in netdev-dpdk for both vhost and ETH ports.

UINT64_MAX used as a marker for non-supported statistics in a same way as
it's done in bridge.c for common netdev stats.

Co-authored-by: Sriram Vatala 
Cc: Sriram Vatala 
Signed-off-by: Ilya Maximets 
Signed-off-by: Sriram Vatala 
---
 lib/netdev-dpdk.c | 69 +++
 1 file changed, 40 insertions(+), 29 deletions(-)

diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c index
04e1a2d1b..2cc2516a9 100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -474,6 +474,8 @@ struct netdev_rxq_dpdk {  static void
netdev_dpdk_destruct(struct netdev *netdev);  static void
netdev_dpdk_vhost_destruct(struct netdev *netdev);
 
+static int netdev_dpdk_get_sw_custom_stats(const struct netdev *,
+   struct netdev_custom_stats 
+*);
 static void netdev_dpdk_clear_xstats(struct netdev_dpdk *dev);
 
 int netdev_dpdk_get_vid(const struct netdev_dpdk *dev); @@ -1174,7 +1176,7
@@ common_construct(struct netdev *netdev, dpdk_port_t port_no,
 dev->rte_xstats_ids = NULL;
 dev->rte_xstats_ids_size = 0;
 
-dev->tx_retries = 0;
+dev->tx_retries = (dev->type == DPDK_DEV_VHOST) ? 0 : UINT64_MAX;
 
 return 0;
 }
@@ -2775,7 +2777,9 @@ netdev_dpdk_get_custom_stats(const struct netdev
*netdev,
 
 uint32_t i;
 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
-int rte_xstats_ret;
+int rte_xstats_ret, sw_stats_size;
+
+netdev_dpdk_get_sw_custom_stats(netdev, custom_stats);
 
  ovs_mutex_lock(&dev->mutex);
 
@@ -2790,23 +2794,22 @@ netdev_dpdk_get_custom_stats(const struct netdev
*netdev,
 if (rte_xstats_ret > 0 &&
 rte_xstats_ret <= dev->rte_xstats_ids_size) {
 
-custom_stats->size = rte_xstats_ret;
-custom_stats->counters =
-(struct netdev_custom_counter *)
xcalloc(rte_xstats_ret,
-sizeof(struct netdev_custom_counter));
+sw_stats_size = custom_stats->size;
+custom_stats->size += rte_xstats_ret;
+custom_stats->counters = xrealloc(custom_stats->counters,
+  custom_stats->size *
+  sizeof 
+ *custom_stats->counters);
 
 for (i = 0; i < rte_xstats_ret; i++) {
-ovs_strlcpy(custom_stats->counters[i].name,
+ovs_strlcpy(custom_stats->counters[sw_stats_size + 
+ i].name,
 netdev_dpdk_get_xstat_name(dev,
 
dev->rte_xstats_ids[i]),
 NETDEV_CUSTOM_STATS_NAME_SIZE);
-custom_stats->counters[i].value = values[i];
+custom_stats->counters[sw_stats_size + i].value = 
+ values[i];
 }
 } else {
 VLOG_WARN("Cannot get XSTATS values for port:
"DPDK_PORT_ID_FMT,
   dev->port_id);
-custom_stats->counters = NULL;
-custom_stats->size = 0;
 /* Let's clear statistics cache, so it will be
  * reconfigured */
 netdev_dpdk_clear_xstats(dev); @@ -2821,39 +2824,47 @@
netdev_dpdk_get_custom_stats(const struct netdev *netdev,  }
 
 static int
-netdev_dpdk_vhost_get_custom_stats(const struct netdev *netdev,
-   struct netdev_custom_stats
*custom_stats)
+netdev_dpdk_get_sw_custom_stats(const struct netdev *netdev,
+struct netdev_custom_stats 
+*custom_stats)
 {
 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
-int i;
+int i, n;
 
-#define VHOST_CSTATS \
-VHOST_CSTAT(tx_retries)
+#define SW_CSTATS \
+SW_CSTAT(tx_retries)
 
-#define VHOST_CSTAT(NAME) + 1
-custom_stats->size = VHOST_CSTATS;
-#undef VHOST_CSTAT
+#define SW_CSTAT(NAME) + 1
+custom_stats->size = SW_CSTATS;
+#undef SW_CSTAT
 custom_stats->counters = xcalloc(custom_stats->size,
  sizeof *custom_stats->counters);
-i = 0;
-#define VHOST_CSTAT(NAME) \
-ovs_strlcpy(custom_stats->counters[i++].name, #NAME, \
-NETDEV_CUSTOM_STATS_NAME_SIZE);
-VHOST_CSTATS;
-#undef VHOST_CSTAT
 
  ovs_mutex_lock(&dev->mutex);

  rte_spinlock_lock(&dev->stats_lock);
 i = 0;

[ovs-dev] [PATCH v11 3/3] ovs-bugtool: Script to collect the port statistics

2019-10-30 Thread Sriram Vatala via dev
Sometimes, analysing the drop statistics of the ports
will be helpful in debugging. This patch adds script
to collect all supported port stats which also includes
the drop counters in userspace datapath. The output of
this script is included in the bugtool output.

Signed-off-by: Sriram Vatala 
---
 utilities/bugtool/automake.mk |  3 ++-
 utilities/bugtool/ovs-bugtool-get-port-stats  | 15 +++
 .../plugins/network-status/openvswitch.xml|  1 +
 3 files changed, 18 insertions(+), 1 deletion(-)
 create mode 100755 utilities/bugtool/ovs-bugtool-get-port-stats

diff --git a/utilities/bugtool/automake.mk b/utilities/bugtool/automake.mk
index 4c85b9cba..0a9b93088 100644
--- a/utilities/bugtool/automake.mk
+++ b/utilities/bugtool/automake.mk
@@ -21,7 +21,8 @@ bugtool_scripts = \
utilities/bugtool/ovs-bugtool-ovs-bridge-datapath-type \
utilities/bugtool/ovs-bugtool-ovs-vswitchd-threads-affinity \
utilities/bugtool/ovs-bugtool-qos-configs \
-   utilities/bugtool/ovs-bugtool-get-dpdk-nic-numa
+   utilities/bugtool/ovs-bugtool-get-dpdk-nic-numa \
+   utilities/bugtool/ovs-bugtool-get-port-stats
 
 scripts_SCRIPTS += $(bugtool_scripts)
 
diff --git a/utilities/bugtool/ovs-bugtool-get-port-stats 
b/utilities/bugtool/ovs-bugtool-get-port-stats
new file mode 100755
index 0..23e61034e
--- /dev/null
+++ b/utilities/bugtool/ovs-bugtool-get-port-stats
@@ -0,0 +1,15 @@
+#! /bin/bash
+
+#Iterate through each port of every bridge and print
+#the port statistics
+
+for bridge in `ovs-vsctl -- --real list-br`
+do
+echo "${bridge} : "
+echo "  ${bridge} : `ovs-vsctl get interface ${bridge} statistics`"
+for iface in `ovs-vsctl list-ifaces ${bridge}`
+do
+echo "  ${iface} : `ovs-vsctl get interface ${iface} statistics`"
+done
+echo -e "\n"
+done
diff --git a/utilities/bugtool/plugins/network-status/openvswitch.xml 
b/utilities/bugtool/plugins/network-status/openvswitch.xml
index b0e7a1510..72aa44930 100644
--- a/utilities/bugtool/plugins/network-status/openvswitch.xml
+++ b/utilities/bugtool/plugins/network-status/openvswitch.xml
@@ -41,4 +41,5 @@
 /usr/share/openvswitch/scripts/ovs-bugtool-ovs-ofctl-loop-over-bridges
 "dump-group-stats"
 /usr/share/openvswitch/scripts/ovs-bugtool-get-dpdk-nic-numa
 ip -s -s link 
show
+/usr/share/openvswitch/scripts/ovs-bugtool-get-port-stats
 
-- 
2.20.1



[ovs-dev] [PATCH v11 2/3] netdev-dpdk : Detailed packet drop statistics

2019-10-30 Thread Sriram Vatala via dev
OVS may be unable to transmit packets for multiple reasons on
the userspace datapath and today there is a single counter to
track packets dropped due to any of those reasons. This patch
adds custom software stats for the different reasons packets
may be dropped during tx/rx on the userspace datapath in OVS.

- MTU drops : drops that occur due to a too large packet size
- Qos drops : drops that occur due to egress/ingress QOS
- Tx failures: drops as returned by the DPDK PMD send function

Note that the reason for tx failures is not specified in OVS.
In practice for vhost ports it is most common that tx failures
are because there are not enough available descriptors,
which is usually caused by misconfiguration of the guest queues
and/or because the guest is not consuming packets fast enough
from the queues.

These counters are displayed along with other stats in
"ovs-vsctl get interface  statistics" command and are
available for dpdk and vhostuser/vhostuserclient ports.

Also the existing "tx_retries" counter for vhost ports has been
renamed to "ovs_tx_retries", so that all the custom statistics
that OVS accumulates itself will have the prefix "ovs_". This
will prevent any custom stats names overlapping with
driver/HW stats.
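
For illustration only (this sketch is not part of the patch): the counters
above are attributed per stage of the transmit path, so every packet that
does not make it onto the wire is counted exactly once against the stage
that dropped it. The helper stages and struct layout below are simplified
assumptions, not the actual netdev-dpdk code.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    struct sw_stats {
        uint64_t tx_mtu_exceeded_drops;   /* Packets larger than the device MTU. */
        uint64_t tx_qos_drops;            /* Packets dropped by egress QoS. */
        uint64_t tx_failure_drops;        /* Packets rejected by the PMD send call. */
    };

    /* Stand-ins for the MTU filter, egress QoS and PMD send steps; each
     * returns how many packets survive that stage.  Purely illustrative. */
    static int stage_mtu(int cnt)  { return cnt > 0 ? cnt - 1 : 0; }
    static int stage_qos(int cnt)  { return cnt > 1 ? cnt - 2 : 0; }
    static int stage_send(int cnt) { return cnt > 2 ? cnt - 3 : 0; }

    static void
    sketch_tx(struct sw_stats *s, int n_pkts)
    {
        int after_mtu = stage_mtu(n_pkts);
        int after_qos = stage_qos(after_mtu);
        int sent = stage_send(after_qos);

        /* Attribute every lost packet to the stage that dropped it. */
        s->tx_mtu_exceeded_drops += n_pkts - after_mtu;
        s->tx_qos_drops += after_mtu - after_qos;
        s->tx_failure_drops += after_qos - sent;
    }

    int
    main(void)
    {
        struct sw_stats s = { 0 };

        sketch_tx(&s, 32);
        printf("mtu=%" PRIu64 " qos=%" PRIu64 " tx_fail=%" PRIu64 "\n",
               s.tx_mtu_exceeded_drops, s.tx_qos_drops, s.tx_failure_drops);
        return 0;
    }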

Signed-off-by: Sriram Vatala 
---
 Documentation/topics/dpdk/bridge.rst |  6 ++
 Documentation/topics/dpdk/vhost-user.rst |  2 +-
 lib/netdev-dpdk.c| 82 +++-
 3 files changed, 72 insertions(+), 18 deletions(-)

diff --git a/Documentation/topics/dpdk/bridge.rst 
b/Documentation/topics/dpdk/bridge.rst
index d9bc7eba4..f0ef42ecc 100644
--- a/Documentation/topics/dpdk/bridge.rst
+++ b/Documentation/topics/dpdk/bridge.rst
@@ -75,6 +75,12 @@ OpenFlow14`` option::
 
 $ ovs-ofctl -O OpenFlow14 dump-ports br0
 
+There are custom statistics that OVS accumulates itself and these stats has
+``ovs_`` as prefix. These custom stats are shown along with other stats
+using the following command::
+
+$ ovs-vsctl get Interface  statistics
+
 EMC Insertion Probability
 -
 
diff --git a/Documentation/topics/dpdk/vhost-user.rst 
b/Documentation/topics/dpdk/vhost-user.rst
index cda5b122f..ec0caeb16 100644
--- a/Documentation/topics/dpdk/vhost-user.rst
+++ b/Documentation/topics/dpdk/vhost-user.rst
@@ -551,7 +551,7 @@ processing packets at the required rate.
 The amount of Tx retries on a vhost-user or vhost-user-client interface can be
 shown with::
 
-  $ ovs-vsctl get Interface dpdkvhostclient0 statistics:tx_retries
+  $ ovs-vsctl get Interface dpdkvhostclient0 statistics:ovs_tx_retries
 
 vhost-user Dequeue Zero Copy (experimental)
 ---
diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
index 2cc2516a9..6922e61ca 100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -174,6 +174,20 @@ static const struct vhost_device_ops virtio_net_device_ops 
=
 .destroy_connection = destroy_connection,
 };
 
+/* Custom software stats for dpdk ports */
+struct netdev_dpdk_sw_stats {
+/* No. of retries when unable to transmit. */
+uint64_t tx_retries;
+/* Packet drops when unable to transmit; Probably Tx queue is full. */
+uint64_t tx_failure_drops;
+/* Packet length greater than device MTU. */
+uint64_t tx_mtu_exceeded_drops;
+/* Packet drops in egress policer processing. */
+uint64_t tx_qos_drops;
+/* Packet drops in ingress policer processing. */
+uint64_t rx_qos_drops;
+};
+
 enum { DPDK_RING_SIZE = 256 };
 BUILD_ASSERT_DECL(IS_POW2(DPDK_RING_SIZE));
 enum { DRAIN_TSC = 20ULL };
@@ -416,11 +430,10 @@ struct netdev_dpdk {
 
 PADDED_MEMBERS(CACHE_LINE_SIZE,
 struct netdev_stats stats;
-/* Custom stat for retries when unable to transmit. */
-uint64_t tx_retries;
+struct netdev_dpdk_sw_stats *sw_stats;
 /* Protects stats */
 rte_spinlock_t stats_lock;
-/* 4 pad bytes here. */
+/* 36 pad bytes here. */
 );
 
 PADDED_MEMBERS(CACHE_LINE_SIZE,
@@ -1176,7 +1189,8 @@ common_construct(struct netdev *netdev, dpdk_port_t 
port_no,
 dev->rte_xstats_ids = NULL;
 dev->rte_xstats_ids_size = 0;
 
-dev->tx_retries = (dev->type == DPDK_DEV_VHOST) ? 0 : UINT64_MAX;
+dev->sw_stats = xzalloc(sizeof *dev->sw_stats);
+dev->sw_stats->tx_retries = (dev->type == DPDK_DEV_VHOST) ? 0 : UINT64_MAX;
 
 return 0;
 }
@@ -1362,6 +1376,7 @@ common_destruct(struct netdev_dpdk *dev)
  ovs_list_remove(&dev->list_node);
  free(ovsrcu_get_protected(struct ingress_policer *,
    &dev->ingress_policer));
+free(dev->sw_stats);
  ovs_mutex_destroy(&dev->mutex);
 }
 
@@ -2212,6 +2227,7 @@ netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq,
  rte_spinlock_lock(&dev->stats_lock);
  netdev_dpdk_vhost_update_rx_counters(&dev->stats, batch->packets,

[ovs-dev] [PATCH v11 1/3] netdev-dpdk: Reuse vhost function for dpdk ETH custom stats.

2019-10-30 Thread Sriram Vatala via dev
From: Ilya Maximets 

This is yet another refactoring for upcoming detailed drop stats.
It allows to use single function for all the software calculated
statistics in netdev-dpdk for both vhost and ETH ports.

UINT64_MAX used as a marker for non-supported statistics in a
same way as it's done in bridge.c for common netdev stats.
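
For illustration only (not part of the patch): the idea of the UINT64_MAX
marker is that one common getter fills every software counter, initializing
the ones a port type does not support to UINT64_MAX, and the reporting side
simply skips counters that still hold the marker. The names and types below
are simplified assumptions, not the actual OVS code.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    struct sw_counter {
        const char *name;
        uint64_t value;                /* UINT64_MAX means "not supported". */
    };

    /* Print only the counters this port type actually supports. */
    static void
    report_supported(const struct sw_counter *counters, int n)
    {
        for (int i = 0; i < n; i++) {
            if (counters[i].value != UINT64_MAX) {
                printf("%s: %" PRIu64 "\n", counters[i].name, counters[i].value);
            }
        }
    }

    int
    main(void)
    {
        struct sw_counter eth_port[] = {
            { "tx_failure_drops", 3 },
            { "tx_retries", UINT64_MAX },  /* Not meaningful for ETH ports. */
        };

        report_supported(eth_port, 2);
        return 0;
    }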

Co-authored-by: Sriram Vatala 
Cc: Sriram Vatala 
Signed-off-by: Ilya Maximets 
Signed-off-by: Sriram Vatala 
---
 lib/netdev-dpdk.c | 69 +++
 1 file changed, 40 insertions(+), 29 deletions(-)

diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
index 04e1a2d1b..2cc2516a9 100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -474,6 +474,8 @@ struct netdev_rxq_dpdk {
 static void netdev_dpdk_destruct(struct netdev *netdev);
 static void netdev_dpdk_vhost_destruct(struct netdev *netdev);
 
+static int netdev_dpdk_get_sw_custom_stats(const struct netdev *,
+   struct netdev_custom_stats *);
 static void netdev_dpdk_clear_xstats(struct netdev_dpdk *dev);
 
 int netdev_dpdk_get_vid(const struct netdev_dpdk *dev);
@@ -1174,7 +1176,7 @@ common_construct(struct netdev *netdev, dpdk_port_t 
port_no,
 dev->rte_xstats_ids = NULL;
 dev->rte_xstats_ids_size = 0;
 
-dev->tx_retries = 0;
+dev->tx_retries = (dev->type == DPDK_DEV_VHOST) ? 0 : UINT64_MAX;
 
 return 0;
 }
@@ -2775,7 +2777,9 @@ netdev_dpdk_get_custom_stats(const struct netdev *netdev,
 
 uint32_t i;
 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
-int rte_xstats_ret;
+int rte_xstats_ret, sw_stats_size;
+
+netdev_dpdk_get_sw_custom_stats(netdev, custom_stats);
 
  ovs_mutex_lock(&dev->mutex);
 
@@ -2790,23 +2794,22 @@ netdev_dpdk_get_custom_stats(const struct netdev 
*netdev,
 if (rte_xstats_ret > 0 &&
 rte_xstats_ret <= dev->rte_xstats_ids_size) {
 
-custom_stats->size = rte_xstats_ret;
-custom_stats->counters =
-(struct netdev_custom_counter *) xcalloc(rte_xstats_ret,
-sizeof(struct netdev_custom_counter));
+sw_stats_size = custom_stats->size;
+custom_stats->size += rte_xstats_ret;
+custom_stats->counters = xrealloc(custom_stats->counters,
+  custom_stats->size *
+  sizeof *custom_stats->counters);
 
 for (i = 0; i < rte_xstats_ret; i++) {
-ovs_strlcpy(custom_stats->counters[i].name,
+ovs_strlcpy(custom_stats->counters[sw_stats_size + i].name,
 netdev_dpdk_get_xstat_name(dev,
dev->rte_xstats_ids[i]),
 NETDEV_CUSTOM_STATS_NAME_SIZE);
-custom_stats->counters[i].value = values[i];
+custom_stats->counters[sw_stats_size + i].value = values[i];
 }
 } else {
 VLOG_WARN("Cannot get XSTATS values for port: "DPDK_PORT_ID_FMT,
   dev->port_id);
-custom_stats->counters = NULL;
-custom_stats->size = 0;
 /* Let's clear statistics cache, so it will be
  * reconfigured */
 netdev_dpdk_clear_xstats(dev);
@@ -2821,39 +2824,47 @@ netdev_dpdk_get_custom_stats(const struct netdev 
*netdev,
 }
 
 static int
-netdev_dpdk_vhost_get_custom_stats(const struct netdev *netdev,
-   struct netdev_custom_stats *custom_stats)
+netdev_dpdk_get_sw_custom_stats(const struct netdev *netdev,
+struct netdev_custom_stats *custom_stats)
 {
 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
-int i;
+int i, n;
 
-#define VHOST_CSTATS \
-VHOST_CSTAT(tx_retries)
+#define SW_CSTATS \
+SW_CSTAT(tx_retries)
 
-#define VHOST_CSTAT(NAME) + 1
-custom_stats->size = VHOST_CSTATS;
-#undef VHOST_CSTAT
+#define SW_CSTAT(NAME) + 1
+custom_stats->size = SW_CSTATS;
+#undef SW_CSTAT
 custom_stats->counters = xcalloc(custom_stats->size,
  sizeof *custom_stats->counters);
-i = 0;
-#define VHOST_CSTAT(NAME) \
-ovs_strlcpy(custom_stats->counters[i++].name, #NAME, \
-NETDEV_CUSTOM_STATS_NAME_SIZE);
-VHOST_CSTATS;
-#undef VHOST_CSTAT
 
  ovs_mutex_lock(&dev->mutex);

  rte_spinlock_lock(&dev->stats_lock);
 i = 0;
-#define VHOST_CSTAT(NAME) \
+#define SW_CSTAT(NAME) \
 custom_stats->counters[i++].value = dev->NAME;
-VHOST_CSTATS;
-#undef VHOST_CSTAT
+SW_CSTATS;
+#undef SW_CSTAT
  rte_spinlock_unlock(&dev->stats_lock);

  ovs_mutex_unlock(&dev->mutex);
 
+i = 0;
+n = 0;
+#define SW_CSTAT(NAME) \
+   

Re: [ovs-dev] [PATCH v10 1/3] netdev-dpdk: Reuse vhost function for dpdk ETH custom stats.

2019-10-30 Thread Sriram Vatala via dev
Thanks Kevin for reviewing and acknowledging the patch. I will correct the 
spelling and send the updated patch.

Thanks & Regards,
Sriram.

-Original Message-
From: Kevin Traynor 
Sent: 25 October 2019 20:36
To: Sriram Vatala ; ovs-dev@openvswitch.org; 
i.maxim...@ovn.org
Cc: Ilya Maximets 
Subject: Re: [PATCH v10 1/3] netdev-dpdk: Reuse vhost function for dpdk ETH 
custom stats.

On 24/10/2019 19:21, Sriram Vatala wrote:
> From: Ilya Maximets 
>
> This is yet another refactoring for upcoming detailed drop stats.
> It allowes to use single function for all the software calculated

s/allowes/allows

> statistics in netdev-dpdk for both vhost and ETH ports.
>
> UINT64_MAX used as a marker for non-supported statistics in a same way
> as it's done in bridge.c for common netdev stats.
>
> Co-authored-by: Sriram Vatala 
> Cc: Sriram Vatala 
> Signed-off-by: Ilya Maximets 
> Signed-off-by: Sriram Vatala 

Acked-by: Kevin Traynor 

> ---
>  lib/netdev-dpdk.c | 69
> +++
>  1 file changed, 40 insertions(+), 29 deletions(-)
>
> diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c index
> 04e1a2d1b..2cc2516a9 100644
> --- a/lib/netdev-dpdk.c
> +++ b/lib/netdev-dpdk.c
> @@ -474,6 +474,8 @@ struct netdev_rxq_dpdk {  static void
> netdev_dpdk_destruct(struct netdev *netdev);  static void
> netdev_dpdk_vhost_destruct(struct netdev *netdev);
>
> +static int netdev_dpdk_get_sw_custom_stats(const struct netdev *,
> +   struct netdev_custom_stats
> +*);
>  static void netdev_dpdk_clear_xstats(struct netdev_dpdk *dev);
>
>  int netdev_dpdk_get_vid(const struct netdev_dpdk *dev); @@ -1174,7
> +1176,7 @@ common_construct(struct netdev *netdev, dpdk_port_t port_no,
>  dev->rte_xstats_ids = NULL;
>  dev->rte_xstats_ids_size = 0;
>
> -dev->tx_retries = 0;
> +dev->tx_retries = (dev->type == DPDK_DEV_VHOST) ? 0 : UINT64_MAX;
>
>  return 0;
>  }
> @@ -2775,7 +2777,9 @@ netdev_dpdk_get_custom_stats(const struct netdev
> *netdev,
>
>  uint32_t i;
>  struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
> -int rte_xstats_ret;
> +int rte_xstats_ret, sw_stats_size;
> +
> +netdev_dpdk_get_sw_custom_stats(netdev, custom_stats);
>
>  ovs_mutex_lock(&dev->mutex);
>
> @@ -2790,23 +2794,22 @@ netdev_dpdk_get_custom_stats(const struct netdev 
> *netdev,
>  if (rte_xstats_ret > 0 &&
>  rte_xstats_ret <= dev->rte_xstats_ids_size) {
>
> -custom_stats->size = rte_xstats_ret;
> -custom_stats->counters =
> -(struct netdev_custom_counter *) 
> xcalloc(rte_xstats_ret,
> -sizeof(struct netdev_custom_counter));
> +sw_stats_size = custom_stats->size;
> +custom_stats->size += rte_xstats_ret;
> +custom_stats->counters = xrealloc(custom_stats->counters,
> +  custom_stats->size *
> +  sizeof
> + *custom_stats->counters);
>
>  for (i = 0; i < rte_xstats_ret; i++) {
> -ovs_strlcpy(custom_stats->counters[i].name,
> +ovs_strlcpy(custom_stats->counters[sw_stats_size +
> + i].name,
>  netdev_dpdk_get_xstat_name(dev,
> 
> dev->rte_xstats_ids[i]),
>  NETDEV_CUSTOM_STATS_NAME_SIZE);
> -custom_stats->counters[i].value = values[i];
> +custom_stats->counters[sw_stats_size + i].value =
> + values[i];
>  }
>  } else {
>  VLOG_WARN("Cannot get XSTATS values for port: 
> "DPDK_PORT_ID_FMT,
>dev->port_id);
> -custom_stats->counters = NULL;
> -custom_stats->size = 0;
>  /* Let's clear statistics cache, so it will be
>   * reconfigured */
>  netdev_dpdk_clear_xstats(dev); @@ -2821,39 +2824,47 @@
> netdev_dpdk_get_custom_stats(const struct netdev *netdev,  }
>
>  static int
> -netdev_dpdk_vhost_get_custom_stats(const struct netdev *netdev,
> -   struct netdev_custom_stats 
> *custom_stats)
> +netdev_dpdk_get_sw_custom_stats(const struct netdev *netdev,
> +struct netdev_custom_stats
> +*custom_stats)
>  {
>  struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
> -int i;
> +int i, n;
>
> -#define VHOST_CSTATS \
> -VHOST_CSTAT(tx_retries)
> +#define SW_CSTATS \
> +SW_CSTAT(tx_retri

Re: [ovs-dev] [PATCH v10 1/3] netdev-dpdk: Reuse vhost function for dpdk ETH custom stats.

2019-10-25 Thread Sriram Vatala via dev
Hi,

Please review the patch set v10.

Changes since v9 :
Patch - 1:
Fixed the issue with the index inside the for loop that copies the DPDK extended
stats in the 'netdev_dpdk_get_custom_stats' function.

Patch - 2:
Modified the commit message.
Changed the custom stats prefix from 'netdev_dpdk_' to 'ovs_'
Added note about the prefix 'ovs_' in dpdk documentation. 
Addressed other comments given by Kevin.

Thanks & Regards,
Sriram.

-Original Message-
From: Sriram Vatala  
Sent: 24 October 2019 23:51
To: ovs-dev@openvswitch.org; i.maxim...@ovn.org; ktray...@redhat.com
Cc: Ilya Maximets ; Sriram Vatala

Subject: [PATCH v10 1/3] netdev-dpdk: Reuse vhost function for dpdk ETH
custom stats.

From: Ilya Maximets 

This is yet another refactoring for upcoming detailed drop stats.
It allowes to use single function for all the software calculated statistics
in netdev-dpdk for both vhost and ETH ports.

UINT64_MAX used as a marker for non-supported statistics in a same way as
it's done in bridge.c for common netdev stats.

Co-authored-by: Sriram Vatala 
Cc: Sriram Vatala 
Signed-off-by: Ilya Maximets 
Signed-off-by: Sriram Vatala 
---
 lib/netdev-dpdk.c | 69 +++
 1 file changed, 40 insertions(+), 29 deletions(-)

diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c index
04e1a2d1b..2cc2516a9 100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -474,6 +474,8 @@ struct netdev_rxq_dpdk {  static void
netdev_dpdk_destruct(struct netdev *netdev);  static void
netdev_dpdk_vhost_destruct(struct netdev *netdev);
 
+static int netdev_dpdk_get_sw_custom_stats(const struct netdev *,
+   struct netdev_custom_stats 
+*);
 static void netdev_dpdk_clear_xstats(struct netdev_dpdk *dev);
 
 int netdev_dpdk_get_vid(const struct netdev_dpdk *dev); @@ -1174,7 +1176,7
@@ common_construct(struct netdev *netdev, dpdk_port_t port_no,
 dev->rte_xstats_ids = NULL;
 dev->rte_xstats_ids_size = 0;
 
-dev->tx_retries = 0;
+dev->tx_retries = (dev->type == DPDK_DEV_VHOST) ? 0 : UINT64_MAX;
 
 return 0;
 }
@@ -2775,7 +2777,9 @@ netdev_dpdk_get_custom_stats(const struct netdev
*netdev,
 
 uint32_t i;
 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
-int rte_xstats_ret;
+int rte_xstats_ret, sw_stats_size;
+
+netdev_dpdk_get_sw_custom_stats(netdev, custom_stats);
 
  ovs_mutex_lock(&dev->mutex);
 
@@ -2790,23 +2794,22 @@ netdev_dpdk_get_custom_stats(const struct netdev
*netdev,
 if (rte_xstats_ret > 0 &&
 rte_xstats_ret <= dev->rte_xstats_ids_size) {
 
-custom_stats->size = rte_xstats_ret;
-custom_stats->counters =
-(struct netdev_custom_counter *)
xcalloc(rte_xstats_ret,
-sizeof(struct netdev_custom_counter));
+sw_stats_size = custom_stats->size;
+custom_stats->size += rte_xstats_ret;
+custom_stats->counters = xrealloc(custom_stats->counters,
+  custom_stats->size *
+  sizeof 
+ *custom_stats->counters);
 
 for (i = 0; i < rte_xstats_ret; i++) {
-ovs_strlcpy(custom_stats->counters[i].name,
+ovs_strlcpy(custom_stats->counters[sw_stats_size + 
+ i].name,
 netdev_dpdk_get_xstat_name(dev,
 
dev->rte_xstats_ids[i]),
 NETDEV_CUSTOM_STATS_NAME_SIZE);
-custom_stats->counters[i].value = values[i];
+custom_stats->counters[sw_stats_size + i].value = 
+ values[i];
 }
 } else {
 VLOG_WARN("Cannot get XSTATS values for port:
"DPDK_PORT_ID_FMT,
   dev->port_id);
-custom_stats->counters = NULL;
-custom_stats->size = 0;
 /* Let's clear statistics cache, so it will be
  * reconfigured */
 netdev_dpdk_clear_xstats(dev); @@ -2821,39 +2824,47 @@
netdev_dpdk_get_custom_stats(const struct netdev *netdev,  }
 
 static int
-netdev_dpdk_vhost_get_custom_stats(const struct netdev *netdev,
-   struct netdev_custom_stats
*custom_stats)
+netdev_dpdk_get_sw_custom_stats(const struct netdev *netdev,
+struct netdev_custom_stats 
+*custom_stats)
 {
 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
-int i;
+int i, n;
 
-#define VHOST_CSTATS \
-VHOST_CSTAT(tx_retries)
+#define SW_CSTATS \
+SW_CSTAT(tx_retries)
 
-#define VHOST_CSTAT(NAME) + 1
-custom_stats->size = VHOST_CSTATS;
-#undef VHOST_CSTAT
+#define SW_CSTAT(NAME) + 1
+custom_stats->size = SW_CSTATS;
+#undef SW_CSTAT
 custom_stats->counters = xcalloc(custom_stats->size,
  sizeof *custom_sta

[ovs-dev] [PATCH v10 3/3] ovs-bugtool: Script to collect the port statistics

2019-10-24 Thread Sriram Vatala via dev
Sometimes, analysing the drop statistics of the ports
will be helpful in debugging. This patch adds script
to collect all supported port stats which also includes
the drop counters in userspace datapath. The output of
this script is included in the bugtool output.

Signed-off-by: Sriram Vatala 
---
 utilities/bugtool/automake.mk |  3 ++-
 utilities/bugtool/ovs-bugtool-get-port-stats  | 15 +++
 .../plugins/network-status/openvswitch.xml|  1 +
 3 files changed, 18 insertions(+), 1 deletion(-)
 create mode 100755 utilities/bugtool/ovs-bugtool-get-port-stats

diff --git a/utilities/bugtool/automake.mk b/utilities/bugtool/automake.mk
index 4c85b9cba..0a9b93088 100644
--- a/utilities/bugtool/automake.mk
+++ b/utilities/bugtool/automake.mk
@@ -21,7 +21,8 @@ bugtool_scripts = \
utilities/bugtool/ovs-bugtool-ovs-bridge-datapath-type \
utilities/bugtool/ovs-bugtool-ovs-vswitchd-threads-affinity \
utilities/bugtool/ovs-bugtool-qos-configs \
-   utilities/bugtool/ovs-bugtool-get-dpdk-nic-numa
+   utilities/bugtool/ovs-bugtool-get-dpdk-nic-numa \
+   utilities/bugtool/ovs-bugtool-get-port-stats
 
 scripts_SCRIPTS += $(bugtool_scripts)
 
diff --git a/utilities/bugtool/ovs-bugtool-get-port-stats 
b/utilities/bugtool/ovs-bugtool-get-port-stats
new file mode 100755
index 0..23e61034e
--- /dev/null
+++ b/utilities/bugtool/ovs-bugtool-get-port-stats
@@ -0,0 +1,15 @@
+#! /bin/bash
+
+#Iterate through each port of every bridge and print
+#the port statistics
+
+for bridge in `ovs-vsctl -- --real list-br`
+do
+echo "${bridge} : "
+echo "  ${bridge} : `ovs-vsctl get interface ${bridge} statistics`"
+for iface in `ovs-vsctl list-ifaces ${bridge}`
+do
+echo "  ${iface} : `ovs-vsctl get interface ${iface} statistics`"
+done
+echo -e "\n"
+done
diff --git a/utilities/bugtool/plugins/network-status/openvswitch.xml 
b/utilities/bugtool/plugins/network-status/openvswitch.xml
index b0e7a1510..72aa44930 100644
--- a/utilities/bugtool/plugins/network-status/openvswitch.xml
+++ b/utilities/bugtool/plugins/network-status/openvswitch.xml
@@ -41,4 +41,5 @@
 /usr/share/openvswitch/scripts/ovs-bugtool-ovs-ofctl-loop-over-bridges
 "dump-group-stats"
 /usr/share/openvswitch/scripts/ovs-bugtool-get-dpdk-nic-numa
 ip -s -s link 
show
+/usr/share/openvswitch/scripts/ovs-bugtool-get-port-stats
 
-- 
2.20.1



[ovs-dev] [PATCH v10 1/3] netdev-dpdk: Reuse vhost function for dpdk ETH custom stats.

2019-10-24 Thread Sriram Vatala via dev
From: Ilya Maximets 

This is yet another refactoring for upcoming detailed drop stats.
It allowes to use single function for all the software calculated
statistics in netdev-dpdk for both vhost and ETH ports.

UINT64_MAX used as a marker for non-supported statistics in a
same way as it's done in bridge.c for common netdev stats.

Co-authored-by: Sriram Vatala 
Cc: Sriram Vatala 
Signed-off-by: Ilya Maximets 
Signed-off-by: Sriram Vatala 
---
 lib/netdev-dpdk.c | 69 +++
 1 file changed, 40 insertions(+), 29 deletions(-)

diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
index 04e1a2d1b..2cc2516a9 100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -474,6 +474,8 @@ struct netdev_rxq_dpdk {
 static void netdev_dpdk_destruct(struct netdev *netdev);
 static void netdev_dpdk_vhost_destruct(struct netdev *netdev);
 
+static int netdev_dpdk_get_sw_custom_stats(const struct netdev *,
+   struct netdev_custom_stats *);
 static void netdev_dpdk_clear_xstats(struct netdev_dpdk *dev);
 
 int netdev_dpdk_get_vid(const struct netdev_dpdk *dev);
@@ -1174,7 +1176,7 @@ common_construct(struct netdev *netdev, dpdk_port_t 
port_no,
 dev->rte_xstats_ids = NULL;
 dev->rte_xstats_ids_size = 0;
 
-dev->tx_retries = 0;
+dev->tx_retries = (dev->type == DPDK_DEV_VHOST) ? 0 : UINT64_MAX;
 
 return 0;
 }
@@ -2775,7 +2777,9 @@ netdev_dpdk_get_custom_stats(const struct netdev *netdev,
 
 uint32_t i;
 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
-int rte_xstats_ret;
+int rte_xstats_ret, sw_stats_size;
+
+netdev_dpdk_get_sw_custom_stats(netdev, custom_stats);
 
  ovs_mutex_lock(&dev->mutex);
 
@@ -2790,23 +2794,22 @@ netdev_dpdk_get_custom_stats(const struct netdev 
*netdev,
 if (rte_xstats_ret > 0 &&
 rte_xstats_ret <= dev->rte_xstats_ids_size) {
 
-custom_stats->size = rte_xstats_ret;
-custom_stats->counters =
-(struct netdev_custom_counter *) xcalloc(rte_xstats_ret,
-sizeof(struct netdev_custom_counter));
+sw_stats_size = custom_stats->size;
+custom_stats->size += rte_xstats_ret;
+custom_stats->counters = xrealloc(custom_stats->counters,
+  custom_stats->size *
+  sizeof *custom_stats->counters);
 
 for (i = 0; i < rte_xstats_ret; i++) {
-ovs_strlcpy(custom_stats->counters[i].name,
+ovs_strlcpy(custom_stats->counters[sw_stats_size + i].name,
 netdev_dpdk_get_xstat_name(dev,
dev->rte_xstats_ids[i]),
 NETDEV_CUSTOM_STATS_NAME_SIZE);
-custom_stats->counters[i].value = values[i];
+custom_stats->counters[sw_stats_size + i].value = values[i];
 }
 } else {
 VLOG_WARN("Cannot get XSTATS values for port: "DPDK_PORT_ID_FMT,
   dev->port_id);
-custom_stats->counters = NULL;
-custom_stats->size = 0;
 /* Let's clear statistics cache, so it will be
  * reconfigured */
 netdev_dpdk_clear_xstats(dev);
@@ -2821,39 +2824,47 @@ netdev_dpdk_get_custom_stats(const struct netdev 
*netdev,
 }
 
 static int
-netdev_dpdk_vhost_get_custom_stats(const struct netdev *netdev,
-   struct netdev_custom_stats *custom_stats)
+netdev_dpdk_get_sw_custom_stats(const struct netdev *netdev,
+struct netdev_custom_stats *custom_stats)
 {
 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
-int i;
+int i, n;
 
-#define VHOST_CSTATS \
-VHOST_CSTAT(tx_retries)
+#define SW_CSTATS \
+SW_CSTAT(tx_retries)
 
-#define VHOST_CSTAT(NAME) + 1
-custom_stats->size = VHOST_CSTATS;
-#undef VHOST_CSTAT
+#define SW_CSTAT(NAME) + 1
+custom_stats->size = SW_CSTATS;
+#undef SW_CSTAT
 custom_stats->counters = xcalloc(custom_stats->size,
  sizeof *custom_stats->counters);
-i = 0;
-#define VHOST_CSTAT(NAME) \
-ovs_strlcpy(custom_stats->counters[i++].name, #NAME, \
-NETDEV_CUSTOM_STATS_NAME_SIZE);
-VHOST_CSTATS;
-#undef VHOST_CSTAT
 
  ovs_mutex_lock(&dev->mutex);

  rte_spinlock_lock(&dev->stats_lock);
 i = 0;
-#define VHOST_CSTAT(NAME) \
+#define SW_CSTAT(NAME) \
 custom_stats->counters[i++].value = dev->NAME;
-VHOST_CSTATS;
-#undef VHOST_CSTAT
+SW_CSTATS;
+#undef SW_CSTAT
  rte_spinlock_unlock(&dev->stats_lock);

  ovs_mutex_unlock(&dev->mutex);
 
+i = 0;
+n = 0;
+#define SW_CSTAT(NAME) \
+   

[ovs-dev] [PATCH v10 2/3] netdev-dpdk : Detailed packet drop statistics

2019-10-24 Thread Sriram Vatala via dev
OVS may be unable to transmit packets for multiple reasons on
the userspace datapath and today there is a single counter to
track packets dropped due to any of those reasons. This patch
adds custom software stats for the different reasons packets
may be dropped during tx/rx on the userspace datapath in OVS.

- MTU drops : drops that occur due to a too large packet size
- Qos drops : drops that occur due to egress/ingress QOS
- Tx failures: drops as returned by the DPDK PMD send function

Note that the reason for tx failures is not specified in OVS.
In practice for vhost ports it is most common that tx failures
are because there are not enough available descriptors,
which is usually caused by misconfiguration of the guest queues
and/or because the guest is not consuming packets fast enough
from the queues.

These counters are displayed along with other stats in
"ovs-vsctl get interface  statistics" command and are
available for dpdk and vhostuser/vhostuserclient ports.

Also the existing "tx_retries" counter for vhost ports has been
renamed to "ovs_tx_retries", so that all the custom statistics
that OVS accumulates itself will have the prefix "ovs_". This
will prevent any custom stats names overlapping with
driver/HW stats.

Signed-off-by: Sriram Vatala 
---
 Documentation/topics/dpdk/bridge.rst |  6 ++
 Documentation/topics/dpdk/vhost-user.rst |  2 +-
 lib/netdev-dpdk.c| 82 +++-
 3 files changed, 72 insertions(+), 18 deletions(-)

diff --git a/Documentation/topics/dpdk/bridge.rst 
b/Documentation/topics/dpdk/bridge.rst
index d9bc7eba4..f0ef42ecc 100644
--- a/Documentation/topics/dpdk/bridge.rst
+++ b/Documentation/topics/dpdk/bridge.rst
@@ -75,6 +75,12 @@ OpenFlow14`` option::
 
 $ ovs-ofctl -O OpenFlow14 dump-ports br0
 
+There are custom statistics that OVS accumulates itself and these stats has
+``ovs_`` as prefix. These custom stats are shown along with other stats
+using the following command::
+
+$ ovs-vsctl get Interface  statistics
+
 EMC Insertion Probability
 -
 
diff --git a/Documentation/topics/dpdk/vhost-user.rst 
b/Documentation/topics/dpdk/vhost-user.rst
index cda5b122f..ec0caeb16 100644
--- a/Documentation/topics/dpdk/vhost-user.rst
+++ b/Documentation/topics/dpdk/vhost-user.rst
@@ -551,7 +551,7 @@ processing packets at the required rate.
 The amount of Tx retries on a vhost-user or vhost-user-client interface can be
 shown with::
 
-  $ ovs-vsctl get Interface dpdkvhostclient0 statistics:tx_retries
+  $ ovs-vsctl get Interface dpdkvhostclient0 statistics:ovs_tx_retries
 
 vhost-user Dequeue Zero Copy (experimental)
 ---
diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
index 2cc2516a9..6922e61ca 100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -174,6 +174,20 @@ static const struct vhost_device_ops virtio_net_device_ops 
=
 .destroy_connection = destroy_connection,
 };
 
+/* Custom software stats for dpdk ports */
+struct netdev_dpdk_sw_stats {
+/* No. of retries when unable to transmit. */
+uint64_t tx_retries;
+/* Packet drops when unable to transmit; Probably Tx queue is full. */
+uint64_t tx_failure_drops;
+/* Packet length greater than device MTU. */
+uint64_t tx_mtu_exceeded_drops;
+/* Packet drops in egress policer processing. */
+uint64_t tx_qos_drops;
+/* Packet drops in ingress policer processing. */
+uint64_t rx_qos_drops;
+};
+
 enum { DPDK_RING_SIZE = 256 };
 BUILD_ASSERT_DECL(IS_POW2(DPDK_RING_SIZE));
 enum { DRAIN_TSC = 20ULL };
@@ -416,11 +430,10 @@ struct netdev_dpdk {
 
 PADDED_MEMBERS(CACHE_LINE_SIZE,
 struct netdev_stats stats;
-/* Custom stat for retries when unable to transmit. */
-uint64_t tx_retries;
+struct netdev_dpdk_sw_stats *sw_stats;
 /* Protects stats */
 rte_spinlock_t stats_lock;
-/* 4 pad bytes here. */
+/* 36 pad bytes here. */
 );
 
 PADDED_MEMBERS(CACHE_LINE_SIZE,
@@ -1176,7 +1189,8 @@ common_construct(struct netdev *netdev, dpdk_port_t 
port_no,
 dev->rte_xstats_ids = NULL;
 dev->rte_xstats_ids_size = 0;
 
-dev->tx_retries = (dev->type == DPDK_DEV_VHOST) ? 0 : UINT64_MAX;
+dev->sw_stats = xzalloc(sizeof *dev->sw_stats);
+dev->sw_stats->tx_retries = (dev->type == DPDK_DEV_VHOST) ? 0 : UINT64_MAX;
 
 return 0;
 }
@@ -1362,6 +1376,7 @@ common_destruct(struct netdev_dpdk *dev)
  ovs_list_remove(&dev->list_node);
  free(ovsrcu_get_protected(struct ingress_policer *,
    &dev->ingress_policer));
+free(dev->sw_stats);
  ovs_mutex_destroy(&dev->mutex);
 }
 
@@ -2212,6 +2227,7 @@ netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq,
  rte_spinlock_lock(&dev->stats_lock);
  netdev_dpdk_vhost_update_rx_counters(&dev->stats, batch->packets,

[ovs-dev] [PATCH v10 1/3] netdev-dpdk: Reuse vhost function for dpdk ETH custom stats.

2019-10-24 Thread Sriram Vatala
From: Ilya Maximets 

This is yet another refactoring for upcoming detailed drop stats.
It allowes to use single function for all the software calculated
statistics in netdev-dpdk for both vhost and ETH ports.

UINT64_MAX used as a marker for non-supported statistics in a
same way as it's done in bridge.c for common netdev stats.

Cc: Sriram Vatala 
Signed-off-by: Ilya Maximets 
---
 lib/netdev-dpdk.c | 69 +++
 1 file changed, 40 insertions(+), 29 deletions(-)

diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
index 04e1a2d1b..2cc2516a9 100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -474,6 +474,8 @@ struct netdev_rxq_dpdk {
 static void netdev_dpdk_destruct(struct netdev *netdev);
 static void netdev_dpdk_vhost_destruct(struct netdev *netdev);
 
+static int netdev_dpdk_get_sw_custom_stats(const struct netdev *,
+   struct netdev_custom_stats *);
 static void netdev_dpdk_clear_xstats(struct netdev_dpdk *dev);
 
 int netdev_dpdk_get_vid(const struct netdev_dpdk *dev);
@@ -1174,7 +1176,7 @@ common_construct(struct netdev *netdev, dpdk_port_t 
port_no,
 dev->rte_xstats_ids = NULL;
 dev->rte_xstats_ids_size = 0;
 
-dev->tx_retries = 0;
+dev->tx_retries = (dev->type == DPDK_DEV_VHOST) ? 0 : UINT64_MAX;
 
 return 0;
 }
@@ -2775,7 +2777,9 @@ netdev_dpdk_get_custom_stats(const struct netdev *netdev,
 
 uint32_t i;
 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
-int rte_xstats_ret;
+int rte_xstats_ret, sw_stats_size;
+
+netdev_dpdk_get_sw_custom_stats(netdev, custom_stats);
 
  ovs_mutex_lock(&dev->mutex);
 
@@ -2790,23 +2794,22 @@ netdev_dpdk_get_custom_stats(const struct netdev 
*netdev,
 if (rte_xstats_ret > 0 &&
 rte_xstats_ret <= dev->rte_xstats_ids_size) {
 
-custom_stats->size = rte_xstats_ret;
-custom_stats->counters =
-(struct netdev_custom_counter *) xcalloc(rte_xstats_ret,
-sizeof(struct netdev_custom_counter));
+sw_stats_size = custom_stats->size;
+custom_stats->size += rte_xstats_ret;
+custom_stats->counters = xrealloc(custom_stats->counters,
+  custom_stats->size *
+  sizeof *custom_stats->counters);
 
 for (i = 0; i < rte_xstats_ret; i++) {
-ovs_strlcpy(custom_stats->counters[i].name,
+ovs_strlcpy(custom_stats->counters[sw_stats_size + i].name,
 netdev_dpdk_get_xstat_name(dev,
dev->rte_xstats_ids[i]),
 NETDEV_CUSTOM_STATS_NAME_SIZE);
-custom_stats->counters[i].value = values[i];
+custom_stats->counters[sw_stats_size + i].value = values[i];
 }
 } else {
 VLOG_WARN("Cannot get XSTATS values for port: "DPDK_PORT_ID_FMT,
   dev->port_id);
-custom_stats->counters = NULL;
-custom_stats->size = 0;
 /* Let's clear statistics cache, so it will be
  * reconfigured */
 netdev_dpdk_clear_xstats(dev);
@@ -2821,39 +2824,47 @@ netdev_dpdk_get_custom_stats(const struct netdev 
*netdev,
 }
 
 static int
-netdev_dpdk_vhost_get_custom_stats(const struct netdev *netdev,
-   struct netdev_custom_stats *custom_stats)
+netdev_dpdk_get_sw_custom_stats(const struct netdev *netdev,
+struct netdev_custom_stats *custom_stats)
 {
 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
-int i;
+int i, n;
 
-#define VHOST_CSTATS \
-VHOST_CSTAT(tx_retries)
+#define SW_CSTATS \
+SW_CSTAT(tx_retries)
 
-#define VHOST_CSTAT(NAME) + 1
-custom_stats->size = VHOST_CSTATS;
-#undef VHOST_CSTAT
+#define SW_CSTAT(NAME) + 1
+custom_stats->size = SW_CSTATS;
+#undef SW_CSTAT
 custom_stats->counters = xcalloc(custom_stats->size,
  sizeof *custom_stats->counters);
-i = 0;
-#define VHOST_CSTAT(NAME) \
-ovs_strlcpy(custom_stats->counters[i++].name, #NAME, \
-NETDEV_CUSTOM_STATS_NAME_SIZE);
-VHOST_CSTATS;
-#undef VHOST_CSTAT
 
  ovs_mutex_lock(&dev->mutex);

  rte_spinlock_lock(&dev->stats_lock);
 i = 0;
-#define VHOST_CSTAT(NAME) \
+#define SW_CSTAT(NAME) \
 custom_stats->counters[i++].value = dev->NAME;
-VHOST_CSTATS;
-#undef VHOST_CSTAT
+SW_CSTATS;
+#undef SW_CSTAT
 rte_spinlock_unlock(&dev->stats_lock);

 ovs_mutex_unlock(&dev->mutex);
 
+i = 0;
+n = 0;
+#define SW_CSTAT(NAME) \
+if (custom_stats->counters[i].value != UINT64_MAX) {  

Re: [ovs-dev] [PATCH v9 2/2] netdev-dpdk:Detailed packet drop statistics

2019-10-22 Thread Sriram Vatala via dev
Thanks Ilya and Kevin.
I will send the updated patch set.

Thanks & Regards,
Sriram.

-Original Message-
From: Ilya Maximets 
Sent: 22 October 2019 16:07
To: Kevin Traynor ; Sriram Vatala 
; 'Ilya Maximets' ; 
ovs-dev@openvswitch.org
Cc: 'Stokes, Ian' 
Subject: Re: [PATCH v9 2/2] netdev-dpdk:Detailed packet drop statistics

On 22.10.2019 12:31, Kevin Traynor wrote:
> On 22/10/2019 09:25, Sriram Vatala wrote:
>> Hi Ilya & Kevin,
>> Thanks for your suggestions. I am summarizing our discussion, please
>> feel free to correct me, if I am wrong.
>>
>
> Hi Sriram,
>
> Will share my thought, Ilya may have different view.
>
>> 1) All custom stats calculated in OVS will have the prefix , so that
>> these stats will not intersect with the names of other stats (driver/HW 
>> etc).
>
> yes
>
>> 2) The prefix used for these custom stats is "cstat_".
>
> I think with 3) (note to explain the prefix) then 'sw_' or 'ovs_'
> would look neater in the logs but I'm ok with any of them.

OK. Let's go with the 'ovs_' prefix plus a prefix explanation note in this case.
It looks like it will have the lowest chance of overlapping with existing stats.
It is also self-descriptive enough for those who don't read the docs.
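
With the agreed 'ovs_' prefix, a query for one of the counters discussed in
this series would look roughly as follows (illustrative only; the exact set of
counter names is defined by the applied patch, not by this note):

    $ ovs-vsctl get Interface dpdkvhostclient0 statistics:ovs_tx_retries
    $ ovs-vsctl get Interface dpdkvhostclient0 statistics:ovs_tx_failure_drops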

Best regards, Ilya Maximets.
___
dev mailing list
d...@openvswitch.org
https://mail.openvswitch.org/mailman/listinfo/ovs-dev


Re: [ovs-dev] [PATCH v9 2/2] netdev-dpdk:Detailed packet drop statistics

2019-10-22 Thread Sriram Vatala via dev
Hi Ilya & Kevin,
Thanks for your suggestions. I am summarizing our discussion below; please feel
free to correct me if I am wrong.

1) All custom stats calculated in OVS will have a prefix, so that these
stats will not intersect with the names of other stats (driver/HW etc.).
2) The prefix used for these custom stats is "cstat_".
3) Instead of documenting all the stats, I will add a generic note about the
prefix used for custom stats in "Documentation/topics/dpdk/vhost-user.rst",
where "tx_retries" is documented.

Thanks & Regards,
Sriram.

-Original Message-
From: Kevin Traynor 
Sent: 21 October 2019 20:22
To: Ilya Maximets ; Sriram Vatala 
; ovs-dev@openvswitch.org
Cc: Stokes, Ian 
Subject: Re: [PATCH v9 2/2] netdev-dpdk:Detailed packet drop statistics

On 18/10/2019 13:59, Ilya Maximets wrote:
> On 16.10.2019 19:48, Kevin Traynor wrote:
>> On 16/10/2019 13:16, Ilya Maximets wrote:
>>> Hi Kevin,
>>>
>>> Thanks for review. Some comments inline.
>>>
>>> On 16.10.2019 12:15, Kevin Traynor wrote:
>>>> Hi Sriram,
>>>>
>>>> Thanks for working on making more fine grained stats for debugging.
>>>> As mentioned yesterday, this patch needs rebase so I just reviewed
>>>> docs and netdev-dpdk.c which applied. Comments below.
>>>>
>>>> On 21/09/2019 03:40, Sriram Vatala wrote:
>>>>> OVS may be unable to transmit packets for multiple reasons and
>>>>> today there is a single counter to track packets dropped due to
>>>>> any of those reasons. The most common reason is that a VM is
>>>>> unable to read packets fast enough causing the vhostuser port
>>>>> transmit queue on the OVS side to become full. This manifests as a
>>>>> problem with VNFs not receiving all packets. Having a separate
>>>>> drop counter to track packets dropped because the transmit queue
>>>>> is full will clearly indicate that the problem is on the VM side
>>>>> and not in OVS. Similarly maintaining separate
>>>>
>>>> It reads like a bit of a contradiction. On one hand the "The *most
>>>> common* reason is that a VM is unable to read packets fast enough".
>>>> While having a stat "*will clearly indicate* that the problem is on
>>>> the VM side".
>>>>
>>>>> counters for all possible drops helps in indicating sensible cause
>>>>> for packet drops.
>>>>>
>>>>> This patch adds custom software stats counters to track packets
>>>>> dropped at port level and these counters are displayed along with
>>>>> other stats in "ovs-vsctl get interface  statistics"
>>>>> command. The detailed stats will be available for both dpdk and
>>>>> vhostuser ports.
>>>>>
>>>>
>>>> I think the commit msg could be a bit clearer, suggest something
>>>> like below - feel free to modify (or reject):
>>>>
>>>> OVS may be unable to transmit packets for multiple reasons on the
>>>> DPDK datapath and today there is a single counter to track packets
>>>> dropped due to any of those reasons.
>>>>
>>>> This patch adds custom software stats for the different reasons
>>>> packets may be dropped during tx on the DPDK datapath in OVS.
>>>>
>>>> - MTU drops : drops that occur due to a too large packet size
>>>> - Qos drops: drops that occur due to egress QOS
>>>> - Tx failures: drops as returned by the DPDK PMD send function
>>>>
>>>> Note that the reason for tx failures is not specificied in OVS. In
>>>> practice for vhost ports it is most common that tx failures are
>>>> because there are not enough available descriptors, which is
>>>> usually caused by misconfiguration of the guest queues and/or
>>>> because the guest is not consuming packets fast enough from the queues.
>>>>
>>>> These counters are displayed along with other stats in "ovs-vsctl
>>>> get interface  statistics" command and are available for
>>>> dpdk and vhostuser/vhostuserclient ports.
>>>>
>>>> Signed-off-by: Sriram Vatala 
>>>>
>>>> ---
>>>>
>>>> v9:...
>>>> v8:...
>>>>
>>>>
>>>> Note that signed-off, --- and version info should be like this ^^^.
>>>> otherwise the review version comments will be in the git commit log
>>>> when it is applied.

Re: [ovs-dev] [PATCH v9 2/2] netdev-dpdk:Detailed packet drop statistics

2019-10-16 Thread Sriram Vatala via dev
Hi Kevin & Ilya,
Thanks Kevin for reviewing the patch. Response inline.
Thanks Ilya for your response.

-Original Message-
From: Ilya Maximets 
Sent: 16 October 2019 17:46
To: Kevin Traynor ; Sriram Vatala 
; ovs-dev@openvswitch.org; Ilya Maximets 

Cc: Stokes, Ian 
Subject: Re: [PATCH v9 2/2] netdev-dpdk:Detailed packet drop statistics

Hi Kevin,

Thanks for review. Some comments inline.

On 16.10.2019 12:15, Kevin Traynor wrote:
> Hi Sriram,
>
> Thanks for working on making more fine grained stats for debugging. As
> mentioned yesterday, this patch needs rebase so I just reviewed docs
> and netdev-dpdk.c which applied. Comments below.
>
> On 21/09/2019 03:40, Sriram Vatala wrote:
>> OVS may be unable to transmit packets for multiple reasons and today
>> there is a single counter to track packets dropped due to any of
>> those reasons. The most common reason is that a VM is unable to read
>> packets fast enough causing the vhostuser port transmit queue on the
>> OVS side to become full. This manifests as a problem with VNFs not
>> receiving all packets. Having a separate drop counter to track
>> packets dropped because the transmit queue is full will clearly
>> indicate that the problem is on the VM side and not in OVS. Similarly
>> maintaining separate
>

> It reads like a bit of a contradiction. On one hand the "The *most
> common* reason is that a VM is unable to read packets fast enough".
> While having a stat "*will clearly indicate* that the problem is on
> the VM side".
>

I mentioned one use case to emphasize the need for separate drop stats
besides the combined drop counter. I will rephrase the commit message.

>> counters for all possible drops helps in indicating sensible cause
>> for packet drops.
>>
>> This patch adds custom software stats counters to track packets
>> dropped at port level and these counters are displayed along with
>> other stats in "ovs-vsctl get interface  statistics"
>> command. The detailed stats will be available for both dpdk and
>> vhostuser ports.
>>
>
> I think the commit msg could be a bit clearer, suggest something like
> below - feel free to modify (or reject):
>
> OVS may be unable to transmit packets for multiple reasons on the DPDK
> datapath and today there is a single counter to track packets dropped
> due to any of those reasons.
>
> This patch adds custom software stats for the different reasons
> packets may be dropped during tx on the DPDK datapath in OVS.
>
> - MTU drops : drops that occur due to a too large packet size
> - Qos drops: drops that occur due to egress QOS
> - Tx failures: drops as returned by the DPDK PMD send function
>
> Note that the reason for tx failures is not specificied in OVS. In
> practice for vhost ports it is most common that tx failures are
> because there are not enough available descriptors, which is usually
> caused by misconfiguration of the guest queues and/or because the
> guest is not consuming packets fast enough from the queues.
>
> These counters are displayed along with other stats in "ovs-vsctl get
> interface  statistics" command and are available for dpdk and
> vhostuser/vhostuserclient ports.
>
Looks good to me. Thanks for the suggestion.
> Signed-off-by: Sriram Vatala 
>
> ---
>
> v9:...
> v8:...
>
>
> Note that signed-off, --- and version info should be like this ^^^.
> otherwise the review version comments will be in the git commit log
> when it is applied.
>
   I will take care of this in my next patch.
>> --
>> Changes since v8:
>> Addressed comments given by Ilya.
>>
>> Signed-off-by: Sriram Vatala 
>> ---
>>   Documentation/topics/dpdk/vhost-user.rst  | 13 ++-
>>   lib/netdev-dpdk.c | 81 +++
>>   utilities/bugtool/automake.mk |  3 +-
>>   utilities/bugtool/ovs-bugtool-get-port-stats  | 25 ++
>>   .../plugins/network-status/openvswitch.xml|  1 +
>>   5 files changed, 105 insertions(+), 18 deletions(-)
>>   create mode 100755 utilities/bugtool/ovs-bugtool-get-port-stats
>>
>> diff --git a/Documentation/topics/dpdk/vhost-user.rst
>> b/Documentation/topics/dpdk/vhost-user.rst
>> index 724aa62f6..89388a2bf 100644
>> --- a/Documentation/topics/dpdk/vhost-user.rst
>> +++ b/Documentation/topics/dpdk/vhost-user.rst
>> @@ -551,7 +551,18 @@ processing packets at the required rate.
>>   The amount of Tx retries on a vhost-user or vhost-user-client interface 
>> can be
>>   shown with::
>>
>> -  $ ovs-vsctl get Interface dpdkvhostclient0 statistics:tx_retries
>> +  $ ovs-vsctl get Int

Re: [ovs-dev] [PATCH v9 1/2] netdev-dpdk: Reuse vhost function for dpdk ETH custom stats.

2019-10-15 Thread Sriram Vatala via dev
-Original Message-
From: Kevin Traynor 
Sent: 15 October 2019 19:21
To: Ilya Maximets ; Sriram Vatala 
; ovs-dev@openvswitch.org
Subject: Re: [PATCH v9 1/2] netdev-dpdk: Reuse vhost function for dpdk ETH 
custom stats.

On 15/10/2019 14:43, Ilya Maximets wrote:
> On 15.10.2019 15:35, Kevin Traynor wrote:
>> With corrected email for Ilya.
>>
>> On 15/10/2019 14:33, Kevin Traynor wrote:
>>> On 21/09/2019 03:40, Sriram Vatala wrote:
>>>> From: Ilya Maximets 
>>>>
>>>> This is yet another refactoring for upcoming detailed drop stats.
>>>> It allowes to use single function for all the software calculated
>>>> statistics in netdev-dpdk for both vhost and ETH ports.
>>>>
>>>> UINT64_MAX used as a marker for non-supported statistics in a same
>>>> way as it's done in bridge.c for common netdev stats.
>>>>
>>>
>>> Hi Ilya, one comment below,
>>>
>>> thanks,
>>> Kevin.
>>>
>>>> Cc: Sriram Vatala 
>>>> Signed-off-by: Ilya Maximets 
>>>> Signed-off-by: Sriram Vatala 
>>>> ---
>>>>   lib/netdev-dpdk.c | 67 +++
>>>>   1 file changed, 39 insertions(+), 28 deletions(-)
>>>>
>>>> diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c index
>>>> bc20d6843..652b57e3b 100644
>>>> --- a/lib/netdev-dpdk.c
>>>> +++ b/lib/netdev-dpdk.c
>>>> @@ -471,6 +471,8 @@ struct netdev_rxq_dpdk {
>>>>   static void netdev_dpdk_destruct(struct netdev *netdev);
>>>>   static void netdev_dpdk_vhost_destruct(struct netdev *netdev);
>>>>
>>>> +static int netdev_dpdk_get_sw_custom_stats(const struct netdev *,
>>>> +   struct
>>>> +netdev_custom_stats *);
>>>>   static void netdev_dpdk_clear_xstats(struct netdev_dpdk *dev);
>>>>
>>>>   int netdev_dpdk_get_vid(const struct netdev_dpdk *dev); @@
>>>> -1171,7 +1173,7 @@ common_construct(struct netdev *netdev, dpdk_port_t 
>>>> port_no,
>>>>   dev->rte_xstats_ids = NULL;
>>>>   dev->rte_xstats_ids_size = 0;
>>>>
>>>> -dev->tx_retries = 0;
>>>> +dev->tx_retries = (dev->type == DPDK_DEV_VHOST) ? 0 :
>>>> + UINT64_MAX;
>>>>
>>>>   return 0;
>>>>   }
>>>> @@ -2771,7 +2773,9 @@ netdev_dpdk_get_custom_stats(const struct
>>>> netdev *netdev,
>>>>
>>>>   uint32_t i;
>>>>   struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
>>>> -int rte_xstats_ret;
>>>> +int rte_xstats_ret, sw_stats_size;
>>>> +
>>>> +netdev_dpdk_get_sw_custom_stats(netdev, custom_stats);
>>>>
>>>>   ovs_mutex_lock(&dev->mutex);
>>>>
>>>> @@ -2786,12 +2790,13 @@ netdev_dpdk_get_custom_stats(const struct netdev 
>>>> *netdev,
>>>>   if (rte_xstats_ret > 0 &&
>>>>   rte_xstats_ret <= dev->rte_xstats_ids_size) {
>>>>
>>>> -custom_stats->size = rte_xstats_ret;
>>>> -custom_stats->counters =
>>>> -(struct netdev_custom_counter *) 
>>>> xcalloc(rte_xstats_ret,
>>>> -sizeof(struct netdev_custom_counter));
>>>> +sw_stats_size = custom_stats->size;
>>>> +custom_stats->size += rte_xstats_ret;
>>>> +custom_stats->counters = xrealloc(custom_stats->counters,
>>>> +  custom_stats->size *
>>>> +  sizeof
>>>> + *custom_stats->counters);
>>>>
>>>> -for (i = 0; i < rte_xstats_ret; i++) {
>>>> +for (i = sw_stats_size; i < sw_stats_size +
>>>> + rte_xstats_ret; i++) {
>>>>   ovs_strlcpy(custom_stats->counters[i].name,
>>>>   netdev_dpdk_get_xstat_name(dev,
>>>>
>>>> dev->rte_xstats_ids[i]),
>>>
>>> I think you need to add another array index counter for
>>> ret_xstats_ids[] and values[] as they are still using i, but i is
>>> now starting with sw_stats_size and not 0 anymore.
>
> Good catch.
> I didn't actually test this code hoping that it'll be tested 

Re: [ovs-dev] [PATCH v9 2/2] netdev-dpdk:Detailed packet drop statistics

2019-10-14 Thread Sriram Vatala via dev
Hi,
Please consider this a gentle reminder and kindly review the patch set.

Thanks & Regards,
Sriram.

-Original Message-
From: Sriram Vatala  
Sent: 11 October 2019 16:58
To: 'ovs-dev@openvswitch.org' ;
'i.maxim...@ovn.org' 
Subject: RE: [PATCH v9 2/2] netdev-dpdk:Detailed packet drop statistics

Hi,
Please consider this a gentle reminder.

Thanks & Regards,
Sriram.

-Original Message-
From: Sriram Vatala  
Sent: 09 October 2019 11:53
To: 'ovs-dev@openvswitch.org' 
Cc: 'ktray...@redhat.com' ; 'Ilya Maximets'
; 'i.maxim...@ovn.org' 
Subject: RE: [PATCH v9 2/2] netdev-dpdk:Detailed packet drop statistics

Hi All,
Please consider this a gentle reminder.

Thanks & Regards,
Sriram.

-Original Message-
From: Sriram Vatala  
Sent: 04 October 2019 13:37
To: 'ovs-dev@openvswitch.org' 
Cc: 'ktray...@redhat.com' ; 'Ilya Maximets'

Subject: RE: [PATCH v9 2/2] netdev-dpdk:Detailed packet drop statistics

Hi,
Please consider this a gentle reminder.

Thanks & Regards,
Sriram.

-Original Message-----
From: Sriram Vatala  
Sent: 25 September 2019 09:58
To: 'ovs-dev@openvswitch.org' 
Cc: 'ktray...@redhat.com' ; 'Ilya Maximets'

Subject: RE: [PATCH v9 2/2] netdev-dpdk:Detailed packet drop statistics

Hi,
Addressed comments given in patch v8. Kindly review patch v9.
 
Changes since patch v8 :
Rebased my changes on top of Ilya's patch " Reuse vhost function for dpdk
ETH custom stats". Patch set 1/2.
Addressed comments given in patch v8.

Thanks & Regards,
Sriram.

-----Original Message-
From: Sriram Vatala  
Sent: 21 September 2019 08:10
To: ovs-dev@openvswitch.org; i.maxim...@samsung.com
Cc: ktray...@redhat.com; Sriram Vatala 
Subject: [PATCH v9 2/2] netdev-dpdk:Detailed packet drop statistics

OVS may be unable to transmit packets for multiple reasons and today there
is a single counter to track packets dropped due to any of those reasons.
The most common reason is that a VM is unable to read packets fast enough
causing the vhostuser port transmit queue on the OVS side to become full.
This manifests as a problem with VNFs not receiving all packets. Having a
separate drop counter to track packets dropped because the transmit queue is
full will clearly indicate that the problem is on the VM side and not in
OVS. Similarly maintaining separate counters for all possible drops helps in
indicating sensible cause for packet drops.

This patch adds custom software stats counters to track packets dropped at
port level and these counters are displayed along with other stats in
"ovs-vsctl get interface  statistics"
command. The detailed stats will be available for both dpdk and vhostuser
ports.

--
Changes since v8:
Addressed comments given by Ilya.

Signed-off-by: Sriram Vatala 
---
 Documentation/topics/dpdk/vhost-user.rst  | 13 ++-
 lib/netdev-dpdk.c | 81 +++
 utilities/bugtool/automake.mk |  3 +-
 utilities/bugtool/ovs-bugtool-get-port-stats  | 25 ++
 .../plugins/network-status/openvswitch.xml|  1 +
 5 files changed, 105 insertions(+), 18 deletions(-)  create mode 100755
utilities/bugtool/ovs-bugtool-get-port-stats

diff --git a/Documentation/topics/dpdk/vhost-user.rst
b/Documentation/topics/dpdk/vhost-user.rst
index 724aa62f6..89388a2bf 100644
--- a/Documentation/topics/dpdk/vhost-user.rst
+++ b/Documentation/topics/dpdk/vhost-user.rst
@@ -551,7 +551,18 @@ processing packets at the required rate.
 The amount of Tx retries on a vhost-user or vhost-user-client interface can
be  shown with::
 
-  $ ovs-vsctl get Interface dpdkvhostclient0 statistics:tx_retries
+  $ ovs-vsctl get Interface dpdkvhostclient0 
+ statistics:netdev_dpdk_tx_retries
+
+When the guest is not able to consume the packets fast enough, the 
+transmit queue of the port gets filled up i.e queue runs out of free
descriptors.
+This is the most likely reason why dpdk transmit API will fail to send 
+packets besides other reasons.
+
+The amount of tx failure drops on a dpdk vhost/physical interface can 
+be shown with::
+
+  $ ovs-vsctl get Interface dpdkvhostclient0 \
+statistics:netdev_dpdk_tx_failure_drops
 
 vhost-user Dequeue Zero Copy (experimental)
 ---
diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c index
652b57e3b..fd8f9102e 100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -174,6 +174,20 @@ static const struct vhost_device_ops
virtio_net_device_ops =
 .destroy_connection = destroy_connection,  };
 
+/* Custom software stats for dpdk ports */ struct netdev_dpdk_sw_stats 
+{
+/* No. of retries when unable to transmit. */
+uint64_t tx_retries;
+/* Packet drops when unable to transmit; Probably Tx queue is full. */
+uint64_t tx_failure_drops;
+/* Packet length greater than device MTU. */
+uint64_t tx_mtu_exceeded_drops;
+/* Packet drops in egress policer processing. */
+uint64_t tx_qos_drops;

Re: [ovs-dev] [PATCH v9 2/2] netdev-dpdk:Detailed packet drop statistics

2019-10-11 Thread Sriram Vatala via dev
Hi,
Please consider this a gentle reminder.

Thanks & Regards,
Sriram.

-Original Message-
From: Sriram Vatala  
Sent: 09 October 2019 11:53
To: 'ovs-dev@openvswitch.org' 
Cc: 'ktray...@redhat.com' ; 'Ilya Maximets'
; 'i.maxim...@ovn.org' 
Subject: RE: [PATCH v9 2/2] netdev-dpdk:Detailed packet drop statistics

Hi All,
Please consider this a gentle reminder.

Thanks & Regards,
Sriram.

-Original Message-
From: Sriram Vatala  
Sent: 04 October 2019 13:37
To: 'ovs-dev@openvswitch.org' 
Cc: 'ktray...@redhat.com' ; 'Ilya Maximets'

Subject: RE: [PATCH v9 2/2] netdev-dpdk:Detailed packet drop statistics

Hi,
Please consider this a gentle reminder.

Thanks & Regards,
Sriram.

-Original Message-
From: Sriram Vatala  
Sent: 25 September 2019 09:58
To: 'ovs-dev@openvswitch.org' 
Cc: 'ktray...@redhat.com' ; 'Ilya Maximets'

Subject: RE: [PATCH v9 2/2] netdev-dpdk:Detailed packet drop statistics

Hi,
Addressed comments given in patch v8. Kindly review patch v9.
 
Changes since patch v8 :
Rebased my changes on top of Ilya's patch " Reuse vhost function for dpdk
ETH custom stats". Patch set 1/2.
Addressed comments given in patch v8.

Thanks & Regards,
Sriram.

-Original Message-
From: Sriram Vatala  
Sent: 21 September 2019 08:10
To: ovs-dev@openvswitch.org; i.maxim...@samsung.com
Cc: ktray...@redhat.com; Sriram Vatala 
Subject: [PATCH v9 2/2] netdev-dpdk:Detailed packet drop statistics

OVS may be unable to transmit packets for multiple reasons and today there
is a single counter to track packets dropped due to any of those reasons.
The most common reason is that a VM is unable to read packets fast enough
causing the vhostuser port transmit queue on the OVS side to become full.
This manifests as a problem with VNFs not receiving all packets. Having a
separate drop counter to track packets dropped because the transmit queue is
full will clearly indicate that the problem is on the VM side and not in
OVS. Similarly maintaining separate counters for all possible drops helps in
indicating sensible cause for packet drops.

This patch adds custom software stats counters to track packets dropped at
port level and these counters are displayed along with other stats in
"ovs-vsctl get interface  statistics"
command. The detailed stats will be available for both dpdk and vhostuser
ports.

--
Changes since v8:
Addressed comments given by Ilya.

Signed-off-by: Sriram Vatala 
---
 Documentation/topics/dpdk/vhost-user.rst  | 13 ++-
 lib/netdev-dpdk.c | 81 +++
 utilities/bugtool/automake.mk |  3 +-
 utilities/bugtool/ovs-bugtool-get-port-stats  | 25 ++
 .../plugins/network-status/openvswitch.xml|  1 +
 5 files changed, 105 insertions(+), 18 deletions(-)  create mode 100755
utilities/bugtool/ovs-bugtool-get-port-stats

diff --git a/Documentation/topics/dpdk/vhost-user.rst
b/Documentation/topics/dpdk/vhost-user.rst
index 724aa62f6..89388a2bf 100644
--- a/Documentation/topics/dpdk/vhost-user.rst
+++ b/Documentation/topics/dpdk/vhost-user.rst
@@ -551,7 +551,18 @@ processing packets at the required rate.
 The amount of Tx retries on a vhost-user or vhost-user-client interface can
be  shown with::
 
-  $ ovs-vsctl get Interface dpdkvhostclient0 statistics:tx_retries
+  $ ovs-vsctl get Interface dpdkvhostclient0 
+ statistics:netdev_dpdk_tx_retries
+
+When the guest is not able to consume the packets fast enough, the 
+transmit queue of the port gets filled up i.e queue runs out of free
descriptors.
+This is the most likely reason why dpdk transmit API will fail to send 
+packets besides other reasons.
+
+The amount of tx failure drops on a dpdk vhost/physical interface can 
+be shown with::
+
+  $ ovs-vsctl get Interface dpdkvhostclient0 \
+statistics:netdev_dpdk_tx_failure_drops
 
 vhost-user Dequeue Zero Copy (experimental)
 ---
diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c index
652b57e3b..fd8f9102e 100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -174,6 +174,20 @@ static const struct vhost_device_ops
virtio_net_device_ops =
 .destroy_connection = destroy_connection,  };
 
+/* Custom software stats for dpdk ports */ struct netdev_dpdk_sw_stats 
+{
+/* No. of retries when unable to transmit. */
+uint64_t tx_retries;
+/* Packet drops when unable to transmit; Probably Tx queue is full. */
+uint64_t tx_failure_drops;
+/* Packet length greater than device MTU. */
+uint64_t tx_mtu_exceeded_drops;
+/* Packet drops in egress policer processing. */
+uint64_t tx_qos_drops;
+/* Packet drops in ingress policer processing. */
+uint64_t rx_qos_drops;
+};
+
 enum { DPDK_RING_SIZE = 256 };
 BUILD_ASSERT_DECL(IS_POW2(DPDK_RING_SIZE));
 enum { DRAIN_TSC = 20ULL };
@@ -413,11 +427,10 @@ struct netdev_dpdk {
 
 PADDED_MEMBERS(CACHE_LINE_SIZE,
 struct netdev_

Re: [ovs-dev] [PATCH v9 2/2] netdev-dpdk:Detailed packet drop statistics

2019-10-09 Thread Sriram Vatala via dev
Hi All,
Please consider this a gentle reminder.

Thanks & Regards,
Sriram.

-Original Message-
From: Sriram Vatala  
Sent: 04 October 2019 13:37
To: 'ovs-dev@openvswitch.org' 
Cc: 'ktray...@redhat.com' ; 'Ilya Maximets'

Subject: RE: [PATCH v9 2/2] netdev-dpdk:Detailed packet drop statistics

Hi,
Please consider this a gentle reminder.

Thanks & Regards,
Sriram.

-Original Message-
From: Sriram Vatala  
Sent: 25 September 2019 09:58
To: 'ovs-dev@openvswitch.org' 
Cc: 'ktray...@redhat.com' ; 'Ilya Maximets'

Subject: RE: [PATCH v9 2/2] netdev-dpdk:Detailed packet drop statistics

Hi,
Addressed comments given in patch v8. Kindly review patch v9.
 
Changes since patch v8 :
Rebased my changes on top of Ilya's patch " Reuse vhost function for dpdk
ETH custom stats". Patch set 1/2.
Addressed comments given in patch v8.

Thanks & Regards,
Sriram.

-Original Message-----
From: Sriram Vatala  
Sent: 21 September 2019 08:10
To: ovs-dev@openvswitch.org; i.maxim...@samsung.com
Cc: ktray...@redhat.com; Sriram Vatala 
Subject: [PATCH v9 2/2] netdev-dpdk:Detailed packet drop statistics

OVS may be unable to transmit packets for multiple reasons and today there
is a single counter to track packets dropped due to any of those reasons.
The most common reason is that a VM is unable to read packets fast enough
causing the vhostuser port transmit queue on the OVS side to become full.
This manifests as a problem with VNFs not receiving all packets. Having a
separate drop counter to track packets dropped because the transmit queue is
full will clearly indicate that the problem is on the VM side and not in
OVS. Similarly maintaining separate counters for all possible drops helps in
indicating sensible cause for packet drops.

This patch adds custom software stats counters to track packets dropped at
port level and these counters are displayed along with other stats in
"ovs-vsctl get interface  statistics"
command. The detailed stats will be available for both dpdk and vhostuser
ports.

--
Changes since v8:
Addressed comments given by Ilya.

Signed-off-by: Sriram Vatala 
---
 Documentation/topics/dpdk/vhost-user.rst  | 13 ++-
 lib/netdev-dpdk.c | 81 +++
 utilities/bugtool/automake.mk |  3 +-
 utilities/bugtool/ovs-bugtool-get-port-stats  | 25 ++
 .../plugins/network-status/openvswitch.xml|  1 +
 5 files changed, 105 insertions(+), 18 deletions(-)  create mode 100755
utilities/bugtool/ovs-bugtool-get-port-stats

diff --git a/Documentation/topics/dpdk/vhost-user.rst
b/Documentation/topics/dpdk/vhost-user.rst
index 724aa62f6..89388a2bf 100644
--- a/Documentation/topics/dpdk/vhost-user.rst
+++ b/Documentation/topics/dpdk/vhost-user.rst
@@ -551,7 +551,18 @@ processing packets at the required rate.
 The amount of Tx retries on a vhost-user or vhost-user-client interface can
be  shown with::
 
-  $ ovs-vsctl get Interface dpdkvhostclient0 statistics:tx_retries
+  $ ovs-vsctl get Interface dpdkvhostclient0 
+ statistics:netdev_dpdk_tx_retries
+
+When the guest is not able to consume the packets fast enough, the 
+transmit queue of the port gets filled up i.e queue runs out of free
descriptors.
+This is the most likely reason why dpdk transmit API will fail to send 
+packets besides other reasons.
+
+The amount of tx failure drops on a dpdk vhost/physical interface can 
+be shown with::
+
+  $ ovs-vsctl get Interface dpdkvhostclient0 \
+statistics:netdev_dpdk_tx_failure_drops
 
 vhost-user Dequeue Zero Copy (experimental)
 ---
diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c index
652b57e3b..fd8f9102e 100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -174,6 +174,20 @@ static const struct vhost_device_ops
virtio_net_device_ops =
 .destroy_connection = destroy_connection,  };
 
+/* Custom software stats for dpdk ports */ struct netdev_dpdk_sw_stats 
+{
+/* No. of retries when unable to transmit. */
+uint64_t tx_retries;
+/* Packet drops when unable to transmit; Probably Tx queue is full. */
+uint64_t tx_failure_drops;
+/* Packet length greater than device MTU. */
+uint64_t tx_mtu_exceeded_drops;
+/* Packet drops in egress policer processing. */
+uint64_t tx_qos_drops;
+/* Packet drops in ingress policer processing. */
+uint64_t rx_qos_drops;
+};
+
 enum { DPDK_RING_SIZE = 256 };
 BUILD_ASSERT_DECL(IS_POW2(DPDK_RING_SIZE));
 enum { DRAIN_TSC = 20ULL };
@@ -413,11 +427,10 @@ struct netdev_dpdk {
 
 PADDED_MEMBERS(CACHE_LINE_SIZE,
 struct netdev_stats stats;
-/* Custom stat for retries when unable to transmit. */
-uint64_t tx_retries;
+struct netdev_dpdk_sw_stats *sw_stats;
 /* Protects stats */
 rte_spinlock_t stats_lock;
-/* 4 pad bytes here. */
+/* 36 pad bytes here. */
 );
 
 PADDED_MEMBERS(CAC

Re: [ovs-dev] [PATCH v9 2/2] netdev-dpdk:Detailed packet drop statistics

2019-10-04 Thread Sriram Vatala via dev
Hi,
Please consider this a gentle reminder.

Thanks & Regards,
Sriram.

-Original Message-
From: Sriram Vatala  
Sent: 25 September 2019 09:58
To: 'ovs-dev@openvswitch.org' 
Cc: 'ktray...@redhat.com' ; 'Ilya Maximets'

Subject: RE: [PATCH v9 2/2] netdev-dpdk:Detailed packet drop statistics

Hi,
Addressed comments given in patch v8. Kindly review patch v9.
 
Changes since patch v8 :
Rebased my changes on top of Ilya's patch " Reuse vhost function for dpdk
ETH custom stats". Patch set 1/2.
Addressed comments given in patch v8.

Thanks & Regards,
Sriram.

-Original Message-
From: Sriram Vatala  
Sent: 21 September 2019 08:10
To: ovs-dev@openvswitch.org; i.maxim...@samsung.com
Cc: ktray...@redhat.com; Sriram Vatala 
Subject: [PATCH v9 2/2] netdev-dpdk:Detailed packet drop statistics

OVS may be unable to transmit packets for multiple reasons and today there
is a single counter to track packets dropped due to any of those reasons.
The most common reason is that a VM is unable to read packets fast enough
causing the vhostuser port transmit queue on the OVS side to become full.
This manifests as a problem with VNFs not receiving all packets. Having a
separate drop counter to track packets dropped because the transmit queue is
full will clearly indicate that the problem is on the VM side and not in
OVS. Similarly maintaining separate counters for all possible drops helps in
indicating sensible cause for packet drops.

This patch adds custom software stats counters to track packets dropped at
port level and these counters are displayed along with other stats in
"ovs-vsctl get interface  statistics"
command. The detailed stats will be available for both dpdk and vhostuser
ports.

--
Changes since v8:
Addressed comments given by Ilya.

Signed-off-by: Sriram Vatala 
---
 Documentation/topics/dpdk/vhost-user.rst  | 13 ++-
 lib/netdev-dpdk.c | 81 +++
 utilities/bugtool/automake.mk |  3 +-
 utilities/bugtool/ovs-bugtool-get-port-stats  | 25 ++
 .../plugins/network-status/openvswitch.xml|  1 +
 5 files changed, 105 insertions(+), 18 deletions(-)  create mode 100755
utilities/bugtool/ovs-bugtool-get-port-stats

diff --git a/Documentation/topics/dpdk/vhost-user.rst
b/Documentation/topics/dpdk/vhost-user.rst
index 724aa62f6..89388a2bf 100644
--- a/Documentation/topics/dpdk/vhost-user.rst
+++ b/Documentation/topics/dpdk/vhost-user.rst
@@ -551,7 +551,18 @@ processing packets at the required rate.
 The amount of Tx retries on a vhost-user or vhost-user-client interface can
be  shown with::
 
-  $ ovs-vsctl get Interface dpdkvhostclient0 statistics:tx_retries
+  $ ovs-vsctl get Interface dpdkvhostclient0 
+ statistics:netdev_dpdk_tx_retries
+
+When the guest is not able to consume the packets fast enough, the 
+transmit queue of the port gets filled up i.e queue runs out of free
descriptors.
+This is the most likely reason why dpdk transmit API will fail to send 
+packets besides other reasons.
+
+The amount of tx failure drops on a dpdk vhost/physical interface can 
+be shown with::
+
+  $ ovs-vsctl get Interface dpdkvhostclient0 \
+statistics:netdev_dpdk_tx_failure_drops
 
 vhost-user Dequeue Zero Copy (experimental)
 ---
diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c index
652b57e3b..fd8f9102e 100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -174,6 +174,20 @@ static const struct vhost_device_ops
virtio_net_device_ops =
 .destroy_connection = destroy_connection,  };
 
+/* Custom software stats for dpdk ports */ struct netdev_dpdk_sw_stats 
+{
+/* No. of retries when unable to transmit. */
+uint64_t tx_retries;
+/* Packet drops when unable to transmit; Probably Tx queue is full. */
+uint64_t tx_failure_drops;
+/* Packet length greater than device MTU. */
+uint64_t tx_mtu_exceeded_drops;
+/* Packet drops in egress policer processing. */
+uint64_t tx_qos_drops;
+/* Packet drops in ingress policer processing. */
+uint64_t rx_qos_drops;
+};
+
 enum { DPDK_RING_SIZE = 256 };
 BUILD_ASSERT_DECL(IS_POW2(DPDK_RING_SIZE));
 enum { DRAIN_TSC = 20ULL };
@@ -413,11 +427,10 @@ struct netdev_dpdk {
 
 PADDED_MEMBERS(CACHE_LINE_SIZE,
 struct netdev_stats stats;
-/* Custom stat for retries when unable to transmit. */
-uint64_t tx_retries;
+struct netdev_dpdk_sw_stats *sw_stats;
 /* Protects stats */
 rte_spinlock_t stats_lock;
-/* 4 pad bytes here. */
+/* 36 pad bytes here. */
 );
 
 PADDED_MEMBERS(CACHE_LINE_SIZE,
@@ -1173,7 +1186,9 @@ common_construct(struct netdev *netdev, dpdk_port_t
port_no,
 dev->rte_xstats_ids = NULL;
 dev->rte_xstats_ids_size = 0;
 
-dev->tx_retries = (dev->type == DPDK_DEV_VHOST) ? 0 : UINT64_MAX;
+dev->sw_stats = (struct netdev_dpdk_sw_stats *)
+ 

Re: [ovs-dev] [PATCH v9 2/2] netdev-dpdk:Detailed packet drop statistics

2019-09-24 Thread Sriram Vatala via dev
Hi,
Addressed comments given in patch v8. Kindly review patch v9.
 
Changes since patch v8 :
Rebased my changes on top of Ilya's patch " Reuse vhost function for dpdk
ETH custom stats". Patch set 1/2.
Addressed comments given in patch v8.

Thanks & Regards,
Sriram.

-Original Message-
From: Sriram Vatala  
Sent: 21 September 2019 08:10
To: ovs-dev@openvswitch.org; i.maxim...@samsung.com
Cc: ktray...@redhat.com; Sriram Vatala 
Subject: [PATCH v9 2/2] netdev-dpdk:Detailed packet drop statistics

OVS may be unable to transmit packets for multiple reasons and today there
is a single counter to track packets dropped due to any of those reasons.
The most common reason is that a VM is unable to read packets fast enough
causing the vhostuser port transmit queue on the OVS side to become full.
This manifests as a problem with VNFs not receiving all packets. Having a
separate drop counter to track packets dropped because the transmit queue is
full will clearly indicate that the problem is on the VM side and not in
OVS. Similarly maintaining separate counters for all possible drops helps in
indicating sensible cause for packet drops.

This patch adds custom software stats counters to track packets dropped at
port level and these counters are displayed along with other stats in
"ovs-vsctl get interface  statistics"
command. The detailed stats will be available for both dpdk and vhostuser
ports.

--
Changes since v8:
Addressed comments given by Ilya.

Signed-off-by: Sriram Vatala 
---
 Documentation/topics/dpdk/vhost-user.rst  | 13 ++-
 lib/netdev-dpdk.c | 81 +++
 utilities/bugtool/automake.mk |  3 +-
 utilities/bugtool/ovs-bugtool-get-port-stats  | 25 ++
 .../plugins/network-status/openvswitch.xml|  1 +
 5 files changed, 105 insertions(+), 18 deletions(-)  create mode 100755
utilities/bugtool/ovs-bugtool-get-port-stats

diff --git a/Documentation/topics/dpdk/vhost-user.rst
b/Documentation/topics/dpdk/vhost-user.rst
index 724aa62f6..89388a2bf 100644
--- a/Documentation/topics/dpdk/vhost-user.rst
+++ b/Documentation/topics/dpdk/vhost-user.rst
@@ -551,7 +551,18 @@ processing packets at the required rate.
 The amount of Tx retries on a vhost-user or vhost-user-client interface can
be  shown with::
 
-  $ ovs-vsctl get Interface dpdkvhostclient0 statistics:tx_retries
+  $ ovs-vsctl get Interface dpdkvhostclient0 
+ statistics:netdev_dpdk_tx_retries
+
+When the guest is not able to consume the packets fast enough, the 
+transmit queue of the port gets filled up i.e queue runs out of free
descriptors.
+This is the most likely reason why dpdk transmit API will fail to send 
+packets besides other reasons.
+
+The amount of tx failure drops on a dpdk vhost/physical interface can 
+be shown with::
+
+  $ ovs-vsctl get Interface dpdkvhostclient0 \
+statistics:netdev_dpdk_tx_failure_drops
 
 vhost-user Dequeue Zero Copy (experimental)
 ---
diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c index
652b57e3b..fd8f9102e 100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -174,6 +174,20 @@ static const struct vhost_device_ops
virtio_net_device_ops =
 .destroy_connection = destroy_connection,  };
 
+/* Custom software stats for dpdk ports */ struct netdev_dpdk_sw_stats 
+{
+/* No. of retries when unable to transmit. */
+uint64_t tx_retries;
+/* Packet drops when unable to transmit; Probably Tx queue is full. */
+uint64_t tx_failure_drops;
+/* Packet length greater than device MTU. */
+uint64_t tx_mtu_exceeded_drops;
+/* Packet drops in egress policer processing. */
+uint64_t tx_qos_drops;
+/* Packet drops in ingress policer processing. */
+uint64_t rx_qos_drops;
+};
+
 enum { DPDK_RING_SIZE = 256 };
 BUILD_ASSERT_DECL(IS_POW2(DPDK_RING_SIZE));
 enum { DRAIN_TSC = 20ULL };
@@ -413,11 +427,10 @@ struct netdev_dpdk {
 
 PADDED_MEMBERS(CACHE_LINE_SIZE,
 struct netdev_stats stats;
-/* Custom stat for retries when unable to transmit. */
-uint64_t tx_retries;
+struct netdev_dpdk_sw_stats *sw_stats;
 /* Protects stats */
 rte_spinlock_t stats_lock;
-/* 4 pad bytes here. */
+/* 36 pad bytes here. */
 );
 
 PADDED_MEMBERS(CACHE_LINE_SIZE,
@@ -1173,7 +1186,9 @@ common_construct(struct netdev *netdev, dpdk_port_t
port_no,
 dev->rte_xstats_ids = NULL;
 dev->rte_xstats_ids_size = 0;
 
-dev->tx_retries = (dev->type == DPDK_DEV_VHOST) ? 0 : UINT64_MAX;
+dev->sw_stats = (struct netdev_dpdk_sw_stats *)
+xzalloc(sizeof *dev->sw_stats);
+dev->sw_stats->tx_retries = (dev->type == DPDK_DEV_VHOST) ? 0 : 
+ UINT64_MAX;
 
 return 0;
 }
@@ -1359,6 +1374,7 @@ common_destruct(struct netdev_dpdk *dev)
 ovs_list_remove(&dev->list_node);
 free(ovsrcu_get_protected(struct ingress_poli

[ovs-dev] [PATCH v9 2/2] netdev-dpdk:Detailed packet drop statistics

2019-09-22 Thread Sriram Vatala via dev
OVS may be unable to transmit packets for multiple reasons and
today there is a single counter to track packets dropped due to
any of those reasons. The most common reason is that a VM is
unable to read packets fast enough causing the vhostuser port
transmit queue on the OVS side to become full. This manifests
as a problem with VNFs not receiving all packets. Having a
separate drop counter to track packets dropped because the
transmit queue is full will clearly indicate that the problem
is on the VM side and not in OVS. Similarly maintaining separate
counters for all possible drops helps in indicating sensible
cause for packet drops.

This patch adds custom software stats counters to track packets
dropped at port level and these counters are displayed along with
other stats in "ovs-vsctl get interface  statistics"
command. The detailed stats will be available for both dpdk and
vhostuser ports.

--
Changes since v8:
Addressed comments given by Ilya.

Signed-off-by: Sriram Vatala 
---
 Documentation/topics/dpdk/vhost-user.rst  | 13 ++-
 lib/netdev-dpdk.c | 81 +++
 utilities/bugtool/automake.mk |  3 +-
 utilities/bugtool/ovs-bugtool-get-port-stats  | 25 ++
 .../plugins/network-status/openvswitch.xml|  1 +
 5 files changed, 105 insertions(+), 18 deletions(-)
 create mode 100755 utilities/bugtool/ovs-bugtool-get-port-stats

diff --git a/Documentation/topics/dpdk/vhost-user.rst 
b/Documentation/topics/dpdk/vhost-user.rst
index 724aa62f6..89388a2bf 100644
--- a/Documentation/topics/dpdk/vhost-user.rst
+++ b/Documentation/topics/dpdk/vhost-user.rst
@@ -551,7 +551,18 @@ processing packets at the required rate.
 The amount of Tx retries on a vhost-user or vhost-user-client interface can be
 shown with::
 
-  $ ovs-vsctl get Interface dpdkvhostclient0 statistics:tx_retries
+  $ ovs-vsctl get Interface dpdkvhostclient0 statistics:netdev_dpdk_tx_retries
+
+When the guest is not able to consume the packets fast enough, the transmit
+queue of the port gets filled up i.e queue runs out of free descriptors.
+This is the most likely reason why dpdk transmit API will fail to send packets
+besides other reasons.
+
+The amount of tx failure drops on a dpdk vhost/physical interface can be
+shown with::
+
+  $ ovs-vsctl get Interface dpdkvhostclient0 \
+statistics:netdev_dpdk_tx_failure_drops
 
 vhost-user Dequeue Zero Copy (experimental)
 ---
diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
index 652b57e3b..fd8f9102e 100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -174,6 +174,20 @@ static const struct vhost_device_ops virtio_net_device_ops 
=
 .destroy_connection = destroy_connection,
 };
 
+/* Custom software stats for dpdk ports */
+struct netdev_dpdk_sw_stats {
+/* No. of retries when unable to transmit. */
+uint64_t tx_retries;
+/* Packet drops when unable to transmit; Probably Tx queue is full. */
+uint64_t tx_failure_drops;
+/* Packet length greater than device MTU. */
+uint64_t tx_mtu_exceeded_drops;
+/* Packet drops in egress policer processing. */
+uint64_t tx_qos_drops;
+/* Packet drops in ingress policer processing. */
+uint64_t rx_qos_drops;
+};
+
 enum { DPDK_RING_SIZE = 256 };
 BUILD_ASSERT_DECL(IS_POW2(DPDK_RING_SIZE));
 enum { DRAIN_TSC = 20ULL };
@@ -413,11 +427,10 @@ struct netdev_dpdk {
 
 PADDED_MEMBERS(CACHE_LINE_SIZE,
 struct netdev_stats stats;
-/* Custom stat for retries when unable to transmit. */
-uint64_t tx_retries;
+struct netdev_dpdk_sw_stats *sw_stats;
 /* Protects stats */
 rte_spinlock_t stats_lock;
-/* 4 pad bytes here. */
+/* 36 pad bytes here. */
 );
 
 PADDED_MEMBERS(CACHE_LINE_SIZE,
@@ -1173,7 +1186,9 @@ common_construct(struct netdev *netdev, dpdk_port_t 
port_no,
 dev->rte_xstats_ids = NULL;
 dev->rte_xstats_ids_size = 0;
 
-dev->tx_retries = (dev->type == DPDK_DEV_VHOST) ? 0 : UINT64_MAX;
+dev->sw_stats = (struct netdev_dpdk_sw_stats *)
+xzalloc(sizeof *dev->sw_stats);
+dev->sw_stats->tx_retries = (dev->type == DPDK_DEV_VHOST) ? 0 : UINT64_MAX;
 
 return 0;
 }
@@ -1359,6 +1374,7 @@ common_destruct(struct netdev_dpdk *dev)
 ovs_list_remove(&dev->list_node);
 free(ovsrcu_get_protected(struct ingress_policer *,
   &dev->ingress_policer));
+free(dev->sw_stats);
 ovs_mutex_destroy(&dev->mutex);
 }
 
@@ -2209,6 +2225,7 @@ netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq,
 rte_spinlock_lock(&dev->stats_lock);
 netdev_dpdk_vhost_update_rx_counters(&dev->stats, batch->packets,
  nb_rx, dropped);
+dev->sw_stats->rx_qos_drops += dropped;
 rte_spinlock_unlock(&dev->stats_lock);
 
 batch->count = nb_rx;
@@ -2258,6 +2275,7 @@ netde

[ovs-dev] [PATCH v9 1/2] netdev-dpdk: Reuse vhost function for dpdk ETH custom stats.

2019-09-22 Thread Sriram Vatala via dev
From: Ilya Maximets 

This is yet another refactoring for the upcoming detailed drop stats.
It allows using a single function for all the software-calculated
statistics in netdev-dpdk for both vhost and ETH ports.

UINT64_MAX is used as a marker for non-supported statistics, in the
same way as it's done in bridge.c for the common netdev stats.

Cc: Sriram Vatala 
Signed-off-by: Ilya Maximets 
Signed-off-by: Sriram Vatala 
---
 lib/netdev-dpdk.c | 67 +++
 1 file changed, 39 insertions(+), 28 deletions(-)
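
As a reading aid for the SW_CSTATS/SW_CSTAT pattern in the diff below: the
counter list is written once as an X-macro and re-expanded with different
SW_CSTAT definitions (to count entries, to copy names, to copy values). A
minimal standalone sketch of that pattern, with made-up counter names rather
than the ones from this patch:

    #include <stdint.h>
    #include <stdio.h>

    struct sw_stats {
        uint64_t tx_retries;
        uint64_t tx_drops;
    };

    /* Single list of counters; each expansion of SW_CSTAT decides what
     * to do with one entry. */
    #define SW_CSTATS        \
        SW_CSTAT(tx_retries) \
        SW_CSTAT(tx_drops)

    static void print_stats(const struct sw_stats *s)
    {
    #define SW_CSTAT(NAME) \
        printf(#NAME " = %llu\n", (unsigned long long) s->NAME);
        SW_CSTATS
    #undef SW_CSTAT
    }

    int main(void)
    {
        struct sw_stats s = { .tx_retries = 3, .tx_drops = 1 };

        print_stats(&s);
        return 0;
    }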

diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
index bc20d6843..652b57e3b 100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -471,6 +471,8 @@ struct netdev_rxq_dpdk {
 static void netdev_dpdk_destruct(struct netdev *netdev);
 static void netdev_dpdk_vhost_destruct(struct netdev *netdev);
 
+static int netdev_dpdk_get_sw_custom_stats(const struct netdev *,
+   struct netdev_custom_stats *);
 static void netdev_dpdk_clear_xstats(struct netdev_dpdk *dev);
 
 int netdev_dpdk_get_vid(const struct netdev_dpdk *dev);
@@ -1171,7 +1173,7 @@ common_construct(struct netdev *netdev, dpdk_port_t 
port_no,
 dev->rte_xstats_ids = NULL;
 dev->rte_xstats_ids_size = 0;
 
-dev->tx_retries = 0;
+dev->tx_retries = (dev->type == DPDK_DEV_VHOST) ? 0 : UINT64_MAX;
 
 return 0;
 }
@@ -2771,7 +2773,9 @@ netdev_dpdk_get_custom_stats(const struct netdev *netdev,
 
 uint32_t i;
 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
-int rte_xstats_ret;
+int rte_xstats_ret, sw_stats_size;
+
+netdev_dpdk_get_sw_custom_stats(netdev, custom_stats);
 
 ovs_mutex_lock(&dev->mutex);
 
@@ -2786,12 +2790,13 @@ netdev_dpdk_get_custom_stats(const struct netdev 
*netdev,
 if (rte_xstats_ret > 0 &&
 rte_xstats_ret <= dev->rte_xstats_ids_size) {
 
-custom_stats->size = rte_xstats_ret;
-custom_stats->counters =
-(struct netdev_custom_counter *) xcalloc(rte_xstats_ret,
-sizeof(struct netdev_custom_counter));
+sw_stats_size = custom_stats->size;
+custom_stats->size += rte_xstats_ret;
+custom_stats->counters = xrealloc(custom_stats->counters,
+  custom_stats->size *
+  sizeof *custom_stats->counters);
 
-for (i = 0; i < rte_xstats_ret; i++) {
+for (i = sw_stats_size; i < sw_stats_size + rte_xstats_ret; i++) {
 ovs_strlcpy(custom_stats->counters[i].name,
 netdev_dpdk_get_xstat_name(dev,
dev->rte_xstats_ids[i]),
@@ -2801,8 +2806,6 @@ netdev_dpdk_get_custom_stats(const struct netdev *netdev,
 } else {
 VLOG_WARN("Cannot get XSTATS values for port: "DPDK_PORT_ID_FMT,
   dev->port_id);
-custom_stats->counters = NULL;
-custom_stats->size = 0;
 /* Let's clear statistics cache, so it will be
  * reconfigured */
 netdev_dpdk_clear_xstats(dev);
@@ -2817,39 +2820,47 @@ netdev_dpdk_get_custom_stats(const struct netdev 
*netdev,
 }
 
 static int
-netdev_dpdk_vhost_get_custom_stats(const struct netdev *netdev,
-   struct netdev_custom_stats *custom_stats)
+netdev_dpdk_get_sw_custom_stats(const struct netdev *netdev,
+struct netdev_custom_stats *custom_stats)
 {
 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
-int i;
+int i, n;
 
-#define VHOST_CSTATS \
-VHOST_CSTAT(tx_retries)
+#define SW_CSTATS \
+SW_CSTAT(tx_retries)
 
-#define VHOST_CSTAT(NAME) + 1
-custom_stats->size = VHOST_CSTATS;
-#undef VHOST_CSTAT
+#define SW_CSTAT(NAME) + 1
+custom_stats->size = SW_CSTATS;
+#undef SW_CSTAT
 custom_stats->counters = xcalloc(custom_stats->size,
  sizeof *custom_stats->counters);
-i = 0;
-#define VHOST_CSTAT(NAME) \
-ovs_strlcpy(custom_stats->counters[i++].name, #NAME, \
-NETDEV_CUSTOM_STATS_NAME_SIZE);
-VHOST_CSTATS;
-#undef VHOST_CSTAT
 
 ovs_mutex_lock(&dev->mutex);

 rte_spinlock_lock(&dev->stats_lock);
 i = 0;
-#define VHOST_CSTAT(NAME) \
+#define SW_CSTAT(NAME) \
 custom_stats->counters[i++].value = dev->NAME;
-VHOST_CSTATS;
-#undef VHOST_CSTAT
+SW_CSTATS;
+#undef SW_CSTAT
 rte_spinlock_unlock(&dev->stats_lock);

 ovs_mutex_unlock(&dev->mutex);
 
+i = 0;
+n = 0;
+#define SW_CSTAT(NAME) \
+if (custom_stats->counters[i].value != UINT64_MAX) {   \
+ovs_strlcpy(custom_stats->counters[n].name, #NAME, \
+NETDEV_CUSTOM

Re: [ovs-dev] [PATCH v8] netdev-dpdk:Detailed packet drop statistics

2019-09-10 Thread Sriram Vatala via dev
-Original Message-
From: Ilya Maximets 
Sent: 10 September 2019 19:29
To: Sriram Vatala ; ovs-dev@openvswitch.org
Cc: ktray...@redhat.com; ian.sto...@intel.com
Subject: Re: [PATCH v8] netdev-dpdk:Detailed packet drop statistics

On 08.09.2019 19:01, Sriram Vatala wrote:
> OVS may be unable to transmit packets for multiple reasons and today
> there is a single counter to track packets dropped due to any of those
> reasons. The most common reason is that a VM is unable to read packets
> fast enough causing the vhostuser port transmit queue on the OVS side
> to become full. This manifests as a problem with VNFs not receiving
> all packets. Having a separate drop counter to track packets dropped
> because the transmit queue is full will clearly indicate that the
> problem is on the VM side and not in OVS. Similarly maintaining
> separate counters for all possible drops helps in indicating sensible
> cause for packet drops.
>
> This patch adds custom software stats counters to track packets
> dropped at port level and these counters are displayed along with
> other stats in "ovs-vsctl get interface  statistics"
> command. The detailed stats will be available for both dpdk and
> vhostuser ports.
>
> Signed-off-by: Sriram Vatala 
> ---
> Changes since v7 :
> Defined structure netdev_dpdk_sw_stats and moved all custom stats into
> it.
> Placed a pointer to netdev_dpdk_sw_stats structure in netdev_dpdk
> strucure.
> stats are reported with prefix with netdev_dpdk
> ---
>  include/openvswitch/netdev.h  |  14 +++
>  lib/netdev-dpdk.c | 109 +++---
>  utilities/bugtool/automake.mk |   3 +-
>  utilities/bugtool/ovs-bugtool-get-port-stats  |  25 
>  .../plugins/network-status/openvswitch.xml|   1 +
>  vswitchd/vswitch.xml  |  24 
>  6 files changed, 156 insertions(+), 20 deletions(-)  create mode
> 100755 utilities/bugtool/ovs-bugtool-get-port-stats
>
> diff --git a/include/openvswitch/netdev.h
> b/include/openvswitch/netdev.h index 0c10f7b48..c17e6a97d 100644
> --- a/include/openvswitch/netdev.h
> +++ b/include/openvswitch/netdev.h
> @@ -89,6 +89,20 @@ struct netdev_stats {
>  uint64_t rx_jabber_errors;
>  };
>
> +/* Custom software stats for dpdk ports */ struct
> +netdev_dpdk_sw_stats {
> +/* No. of retries when unable to transmit. */
> +uint64_t tx_retries;
> +/* Pkt drops when unable to transmit; Probably Tx queue is full */
> +uint64_t tx_failure_drops;
> +/* Pkt len greater than max dev MTU */
> +uint64_t tx_mtu_exceeded_drops;
> +/* Pkt drops in egress policier processing */
> +uint64_t tx_qos_drops;
> +/* Pkt drops in ingress policier processing */
> +uint64_t rx_qos_drops;
> +};

This should not be in a common header since the only user is netdev-dpdk.c.
Regarding this code itself:
1. s/policier/policer/
2. s/Pkt/Packet/, s/len/length/
3. s/max dev MTU/device MTU/ (MTU already has 'maximum' word).
4. All comments should end with a period.

Sorry, I missed the spelling issues. I will fix them in my next patch, v9.
I will move this structure definition to netdev-dpdk.c and make it static.

> +
>  /* Structure representation of custom statistics counter */  struct
> netdev_custom_counter {
>  uint64_t value;
> diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c index
> bc20d6843..39aab4672 100644
> --- a/lib/netdev-dpdk.c
> +++ b/lib/netdev-dpdk.c
> @@ -413,11 +413,10 @@ struct netdev_dpdk {
>
>  PADDED_MEMBERS(CACHE_LINE_SIZE,
>  struct netdev_stats stats;
> -/* Custom stat for retries when unable to transmit. */
> -uint64_t tx_retries;
> +struct netdev_dpdk_sw_stats *sw_stats;
>  /* Protects stats */
>  rte_spinlock_t stats_lock;
> -/* 4 pad bytes here. */
> +/* 36 pad bytes here. */
>  );
>
>  PADDED_MEMBERS(CACHE_LINE_SIZE,
> @@ -1171,7 +1170,8 @@ common_construct(struct netdev *netdev, dpdk_port_t 
> port_no,
>  dev->rte_xstats_ids = NULL;
>  dev->rte_xstats_ids_size = 0;
>
> -dev->tx_retries = 0;
> +dev->sw_stats = (struct netdev_dpdk_sw_stats *)
> +xcalloc(1,sizeof(struct
> + netdev_dpdk_sw_stats));

This should be:
   dev->sw_stats = xzalloc(sizeof *dev->sw_stats); Or
   dev->sw_stats = dpdk_rte_mzalloc(sizeof *dev->sw_stats);

The latter variant will require proper error handling as it could return NULL.
I will change this in the next patch, v9.

See the Documentation/internals/contributing/coding-style.rst for details.
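
For reference, a sketch of what the error handling for the rte-based allocator
could look like; this assumes dpdk_rte_mzalloc() returns NULL on failure and
that common_construct() reports failure with an errno-style return value (it is
an illustration, not the applied patch):

    dev->sw_stats = dpdk_rte_mzalloc(sizeof *dev->sw_stats);
    if (!dev->sw_stats) {
        /* Allocation failed; let the construct path unwind and report it. */
        return ENOMEM;
    }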

>
>  return 0;
>  }
> @@ -1357,6 +1357,7 @@ common_destruct(struct netdev_dpdk *dev)
>  ovs_list_remo

Re: [ovs-dev] [PATCH v8] netdev-dpdk:Detailed packet drop statistics

2019-09-10 Thread Sriram Vatala via dev
Hi All,
I have addressed the comments given in patch v7. Please review the patch v8
and let me know your comments if any.

Thanks & Regards,
Sriram.

-Original Message-
From: Sriram Vatala  
Sent: 08 September 2019 21:32
To: ovs-dev@openvswitch.org; i.maxim...@samsung.com
Cc: ktray...@redhat.com; ian.sto...@intel.com; Sriram Vatala

Subject: [PATCH v8] netdev-dpdk:Detailed packet drop statistics

OVS may be unable to transmit packets for multiple reasons and today there
is a single counter to track packets dropped due to any of those reasons.
The most common reason is that a VM is unable to read packets fast enough
causing the vhostuser port transmit queue on the OVS side to become full.
This manifests as a problem with VNFs not receiving all packets. Having a
separate drop counter to track packets dropped because the transmit queue is
full will clearly indicate that the problem is on the VM side and not in
OVS. Similarly maintaining separate counters for all possible drops helps in
indicating sensible cause for packet drops.

This patch adds custom software stats counters to track packets dropped at
port level and these counters are displayed along with other stats in
"ovs-vsctl get interface  statistics"
command. The detailed stats will be available for both dpdk and vhostuser
ports.

Signed-off-by: Sriram Vatala 
---
Changes since v7:
Defined structure netdev_dpdk_sw_stats and moved all custom stats into it.
Placed a pointer to the netdev_dpdk_sw_stats structure in the netdev_dpdk structure.
Stats are reported with the "netdev_dpdk" prefix.
---
 include/openvswitch/netdev.h  |  14 +++
 lib/netdev-dpdk.c | 109 +++---
 utilities/bugtool/automake.mk |   3 +-
 utilities/bugtool/ovs-bugtool-get-port-stats  |  25 
 .../plugins/network-status/openvswitch.xml|   1 +
 vswitchd/vswitch.xml  |  24 
 6 files changed, 156 insertions(+), 20 deletions(-)  create mode 100755
utilities/bugtool/ovs-bugtool-get-port-stats

diff --git a/include/openvswitch/netdev.h b/include/openvswitch/netdev.h
index 0c10f7b48..c17e6a97d 100644
--- a/include/openvswitch/netdev.h
+++ b/include/openvswitch/netdev.h
@@ -89,6 +89,20 @@ struct netdev_stats {
 uint64_t rx_jabber_errors;
 };
 
+/* Custom software stats for dpdk ports */ struct netdev_dpdk_sw_stats 
+{
+/* No. of retries when unable to transmit. */
+uint64_t tx_retries;
+/* Pkt drops when unable to transmit; Probably Tx queue is full */
+uint64_t tx_failure_drops;
+/* Pkt len greater than max dev MTU */
+uint64_t tx_mtu_exceeded_drops;
+/* Pkt drops in egress policier processing */
+uint64_t tx_qos_drops;
+/* Pkt drops in ingress policier processing */
+uint64_t rx_qos_drops;
+};
+
 /* Structure representation of custom statistics counter */  struct
netdev_custom_counter {
 uint64_t value;
diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c index
bc20d6843..39aab4672 100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -413,11 +413,10 @@ struct netdev_dpdk {
 
 PADDED_MEMBERS(CACHE_LINE_SIZE,
 struct netdev_stats stats;
-/* Custom stat for retries when unable to transmit. */
-uint64_t tx_retries;
+struct netdev_dpdk_sw_stats *sw_stats;
 /* Protects stats */
 rte_spinlock_t stats_lock;
-/* 4 pad bytes here. */
+/* 36 pad bytes here. */
 );
 
 PADDED_MEMBERS(CACHE_LINE_SIZE,
@@ -1171,7 +1170,8 @@ common_construct(struct netdev *netdev, dpdk_port_t
port_no,
 dev->rte_xstats_ids = NULL;
 dev->rte_xstats_ids_size = 0;
 
-dev->tx_retries = 0;
+dev->sw_stats = (struct netdev_dpdk_sw_stats *)
+xcalloc(1,sizeof(struct netdev_dpdk_sw_stats));
 
 return 0;
 }
@@ -1357,6 +1357,7 @@ common_destruct(struct netdev_dpdk *dev)
 ovs_list_remove(&dev->list_node);
 free(ovsrcu_get_protected(struct ingress_policer *,
   &dev->ingress_policer));
+free(dev->sw_stats);
 ovs_mutex_destroy(&dev->mutex);
 }
 
@@ -2171,6 +2172,7 @@ netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq,
 struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev);
 uint16_t nb_rx = 0;
 uint16_t dropped = 0;
+uint16_t qos_drops = 0;
 int qid = rxq->queue_id * VIRTIO_QNUM + VIRTIO_TXQ;
 int vid = netdev_dpdk_get_vid(dev);
 
@@ -2202,11 +2204,13 @@ netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq,
 (struct rte_mbuf **) batch->packets,
 nb_rx, true);
 dropped -= nb_rx;
+qos_drops = dropped;
 }
 
 rte_spinlock_lock(&dev->stats_lock);
 netdev_dpdk_vhost_update_rx_counters(&dev->stats, batch->packets,
  nb_rx, dropped);
+dev->sw_stats->rx_qos_drops += qos_drops;
 rte_spinlock_unlock(&g

[ovs-dev] [PATCH v8] netdev-dpdk:Detailed packet drop statistics

2019-09-08 Thread Sriram Vatala via dev
OVS may be unable to transmit packets for multiple reasons and
today there is a single counter to track packets dropped due to
any of those reasons. The most common reason is that a VM is
unable to read packets fast enough causing the vhostuser port
transmit queue on the OVS side to become full. This manifests
as a problem with VNFs not receiving all packets. Having a
separate drop counter to track packets dropped because the
transmit queue is full will clearly indicate that the problem
is on the VM side and not in OVS. Similarly maintaining separate
counters for all possible drops helps in indicating sensible
cause for packet drops.

This patch adds custom software stats counters to track packets
dropped at port level and these counters are displayed along with
other stats in "ovs-vsctl get interface  statistics"
command. The detailed stats will be available for both dpdk and
vhostuser ports.

Signed-off-by: Sriram Vatala 
---
Changes since v7 :
Defined structure netdev_dpdk_sw_stats and moved all custom stats
into it.
Placed a pointer to the netdev_dpdk_sw_stats structure in the netdev_dpdk
structure.
Stats are reported with the "netdev_dpdk" prefix.
---
 include/openvswitch/netdev.h  |  14 +++
 lib/netdev-dpdk.c | 109 +++---
 utilities/bugtool/automake.mk |   3 +-
 utilities/bugtool/ovs-bugtool-get-port-stats  |  25 
 .../plugins/network-status/openvswitch.xml|   1 +
 vswitchd/vswitch.xml  |  24 
 6 files changed, 156 insertions(+), 20 deletions(-)
 create mode 100755 utilities/bugtool/ovs-bugtool-get-port-stats

diff --git a/include/openvswitch/netdev.h b/include/openvswitch/netdev.h
index 0c10f7b48..c17e6a97d 100644
--- a/include/openvswitch/netdev.h
+++ b/include/openvswitch/netdev.h
@@ -89,6 +89,20 @@ struct netdev_stats {
 uint64_t rx_jabber_errors;
 };
 
+/* Custom software stats for dpdk ports */
+struct netdev_dpdk_sw_stats {
+/* No. of retries when unable to transmit. */
+uint64_t tx_retries;
+/* Pkt drops when unable to transmit; Probably Tx queue is full */
+uint64_t tx_failure_drops;
+/* Pkt len greater than max dev MTU */
+uint64_t tx_mtu_exceeded_drops;
+/* Pkt drops in egress policer processing */
+uint64_t tx_qos_drops;
+/* Pkt drops in ingress policer processing */
+uint64_t rx_qos_drops;
+};
+
 /* Structure representation of custom statistics counter */
 struct netdev_custom_counter {
 uint64_t value;
diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
index bc20d6843..39aab4672 100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -413,11 +413,10 @@ struct netdev_dpdk {
 
 PADDED_MEMBERS(CACHE_LINE_SIZE,
 struct netdev_stats stats;
-/* Custom stat for retries when unable to transmit. */
-uint64_t tx_retries;
+struct netdev_dpdk_sw_stats *sw_stats;
 /* Protects stats */
 rte_spinlock_t stats_lock;
-/* 4 pad bytes here. */
+/* 36 pad bytes here. */
 );
 
 PADDED_MEMBERS(CACHE_LINE_SIZE,
@@ -1171,7 +1170,8 @@ common_construct(struct netdev *netdev, dpdk_port_t 
port_no,
 dev->rte_xstats_ids = NULL;
 dev->rte_xstats_ids_size = 0;
 
-dev->tx_retries = 0;
+dev->sw_stats = (struct netdev_dpdk_sw_stats *)
+xcalloc(1,sizeof(struct netdev_dpdk_sw_stats));
 
 return 0;
 }
@@ -1357,6 +1357,7 @@ common_destruct(struct netdev_dpdk *dev)
 ovs_list_remove(&dev->list_node);
 free(ovsrcu_get_protected(struct ingress_policer *,
   &dev->ingress_policer));
+free(dev->sw_stats);
 ovs_mutex_destroy(&dev->mutex);
 }
 
@@ -2171,6 +2172,7 @@ netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq,
 struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev);
 uint16_t nb_rx = 0;
 uint16_t dropped = 0;
+uint16_t qos_drops = 0;
 int qid = rxq->queue_id * VIRTIO_QNUM + VIRTIO_TXQ;
 int vid = netdev_dpdk_get_vid(dev);
 
@@ -2202,11 +2204,13 @@ netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq,
 (struct rte_mbuf **) batch->packets,
 nb_rx, true);
 dropped -= nb_rx;
+qos_drops = dropped;
 }
 
 rte_spinlock_lock(&dev->stats_lock);
 netdev_dpdk_vhost_update_rx_counters(&dev->stats, batch->packets,
  nb_rx, dropped);
+dev->sw_stats->rx_qos_drops += qos_drops;
 rte_spinlock_unlock(&dev->stats_lock);
 
 batch->count = nb_rx;
@@ -2232,6 +2236,7 @@ netdev_dpdk_rxq_recv(struct netdev_rxq *rxq, struct 
dp_packet_batch *batch,
 struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev);
 int nb_rx;
 int dropped = 0;
+int qos_drops = 0;
 
 if (OVS_UNLIKELY(!(dev->flags & NETDEV_UP))) {
 return EAGAIN;
@@ -2250,12 +2255,14 @@ netdev_
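
To make the rx accounting in the hunks above concrete, here is a worked
example with made-up numbers (a sketch only; it assumes 'dropped' is set to
the pre-policer nb_rx just before the shown lines, as in the surrounding code):

/* Worked example, hypothetical numbers:
 *   rte_vhost_dequeue_burst() returns nb_rx = 28 packets;
 *   ingress_policer_run() passes 25 of them, so after "dropped -= nb_rx"
 *   we have dropped = 28 - 25 = 3 and qos_drops = 3.
 * The generic counter (stats.rx_dropped, updated via
 * netdev_dpdk_vhost_update_rx_counters()) and the new detailed counter
 * (sw_stats->rx_qos_drops) are then both incremented by the same 3 under
 * stats_lock, so they stay consistent. */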

Re: [ovs-dev] [PATCH v7] Detailed packet drop statistics per dpdk and vhostuser ports

2019-09-05 Thread Sriram Vatala via dev
Hi Ilya,
Thanks a lot for the explanation. As per your suggestion, I will move all the 
counters (including 'tx_retries') to a separate structure and place a pointer 
to it in the netdev_dpdk structure so that the padding size will not vary with 
the introduction of new counters in the future.

@Kevin Traynor : I will change the comment line for the number of padding 
bytes in my next patch instead of you sending a new patch just for changing 
the comment line.

Thanks & Regards,
Sriram.

-Original Message-
From: Ilya Maximets 
Sent: 04 September 2019 19:34
To: Sriram Vatala ; Kevin Traynor 

Cc: ovs-dev@openvswitch.org; Stokes, Ian 
Subject: Re: [PATCH v7] Detailed packet drop statistics per dpdk and vhostuser 
ports

On 04.09.2019 16:31, Sriram Vatala wrote:
> Hi Ilya,
> 1) I was working on addressing the comments provided by you. Had a
> small query on one of your comments.
> 2) I am trying to understand the problem of padding bytes in struct
> netdev_dpdk which you are referring to in your comment.
> 3) If I understand correctly, the macro "PADDED_MEMBERS(UNIT, MEMBERS)"
> ensures that the memory to be allocated for all 'MEMBERS' is rounded
> off to the nearest multiple of CACHE_LINE_SIZE, which is 64 in this case.
> This macro adds pad bytes to round off to a multiple of 64.
> 4) At runtime, I checked the size of stats section of netdev_dpdk
> without new counters that I have introduced in my patch. It is 384
> bytes, out of which struct netdev_stats alone occupies 336 bytes and 8
> bytes for tx_retries counter. (I could see there is no padding between
> netdev_stats and tx_retries). Total of 344 is rounded to 384 [((344 + 64 - 
> 1)/64) * 64].
> 5) With my new counters, the size remains same after padding.
> (336 + 8 + 8 +8 +8 +8 = 376) which is rounded to 384 bytes  [((376 +64
> -1)/64) *64]  at runtime.
>
> I want to check with you, if I have correctly understood the problem
> that you are referring in your comment. If you can explain a bit more
> on this, it would be helpful for me to understand the problem and think of 
> possible solutions.
>
> Following is the snippet of memory layout of netdev_dpdk at runtime :
> union {
> OVS_CACHE_LINE_MARKER cacheline1;
> struct {...};
> uint8_t pad50[64];
> };
> union {
> struct {...};
> uint8_t pad51[192];
> };
> union {
> struct {...};
> uint8_t pad52[384];
> };
> union {
> struct {...};
> uint8_t pad53[128];
> };
> union {
> struct {...};
> uint8_t pad54[64];
> };
> }

Hi.

The code looks like this:

 PADDED_MEMBERS(CACHE_LINE_SIZE,
 struct netdev_stats stats;
 /* Custom stat for retries when unable to transmit. */
 uint64_t tx_retries;
 /* Protects stats */
 rte_spinlock_t stats_lock;
 /* 4 pad bytes here. */<-- This comment I'm talking about.
 );

The only thing you need to change is to update the comment while adding new 
structure fields.

Your calculations are missing the size of 'stats_lock' which is 4 bytes.
So, on current master total size is:
336 bytes for stats + 8 bytes for tx_retries + 4 bytes for lock = 348,
rounded up to 352 by the struct's 8-byte alignment.  And it's rounded up to
384 by padding, i.e. we have 384 - 352 = 32 pad bytes.
The comment on current master should be "/* 32 pad bytes here. */".

BTW, Kevin, how did you calculate 4 here? My pahole output is following:

union {
struct {
struct netdev_stats stats;   /*   320   336 */
uint64_t   tx_retries;   /*   656 8 */
rte_spinlock_t stats_lock;   /*   664 4 */
};   /* 352 */
uint8_tpad52[0]; /*   0 */
};   /*   320   384 */

Returning to the patch. After adding 4 more stats (4 * 8 bytes), the new
layout takes:
   336 bytes for stats + 8 bytes for tx_retries \
   + 4*8 bytes for new stats + 4 bytes for lock = 380 bytes,
   rounded up to 384 by the struct's 8-byte alignment.

Now the structure takes exactly 384 bytes and you need to remove the comment 
or change it to "/* 0 pad bytes here. */".

Sorry, I didn't check the actual layout until now so I was confused a bit by 
the comment on current master. Anyway, you need to update that comment.
However, it might still be useful to move these stats to a separate structure 
to avoid big padding in case we want to add one more. And I'm still 
thinking that we should drop the padding entirely for most of the structure 
fields, but this should be a separate change.
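
A quick way to sanity-check the numbers above (a throwaway sketch, not part of
the patch; the 352/384 member sizes are taken from the pahole output earlier
in this mail):

#include <stdio.h>

#define CACHE_LINE_SIZE 64
#define ROUND_UP(x, y) (((x) + (y) - 1) / (y) * (y))

int main(void)
{
    /* Member-struct sizes as reported by pahole: current master vs. the
     * patch with four extra 8-byte counters. */
    unsigned int master = 352;
    unsigned int patched = 384;

    printf("master:  padded to %u, %u pad bytes\n",
           ROUND_UP(master, CACHE_LINE_SIZE),
           ROUND_UP(master, CACHE_LINE_SIZE) - master);   /* 384, 32 */
    printf("patched: padded to %u, %u pad bytes\n",
           ROUND_UP(patched, CACHE_LINE_SIZE),
           ROUND_UP(patched, CACHE_LINE_SIZE) - patched); /* 384, 0 */
    return 0;
}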

Best regards, Ilya Maximets.
___
dev mailing list
d...@openvswitch.org
https://mail.openvswitch.org/mailman/listinfo/ovs-dev


Re: [ovs-dev] [PATCH v7] Detailed packet drop statistics per dpdk and vhostuser ports

2019-09-04 Thread Sriram Vatala via dev
Hi Ilya,
1) I was working on addressing the comments provided by you. Had a small query 
on one of your comments.
2) I am trying to understand the problem of padding bytes in struct 
netdev_dpdk which you are referring to in your comment.
3) If I understand correctly, the macro "PADDED_MEMBERS(UNIT, MEMBERS)" 
ensures that the memory to be allocated for all 'MEMBERS' is rounded off to 
the nearest multiple of CACHE_LINE_SIZE, which is 64 in this case. This macro 
adds pad bytes to round off to a multiple of 64.
4) At runtime, I checked the size of stats section of netdev_dpdk without new 
counters that I have introduced in my patch. It is 384 bytes, out of which 
struct netdev_stats alone occupies 336 bytes and 8 bytes for tx_retries 
counter. (I could see there is no padding between netdev_stats and 
tx_retries). Total of 344 is rounded to 384 [((344 + 64 - 1)/64) * 64].
5) With my new counters, the size remains the same after padding.
(336 + 8 + 8 + 8 + 8 + 8 = 376), which is rounded to 384 bytes [((376 + 64 - 1)/64) 
* 64] at runtime.

I want to check with you, if I have correctly understood the problem that you 
are referring in your comment. If you can explain a bit more on this, it would 
be helpful for me to understand the problem and think of possible solutions.

Following is the snippet of memory layout of netdev_dpdk at runtime :
union {
OVS_CACHE_LINE_MARKER cacheline1;
struct {...};
uint8_t pad50[64];
};
union {
struct {...};
uint8_t pad51[192];
};
union {
struct {...};
uint8_t pad52[384];
};
union {
struct {...};
uint8_t pad53[128];
};
union {
struct {...};
uint8_t pad54[64];
};
}
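
For reference, this is roughly the idea behind PADDED_MEMBERS that produces
those unions with pad arrays (a simplified sketch only; the real macro in OVS
differs in details such as the pad member naming):

/* Simplified sketch: the members and a byte array share a union, so the
 * union's size is the members' size rounded up to a multiple of UNIT. */
#define ROUND_UP_SKETCH(X, Y) (((X) + (Y) - 1) / (Y) * (Y))
#define PADDED_MEMBERS_SKETCH(UNIT, MEMBERS)                            \
    union {                                                             \
        struct { MEMBERS };                                             \
        uint8_t pad[ROUND_UP_SKETCH(sizeof(struct { MEMBERS }), UNIT)]; \
    }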

Thanks & Regards,
Sriram.

-Original Message-
From: Ilya Maximets 
Sent: 30 August 2019 18:53
To: Sriram Vatala ; ovs-dev@openvswitch.org
Cc: b...@ovn.org; ian.sto...@intel.com; Kevin Traynor 
Subject: Re: [PATCH v7] Detailed packet drop statistics per dpdk and vhostuser 
ports

On 28.08.2019 15:58, Sriram Vatala wrote:
> OVS may be unable to transmit packets for multiple reasons and today
> there is a single counter to track packets dropped due to any of those
> reasons. The most common reason is that a VM is unable to read packets
> fast enough causing the vhostuser port transmit queue on the OVS side
> to become full. This manifests as a problem with VNFs not receiving
> all packets. Having a separate drop counter to track packets dropped
> because the transmit queue is full will clearly indicate that the
> problem is on the VM side and not in OVS. Similarly maintaining
> separate counters for all possible drops helps in indicating sensible
> cause for packet drops.
>
> This patch adds custom stats counters to track packets dropped at port
> level and these counters are displayed along with other stats in
> "ovs-vsctl get interface  statistics"
> command. The detailed stats will be available for both dpdk and
> vhostuser ports.
>
> Signed-off-by: Sriram Vatala 
> ---

Hi. Thanks for a new version.

It'll be good if you include the version history right here under the '---' cut 
line, so it'll be easier to track changes.
Like this:

Version 7:
  * Implemented something.
  * Fixed something.
  * Patch renamed. Previous name: ...

Version 6:
  * ...


Another general comment is that, IMHO, it's better to rename this patch to 
"netdev-dpdk: Detailed packet drop statistics.".
This patch adds statistics for all the dpdk based interfaces, so there is no 
need to list them in the commit name. Prefix clearly describes the patch area. 
(Please, do not drop the version number because of patch re-naming. Just add a 
note about renaming in a version history.)

More comments inline.

Best regards, Ilya Maximets.

>  lib/netdev-dpdk.c  | 99 
> +++---
>  utilities/bugtool/automake.mk  |  3 +-
>  utilities/bugtool/ovs-bugtool-get-iface-stats  | 25 ++
>  .../bugtool/plugins/network-status/openvswitch.xml |  1 +
>  vswitchd/vswitch.xml   | 24 ++
>  5 files changed, 139 insertions(+), 13 deletions(-)  create mode
> 100755 utilities/bugtool/ovs-bugtool-get-iface-stats
>
> diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c index
> bc20d68..a679c5b 100644
> --- a/lib/netdev-dpdk.c
> +++ b/lib/netdev-dpdk.c
> @@ -413,8 +413,17 @@ struct netdev_dpdk {
>
>  PADDED_MEMBERS(CACHE_LINE_SIZE,
>  struct netdev_stats stats;
> -/* Custom stat for retries when unable to transmit. */
> +/* Custom device stats */
> +/* No. of retries when unable to transmit. */
>  uint64_t tx_retries;
> +/* Pkts dropped when unable to transmit; Probably Tx queue is full 
> */
> +uint64_t tx_failure_drops;
> + 

Re: [ovs-dev] [PATCH v7] Detailed packet drop statistics per dpdk and vhostuser ports

2019-09-02 Thread Sriram Vatala via dev
Hi Ilya,
Thanks for reviewing the patch and suggestions. Will address your comments in 
next patch.

Thanks & Regards,
Sriram.

-Original Message-
From: Ilya Maximets 
Sent: 30 August 2019 18:53
To: Sriram Vatala ; ovs-dev@openvswitch.org
Cc: b...@ovn.org; ian.sto...@intel.com; Kevin Traynor 
Subject: Re: [PATCH v7] Detailed packet drop statistics per dpdk and vhostuser 
ports

On 28.08.2019 15:58, Sriram Vatala wrote:
> OVS may be unable to transmit packets for multiple reasons and today
> there is a single counter to track packets dropped due to any of those
> reasons. The most common reason is that a VM is unable to read packets
> fast enough causing the vhostuser port transmit queue on the OVS side
> to become full. This manifests as a problem with VNFs not receiving
> all packets. Having a separate drop counter to track packets dropped
> because the transmit queue is full will clearly indicate that the
> problem is on the VM side and not in OVS. Similarly maintaining
> separate counters for all possible drops helps in indicating sensible
> cause for packet drops.
>
> This patch adds custom stats counters to track packets dropped at port
> level and these counters are displayed along with other stats in
> "ovs-vsctl get interface  statistics"
> command. The detailed stats will be available for both dpdk and
> vhostuser ports.
>
> Signed-off-by: Sriram Vatala 
> ---

Hi. Thanks for a new version.

It'll be good if you include the version history right here under the '---' cut 
line, so it'll be easier to track changes.
Like this:

Version 7:
  * Implemented something.
  * Fixed something.
  * Patch renamed. Previous name: ...

Version 6:
  * ...


Another general comment is that, IMHO, it's better to rename this patch to 
"netdev-dpdk: Detailed packet drop statistics.".
This patch adds statistics for all the dpdk based interfaces, so there is no 
need to list them in the commit name. Prefix clearly describes the patch area. 
(Please, do not drop the version number because of patch re-naming. Just add a 
note about renaming in a version history.)

More comments inline.

Best regards, Ilya Maximets.

>  lib/netdev-dpdk.c  | 99 
> +++---
>  utilities/bugtool/automake.mk  |  3 +-
>  utilities/bugtool/ovs-bugtool-get-iface-stats  | 25 ++
>  .../bugtool/plugins/network-status/openvswitch.xml |  1 +
>  vswitchd/vswitch.xml   | 24 ++
>  5 files changed, 139 insertions(+), 13 deletions(-)  create mode
> 100755 utilities/bugtool/ovs-bugtool-get-iface-stats
>
> diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c index
> bc20d68..a679c5b 100644
> --- a/lib/netdev-dpdk.c
> +++ b/lib/netdev-dpdk.c
> @@ -413,8 +413,17 @@ struct netdev_dpdk {
>
>  PADDED_MEMBERS(CACHE_LINE_SIZE,
>  struct netdev_stats stats;
> -/* Custom stat for retries when unable to transmit. */
> +/* Custom device stats */
> +/* No. of retries when unable to transmit. */
>  uint64_t tx_retries;
> +/* Pkts dropped when unable to transmit; Probably Tx queue is full 
> */
> +uint64_t tx_failure_drops;
> +/* Pkts len greater than max dev MTU */
> +uint64_t tx_mtu_exceeded_drops;
> +/* Pkt drops in egress policier processing */
> +uint64_t tx_qos_drops;
> +/* Pkts drops in ingress policier processing */
> +uint64_t rx_qos_drops;
>  /* Protects stats */
>  rte_spinlock_t stats_lock;
>  /* 4 pad bytes here. */

Padding still needs re-calculation.
Another option is to move all the counters to separate structure like 'struct 
netdev_dpdk_sw_stats' and keep a pointer in 'struct netdev_dpdk'.

Another point is about naming. We, probably, should add some prefix to these 
stats to distinguish them from HW/driver stats and avoid possible collisions.
This could be done here in variable names or while reporting them.
I guess 'netdev_dpdk_' prefix might be suitable to clearly highlight the 
origin of the counter (It's, probably, better to add prefix like this while 
reporting because this is not very useful inside the netdev-dpdk code).
Suggestions are welcome.
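
For example, the prefix could be added only at reporting time, something like
this (a sketch only; 'short_names' is a made-up placeholder for however the
internal names end up being stored):

    /* Sketch: prepend the prefix only when filling in the reported custom
     * stats, so the internal counters keep their short names. */
    snprintf(custom_stats->counters[i].name, NETDEV_CUSTOM_STATS_NAME_SIZE,
             "netdev_dpdk_%s", short_names[i]);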


> @@ -2171,6 +2180,7 @@ netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq,
>  struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev);
>  uint16_t nb_rx = 0;
>  uint16_t dropped = 0;
> +uint16_t qos_drops = 0;
>  int qid = rxq->queue_id * VIRTIO_QNUM + VIRTIO_TXQ;
>  int vid = netdev_dpdk_get_vid(dev);
>
> @@ -2202,11 +2212,13 @@ netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq,
>  (struct rte_mbuf **) batch->packets,
>  

Re: [ovs-dev] [PATCH v7] Detailed packet drop statistics per dpdk and vhostuser ports

2019-08-30 Thread Sriram Vatala via dev
Hi All,
Please consider this as a gentle remainder.

Thanks & Regards,
Sriram.

-Original Message-
From: Sriram Vatala  
Sent: 28 August 2019 18:29
To: ovs-dev@openvswitch.org; i.maxim...@samsung.com
Cc: b...@ovn.org; ian.sto...@intel.com; Sriram Vatala

Subject: [PATCH v7] Detailed packet drop statistics per dpdk and vhostuser
ports

OVS may be unable to transmit packets for multiple reasons and today there
is a single counter to track packets dropped due to any of those reasons.
The most common reason is that a VM is unable to read packets fast enough
causing the vhostuser port transmit queue on the OVS side to become full.
This manifests as a problem with VNFs not receiving all packets. Having a
separate drop counter to track packets dropped because the transmit queue is
full will clearly indicate that the problem is on the VM side and not in
OVS. Similarly maintaining separate counters for all possible drops helps in
indicating sensible cause for packet drops.

This patch adds custom stats counters to track packets dropped at port level
and these counters are displayed along with other stats in "ovs-vsctl get
interface  statistics"
command. The detailed stats will be available for both dpdk and vhostuser
ports.

Signed-off-by: Sriram Vatala 
---
 lib/netdev-dpdk.c  | 99
+++---
 utilities/bugtool/automake.mk  |  3 +-
 utilities/bugtool/ovs-bugtool-get-iface-stats  | 25 ++
 .../bugtool/plugins/network-status/openvswitch.xml |  1 +
 vswitchd/vswitch.xml   | 24 ++
 5 files changed, 139 insertions(+), 13 deletions(-)  create mode 100755
utilities/bugtool/ovs-bugtool-get-iface-stats

diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c index bc20d68..a679c5b
100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -413,8 +413,17 @@ struct netdev_dpdk {
 
 PADDED_MEMBERS(CACHE_LINE_SIZE,
 struct netdev_stats stats;
-/* Custom stat for retries when unable to transmit. */
+/* Custom device stats */
+/* No. of retries when unable to transmit. */
 uint64_t tx_retries;
+/* Pkts dropped when unable to transmit; Probably Tx queue is full
*/
+uint64_t tx_failure_drops;
+/* Pkts len greater than max dev MTU */
+uint64_t tx_mtu_exceeded_drops;
+/* Pkt drops in egress policier processing */
+uint64_t tx_qos_drops;
+/* Pkts drops in ingress policier processing */
+uint64_t rx_qos_drops;
 /* Protects stats */
 rte_spinlock_t stats_lock;
 /* 4 pad bytes here. */
@@ -2171,6 +2180,7 @@ netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq,
 struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev);
 uint16_t nb_rx = 0;
 uint16_t dropped = 0;
+uint16_t qos_drops = 0;
 int qid = rxq->queue_id * VIRTIO_QNUM + VIRTIO_TXQ;
 int vid = netdev_dpdk_get_vid(dev);
 
@@ -2202,11 +2212,13 @@ netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq,
 (struct rte_mbuf **) batch->packets,
 nb_rx, true);
 dropped -= nb_rx;
+qos_drops = dropped;
 }
 
 rte_spinlock_lock(&dev->stats_lock);
 netdev_dpdk_vhost_update_rx_counters(&dev->stats, batch->packets,
  nb_rx, dropped);
+dev->rx_qos_drops += qos_drops;
 rte_spinlock_unlock(&dev->stats_lock);
 
 batch->count = nb_rx;
@@ -2232,6 +2244,7 @@ netdev_dpdk_rxq_recv(struct netdev_rxq *rxq, struct
dp_packet_batch *batch,
 struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev);
 int nb_rx;
 int dropped = 0;
+int qos_drops = 0;
 
 if (OVS_UNLIKELY(!(dev->flags & NETDEV_UP))) {
 return EAGAIN;
@@ -2250,12 +2263,14 @@ netdev_dpdk_rxq_recv(struct netdev_rxq *rxq, struct
dp_packet_batch *batch,
 (struct rte_mbuf **) batch->packets,
 nb_rx, true);
 dropped -= nb_rx;
+qos_drops = dropped;
 }
 
 /* Update stats to reflect dropped packets */
 if (OVS_UNLIKELY(dropped)) {
 rte_spinlock_lock(&dev->stats_lock);
 dev->stats.rx_dropped += dropped;
+dev->rx_qos_drops += qos_drops;
 rte_spinlock_unlock(&dev->stats_lock);
 }
 
@@ -2339,6 +2354,9 @@ __netdev_dpdk_vhost_send(struct netdev *netdev, int
qid,
 struct rte_mbuf **cur_pkts = (struct rte_mbuf **) pkts;
 unsigned int total_pkts = cnt;
 unsigned int dropped = 0;
+unsigned int tx_failure;
+unsigned int mtu_drops;
+unsigned int qos_drops;
 int i, retries = 0;
 int max_retries = VHOST_ENQ_RETRY_MIN;
 int vid = netdev_dpdk_get_vid(dev); @@ -2356,9 +2374,12 @@
__netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
 rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
 
 cnt = netdev_dpdk_filter_pa

[ovs-dev] [PATCH v7] Detailed packet drop statistics per dpdk and vhostuser ports

2019-08-28 Thread Sriram Vatala via dev
OVS may be unable to transmit packets for multiple reasons and
today there is a single counter to track packets dropped due to
any of those reasons. The most common reason is that a VM is
unable to read packets fast enough causing the vhostuser port
transmit queue on the OVS side to become full. This manifests
as a problem with VNFs not receiving all packets. Having a
separate drop counter to track packets dropped because the
transmit queue is full will clearly indicate that the problem
is on the VM side and not in OVS. Similarly maintaining separate
counters for all possible drops helps in indicating sensible
cause for packet drops.

This patch adds custom stats counters to track packets dropped
at port level and these counters are displayed along with
other stats in "ovs-vsctl get interface  statistics"
command. The detailed stats will be available for both dpdk and
vhostuser ports.

Signed-off-by: Sriram Vatala 
---
 lib/netdev-dpdk.c  | 99 +++---
 utilities/bugtool/automake.mk  |  3 +-
 utilities/bugtool/ovs-bugtool-get-iface-stats  | 25 ++
 .../bugtool/plugins/network-status/openvswitch.xml |  1 +
 vswitchd/vswitch.xml   | 24 ++
 5 files changed, 139 insertions(+), 13 deletions(-)
 create mode 100755 utilities/bugtool/ovs-bugtool-get-iface-stats

diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
index bc20d68..a679c5b 100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -413,8 +413,17 @@ struct netdev_dpdk {
 
 PADDED_MEMBERS(CACHE_LINE_SIZE,
 struct netdev_stats stats;
-/* Custom stat for retries when unable to transmit. */
+/* Custom device stats */
+/* No. of retries when unable to transmit. */
 uint64_t tx_retries;
+/* Pkts dropped when unable to transmit; Probably Tx queue is full */
+uint64_t tx_failure_drops;
+/* Pkts len greater than max dev MTU */
+uint64_t tx_mtu_exceeded_drops;
+/* Pkt drops in egress policier processing */
+uint64_t tx_qos_drops;
+/* Pkts drops in ingress policier processing */
+uint64_t rx_qos_drops;
 /* Protects stats */
 rte_spinlock_t stats_lock;
 /* 4 pad bytes here. */
@@ -2171,6 +2180,7 @@ netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq,
 struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev);
 uint16_t nb_rx = 0;
 uint16_t dropped = 0;
+uint16_t qos_drops = 0;
 int qid = rxq->queue_id * VIRTIO_QNUM + VIRTIO_TXQ;
 int vid = netdev_dpdk_get_vid(dev);
 
@@ -2202,11 +2212,13 @@ netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq,
 (struct rte_mbuf **) batch->packets,
 nb_rx, true);
 dropped -= nb_rx;
+qos_drops = dropped;
 }
 
 rte_spinlock_lock(&dev->stats_lock);
 netdev_dpdk_vhost_update_rx_counters(&dev->stats, batch->packets,
  nb_rx, dropped);
+dev->rx_qos_drops += qos_drops;
 rte_spinlock_unlock(&dev->stats_lock);
 
 batch->count = nb_rx;
@@ -2232,6 +2244,7 @@ netdev_dpdk_rxq_recv(struct netdev_rxq *rxq, struct 
dp_packet_batch *batch,
 struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev);
 int nb_rx;
 int dropped = 0;
+int qos_drops = 0;
 
 if (OVS_UNLIKELY(!(dev->flags & NETDEV_UP))) {
 return EAGAIN;
@@ -2250,12 +2263,14 @@ netdev_dpdk_rxq_recv(struct netdev_rxq *rxq, struct 
dp_packet_batch *batch,
 (struct rte_mbuf **) batch->packets,
 nb_rx, true);
 dropped -= nb_rx;
+qos_drops = dropped;
 }
 
 /* Update stats to reflect dropped packets */
 if (OVS_UNLIKELY(dropped)) {
 rte_spinlock_lock(&dev->stats_lock);
 dev->stats.rx_dropped += dropped;
+dev->rx_qos_drops += qos_drops;
 rte_spinlock_unlock(&dev->stats_lock);
 }
 
@@ -2339,6 +2354,9 @@ __netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
 struct rte_mbuf **cur_pkts = (struct rte_mbuf **) pkts;
 unsigned int total_pkts = cnt;
 unsigned int dropped = 0;
+unsigned int tx_failure;
+unsigned int mtu_drops;
+unsigned int qos_drops;
 int i, retries = 0;
 int max_retries = VHOST_ENQ_RETRY_MIN;
 int vid = netdev_dpdk_get_vid(dev);
@@ -2356,9 +2374,12 @@ __netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
 rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
 
 cnt = netdev_dpdk_filter_packet_len(dev, cur_pkts, cnt);
+mtu_drops = total_pkts - cnt;
+qos_drops = cnt;
 /* Check has QoS has been configured for the netdev */
 cnt = netdev_dpdk_qos_run(dev, cur_pkts, cnt, true);
-dropped = total_pkts - cnt;
+qos_drops -= cnt;
+dropped = qos_drops + mtu_drops;
 
 do {
 int vhost
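
To make the tx drop accounting in the hunk above concrete, a worked example
with made-up numbers:

/* Worked example, hypothetical numbers:
 *   total_pkts = 32 packets handed to __netdev_dpdk_vhost_send();
 *   netdev_dpdk_filter_packet_len() keeps 30 -> mtu_drops = 32 - 30 = 2;
 *   qos_drops starts at 30, netdev_dpdk_qos_run() keeps 27
 *                                             -> qos_drops = 30 - 27 = 3;
 *   dropped = qos_drops + mtu_drops = 5.
 * Whatever the vhost enqueue retry loop still fails to send afterwards is
 * added on top of this when the tx counters are updated. */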

Re: [ovs-dev] [PATCH v6] Detailed packet drop statistics per dpdk and vhostuser ports

2019-08-28 Thread Sriram Vatala via dev
Hi All,

The patch "netdev-dpdk: Refactor vhost custom stats for extensibility" by
Ilya has been merged to master, so I will rebase and send an updated patch to
avoid any git conflicts.

Thanks & Regards,
Sriram.

-Original Message-
From: Sriram Vatala  
Sent: 26 August 2019 18:29
To: ovs-dev@openvswitch.org; i.maxim...@samsung.com
Cc: b...@ovn.org; ian.sto...@intel.com; Sriram Vatala

Subject: [PATCH v6] Detailed packet drop statistics per dpdk and vhostuser
ports

OVS may be unable to transmit packets for multiple reasons and today there
is a single counter to track packets dropped due to any of those reasons.
The most common reason is that a VM is unable to read packets fast enough
causing the vhostuser port transmit queue on the OVS side to become full.
This manifests as a problem with VNFs not receiving all packets. Having a
separate drop counter to track packets dropped because the transmit queue is
full will clearly indicate that the problem is on the VM side and not in
OVS. Similarly maintaining separate counters for all possible drops helps in
indicating sensible cause for packet drops.

This patch adds custom stats counters to track packets dropped at port level
and these counters are displayed along with other stats in "ovs-vsctl get
interface  statistics"
command. The detailed stats will be available for both dpdk and vhostuser
ports.

Signed-off-by: Sriram Vatala 
---
 lib/netdev-dpdk.c  | 120
++---
 utilities/bugtool/automake.mk  |   3 +-
 utilities/bugtool/ovs-bugtool-get-iface-stats  |  25 +
 .../bugtool/plugins/network-status/openvswitch.xml |   1 +
 vswitchd/vswitch.xml   |  24 +
 5 files changed, 157 insertions(+), 16 deletions(-)  create mode 100755
utilities/bugtool/ovs-bugtool-get-iface-stats

diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c index 4805783..6685f32
100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -447,8 +447,14 @@ struct netdev_dpdk {
 
 PADDED_MEMBERS(CACHE_LINE_SIZE,
 struct netdev_stats stats;
-/* Custom stat for retries when unable to transmit. */
+/* Counters for Custom device stats */
+/* No. of retries when unable to transmit. */
 uint64_t tx_retries;
+/* Pkts left untransmitted in Tx buffers. Probably Tx Que is full
*/
+uint64_t tx_failure_drops;
+uint64_t tx_mtu_exceeded_drops;
+uint64_t tx_qos_drops;
+uint64_t rx_qos_drops;
 /* Protects stats */
 rte_spinlock_t stats_lock;
 /* 4 pad bytes here. */
@@ -2205,6 +2211,7 @@ netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq,
 struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev);
 uint16_t nb_rx = 0;
 uint16_t dropped = 0;
+uint16_t qos_drops = 0;
 int qid = rxq->queue_id * VIRTIO_QNUM + VIRTIO_TXQ;
 int vid = netdev_dpdk_get_vid(dev);
 
@@ -2236,11 +2243,13 @@ netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq,
 (struct rte_mbuf **) batch->packets,
 nb_rx, true);
 dropped -= nb_rx;
+qos_drops = dropped;
 }
 
 rte_spinlock_lock(&dev->stats_lock);
 netdev_dpdk_vhost_update_rx_counters(&dev->stats, batch->packets,
  nb_rx, dropped);
+dev->rx_qos_drops += qos_drops;
 rte_spinlock_unlock(&dev->stats_lock);
 
 batch->count = nb_rx;
@@ -2266,6 +2275,7 @@ netdev_dpdk_rxq_recv(struct netdev_rxq *rxq, struct
dp_packet_batch *batch,
 struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev);
 int nb_rx;
 int dropped = 0;
+int qos_drops = 0;
 
 if (OVS_UNLIKELY(!(dev->flags & NETDEV_UP))) {
 return EAGAIN;
@@ -2284,12 +2294,14 @@ netdev_dpdk_rxq_recv(struct netdev_rxq *rxq, struct
dp_packet_batch *batch,
 (struct rte_mbuf **) batch->packets,
 nb_rx, true);
 dropped -= nb_rx;
+qos_drops = dropped;
 }
 
 /* Update stats to reflect dropped packets */
 if (OVS_UNLIKELY(dropped)) {
 rte_spinlock_lock(&dev->stats_lock);
 dev->stats.rx_dropped += dropped;
+dev->rx_qos_drops += qos_drops;
 rte_spinlock_unlock(&dev->stats_lock);
 }
 
@@ -2373,6 +2385,9 @@ __netdev_dpdk_vhost_send(struct netdev *netdev, int
qid,
 struct rte_mbuf **cur_pkts = (struct rte_mbuf **) pkts;
 unsigned int total_pkts = cnt;
 unsigned int dropped = 0;
+unsigned int tx_failure;
+unsigned int mtu_drops;
+unsigned int qos_drops;
 int i, retries = 0;
 int max_retries = VHOST_ENQ_RETRY_MIN;
 int vid = netdev_dpdk_get_vid(dev); @@ -2390,9 +2405,12 @@
__netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
 rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
 
 cnt = netdev_dpdk_filter_packet_len(dev

[ovs-dev] [PATCH v6] Detailed packet drop statistics per dpdk and vhostuser ports

2019-08-26 Thread Sriram Vatala via dev
OVS may be unable to transmit packets for multiple reasons and
today there is a single counter to track packets dropped due to
any of those reasons. The most common reason is that a VM is
unable to read packets fast enough causing the vhostuser port
transmit queue on the OVS side to become full. This manifests
as a problem with VNFs not receiving all packets. Having a
separate drop counter to track packets dropped because the
transmit queue is full will clearly indicate that the problem
is on the VM side and not in OVS. Similarly maintaining separate
counters for all possible drops helps in indicating sensible
cause for packet drops.

This patch adds custom stats counters to track packets dropped
at port level and these counters are displayed along with
other stats in "ovs-vsctl get interface  statistics"
command. The detailed stats will be available for both dpdk and
vhostuser ports.

Signed-off-by: Sriram Vatala 
---
 lib/netdev-dpdk.c  | 120 ++---
 utilities/bugtool/automake.mk  |   3 +-
 utilities/bugtool/ovs-bugtool-get-iface-stats  |  25 +
 .../bugtool/plugins/network-status/openvswitch.xml |   1 +
 vswitchd/vswitch.xml   |  24 +
 5 files changed, 157 insertions(+), 16 deletions(-)
 create mode 100755 utilities/bugtool/ovs-bugtool-get-iface-stats

diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
index 4805783..6685f32 100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -447,8 +447,14 @@ struct netdev_dpdk {
 
 PADDED_MEMBERS(CACHE_LINE_SIZE,
 struct netdev_stats stats;
-/* Custom stat for retries when unable to transmit. */
+/* Counters for Custom device stats */
+/* No. of retries when unable to transmit. */
 uint64_t tx_retries;
+/* Pkts left untransmitted in Tx buffers. Probably Tx Que is full */
+uint64_t tx_failure_drops;
+uint64_t tx_mtu_exceeded_drops;
+uint64_t tx_qos_drops;
+uint64_t rx_qos_drops;
 /* Protects stats */
 rte_spinlock_t stats_lock;
 /* 4 pad bytes here. */
@@ -2205,6 +2211,7 @@ netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq,
 struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev);
 uint16_t nb_rx = 0;
 uint16_t dropped = 0;
+uint16_t qos_drops = 0;
 int qid = rxq->queue_id * VIRTIO_QNUM + VIRTIO_TXQ;
 int vid = netdev_dpdk_get_vid(dev);
 
@@ -2236,11 +2243,13 @@ netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq,
 (struct rte_mbuf **) batch->packets,
 nb_rx, true);
 dropped -= nb_rx;
+qos_drops = dropped;
 }
 
 rte_spinlock_lock(&dev->stats_lock);
 netdev_dpdk_vhost_update_rx_counters(&dev->stats, batch->packets,
  nb_rx, dropped);
+dev->rx_qos_drops += qos_drops;
 rte_spinlock_unlock(&dev->stats_lock);
 
 batch->count = nb_rx;
@@ -2266,6 +2275,7 @@ netdev_dpdk_rxq_recv(struct netdev_rxq *rxq, struct 
dp_packet_batch *batch,
 struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev);
 int nb_rx;
 int dropped = 0;
+int qos_drops = 0;
 
 if (OVS_UNLIKELY(!(dev->flags & NETDEV_UP))) {
 return EAGAIN;
@@ -2284,12 +2294,14 @@ netdev_dpdk_rxq_recv(struct netdev_rxq *rxq, struct 
dp_packet_batch *batch,
 (struct rte_mbuf **) batch->packets,
 nb_rx, true);
 dropped -= nb_rx;
+qos_drops = dropped;
 }
 
 /* Update stats to reflect dropped packets */
 if (OVS_UNLIKELY(dropped)) {
 rte_spinlock_lock(&dev->stats_lock);
 dev->stats.rx_dropped += dropped;
+dev->rx_qos_drops += qos_drops;
 rte_spinlock_unlock(&dev->stats_lock);
 }
 
@@ -2373,6 +2385,9 @@ __netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
 struct rte_mbuf **cur_pkts = (struct rte_mbuf **) pkts;
 unsigned int total_pkts = cnt;
 unsigned int dropped = 0;
+unsigned int tx_failure;
+unsigned int mtu_drops;
+unsigned int qos_drops;
 int i, retries = 0;
 int max_retries = VHOST_ENQ_RETRY_MIN;
 int vid = netdev_dpdk_get_vid(dev);
@@ -2390,9 +2405,12 @@ __netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
 rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
 
 cnt = netdev_dpdk_filter_packet_len(dev, cur_pkts, cnt);
+mtu_drops = total_pkts - cnt;
+qos_drops = cnt;
 /* Check has QoS has been configured for the netdev */
 cnt = netdev_dpdk_qos_run(dev, cur_pkts, cnt, true);
-dropped = total_pkts - cnt;
+qos_drops -= cnt;
+dropped = qos_drops + mtu_drops;
 
 do {
 int vhost_qid = qid * VIRTIO_QNUM + VIRTIO_RXQ;
@@ -2417,12 +2435,16 @@ __netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
 }
 } while (cnt && (retries++

Re: [ovs-dev] [PATCH v5] Detailed packet drop statistics per dpdk and vhostuser ports

2019-08-23 Thread Sriram Vatala via dev


-Original Message-
From: Ilya Maximets  
Sent: 06 August 2019 22:02
To: Sriram Vatala ; 'Ben Pfaff'
; 'Stokes, Ian' 
Cc: ovs-dev@openvswitch.org
Subject: Re: [PATCH v5] Detailed packet drop statistics per dpdk and
vhostuser ports

On 24.07.2019 11:55, Sriram Vatala wrote:
> Hi,
> 
> @Ben : Thanks for the response.
> 
> @Ilya, @Ian : Can you please review the patch and provide comments if any.

Hi.
Thanks for working on this!

One thing about the patch is that it modifies the hot path, so it needs a
performance evaluation before applying. I hope to have some time for it this
week. Have you tested the performance difference with and without this patch?

>>>> Sorry for the late reply. I have tested the performance with and without
this patch. I tested performance for various combinations of packet
sizes and flows: 64 B, 128 B, 256 B and 512 B packets, each with 1, 10, 400,
1000, 1 and 100 flows. I tested these combinations with UDP traffic
and VxLAN traffic. No degradation was observed in these test cases.

Also, I see is that you're inserting fairly big array into PADDED_MEMBERS
block. There are few issues with that:

1. There is no need to store the whole 'struct netdev_custom_counter' for
   each netdev instance because the names take 64 bytes each and they are
   the same for each netdev instance anyway (see the sketch after this list).

2. You're not paying attention to the amount of pad bytes in this section
   after the change. I suspect a big hole in the structure here.

>>>> I have checked your recent patch on the mailing list implementing the
stats fetching ("Refactor vhost custom stats for extensibility"). I will
adopt the same implementation for fetching the dpdk/vhost custom stats and will
send the updated patch v6. Thanks for your comments.

Thanks & Regards,
Sriram.

   Regarding this issue, actually, I'd like to remove these cacheline
   alignments completely from the structure (they had no performance impact
   for me previously), but it's a different change and there was no active
   support from the community when I wanted to do that a few years ago.
   However, there was no strong objections too.
   Ian, do you have some thought about this?

Best regards, Ilya Maximets.

> 
> Thanks,
> Sriram.
> 
> -Original Message-----
> From: Ben Pfaff 
> Sent: 22 July 2019 21:37
> To: Sriram Vatala 
> Cc: ovs-dev@openvswitch.org
> Subject: Re: [PATCH v5] Detailed packet drop statistics per dpdk and 
> vhostuser ports
> 
> On Mon, Jul 22, 2019 at 03:31:53PM +0530, Sriram Vatala wrote:
>> OVS may be unable to transmit packets for multiple reasons and today 
>> there is a single counter to track packets dropped due to any of 
>> those reasons. The most common reason is that a VM is unable to read 
>> packets fast enough causing the vhostuser port transmit queue on the 
>> OVS side to become full. This manifests as a problem with VNFs not 
>> receiving all packets. Having a separate drop counter to track 
>> packets dropped because the transmit queue is full will clearly 
>> indicate that the problem is on the VM side and not in OVS. Similarly 
>> maintaining separate counters for all possible drops helps in 
>> indicating sensible cause for packet drops.
>>
>> This patch adds counters for custom stats to track packets dropped at 
>> port level and these stats are displayed along with other stats in 
>> "ovs-vsctl get interface  statistics"
>> command. The detailed stats will be available for both dpdk and 
>> vhostuser ports.
>>
>> Signed-off-by: Sriram Vatala 
> 
> Thanks for the revision!  I'm happy with the bits that are important to
me.
> I'll leave the final review to Ian or Ilya.
> 
___
dev mailing list
d...@openvswitch.org
https://mail.openvswitch.org/mailman/listinfo/ovs-dev


Re: [ovs-dev] [PATCH 2/2] netdev-dpdk: Refactor vhost custom stats for extensibility.

2019-08-19 Thread Sriram Vatala via dev

-Original Message-
From: Ilya Maximets 
Sent: 09 August 2019 17:44
To: ovs-dev@openvswitch.org
Cc: Ian Stokes ; Kevin Traynor ; 
David Marchand ; Sriram Vatala 
; Ilya Maximets 
Subject: [PATCH 2/2] netdev-dpdk: Refactor vhost custom stats for 
extensibility.

vHost interfaces currently have only one custom statistic, but there might be 
others in the near future. This refactoring makes the code work in the same 
way as is done for dpdk and afxdp stats, to keep a common style across the 
different code places, and makes it easy to extend for new stats 
additions.

Signed-off-by: Ilya Maximets 
---
 lib/netdev-dpdk.c | 33 ++---
 1 file changed, 22 insertions(+), 11 deletions(-)

diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
index 52ecf7576..90aabe3fb 100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -110,12 +110,6 @@ BUILD_ASSERT_DECL(MAX_NB_MBUF % ROUND_DOWN_POW2(MAX_NB_MBUF / MIN_NB_MBUF)
 BUILD_ASSERT_DECL((MAX_NB_MBUF / ROUND_DOWN_POW2(MAX_NB_MBUF / MIN_NB_MBUF))
   % MP_CACHE_SZ == 0);

-/* Size of vHost custom stats. */
-#define VHOST_CUSTOM_STATS_SIZE  1
-
-/* Names of vHost custom stats. */
-#define VHOST_STAT_TX_RETRIES"tx_retries"
-
 #define SOCKET0  0

 /* Default size of Physical NIC RXQ */
@@ -2827,17 +2821,34 @@ netdev_dpdk_vhost_get_custom_stats(const struct netdev 
*netdev,
struct netdev_custom_stats *custom_stats) 
{
 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
+int i;

-ovs_mutex_lock(&dev->mutex);
+#define VHOST_CSTATS \
+VHOST_CSTAT(tx_retries)

-custom_stats->size = VHOST_CUSTOM_STATS_SIZE;
+#define VHOST_CSTAT(NAME) + 1
+custom_stats->size = VHOST_CSTATS;
+#undef VHOST_CSTAT
 custom_stats->counters = xcalloc(custom_stats->size,
  sizeof *custom_stats->counters);
-ovs_strlcpy(custom_stats->counters[0].name, VHOST_STAT_TX_RETRIES,
-NETDEV_CUSTOM_STATS_NAME_SIZE);
+
+for (i = 0; i < custom_stats->size; i++) {
+#define VHOST_CSTAT(NAME) \
+ovs_strlcpy(custom_stats->counters[i].name, #NAME, \
+NETDEV_CUSTOM_STATS_NAME_SIZE);
+VHOST_CSTATS;
+#undef VHOST_CSTAT
+}
+
+ovs_mutex_lock(&dev->mutex);

>>>In case of multiple VHOST_CSTAT entries declared under VHOST_CSTATS, inside 
>>>the for loop VHOST_CSTATS would expand to a string copy for each entry at the 
>>>same index, so only the last entry under VHOST_CSTATS is kept. 
>>>As a result, the name field will be the same for all entries in custom_stats.
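>>>To make that concrete, with e.g. two entries (hypothetical example)
>>>    #define VHOST_CSTATS \
>>>        VHOST_CSTAT(tx_retries) \
>>>        VHOST_CSTAT(tx_failure_drops)
>>>the name-copy loop would expand roughly to:
>>>    for (i = 0; i < custom_stats->size; i++) {
>>>        ovs_strlcpy(custom_stats->counters[i].name, "tx_retries",
>>>                    NETDEV_CUSTOM_STATS_NAME_SIZE);
>>>        ovs_strlcpy(custom_stats->counters[i].name, "tx_failure_drops",
>>>                    NETDEV_CUSTOM_STATS_NAME_SIZE);
>>>    }
>>>where the second copy overwrites the first for every i.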

 rte_spinlock_lock(&dev->stats_lock);
-custom_stats->counters[0].value = dev->tx_retries;
+for (i = 0; i < custom_stats->size; i++) {
+#define VHOST_CSTAT(NAME) \
+custom_stats->counters[i].value = dev->NAME;
+VHOST_CSTATS;
+#undef VHOST_CSTAT
+}
 rte_spinlock_unlock(&dev->stats_lock);

>>> Here also, the "value" member of all custom_stats entries will be updated to 
>>> the same count in case of multiple VHOST_CSTAT entries.
 ovs_mutex_unlock(&dev->mutex);
--
2.17.1

Thanks for working on a generic implementation for updating custom stats.
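
One possible way to avoid the overwrite would be to expand VHOST_CSTATS just
once and let each VHOST_CSTAT advance the index, roughly like this (a sketch
only, not necessarily what will be applied):

    i = 0;
#define VHOST_CSTAT(NAME) \
    ovs_strlcpy(custom_stats->counters[i++].name, #NAME, \
                NETDEV_CUSTOM_STATS_NAME_SIZE);
    VHOST_CSTATS;
#undef VHOST_CSTAT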

Thanks & Regards,
Sriram.
___
dev mailing list
d...@openvswitch.org
https://mail.openvswitch.org/mailman/listinfo/ovs-dev


Re: [ovs-dev] [PATCH v5] Detailed packet drop statistics per dpdk and vhostuser ports

2019-08-04 Thread Sriram Vatala via dev
Please consider this as a gentle reminder.

Thanks,
Sriram.

-Original Message-
From: ovs-dev-boun...@openvswitch.org  On
Behalf Of Sriram Vatala via dev
Sent: 24 July 2019 14:26
To: 'Ben Pfaff' ; 'Ilya Maximets' ;
'Stokes, Ian' 
Cc: ovs-dev@openvswitch.org
Subject: Re: [ovs-dev] [PATCH v5] Detailed packet drop statistics per dpdk
and vhostuser ports

Hi,

@Ben : Thanks for the response.

@Ilya, @Ian : Can you please review the patch and provide comments if any.

Thanks,
Sriram.

-Original Message-
From: Ben Pfaff 
Sent: 22 July 2019 21:37
To: Sriram Vatala 
Cc: ovs-dev@openvswitch.org
Subject: Re: [PATCH v5] Detailed packet drop statistics per dpdk and
vhostuser ports

On Mon, Jul 22, 2019 at 03:31:53PM +0530, Sriram Vatala wrote:
> OVS may be unable to transmit packets for multiple reasons and today 
> there is a single counter to track packets dropped due to any of those 
> reasons. The most common reason is that a VM is unable to read packets 
> fast enough causing the vhostuser port transmit queue on the OVS side 
> to become full. This manifests as a problem with VNFs not receiving 
> all packets. Having a separate drop counter to track packets dropped 
> because the transmit queue is full will clearly indicate that the 
> problem is on the VM side and not in OVS. Similarly maintaining 
> separate counters for all possible drops helps in indicating sensible 
> cause for packet drops.
> 
> This patch adds counters for custom stats to track packets dropped at 
> port level and these stats are displayed along with other stats in 
> "ovs-vsctl get interface  statistics"
> command. The detailed stats will be available for both dpdk and 
> vhostuser ports.
> 
> Signed-off-by: Sriram Vatala 

Thanks for the revision!  I'm happy with the bits that are important to me.
I'll leave the final review to Ian or Ilya.
___
dev mailing list
d...@openvswitch.org
https://mail.openvswitch.org/mailman/listinfo/ovs-dev


Re: [ovs-dev] [PATCH v5] Detailed packet drop statistics per dpdk and vhostuser ports

2019-07-24 Thread Sriram Vatala via dev
Hi,

@Ben : Thanks for the response.

@Ilya, @Ian : Can you please review the patch and provide comments if any.

Thanks,
Sriram.

-Original Message-
From: Ben Pfaff  
Sent: 22 July 2019 21:37
To: Sriram Vatala 
Cc: ovs-dev@openvswitch.org
Subject: Re: [PATCH v5] Detailed packet drop statistics per dpdk and
vhostuser ports

On Mon, Jul 22, 2019 at 03:31:53PM +0530, Sriram Vatala wrote:
> OVS may be unable to transmit packets for multiple reasons and today 
> there is a single counter to track packets dropped due to any of those 
> reasons. The most common reason is that a VM is unable to read packets 
> fast enough causing the vhostuser port transmit queue on the OVS side 
> to become full. This manifests as a problem with VNFs not receiving 
> all packets. Having a separate drop counter to track packets dropped 
> because the transmit queue is full will clearly indicate that the 
> problem is on the VM side and not in OVS. Similarly maintaining 
> separate counters for all possible drops helps in indicating sensible 
> cause for packet drops.
> 
> This patch adds counters for custom stats to track packets dropped at 
> port level and these stats are displayed along with other stats in 
> "ovs-vsctl get interface  statistics"
> command. The detailed stats will be available for both dpdk and 
> vhostuser ports.
> 
> Signed-off-by: Sriram Vatala 

Thanks for the revision!  I'm happy with the bits that are important to me.
I'll leave the final review to Ian or Ilya.
___
dev mailing list
d...@openvswitch.org
https://mail.openvswitch.org/mailman/listinfo/ovs-dev


[ovs-dev] [PATCH v5] Detailed packet drop statistics per dpdk and vhostuser ports

2019-07-22 Thread Sriram Vatala via dev
OVS may be unable to transmit packets for multiple reasons and
today there is a single counter to track packets dropped due to
any of those reasons. The most common reason is that a VM is
unable to read packets fast enough causing the vhostuser port
transmit queue on the OVS side to become full. This manifests
as a problem with VNFs not receiving all packets. Having a
separate drop counter to track packets dropped because the
transmit queue is full will clearly indicate that the problem
is on the VM side and not in OVS. Similarly maintaining separate
counters for all possible drops helps in indicating sensible
cause for packet drops.

This patch adds counters for custom stats to track packets dropped
at port level and these stats are displayed along with
other stats in "ovs-vsctl get interface  statistics"
command. The detailed stats will be available for both dpdk and
vhostuser ports.

Signed-off-by: Sriram Vatala 
---
 lib/netdev-dpdk.c  | 132 +++--
 utilities/bugtool/automake.mk  |   3 +-
 utilities/bugtool/ovs-bugtool-get-iface-stats  |  25 
 .../bugtool/plugins/network-status/openvswitch.xml |   1 +
 vswitchd/vswitch.xml   |  24 
 5 files changed, 150 insertions(+), 35 deletions(-)
 create mode 100755 utilities/bugtool/ovs-bugtool-get-iface-stats

diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
index 4805783..5f1ff7a 100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -138,11 +138,14 @@ BUILD_ASSERT_DECL((MAX_NB_MBUF / 
ROUND_DOWN_POW2(MAX_NB_MBUF / MIN_NB_MBUF))
 #define XSTAT_RX_FRAGMENTED_ERRORS   "rx_fragmented_errors"
 #define XSTAT_RX_JABBER_ERRORS   "rx_jabber_errors"
 
-/* Size of vHost custom stats. */
-#define VHOST_CUSTOM_STATS_SIZE  1
+static const char *custom_stats_names[] = {"tx_retries",
+   "tx_failure_drops",
+   "tx_mtu_exceeded_drops",
+   "tx_qos_drops",
+   "rx_qos_drops"};
+#define CUSTOM_STATS_SIZE 5
 
-/* Names of vHost custom stats. */
-#define VHOST_STAT_TX_RETRIES"tx_retries"
+enum {TX_RETRIES, TX_FAILURE_DROPS, TX_MTU_DROPS, TX_QOS_DROPS, RX_QOS_DROPS};
 
 #define SOCKET0  0
 
@@ -447,8 +450,8 @@ struct netdev_dpdk {
 
 PADDED_MEMBERS(CACHE_LINE_SIZE,
 struct netdev_stats stats;
-/* Custom stat for retries when unable to transmit. */
-uint64_t tx_retries;
+/* Custom device statistics */
+struct netdev_custom_counter custom_stats[CUSTOM_STATS_SIZE];
 /* Protects stats */
 rte_spinlock_t stats_lock;
 /* 4 pad bytes here. */
@@ -1153,6 +1156,7 @@ common_construct(struct netdev *netdev, dpdk_port_t 
port_no,
  enum dpdk_dev_type type, int socket_id)
 OVS_REQUIRES(dpdk_mutex)
 {
+int i;
 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
 
 ovs_mutex_init(&dev->mutex);
@@ -1190,6 +1194,13 @@ common_construct(struct netdev *netdev, dpdk_port_t 
port_no,
 /* Initialize the flow control to NULL */
 memset(&dev->fc_conf, 0, sizeof dev->fc_conf);
 
+/* Initialize the custom stats to zeros */
+memset(dev->custom_stats, 0, sizeof dev->custom_stats);
+
+for (i = 0; i < CUSTOM_STATS_SIZE; i++) {
+ ovs_strlcpy(dev->custom_stats[i].name,
+ custom_stats_names[i], NETDEV_CUSTOM_STATS_NAME_SIZE);
+}
 /* Initilize the hardware offload flags to 0 */
 dev->hw_ol_features = 0;
 
@@ -1205,7 +1216,6 @@ common_construct(struct netdev *netdev, dpdk_port_t 
port_no,
 dev->rte_xstats_ids = NULL;
 dev->rte_xstats_ids_size = 0;
 
-dev->tx_retries = 0;
 
 return 0;
 }
@@ -2160,7 +2170,7 @@ netdev_dpdk_vhost_update_rx_size_counters(struct 
netdev_stats *stats,
 }
 
 static inline void
-netdev_dpdk_vhost_update_rx_counters(struct netdev_stats *stats,
+netdev_dpdk_vhost_update_rx_counters(struct netdev_dpdk *dev,
  struct dp_packet **packets, int count,
  int dropped)
 {
@@ -2168,8 +2178,11 @@ netdev_dpdk_vhost_update_rx_counters(struct netdev_stats 
*stats,
 unsigned int packet_size;
 struct dp_packet *packet;
 
+struct netdev_stats *stats = &dev->stats;
 stats->rx_packets += count;
 stats->rx_dropped += dropped;
+dev->custom_stats[RX_QOS_DROPS].value += dropped;
+
 for (i = 0; i < count; i++) {
 packet = packets[i];
 packet_size = dp_packet_size(packet);
@@ -2239,7 +2252,7 @@ netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq,
 }
 
 rte_spinlock_lock(&dev->stats_lock);
-netdev_dpdk_vhost_update_rx_counters(&dev->stats, batch->packets,
+netdev_dpdk_vhost_update_rx_c

Re: [ovs-dev] [PATCH v4] Detailed packet drop statistics per dpdk and vhostuser ports

2019-07-17 Thread Sriram Vatala via dev
Hi Ben,

Thanks for reviewing the patch. As per your suggestion, I will use
netdev_get_custom_stats() to fetch the new statistics counters and document them
in vswitchd/vswitch.xml.
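
Roughly, the custom-stats callback for these counters could look like this
(a sketch only; the function name is made up, and custom_stats_names[] / the
per-device custom_stats[] array follow the later v5 revision elsewhere in this
thread):

static int
netdev_dpdk_get_sw_custom_stats(const struct netdev *netdev,
                                struct netdev_custom_stats *custom_stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int i;

    custom_stats->size = CUSTOM_STATS_SIZE;
    custom_stats->counters = xcalloc(custom_stats->size,
                                     sizeof *custom_stats->counters);

    ovs_mutex_lock(&dev->mutex);
    rte_spinlock_lock(&dev->stats_lock);
    for (i = 0; i < CUSTOM_STATS_SIZE; i++) {
        ovs_strlcpy(custom_stats->counters[i].name, custom_stats_names[i],
                    NETDEV_CUSTOM_STATS_NAME_SIZE);
        custom_stats->counters[i].value = dev->custom_stats[i].value;
    }
    rte_spinlock_unlock(&dev->stats_lock);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}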

Thanks,
Sriram.

-Original Message-
From: Ben Pfaff  
Sent: 17 July 2019 01:21
To: Sriram Vatala 
Cc: ovs-dev@openvswitch.org
Subject: Re: [ovs-dev] [PATCH v4] Detailed packet drop statistics per dpdk
and vhostuser ports

On Tue, Jul 16, 2019 at 07:02:29PM +0530, Sriram Vatala via dev wrote:
> OVS may be unable to transmit packets for multiple reasons and today 
> there is a single counter to track packets dropped due to any of those 
> reasons. The most common reason is that a VM is unable to read packets 
> fast enough causing the vhostuser port transmit queue on the OVS side 
> to become full. This manifests as a problem with VNFs not receiving 
> all packets. Having a separate drop counter to track packets dropped 
> because the transmit queue is full will clearly indicate that the 
> problem is on the VM side and not in OVS. Similarly maintaining 
> separate counters for all possible drops helps in indicating sensible 
> cause for packet drops.
> 
> This patch adds counters to track packets dropped due to all possible 
> reasons and these counters are displayed along with other stats in 
> "ovs-vsctl get interface  statistics"
> command. The detailed stats will be available for both dpdk and 
> vhostuser ports.

Did you consider using netdev_get_custom_stats() for these counters?  I do
not know whether they are likely to be implemented by other network devices,
and custom stats are an appropriate way to implement specialized statistics.

> Following are the details of the new counters :
> 
> tx_failed_drops : Sometimes DPDK physical/vHost port transmit API 
> fails to send all/some packets. These untransmitted packets are 
> dropped. The most likely reason for this to happen is because of 
> transmit queue overrun. Besides transmit queue overrun, there are 
> other unlikely reasons such as invalid queue id etc.
> 
> tx_mtu_exceeded_drops : These are the packets dropped due to MTU 
> mismatch (i.e Pkt len > Max Dev MTU).
> 
> tx_qos_drops/rx_qos_drops : These are the packets dropped due to 
> transmission/reception rate exceeding the configured Egress/Ingress 
> policer rate on the interface.

It would make sense to include the above descriptions in
vswitchd/vswitch.xml alongside the other statistics under the "Statistics"
group for the Interface table.

Thanks for working to make OVS better!
___
dev mailing list
d...@openvswitch.org
https://mail.openvswitch.org/mailman/listinfo/ovs-dev


[ovs-dev] [PATCH v4] Detailed packet drop statistics per dpdk and vhostuser ports

2019-07-16 Thread Sriram Vatala via dev
OVS may be unable to transmit packets for multiple reasons and
today there is a single counter to track packets dropped due to
any of those reasons. The most common reason is that a VM is
unable to read packets fast enough causing the vhostuser port
transmit queue on the OVS side to become full. This manifests
as a problem with VNFs not receiving all packets. Having a
separate drop counter to track packets dropped because the
transmit queue is full will clearly indicate that the problem
is on the VM side and not in OVS. Similarly maintaining separate
counters for all possible drops helps in indicating sensible
cause for packet drops.

This patch adds counters to track packets dropped due to all
possible reasons and these counters are displayed along with
other stats in "ovs-vsctl get interface  statistics"
command. The detailed stats will be available for both dpdk and
vhostuser ports.

Following are the details of the new counters :

tx_failed_drops : Sometimes DPDK physical/vHost port transmit
API fails to send all/some packets. These untransmitted packets
are dropped. The most likely reason for this to happen is
because of transmit queue overrun. Besides transmit queue
overrun, there are other unlikely reasons such as invalid
queue id etc.

tx_mtu_exceeded_drops : These are the packets dropped due
to MTU mismatch (i.e Pkt len > Max Dev MTU).

tx_qos_drops/rx_qos_drops : These are the packets dropped due
to transmission/reception rate exceeding the configured
Egress/Ingress policer rate on the interface.

Signed-off-by: Sriram Vatala 
---
 Documentation/topics/dpdk/bridge.rst   | 28 +++
 include/openvswitch/netdev.h   |  9 
 lib/netdev-dpdk.c  | 56 ++
 utilities/bugtool/automake.mk  |  3 +-
 utilities/bugtool/ovs-bugtool-get-iface-stats  | 25 ++
 .../bugtool/plugins/network-status/openvswitch.xml |  1 +
 vswitchd/bridge.c  |  6 ++-
 7 files changed, 118 insertions(+), 10 deletions(-)
 create mode 100755 utilities/bugtool/ovs-bugtool-get-iface-stats

diff --git a/Documentation/topics/dpdk/bridge.rst 
b/Documentation/topics/dpdk/bridge.rst
index a3ed926..43080c2 100644
--- a/Documentation/topics/dpdk/bridge.rst
+++ b/Documentation/topics/dpdk/bridge.rst
@@ -74,6 +74,34 @@ OpenFlow14`` option::
 
 $ ovs-ofctl -O OpenFlow14 dump-ports br0
 
+Stats counter to track packet drops at Bridge port level
+
+
+Detailed statistics counters for transmit dropped packets and receive
+dropped packets are implemented and supported only for DPDK physical
+and vHost ports.
+
+Following are the details of these counters :
+
+tx_failure_drops : Sometimes DPDK transmit API for physical/vHost ports
+may fail to transmit all/some packets in its packet buffer which is
+passed as input argument to the dpdk xmit API. These untransmitted
+packets are dropped. The most likely reason for this to happens is when
+the transmit queue is full or has been filled up. There are other
+unlikely reasons such as invalid Tx queue id etc.
+
+tx_mtu_exceeded_drops : These are the packets dropped due to MTU mismatch
+(i.e Pkt len > Max Dev MTU).
+
+tx_qos_drops/rx_qos_drops : These are the packets dropped due to
+transmission/reception rate exceeding the
+configured Egress/Ingress policer rate on the interface.
+
+These statistic counters can be viewed with the following commands::
+
+$ ovs-vsctl get interface  statistics
+$ ovs-vsctl list interface
+
 EMC Insertion Probability
 -
 
diff --git a/include/openvswitch/netdev.h b/include/openvswitch/netdev.h
index 0c10f7b..7851736 100644
--- a/include/openvswitch/netdev.h
+++ b/include/openvswitch/netdev.h
@@ -61,6 +61,15 @@ struct netdev_stats {
 uint64_t tx_heartbeat_errors;
 uint64_t tx_window_errors;
 
+/* Detailed receive drops. */
+uint64_t rx_qos_drops;  /* rx rate exceeded conf'd qos rate */
+
+/* Detailed transmit drops. */
+uint64_t tx_failure_drops;  /* Dpdk tx failure, probably tx
+ * queue is full. */
+uint64_t tx_qos_drops;  /* tx rate exceeded conf'd qos rate */
+uint64_t tx_mtu_drops;  /* tx pkt len exceeded max dev MTU */
+
 /* Extended statistics based on RFC2819. */
 uint64_t rx_1_to_64_packets;
 uint64_t rx_65_to_127_packets;
diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
index 4805783..3f22701 100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -68,6 +68,7 @@
 #include "uuid.h"
 
 enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};
+enum {QOS_DROPS, MTU_DROPS, TX_FAILURE_DROPS, MAX_DROPS};
 
 VLOG_DEFINE_THIS_MODULE(netdev_dpdk);
 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
@@ -2170,6 +2171,7 @@ netdev_dpdk_vhost_update_rx_counters(struct netdev_stats 
*stats,
 
 stats->rx_packets += count;

[ovs-dev] [PATCH v3] Detailed packet drop statistics per dpdk and vhostuser ports

2019-07-15 Thread Sriram Vatala via dev
OVS may be unable to transmit packets for multiple reasons and
today there is a single counter to track packets dropped due to
any of those reasons. The most common reason is that a VM is
unable to read packets fast enough causing the vhostuser port
transmit queue on the OVS side to become full. This manifests
as a problem with VNFs not receiving all packets. Having a
separate drop counter to track packets dropped because the
transmit queue is full will clearly indicate that the problem
is on the VM side and not in OVS. Similarly maintaining separate
counters for all possible drops helps in indicating sensible
cause for packet drops.

This patch adds counters to track packets dropped due to all
possible reasons and these counters are displayed along with
other stats in "ovs-vsctl get interface  statistics"
command. The detailed stats will be available for both dpdk and
vhostuser ports.

Following are the details of the new counters :

tx_failure_drops : Sometimes the DPDK physical/vHost port
transmit API fails to send all/some packets. These
untransmitted packets are dropped. The most likely reason
for this to happen is a transmit queue overrun. Besides
that, there are other, unlikely reasons such as an invalid
queue id.

tx_mtu_exceeded_drops : These are the packets dropped due
to MTU mismatch (i.e Pkt len > Max Dev MTU).

tx_qos_drops/rx_qos_drops : These are the packets dropped due
to transmission/reception rate exceeding the configured
Egress/Ingress policer rate on the interface.

Signed-off-by: Sriram Vatala 
---
 Documentation/topics/dpdk/bridge.rst   | 25 ++
 include/openvswitch/netdev.h   |  9 
 lib/netdev-dpdk.c  | 56 ++
 utilities/bugtool/automake.mk  |  3 +-
 utilities/bugtool/ovs-bugtool-get-iface-stats  | 25 ++
 .../bugtool/plugins/network-status/openvswitch.xml |  1 +
 vswitchd/bridge.c  |  6 ++-
 7 files changed, 115 insertions(+), 10 deletions(-)
 create mode 100755 utilities/bugtool/ovs-bugtool-get-iface-stats

diff --git a/Documentation/topics/dpdk/bridge.rst 
b/Documentation/topics/dpdk/bridge.rst
index a3ed926..df87c7a 100644
--- a/Documentation/topics/dpdk/bridge.rst
+++ b/Documentation/topics/dpdk/bridge.rst
@@ -74,6 +74,31 @@ OpenFlow14`` option::
 
 $ ovs-ofctl -O OpenFlow14 dump-ports br0
 
+Detailed statistics counters for transmit dropped packets and receive
+dropped packets are implemented and supported only for DPDK physical
+and vHost ports.
+
+Following are the details of the new counters :
+
+tx_failure_drops : Sometimes the DPDK transmit API for physical/vHost
+ports fails to transmit all/some packets in the packet buffer that is
+passed to it as an input argument. These untransmitted packets are
+dropped. The most likely reason for this to happen is that the transmit
+queue is full or has filled up. There are other, unlikely reasons such
+as an invalid Tx queue id.
+
+tx_mtu_exceeded_drops : These are the packets dropped due to MTU mismatch
+(i.e Pkt len > Max Dev MTU).
+
+tx_qos_drops/rx_qos_drops : These are the packets dropped due to
+transmission/reception rate exceeding the
+configured Egress/Ingress policer rate on the interface.
+
+These statistic counters can be viewed with the following commands::
+
+$ ovs-vsctl get interface <iface> statistics
+$ ovs-vsctl list interface
+
 EMC Insertion Probability
 -------------------------
 
diff --git a/include/openvswitch/netdev.h b/include/openvswitch/netdev.h
index 0c10f7b..7851736 100644
--- a/include/openvswitch/netdev.h
+++ b/include/openvswitch/netdev.h
@@ -61,6 +61,15 @@ struct netdev_stats {
 uint64_t tx_heartbeat_errors;
 uint64_t tx_window_errors;
 
+/* Detailed receive drops. */
+uint64_t rx_qos_drops;  /* rx rate exceeded conf'd qos rate */
+
+/* Detailed transmit drops. */
+uint64_t tx_failure_drops;  /* Dpdk tx failure, probably tx
+ * queue is full. */
+uint64_t tx_qos_drops;  /* tx rate exceeded conf'd qos rate */
+uint64_t tx_mtu_drops;  /* tx pkt len exceeded max dev MTU */
+
 /* Extended statistics based on RFC2819. */
 uint64_t rx_1_to_64_packets;
 uint64_t rx_65_to_127_packets;
diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
index 4805783..3f22701 100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -68,6 +68,7 @@
 #include "uuid.h"
 
 enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};
+enum {QOS_DROPS, MTU_DROPS, TX_FAILURE_DROPS, MAX_DROPS};
 
 VLOG_DEFINE_THIS_MODULE(netdev_dpdk);
 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
@@ -2170,6 +2171,7 @@ netdev_dpdk_vhost_update_rx_counters(struct netdev_stats 
*stats,
 
 stats->rx_packets += count;
 stats->rx_dropped += dropped;
+stats->rx_qos_d
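
rx_qos_drops is bumped both in the vhost rx counters update shown (truncated)
above and in the non-vhost receive path. As a minimal illustrative sketch of
the underlying accounting -- not a hunk from the patch -- every detailed rx
drop comes from the ingress policer, so one count feeds both the existing and
the new counter; the helper name ingress_policer_run is an assumption here:

    /* Illustrative only: all detailed rx drops come from the ingress
     * policer, so the same count feeds rx_dropped and rx_qos_drops. */
    static void
    account_rx_qos_drops(struct netdev_dpdk *dev,
                         struct ingress_policer *policer,
                         struct rte_mbuf **pkts, int nb_rx)
    {
        int nb_kept = ingress_policer_run(policer, pkts, nb_rx); /* assumed name */
        int dropped = nb_rx - nb_kept;

        if (OVS_UNLIKELY(dropped)) {
            rte_spinlock_lock(&dev->stats_lock);
            dev->stats.rx_dropped += dropped;
            dev->stats.rx_qos_drops += dropped;
            rte_spinlock_unlock(&dev->stats_lock);
        }
    }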

Re: [ovs-dev] [PATCH v2] Detailed packet drop statistics per dpdk and vhostuser ports

2019-07-02 Thread Sriram Vatala via dev
Hi Kevin,

Please consider this as a gentle reminder regarding the mail discussion below,
and let me know your thoughts, if any.

Thanks & Regards,
Sriram.

-Original Message-
From: Sriram Vatala 
Sent: 26 June 2019 11:01
To: 'Venkatesan Pradeep' ; 'Kevin Traynor' 
; 'ovs-dev@openvswitch.org' 
Cc: 'Ilya Maximets' 
Subject: RE: [ovs-dev] [PATCH v2] Detailed packet drop statistics per dpdk and 
vhostuser ports

Hi,

Agreed. As per the suggestions, I will go ahead with a generic name for packet
drops due to transmission failure (i.e. packets left untransmitted in the
output packet buffer after the call to the transmit API). Since the most
likely reason for this to happen is that the transmit queue is full or has
filled up, I will highlight this reason in the documentation.

Thanks & Regards,
Sriram.

-Original Message-
From: Venkatesan Pradeep 
Sent: 24 June 2019 12:30
To: Sriram Vatala ; 'Kevin Traynor'

Cc: 'Ilya Maximets' 
Subject: RE: [ovs-dev] [PATCH v2] Detailed packet drop statistics per dpdk and
vhostuser ports

Hi,

Since the reason for introducing a new counter to track drops due to tx queue
being full was to quickly isolate that the problem was on the VM side rather
than on OVS, having a generic counter and name wouldn't help. One would still
need to have some knowledge of the code to understand that the likely reason
for tx failure was the queue being full. Short of having a new API that can
return the failure code we won't be able to say for sure the drop reason.  If
we want to use a generic name perhaps we can update the documentation to
highlight the likely reason.
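
For reference, a minimal sketch of why the drop reason cannot be read back
from the burst API: rte_eth_tx_burst() (like rte_vhost_enqueue_burst()) only
reports how many packets were accepted, so anything beyond "the queue is
probably full" is inference. The function and variable names below are
illustrative, not taken from the patch:

    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    /* Illustrative only: the burst API returns a count, not a failure code. */
    static uint16_t
    count_unsent(uint16_t port_id, uint16_t queue_id,
                 struct rte_mbuf **pkts, uint16_t cnt)
    {
        uint16_t sent = rte_eth_tx_burst(port_id, queue_id, pkts, cnt);

        /* 'cnt - sent' packets were refused: most likely the tx queue is
         * full, but an invalid queue id would look exactly the same here. */
        return cnt - sent;
    }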

Thanks,

Pradeep
-Original Message-
From: ovs-dev-boun...@openvswitch.org  On
Behalf Of Sriram Vatala via dev
Sent: Wednesday, June 19, 2019 8:44 PM
To: 'Kevin Traynor' ; ovs-dev@openvswitch.org
Cc: 'Ilya Maximets' 
Subject: Re: [ovs-dev] [PATCH v2] Detailed packet drop statistics per dpdk and
vhostuser ports

Hi kevin,

Thanks for your inputs. I will  send the updated patch ASAP.

Thanks & Regards,
Sriram.

-Original Message-
From: Kevin Traynor 
Sent: 19 June 2019 19:07
To: Sriram Vatala ; ovs-dev@openvswitch.org
Cc: 'Ilya Maximets' 
Subject: Re: [ovs-dev] [PATCH v2] Detailed packet drop statistics per dpdk and
vhostuser ports

On 19/06/2019 11:07, Sriram Vatala wrote:
>
> Hi Kevin,
>
> Thanks for reviewing the patch. Please find my answers to your
> comments below.
>
> Comment-1
> I'm not sure about this name, you would need to know that it was the
> only reason rte_eth_tx_burst() and rte_vhost_enqueue_burst() will not
> send pkts.
> ---
> Agree that queue full is not the only reason for which the above DPDK
> apis will fail to send packets. But queue full is the most common reason.
> Instead of overlooking other reasons like invalid queue-id, i can
> change the name of the counter to a generic name something like
> "tx_failed_drops".
> what do you think?
>

Sounds good to me.

> Comment-2
> There can be other drops earlier in this function
> ("__netdev_dpdk_vhost_send"), should they be logged also?
> ---
> These are the drops for invalid queue-id or vhost device id and if
> device is not up.
> Again these are very unlikely to happen, but i can add a rate limiting
> warn log.
>

Well I guess it doesn't invalidate your patches which provides drop stats for
mtu/qos/qfull(or new name), so maybe it's ok to not log those other drops in
the context of this patchset.

> Comment-3
>>  rte_spinlock_lock(&dev->stats_lock);
>>  netdev_dpdk_vhost_update_tx_counters(&dev->stats, pkts,
>> total_pkts,
>> + dropped);
>> +dev->stats.tx_mtu_drops += mtu_drops;
>
> There is a function just above for updating vhost tx stats. Now the
> updates are split with some updated in the function and some updated
> here, it would be better to update them all in the same place.
> ---
> Agree. will change the implementation here.
>
> Comment-4
>>
>> +dropped += mtu_drops + qos_drops + qfull_drops;
>
> = is enough, you don't need +=
> ---
> Actually, in the code above this part of the "dpdk_do_tx_copy" function,
> dropped will be updated if the mbuf alloc fails.
> Here is the code snippet:
>
>
> pkts[txcnt] = rte_pktmbuf_alloc(dev->dpdk_mp->mp);
> if (OVS_UNLIKELY(!pkts[txcnt])) {
> dropped += cnt - i;
> break;
> }
>
> To take care not to miss this in total drops, i am using dropped+=,
> Even if the above part of the code doesn't hit, as dropped variable is
> initialized to Zero that expression should not result in incorrect
> value for drops.
>

Yes, you're right, I missed it, thanks.

>
> Comment-5
>
>> +> +
>> filters="ovs">/usr/share/openvswit

Re: [ovs-dev] [PATCH v2] Detailed packet drop statistics per dpdk and vhostuser ports

2019-06-25 Thread Sriram Vatala via dev
Hi,

Agreed. As per the suggestions, I will go ahead with a generic name for packet
drops due to transmission failure (i.e. packets left untransmitted in the
output packet buffer after the call to the transmit API). Since the most
likely reason for this to happen is that the transmit queue is full or has
filled up, I will highlight this reason in the documentation.

Thanks & Regards,
Sriram.

-Original Message-
From: Venkatesan Pradeep 
Sent: 24 June 2019 12:30
To: Sriram Vatala ; 'Kevin Traynor'

Cc: 'Ilya Maximets' 
Subject: RE: [ovs-dev] [PATCH v2] Detailed packet drop statistics per dpdk and
vhostuser ports

Hi,

Since the reason for introducing a new counter to track drops due to tx queue
being full was to quickly isolate that the problem was on the VM side rather
than on OVS, having a generic counter and name wouldn't help. One would still
need to have some knowledge of the code to understand that the likely reason
for tx failure was the queue being full. Short of having a new API that can
return the failure code we won't be able to say for sure the drop reason.  If
we want to use a generic name perhaps we can update the documentation to
highlight the likely reason.

Thanks,

Pradeep
-Original Message-
From: ovs-dev-boun...@openvswitch.org  On
Behalf Of Sriram Vatala via dev
Sent: Wednesday, June 19, 2019 8:44 PM
To: 'Kevin Traynor' ; ovs-dev@openvswitch.org
Cc: 'Ilya Maximets' 
Subject: Re: [ovs-dev] [PATCH v2] Detailed packet drop statistics per dpdk and
vhostuser ports

Hi kevin,

Thanks for your inputs. I will  send the updated patch ASAP.

Thanks & Regards,
Sriram.

-Original Message-
From: Kevin Traynor 
Sent: 19 June 2019 19:07
To: Sriram Vatala ; ovs-dev@openvswitch.org
Cc: 'Ilya Maximets' 
Subject: Re: [ovs-dev] [PATCH v2] Detailed packet drop statistics per dpdk and
vhostuser ports

On 19/06/2019 11:07, Sriram Vatala wrote:
>
> Hi Kevin,
>
> Thanks for reviewing the patch. Please find my answers to your
> comments below.
>
> Comment-1
> I'm not sure about this name, you would need to know that it was the
> only reason rte_eth_tx_burst() and rte_vhost_enqueue_burst() will not
> send pkts.
> ---
> Agree that queue full is not the only reason for which the above DPDK
> apis will fail to send packets. But queue full is the most common reason.
> Instead of overlooking other reasons like invalid queue-id, i can
> change the name of the counter to a generic name something like
> "tx_failed_drops".
> what do you think?
>

Sounds good to me.

> Comment-2
> There can be other drops earlier in this function
> ("__netdev_dpdk_vhost_send"), should they be logged also?
> ---
> These are the drops for invalid queue-id or vhost device id and if
> device is not up.
> Again these are very unlikely to happen, but i can add a rate limiting
> warn log.
>

Well I guess it doesn't invalidate your patches which provides drop stats for
mtu/qos/qfull(or new name), so maybe it's ok to not log those other drops in
the context of this patchset.

> Comment-3
>>  rte_spinlock_lock(&dev->stats_lock);
>>  netdev_dpdk_vhost_update_tx_counters(&dev->stats, pkts,
>> total_pkts,
>> + dropped);
>> +dev->stats.tx_mtu_drops += mtu_drops;
>
> There is a function just above for updating vhost tx stats. Now the
> updates are split with some updated in the function and some updated
> here, it would be better to update them all in the same place.
> ---
> Agree. will change the implementation here.
>
> Comment-4
>>
>> +dropped += mtu_drops + qos_drops + qfull_drops;
>
> = is enough, you don't need +=
> ---
> Actually, in the code above this part of the "dpdk_do_tx_copy" function,
> dropped will be updated if the mbuf alloc fails.
> Here is the code snippet:
>
>
> pkts[txcnt] = rte_pktmbuf_alloc(dev->dpdk_mp->mp);
> if (OVS_UNLIKELY(!pkts[txcnt])) {
> dropped += cnt - i;
> break;
> }
>
> To take care not to miss this in total drops, i am using dropped+=,
> Even if the above part of the code doesn't hit, as dropped variable is
> initialized to Zero that expression should not result in incorrect
> value for drops.
>

Yes, you're right, I missed it, thanks.

>
> Comment-5
>
>> +> +
>> filters="ovs">/usr/share/openvswitch/scripts/ovs-bugtool-get-iface-st
>> ats
>> +
>
> Probably better to be consistent and not wrap this line
> ---
> Ok. I wrapped this line as utilities/checkpatch script is giving
> warning as the line length is exceeding 79 characters. I will unwrap this.
>
>
>
> Comment-6
>
>> I think you also need to update vswitchd.xml with some docs for these
>> stats
>

Re: [ovs-dev] [PATCH v2] Detailed packet drop statistics per dpdk and vhostuser ports

2019-06-19 Thread Sriram Vatala via dev
Hi kevin,

Thanks for your inputs. I will  send the updated patch ASAP.

Thanks & Regards,
Sriram.

-Original Message-
From: Kevin Traynor 
Sent: 19 June 2019 19:07
To: Sriram Vatala ; ovs-dev@openvswitch.org
Cc: 'Ilya Maximets' 
Subject: Re: [ovs-dev] [PATCH v2] Detailed packet drop statistics per dpdk and 
vhostuser ports

On 19/06/2019 11:07, Sriram Vatala wrote:
>
> Hi Kevin,
>
> Thanks for reviewing the patch. Please find my answers to your comments 
> below.
>
> Comment-1
> I'm not sure about this name, you would need to know that it was the
> only reason rte_eth_tx_burst() and rte_vhost_enqueue_burst() will not
> send pkts.
> ---
> Agree that queue full is not the only reason for which the above DPDK
> apis will fail to send packets. But queue full is the most common reason.
> Instead of overlooking other reasons like invalid queue-id, i can
> change the name of the counter to a generic name something like 
> "tx_failed_drops".
> what do you think?
>

Sounds good to me.

> Comment-2
> There can be other drops earlier in this function
> ("__netdev_dpdk_vhost_send"), should they be logged also?
> ---
> These are the drops for invalid queue-id or vhost device id and if
> device is not up.
> Again these are very unlikely to happen, but i can add a rate limiting
> warn log.
>

Well I guess it doesn't invalidate your patches which provides drop stats for 
mtu/qos/qfull(or new name), so maybe it's ok to not log those other drops in 
the context of this patchset.

> Comment-3
>>  rte_spinlock_lock(&dev->stats_lock);
>>  netdev_dpdk_vhost_update_tx_counters(&dev->stats, pkts,
>> total_pkts,
>> + dropped);
>> +dev->stats.tx_mtu_drops += mtu_drops;
>
> There is a function just above for updating vhost tx stats. Now the
> updates are split with some updated in the function and some updated
> here, it would be better to update them all in the same place.
> ---
> Agree. will change the implementation here.
>
> Comment-4
>>
>> +dropped += mtu_drops + qos_drops + qfull_drops;
>
> = is enough, you don't need +=
> ---
> Actually, in the code above this part of the "dpdk_do_tx_copy" function,
> dropped will be updated if the mbuf alloc fails.
> Here is the code snippet:
>
>
> pkts[txcnt] = rte_pktmbuf_alloc(dev->dpdk_mp->mp);
> if (OVS_UNLIKELY(!pkts[txcnt])) {
> dropped += cnt - i;
> break;
> }
>
> To take care not to miss this in total drops, i am using dropped+=,
> Even if the above part of the code doesn't hit, as dropped variable is
> initialized to Zero that expression should not result in incorrect
> value for drops.
>

Yes, you're right, I missed it, thanks.

>
> Comment-5
>
>> +> +
>> filters="ovs">/usr/share/openvswitch/scripts/ovs-bugtool-get-iface-st
>> ats
>> +
>
> Probably better to be consistent and not wrap this line
> ---
> Ok. I wrapped this line as utilities/checkpatch script is giving
> warning as the line length is exceeding 79 characters. I will unwrap this.
>
>
>
> Comment-6
>
>> I think you also need to update vswitchd.xml with some docs for these
>> stats
>>
>
> Actually, it doesn't seem needed there, perhaps something in the dpdk
> docs
>
> ---
> Bit unclear on where to document these new counters. As we have not
> done any modifications to dpdk APIs, can i document these new counters
> in man page of ovs-vsctl.
> what do you think?
>

I'm not exactly sure where either; I was thinking about somewhere in one of the
.rst's in the /Documentation directory. Maybe it's not strictly necessary for 
all stats, but when there's small explanations in the commit message for these 
ones, seems like they might be helpful in the docs.

thanks,
Kevin.

> Thanks & Regards,
> Sriram.
>
> -Original Message-
> From: Kevin Traynor 
> Sent: 19 June 2019 01:09
> To: Sriram Vatala ;
> ovs-dev@openvswitch.org
> Cc: Ilya Maximets 
> Subject: Re: [ovs-dev] [PATCH v2] Detailed packet drop statistics per
> dpdk and vhostuser ports
>
> On 18/06/2019 15:05, Kevin Traynor wrote:
>> Hi Sriram,
>>
>> On 14/06/2019 14:38, Sriram Vatala via dev wrote:
>>> OVS may be unable to transmit packets for multiple reasons and today
>>> there is a single counter to track packets dropped due to any of
>>> those reasons. The most common reason is that a VM is unable to read
>>> packets fast enough causing the vhostuser port transmit queue on the
>>> OVS side to become full. This manifests as a problem with VNFs not
>>> receiving all packets. 

Re: [ovs-dev] [PATCH v2] Detailed packet drop statistics per dpdk and vhostuser ports

2019-06-19 Thread Sriram Vatala via dev

Hi Kevin,

Thanks for reviewing the patch. Please find my answers to your comments below.

Comment-1
I'm not sure about this name, you would need to know that it was the
only reason rte_eth_tx_burst() and rte_vhost_enqueue_burst() will not
send pkts.
---
Agree that queue full is not the only reason for which the above DPDK
apis will fail to send packets. But queue full is the most common reason.
Instead of overlooking other reasons like invalid queue-id, i can change
the name of the counter to a generic name something like "tx_failed".
what do you think?

Comment-2
There can be other drops earlier in this function
("__netdev_dpdk_vhost_send"), should they be logged also?
---
These are the drops for invalid queue-id or vhost device id and if device is
not up.
Again these are very unlikely to happen, but i can add a rate limiting warn
log.
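
For reference, a minimal sketch of such a rate-limited warning, using the
vlog rate-limit helpers already used in netdev-dpdk.c (the file already
defines its vlog module and a rate limit; both are repeated here only so the
sketch is self-contained). The condition, names and message are illustrative
and not part of any posted revision:

    #include "openvswitch/vlog.h"

    VLOG_DEFINE_THIS_MODULE(netdev_dpdk);
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);

    /* Illustrative only: rate-limited warning for packets dropped before
     * the transmit stage, e.g. when the vhost device is not up yet. */
    static void
    warn_early_tx_drop(const char *port_name, unsigned int n_dropped)
    {
        VLOG_WARN_RL(&rl, "%s: dropped %u packets: device not ready",
                     port_name, n_dropped);
    }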

Comment-3
>  rte_spinlock_lock(&dev->stats_lock);
>  netdev_dpdk_vhost_update_tx_counters(&dev->stats, pkts, total_pkts,
> + dropped);
> +dev->stats.tx_mtu_drops += mtu_drops;

There is a function just above for updating vhost tx stats. Now the
updates are split with some updated in the function and some updated
here, it would be better to update them all in the same place.
---
Agree. will change the implementation here.
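
Purely as an illustration of that consolidation, the per-class drops could be
passed into the existing vhost tx counters helper. The extra parameters, their
names and the field names (which follow the v2 patch) are assumptions; the
shape that was eventually merged differs:

    /* Illustrative only: keep every vhost tx stats update in one place. */
    static void
    netdev_dpdk_vhost_update_tx_counters(struct netdev_stats *stats,
                                         struct dp_packet **packets,
                                         int attempted, int dropped,
                                         int mtu_drops, int qos_drops,
                                         int qfull_drops)
    {
        int i, sent = attempted - dropped;

        stats->tx_packets += sent;
        stats->tx_dropped += dropped;
        stats->tx_mtu_drops += mtu_drops;
        stats->tx_qos_drops += qos_drops;
        stats->tx_qfull_drops += qfull_drops;

        for (i = 0; i < sent; i++) {
            stats->tx_bytes += dp_packet_size(packets[i]);
        }
    }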

Comment-4
>
> +dropped += mtu_drops + qos_drops + qfull_drops;

= is enough, you don't need +=
---
Actually, in the code above this part of the "dpdk_do_tx_copy" function, the
dropped variable will be updated if the mbuf alloc fails.
Here is the code snippet:


pkts[txcnt] = rte_pktmbuf_alloc(dev->dpdk_mp->mp);
if (OVS_UNLIKELY(!pkts[txcnt])) {
dropped += cnt - i;
break;
}

To take care not to miss this in total drops, i am using dropped+=, Even if
the above part of the code doesn't hit, as dropped variable is initialized to
Zero, that expression should not result in incorrect value for drops.
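
In other words, the accumulation in dpdk_do_tx_copy() looks roughly like the
condensed sketch below (a summary of the flow discussed above, not a verbatim
hunk):

    uint32_t dropped = 0;   /* starts at zero, so '+=' is always safe */

    /* Copy loop: packets lost to mbuf allocation failure are counted first. */
    if (OVS_UNLIKELY(!pkts[txcnt])) {
        dropped += cnt - i;
        break;
    }

    /* Later, after MTU filtering, QoS policing and the send itself. */
    dropped += mtu_drops + qos_drops + qfull_drops;   /* total for tx_dropped */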


Comment-5

> + +
> filters="ovs">/usr/share/openvswitch/scripts/ovs-bugtool-get-iface-stats
> +

Probably better to be consistent and not wrap this line
---
Ok. I wrapped this line as utilities/checkpatch script is giving warning as
the line length is exceeding 79 characters. I will unwrap this.



Comment-6

> I think you also need to update vswitchd.xml with some docs for these stats
>

Actually, it doesn't seem needed there, perhaps something in the dpdk docs

---
Bit unclear on where to document these new counters. As we have not done any
modifications to dpdk APIs, can i document these new counters in man page of
ovs-vsctl.
what do you think?

Thanks & Regards,
Sriram.

-Original Message-
From: Kevin Traynor 
Sent: 19 June 2019 01:09
To: Sriram Vatala ; ovs-dev@openvswitch.org
Cc: Ilya Maximets 
Subject: Re: [ovs-dev] [PATCH v2] Detailed packet drop statistics per dpdk and
vhostuser ports

On 18/06/2019 15:05, Kevin Traynor wrote:
> Hi Sriram,
>
> On 14/06/2019 14:38, Sriram Vatala via dev wrote:
>> OVS may be unable to transmit packets for multiple reasons and today
>> there is a single counter to track packets dropped due to any of
>> those reasons. The most common reason is that a VM is unable to read
>> packets fast enough causing the vhostuser port transmit queue on the
>> OVS side to become full. This manifests as a problem with VNFs not
>> receiving all packets. Having a separate drop counter to track
>> packets dropped because the transmit queue is full will clearly
>> indicate that the problem is on the VM side and not in OVS. Similarly
>> maintaining separate counters for all possible drops helps in
>> indicating sensible cause for packet drops.
>>
>> This patch adds counters to track packets dropped due to all possible
>> reasons and these counters are displayed along with other stats in
>> "ovs-vsctl get interface  statistics"
>> command. The detailed stats will be available for both dpdk and
>> vhostuser ports.
>>
>> Following are the details of the new counters :
>>
>> tx_qfull_drops : These are the packets dropped due to transmit queue
>> overrun.
>>
>
> I'm not sure about this name, you would need to know that it was the
> only reason rte_eth_tx_burst() and rte_vhost_enqueue_burst() will not
> send pkts
>
>> tx_mtu_exceeded_drops : These are the packets dropped due to MTU
>> mismatch (i.e Pkt len > Max Dev MTU).
>>
>> tx_qos_drops/rx_qos_drops : These are the packets dropped due to
>> transmission/reception rate exceeding the configured Egress/Ingress
>> policer rate on the interface.
>>
>
> I think you also need to update vswitchd.xml with some docs for these
> stats
>

Actually, it doesn't seem needed there, perhaps something in

Re: [ovs-dev] [PATCH v2] Detailed packet drop statistics per dpdk and vhostuser ports

2019-06-19 Thread Sriram Vatala via dev

Hi Kevin,

Thanks for reviewing the patch. Please find my answers to your comments below.

Comment-1
I'm not sure about this name, you would need to know that it was the
only reason rte_eth_tx_burst() and rte_vhost_enqueue_burst() will not
send pkts.
---
Agree that queue full is not the only reason for which the above DPDK
apis will fail to send packets. But queue full is the most common reason.
Instead of overlooking other reasons like invalid queue-id, i can change
the name of the counter to a generic name something like "tx_failed_drops".
what do you think?

Comment-2
There can be other drops earlier in this function 
("__netdev_dpdk_vhost_send"), should they be logged also?
---
These are the drops for invalid queue-id or vhost device id and if device is 
not up.
Again these are very unlikely to happen, but i can add a rate limiting warn 
log.

Comment-3
>  rte_spinlock_lock(&dev->stats_lock);
>  netdev_dpdk_vhost_update_tx_counters(&dev->stats, pkts, total_pkts,
> + dropped);
> +dev->stats.tx_mtu_drops += mtu_drops;

There is a function just above for updating vhost tx stats. Now the
updates are split with some updated in the function and some updated
here, it would be better to update them all in the same place.
---
Agree. will change the implementation here.

Comment-4
>
> +dropped += mtu_drops + qos_drops + qfull_drops;

= is enough, you don't need +=
---
Actually, in the code above this part of the "dpdk_do_tx_copy" function,
dropped will be updated if the mbuf alloc fails.
Here is the code snippet:


pkts[txcnt] = rte_pktmbuf_alloc(dev->dpdk_mp->mp);
if (OVS_UNLIKELY(!pkts[txcnt])) {
dropped += cnt - i;
break;
}

To take care not to miss this in total drops, i am using dropped+=, Even if 
the above part of the code doesn't hit, as dropped variable is initialized to 
Zero
that expression should not result in incorrect value for drops.


Comment-5

> + + 
> filters="ovs">/usr/share/openvswitch/scripts/ovs-bugtool-get-iface-stats
> +

Probably better to be consistent and not wrap this line
---
Ok. I wrapped this line as utilities/checkpatch script is giving warning as 
the line length is exceeding 79 characters. I will unwrap this.



Comment-6

> I think you also need to update vswitchd.xml with some docs for these stats
>

Actually, it doesn't seem needed there, perhaps something in the dpdk docs

---
Bit unclear on where to document these new counters. As we have not done any
modifications to dpdk APIs, can i document these new counters in man page of 
ovs-vsctl.
what do you think?

Thanks & Regards,
Sriram.

-Original Message-
From: Kevin Traynor 
Sent: 19 June 2019 01:09
To: Sriram Vatala ; ovs-dev@openvswitch.org
Cc: Ilya Maximets 
Subject: Re: [ovs-dev] [PATCH v2] Detailed packet drop statistics per dpdk and 
vhostuser ports

On 18/06/2019 15:05, Kevin Traynor wrote:
> Hi Sriram,
>
> On 14/06/2019 14:38, Sriram Vatala via dev wrote:
>> OVS may be unable to transmit packets for multiple reasons and today
>> there is a single counter to track packets dropped due to any of
>> those reasons. The most common reason is that a VM is unable to read
>> packets fast enough causing the vhostuser port transmit queue on the
>> OVS side to become full. This manifests as a problem with VNFs not
>> receiving all packets. Having a separate drop counter to track
>> packets dropped because the transmit queue is full will clearly
>> indicate that the problem is on the VM side and not in OVS. Similarly
>> maintaining separate counters for all possible drops helps in
>> indicating sensible cause for packet drops.
>>
>> This patch adds counters to track packets dropped due to all possible
>> reasons and these counters are displayed along with other stats in
>> "ovs-vsctl get interface  statistics"
>> command. The detailed stats will be available for both dpdk and
>> vhostuser ports.
>>
>> Following are the details of the new counters :
>>
>> tx_qfull_drops : These are the packets dropped due to transmit queue
>> overrun.
>>
>
> I'm not sure about this name, you would need to know that it was the
> only reason rte_eth_tx_burst() and rte_vhost_enqueue_burst() will not
> send pkts
>
>> tx_mtu_exceeded_drops : These are the packets dropped due to MTU
>> mismatch (i.e Pkt len > Max Dev MTU).
>>
>> tx_qos_drops/rx_qos_drops : These are the packets dropped due to
>> transmission/reception rate exceeding the configured Egress/Ingress
>> policer rate on the interface.
>>
>
> I think you also need to update vswitchd.xml with some docs for these
> stats
>

Actually, it doesn't seem needed 

Re: [ovs-dev] [PATCH v2] Detailed packet drop statistics per dpdk and vhostuser ports

2019-06-18 Thread Sriram Vatala via dev
Please consider this as a gentle reminder.

Thanks & Regards,
Sriram.

-Original Message-
From: Sriram Vatala  
Sent: 14 June 2019 19:08
To: ovs-dev@openvswitch.org
Cc: Sriram Vatala 
Subject: [PATCH v2] Detailed packet drop statistics per dpdk and vhostuser
ports

OVS may be unable to transmit packets for multiple reasons and today there
is a single counter to track packets dropped due to any of those reasons.
The most common reason is that a VM is unable to read packets fast enough
causing the vhostuser port transmit queue on the OVS side to become full.
This manifests as a problem with VNFs not receiving all packets. Having a
separate drop counter to track packets dropped because the transmit queue is
full will clearly indicate that the problem is on the VM side and not in
OVS. Similarly maintaining separate counters for all possible drops helps in
indicating sensible cause for packet drops.

This patch adds counters to track packets dropped due to all possible
reasons and these counters are displayed along with other stats in
"ovs-vsctl get interface  statistics"
command. The detailed stats will be available for both dpdk and vhostuser
ports.

Following are the details of the new counters :

tx_qfull_drops : These are the packets dropped due to transmit queue
overrun.

tx_mtu_exceeded_drops : These are the packets dropped due to MTU mismatch
(i.e Pkt len > Max Dev MTU).

tx_qos_drops/rx_qos_drops : These are the packets dropped due to
transmission/reception rate exceeding the configured Egress/Ingress policer
rate on the interface.

Signed-off-by: Sriram Vatala 
---
 include/openvswitch/netdev.h   |  8 
 lib/netdev-dpdk.c  | 49
++
 utilities/bugtool/automake.mk  |  3 +-
 utilities/bugtool/ovs-bugtool-get-iface-stats  | 25 +++
 .../bugtool/plugins/network-status/openvswitch.xml |  3 ++
 vswitchd/bridge.c  |  6 ++-
 6 files changed, 85 insertions(+), 9 deletions(-)  create mode 100755
utilities/bugtool/ovs-bugtool-get-iface-stats

diff --git a/include/openvswitch/netdev.h b/include/openvswitch/netdev.h
index 0c10f7b..69480a4 100644
--- a/include/openvswitch/netdev.h
+++ b/include/openvswitch/netdev.h
@@ -61,6 +61,14 @@ struct netdev_stats {
 uint64_t tx_heartbeat_errors;
 uint64_t tx_window_errors;
 
+/* Detailed receive drops. */
+uint64_t rx_qos_drops;
+
+/* Detailed transmit drops. */
+uint64_t tx_qfull_drops;
+uint64_t tx_qos_drops;
+uint64_t tx_mtu_drops;
+
 /* Extended statistics based on RFC2819. */
 uint64_t rx_1_to_64_packets;
 uint64_t rx_65_to_127_packets;
diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c index 3498b32..6c2eb38
100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -2124,6 +2124,7 @@ netdev_dpdk_vhost_update_rx_counters(struct
netdev_stats *stats,
 
 stats->rx_packets += count;
 stats->rx_dropped += dropped;
+stats->rx_qos_drops += dropped;
 for (i = 0; i < count; i++) {
 packet = packets[i];
 packet_size = dp_packet_size(packet);
@@ -2236,6 +2237,7 @@ netdev_dpdk_rxq_recv(struct netdev_rxq *rxq, struct dp_packet_batch *batch,
 if (OVS_UNLIKELY(dropped)) {
 rte_spinlock_lock(&dev->stats_lock);
 dev->stats.rx_dropped += dropped;
+dev->stats.rx_qos_drops += dropped;
 rte_spinlock_unlock(&dev->stats_lock);
 }
 
@@ -2319,6 +2321,9 @@ __netdev_dpdk_vhost_send(struct netdev *netdev, int
qid,
 struct rte_mbuf **cur_pkts = (struct rte_mbuf **) pkts;
 unsigned int total_pkts = cnt;
 unsigned int dropped = 0;
+unsigned int mtu_drops;
+unsigned int qos_drops;
+unsigned int qfull_drops;
 int i, retries = 0;
 int vid = netdev_dpdk_get_vid(dev);
 
@@ -2335,9 +2340,11 @@ __netdev_dpdk_vhost_send(struct netdev *netdev, int
qid,
 rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
 
 cnt = netdev_dpdk_filter_packet_len(dev, cur_pkts, cnt);
+mtu_drops = total_pkts - cnt;
+qos_drops = cnt;
 /* Check has QoS has been configured for the netdev */
 cnt = netdev_dpdk_qos_run(dev, cur_pkts, cnt, true);
-dropped = total_pkts - cnt;
+qos_drops -= cnt;
 
 do {
 int vhost_qid = qid * VIRTIO_QNUM + VIRTIO_RXQ;
@@ -2357,9 +2364,14 @@ __netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
 
 rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
 
+qfull_drops = cnt;
+dropped = mtu_drops + qos_drops + qfull_drops;
 rte_spinlock_lock(&dev->stats_lock);
 netdev_dpdk_vhost_update_tx_counters(&dev->stats, pkts, total_pkts,
- cnt + dropped);
+ dropped);
+dev->stats.tx_mtu_drops += mtu_drops;
+dev->stats.tx_qos_drops += qos_drops;
+dev->stats.tx_qfull_drops += qfull_drops;
 rte_spinlock_unlock(&dev->stats_lock);
 
 out:
@@ -2384,12 +
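
To summarise how the send-path hunks above derive the three transmit drop
classes: each stage narrows cnt, and the difference at that stage is one
class. A condensed sketch of the patch's own flow (not a new hunk):

    unsigned int total_pkts = cnt;

    cnt = netdev_dpdk_filter_packet_len(dev, cur_pkts, cnt);
    mtu_drops = total_pkts - cnt;          /* oversized packets */

    qos_drops = cnt;
    cnt = netdev_dpdk_qos_run(dev, cur_pkts, cnt, true);
    qos_drops -= cnt;                      /* egress policer drops */

    /* ... rte_vhost_enqueue_burst() retry loop sends what it can ... */
    qfull_drops = cnt;                     /* still unsent: tx queue full etc. */
    dropped = mtu_drops + qos_drops + qfull_drops;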

[ovs-dev] [PATCH v2] Detailed packet drop statistics per dpdk and vhostuser ports

2019-06-14 Thread Sriram Vatala via dev
OVS may be unable to transmit packets for multiple reasons and
today there is a single counter to track packets dropped due to
any of those reasons. The most common reason is that a VM is
unable to read packets fast enough causing the vhostuser port
transmit queue on the OVS side to become full. This manifests
as a problem with VNFs not receiving all packets. Having a
separate drop counter to track packets dropped because the
transmit queue is full will clearly indicate that the problem
is on the VM side and not in OVS. Similarly maintaining separate
counters for all possible drops helps in indicating sensible
cause for packet drops.

This patch adds counters to track packets dropped due to all
possible reasons and these counters are displayed along with
other stats in "ovs-vsctl get interface  statistics"
command. The detailed stats will be available for both dpdk and
vhostuser ports.

Following are the details of the new counters :

tx_qfull_drops : These are the packets dropped due to
transmit queue overrun.

tx_mtu_exceeded_drops : These are the packets dropped due
to MTU mismatch (i.e Pkt len > Max Dev MTU).

tx_qos_drops/rx_qos_drops : These are the packets dropped due
to transmission/reception rate exceeding the configured
Egress/Ingress policer rate on the interface.

Signed-off-by: Sriram Vatala 
---
 include/openvswitch/netdev.h   |  8 
 lib/netdev-dpdk.c  | 49 ++
 utilities/bugtool/automake.mk  |  3 +-
 utilities/bugtool/ovs-bugtool-get-iface-stats  | 25 +++
 .../bugtool/plugins/network-status/openvswitch.xml |  3 ++
 vswitchd/bridge.c  |  6 ++-
 6 files changed, 85 insertions(+), 9 deletions(-)
 create mode 100755 utilities/bugtool/ovs-bugtool-get-iface-stats

diff --git a/include/openvswitch/netdev.h b/include/openvswitch/netdev.h
index 0c10f7b..69480a4 100644
--- a/include/openvswitch/netdev.h
+++ b/include/openvswitch/netdev.h
@@ -61,6 +61,14 @@ struct netdev_stats {
 uint64_t tx_heartbeat_errors;
 uint64_t tx_window_errors;
 
+/* Detailed receive drops. */
+uint64_t rx_qos_drops;
+
+/* Detailed transmit drops. */
+uint64_t tx_qfull_drops;
+uint64_t tx_qos_drops;
+uint64_t tx_mtu_drops;
+
 /* Extended statistics based on RFC2819. */
 uint64_t rx_1_to_64_packets;
 uint64_t rx_65_to_127_packets;
diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
index 3498b32..6c2eb38 100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -2124,6 +2124,7 @@ netdev_dpdk_vhost_update_rx_counters(struct netdev_stats 
*stats,
 
 stats->rx_packets += count;
 stats->rx_dropped += dropped;
+stats->rx_qos_drops += dropped;
 for (i = 0; i < count; i++) {
 packet = packets[i];
 packet_size = dp_packet_size(packet);
@@ -2236,6 +2237,7 @@ netdev_dpdk_rxq_recv(struct netdev_rxq *rxq, struct 
dp_packet_batch *batch,
 if (OVS_UNLIKELY(dropped)) {
 rte_spinlock_lock(&dev->stats_lock);
 dev->stats.rx_dropped += dropped;
+dev->stats.rx_qos_drops += dropped;
 rte_spinlock_unlock(&dev->stats_lock);
 }
 
@@ -2319,6 +2321,9 @@ __netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
 struct rte_mbuf **cur_pkts = (struct rte_mbuf **) pkts;
 unsigned int total_pkts = cnt;
 unsigned int dropped = 0;
+unsigned int mtu_drops;
+unsigned int qos_drops;
+unsigned int qfull_drops;
 int i, retries = 0;
 int vid = netdev_dpdk_get_vid(dev);
 
@@ -2335,9 +2340,11 @@ __netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
 rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
 
 cnt = netdev_dpdk_filter_packet_len(dev, cur_pkts, cnt);
+mtu_drops = total_pkts - cnt;
+qos_drops = cnt;
 /* Check has QoS has been configured for the netdev */
 cnt = netdev_dpdk_qos_run(dev, cur_pkts, cnt, true);
-dropped = total_pkts - cnt;
+qos_drops -= cnt;
 
 do {
 int vhost_qid = qid * VIRTIO_QNUM + VIRTIO_RXQ;
@@ -2357,9 +2364,14 @@ __netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
 
 rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
 
+qfull_drops = cnt;
+dropped = mtu_drops + qos_drops + qfull_drops;
 rte_spinlock_lock(&dev->stats_lock);
 netdev_dpdk_vhost_update_tx_counters(&dev->stats, pkts, total_pkts,
- cnt + dropped);
+ dropped);
+dev->stats.tx_mtu_drops += mtu_drops;
+dev->stats.tx_qos_drops += qos_drops;
+dev->stats.tx_qfull_drops += qfull_drops;
 rte_spinlock_unlock(&dev->stats_lock);
 
 out:
@@ -2384,12 +2396,15 @@ dpdk_do_tx_copy(struct netdev *netdev, int qid, struct 
dp_packet_batch *batch)
 struct rte_mbuf *pkts[PKT_ARRAY_SIZE];
 uint32_t cnt = batch_cnt;
 uint32_t dropped = 0;
+uint32_t mtu_drops = 0;
+uint32_t qos_drops = 0;
+uint32_t qfull_drops = 0;
 
   

Re: [ovs-dev] [PATCH] Detailed packet drop statistics per dpdk and vhostuser ports

2019-06-12 Thread Sriram Vatala via dev
Hi Ilya,

Sorry for the late reply. Yes, whatever you suggested is doable. As of now,
ovs-bugtool has only the dpctl/show command and I think we need to add the
"ovs-vsctl get interface" command to ovs-bugtool. I will float the updated
patch ASAP.

Thanks & Regards,
Sriram.

-Original Message-
From: Ilya Maximets  
Sent: 07 June 2019 20:08
To: Sriram Vatala ; ovs-dev@openvswitch.org
Cc: Stokes, Ian ; Kevin Traynor 
Subject: Re: [ovs-dev] [PATCH] Detailed packet drop statistics per dpdk and 
vhostuser ports

On 07.06.2019 17:28, Ilya Maximets wrote:
> On 07.06.2019 15:39, Sriram Vatala wrote:
>> OVS may be unable to transmit packets for multiple reasons and today 
>> there is a single counter to track packets dropped due to any of 
>> those reasons. The most common reason is that a VM is unable to read 
>> packets fast enough causing the vhostuser port transmit queue on the 
>> OVS side to become full. This manifests as a problem with VNFs not 
>> receiving all packets. Having a separate drop counter to track 
>> packets dropped because the transmit queue is full will clearly 
>> indicate that the problem is on the VM side and not in OVS. Similarly 
>> maintaining separate counters for all possible drops helps in 
>> indicating sensible cause for packet drops.
>>
>> This patch adds counters to track packets dropped due to all possible 
>> reasons and display them when "--details" optional flag is passed  to 
>> “ovs-appctl dpctl/show -s” . The detailed stats will be available for 
>> both dpdk and vhostuser ports.
>>
>> cmd Usage : "ovs-appctl dpctl/show -s --details" (OR)
>> "ovs-appctl dpctl/show --statistics --details"
>>
>> Following are the details of the new counters :
>>
>> queue_full : These are the packets dropped due to Transmit queue overrun.
>> mtu_exceeded : These are the packets dropped due to MTU mismatch.
>>(i.e Pkt len > Max Dev MTU) qos : These are the 
>> packets dropped due to transmission/reception rate
>>   exceeding the configured Egress/Ingress policer rate on the interface.
>>
>> Signed-off-by: Sriram Vatala 
>> ---
>>  include/openvswitch/netdev.h |  8 
>>  lib/dpctl.c  | 26 ++-
>>  lib/dpctl.h  |  5 +
>>  lib/dpctl.man|  8 ++--
>>  lib/netdev-dpdk.c| 49 
>> +---
>>  5 files changed, 86 insertions(+), 10 deletions(-)
> 
> Hi, Sriram.
> Thanks for working on OVS improvement.
> 
> I didn't look at the code yet, but one thing I wanted to mention is 
> that dpctl interface seems redundant here. It provides only basic 
> statistics, but you're adding some really specific stuff. Also, full 
> port stats are always available via db interface:
> 
>   ovs-vsctl get interface <iface> statistics
> 
> Your new stats counters will be there automatically.

Only need to add a few lines to '#define IFACE_STATS' in vswitchd/bridge.c.

> I think, it's better to drop dpctl parts of this patch.
> 
> What do you think?
> 
> Best regards, Ilya Maximets.  
> 
> 
___
dev mailing list
d...@openvswitch.org
https://mail.openvswitch.org/mailman/listinfo/ovs-dev
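
For reference, a minimal sketch of the bridge.c side Ilya points at above:
the new netdev_stats members only need entries in the IFACE_STATS list so
they show up in the Interface statistics column. This is not the exact hunk
from any posted revision; the macro is assumed to expand IFACE_STAT(member,
key) pairs as it does for the existing counters, and the member names follow
the v1/v2 patches:

    /* vswitchd/bridge.c -- illustrative addition, not the exact patch hunk. */
    #define IFACE_STATS                                     \
        /* ... existing IFACE_STAT entries ... */           \
        IFACE_STAT(rx_qos_drops,    "rx_qos_drops")         \
        IFACE_STAT(tx_qos_drops,    "tx_qos_drops")         \
        IFACE_STAT(tx_mtu_drops,    "tx_mtu_drops")         \
        IFACE_STAT(tx_qfull_drops,  "tx_qfull_drops")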


[ovs-dev] [PATCH] Detailed packet drop statistics per dpdk and vhostuser ports

2019-06-07 Thread Sriram Vatala via dev
OVS may be unable to transmit packets for multiple reasons and
today there is a single counter to track packets dropped due to
any of those reasons. The most common reason is that a VM is
unable to read packets fast enough causing the vhostuser port
transmit queue on the OVS side to become full. This manifests
as a problem with VNFs not receiving all packets. Having a
separate drop counter to track packets dropped because the
transmit queue is full will clearly indicate that the problem
is on the VM side and not in OVS. Similarly maintaining separate
counters for all possible drops helps in indicating sensible
cause for packet drops.

This patch adds counters to track packets dropped due to all possible
reasons and display them when "--details" optional flag is passed  to
“ovs-appctl dpctl/show -s” . The detailed stats will be available
for both dpdk and vhostuser ports.

cmd Usage : "ovs-appctl dpctl/show -s --details" (OR)
"ovs-appctl dpctl/show --statistics --details"

Following are the details of the new counters :

queue_full : These are the packets dropped due to Transmit queue overrun.
mtu_exceeded : These are the packets dropped due to MTU mismatch.
   (i.e Pkt len > Max Dev MTU)
qos : These are the packets dropped due to transmission/reception rate
  exceeding the configured Egress/Ingress policer rate on the interface.

Signed-off-by: Sriram Vatala 
---
 include/openvswitch/netdev.h |  8 
 lib/dpctl.c  | 26 ++-
 lib/dpctl.h  |  5 +
 lib/dpctl.man|  8 ++--
 lib/netdev-dpdk.c| 49 +---
 5 files changed, 86 insertions(+), 10 deletions(-)

diff --git a/include/openvswitch/netdev.h b/include/openvswitch/netdev.h
index 0c10f7b..69480a4 100644
--- a/include/openvswitch/netdev.h
+++ b/include/openvswitch/netdev.h
@@ -61,6 +61,14 @@ struct netdev_stats {
 uint64_t tx_heartbeat_errors;
 uint64_t tx_window_errors;
 
+/* Detailed receive drops. */
+uint64_t rx_qos_drops;
+
+/* Detailed transmit drops. */
+uint64_t tx_qfull_drops;
+uint64_t tx_qos_drops;
+uint64_t tx_mtu_drops;
+
 /* Extended statistics based on RFC2819. */
 uint64_t rx_1_to_64_packets;
 uint64_t rx_65_to_127_packets;
diff --git a/lib/dpctl.c b/lib/dpctl.c
index 9c4eb65..cdbf740 100644
--- a/lib/dpctl.c
+++ b/lib/dpctl.c
@@ -38,6 +38,7 @@
 #include "flow.h"
 #include "openvswitch/match.h"
 #include "netdev.h"
+#include "netdev-provider.h"
 #include "netlink.h"
 #include "odp-util.h"
 #include "openvswitch/ofpbuf.h"
@@ -683,6 +684,12 @@ show_dpif(struct dpif *dpif, struct dpctl_params *dpctl_p)
 print_stat(dpctl_p, "RX packets:", s.rx_packets);
 print_stat(dpctl_p, " errors:", s.rx_errors);
 print_stat(dpctl_p, " dropped:", s.rx_dropped);
+if (dpctl_p->print_detailed_stats &&
+netdev->netdev_class->is_pmd) {
+dpctl_print(dpctl_p, " (");
+print_stat(dpctl_p, "qos:", s.rx_qos_drops);
+dpctl_print(dpctl_p, ") ");
+}
 print_stat(dpctl_p, " overruns:", s.rx_over_errors);
 print_stat(dpctl_p, " frame:", s.rx_frame_errors);
 dpctl_print(dpctl_p, "\n");
@@ -690,6 +697,14 @@ show_dpif(struct dpif *dpif, struct dpctl_params *dpctl_p)
 print_stat(dpctl_p, "TX packets:", s.tx_packets);
 print_stat(dpctl_p, " errors:", s.tx_errors);
 print_stat(dpctl_p, " dropped:", s.tx_dropped);
+if (dpctl_p->print_detailed_stats &&
+netdev->netdev_class->is_pmd) {
+dpctl_print(dpctl_p, " (");
+print_stat(dpctl_p, "queue_full:", s.tx_qfull_drops);
+print_stat(dpctl_p, " mtu_exceeded:", s.tx_mtu_drops);
+print_stat(dpctl_p, " qos:", s.tx_qos_drops);
+dpctl_print(dpctl_p, ") ");
+}
 print_stat(dpctl_p, " aborted:", s.tx_aborted_errors);
 print_stat(dpctl_p, " carrier:", s.tx_carrier_errors);
 dpctl_print(dpctl_p, "\n");
@@ -2414,7 +2429,8 @@ static const struct dpctl_command all_commands[] = {
 { "del-if", "dp iface...", 2, INT_MAX, dpctl_del_if, DP_RW },
 { "set-if", "dp iface...", 2, INT_MAX, dpctl_set_if, DP_RW },
 { "dump-dps", "", 0, 0, dpctl_dump_dps, DP_RO },
-{ "show", "[dp...]",