Re: [ovs-dev] [PATCH v6] Detailed packet drop statistics per dpdk and vhostuser ports
Hi All, The patch " netdev-dpdk: Refactor vhost custom stats for extensibility" by Ilya is merged to master. So I will rebase and send updated patch to avoid any git conflicts. Thanks & Regards, Sriram. -Original Message- From: Sriram Vatala Sent: 26 August 2019 18:29 To: ovs-dev@openvswitch.org; i.maxim...@samsung.com Cc: b...@ovn.org; ian.sto...@intel.com; Sriram Vatala Subject: [PATCH v6] Detailed packet drop statistics per dpdk and vhostuser ports OVS may be unable to transmit packets for multiple reasons and today there is a single counter to track packets dropped due to any of those reasons. The most common reason is that a VM is unable to read packets fast enough causing the vhostuser port transmit queue on the OVS side to become full. This manifests as a problem with VNFs not receiving all packets. Having a separate drop counter to track packets dropped because the transmit queue is full will clearly indicate that the problem is on the VM side and not in OVS. Similarly maintaining separate counters for all possible drops helps in indicating sensible cause for packet drops. This patch adds custom stats counters to track packets dropped at port level and these counters are displayed along with other stats in "ovs-vsctl get interface statistics" command. The detailed stats will be available for both dpdk and vhostuser ports. Signed-off-by: Sriram Vatala --- lib/netdev-dpdk.c | 120 ++--- utilities/bugtool/automake.mk | 3 +- utilities/bugtool/ovs-bugtool-get-iface-stats | 25 + .../bugtool/plugins/network-status/openvswitch.xml | 1 + vswitchd/vswitch.xml | 24 + 5 files changed, 157 insertions(+), 16 deletions(-) create mode 100755 utilities/bugtool/ovs-bugtool-get-iface-stats diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c index 4805783..6685f32 100644 --- a/lib/netdev-dpdk.c +++ b/lib/netdev-dpdk.c @@ -447,8 +447,14 @@ struct netdev_dpdk { PADDED_MEMBERS(CACHE_LINE_SIZE, struct netdev_stats stats; -/* Custom stat for retries when unable to transmit. 
*/ +/* Counters for Custom device stats */ +/* No. of retries when unable to transmit. */ uint64_t tx_retries; +/* Pkts left untransmitted in Tx buffers. Probably Tx Que is full */ +uint64_t tx_failure_drops; +uint64_t tx_mtu_exceeded_drops; +uint64_t tx_qos_drops; +uint64_t rx_qos_drops; /* Protects stats */ rte_spinlock_t stats_lock; /* 4 pad bytes here. */ @@ -2205,6 +2211,7 @@ netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq, struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev); uint16_t nb_rx = 0; uint16_t dropped = 0; +uint16_t qos_drops = 0; int qid = rxq->queue_id * VIRTIO_QNUM + VIRTIO_TXQ; int vid = netdev_dpdk_get_vid(dev); @@ -2236,11 +2243,13 @@ netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq, (struct rte_mbuf **) batch->packets, nb_rx, true); dropped -= nb_rx; +qos_drops = dropped; } rte_spinlock_lock(>stats_lock); netdev_dpdk_vhost_update_rx_counters(>stats, batch->packets, nb_rx, dropped); +dev->rx_qos_drops += qos_drops; rte_spinlock_unlock(>stats_lock); batch->count = nb_rx; @@ -2266,6 +2275,7 @@ netdev_dpdk_rxq_recv(struct netdev_rxq *rxq, struct dp_packet_batch *batch, struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev); int nb_rx; int dropped = 0; +int qos_drops = 0; if (OVS_UNLIKELY(!(dev->flags & NETDEV_UP))) { return EAGAIN; @@ -2284,12 +2294,14 @@ netdev_dpdk_rxq_recv(struct netdev_rxq *rxq, struct dp_packet_batch *batch, (struct rte_mbuf **) batch->packets, nb_rx, true); dropped -= nb_rx; +qos_drops = dropped; } /* Update stats to reflect dropped packets */ if (OVS_UNLIKELY(dropped)) { rte_spinlock_lock(>stats_lock); dev->stats.rx_dropped += dropped; +dev->rx_qos_drops += qos_drops; rte_spinlock_unlock(>stats_lock); } @@ -2373,6 +2385,9 @@ __netdev_dpdk_vhost_send(struct netdev *netdev, int qid, struct rte_mbuf **cur_pkts = (struct rte_mbuf **) pkts; unsigned int total_pkts = cnt; unsigned int dropped = 0; +unsigned int tx_failure; +unsigned int mtu_drops; +unsigned int qos_drops; int i, retries = 0; 
int max_retries = VHOST_ENQ_RETRY_MIN; int vid = netdev_dpdk_get_vid(dev); @@ -2390,9 +2405,12 @@ __netdev_dpdk_vhost_send(struct netdev *netdev, int qid, rte_spinlock_lock(&dev->tx_q[qid].tx_lock); cnt = netdev_dpdk_filter_packet_len(dev, cur_pkts, cnt); +mtu_drops = total_pkts - cnt; +qos_drops = cnt;
Re: [ovs-dev] [PATCH v6] Detailed packet drop statistics per dpdk and vhostuser ports
Bleep bloop. Greetings Sriram Vatala via dev, I am a robot and I have tried out your patch. Thanks for your contribution. I encountered some error that I wasn't expecting. See the details below. checkpatch: WARNING: Line is 80 characters long (recommended limit is 79) #247 FILE: lib/netdev-dpdk.c:2883: (struct netdev_custom_counter *) xcalloc(custom_stats->size, WARNING: Line is 138 characters long (recommended limit is 79) #384 FILE: utilities/bugtool/plugins/network-status/openvswitch.xml:37: /usr/share/openvswitch/scripts/ovs-bugtool-get-iface-stats Lines checked: 425, Warnings: 2, Errors: 0 Please check this out. If you feel there has been an error, please email acon...@redhat.com Thanks, 0-day Robot ___ dev mailing list d...@openvswitch.org https://mail.openvswitch.org/mailman/listinfo/ovs-dev
[ovs-dev] [PATCH v6] Detailed packet drop statistics per dpdk and vhostuser ports
OVS may be unable to transmit packets for multiple reasons and today there is a single counter to track packets dropped due to any of those reasons. The most common reason is that a VM is unable to read packets fast enough causing the vhostuser port transmit queue on the OVS side to become full. This manifests as a problem with VNFs not receiving all packets. Having a separate drop counter to track packets dropped because the transmit queue is full will clearly indicate that the problem is on the VM side and not in OVS. Similarly maintaining separate counters for all possible drops helps in indicating sensible cause for packet drops. This patch adds custom stats counters to track packets dropped at port level and these counters are displayed along with other stats in "ovs-vsctl get interface statistics" command. The detailed stats will be available for both dpdk and vhostuser ports. Signed-off-by: Sriram Vatala --- lib/netdev-dpdk.c | 120 ++--- utilities/bugtool/automake.mk | 3 +- utilities/bugtool/ovs-bugtool-get-iface-stats | 25 + .../bugtool/plugins/network-status/openvswitch.xml | 1 + vswitchd/vswitch.xml | 24 + 5 files changed, 157 insertions(+), 16 deletions(-) create mode 100755 utilities/bugtool/ovs-bugtool-get-iface-stats diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c index 4805783..6685f32 100644 --- a/lib/netdev-dpdk.c +++ b/lib/netdev-dpdk.c @@ -447,8 +447,14 @@ struct netdev_dpdk { PADDED_MEMBERS(CACHE_LINE_SIZE, struct netdev_stats stats; -/* Custom stat for retries when unable to transmit. */ +/* Counters for Custom device stats */ +/* No. of retries when unable to transmit. */ uint64_t tx_retries; +/* Pkts left untransmitted in Tx buffers. Probably Tx Que is full */ +uint64_t tx_failure_drops; +uint64_t tx_mtu_exceeded_drops; +uint64_t tx_qos_drops; +uint64_t rx_qos_drops; /* Protects stats */ rte_spinlock_t stats_lock; /* 4 pad bytes here. 
*/ @@ -2205,6 +2211,7 @@ netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq, struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev); uint16_t nb_rx = 0; uint16_t dropped = 0; +uint16_t qos_drops = 0; int qid = rxq->queue_id * VIRTIO_QNUM + VIRTIO_TXQ; int vid = netdev_dpdk_get_vid(dev); @@ -2236,11 +2243,13 @@ netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq, (struct rte_mbuf **) batch->packets, nb_rx, true); dropped -= nb_rx; +qos_drops = dropped; } rte_spinlock_lock(>stats_lock); netdev_dpdk_vhost_update_rx_counters(>stats, batch->packets, nb_rx, dropped); +dev->rx_qos_drops += qos_drops; rte_spinlock_unlock(>stats_lock); batch->count = nb_rx; @@ -2266,6 +2275,7 @@ netdev_dpdk_rxq_recv(struct netdev_rxq *rxq, struct dp_packet_batch *batch, struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev); int nb_rx; int dropped = 0; +int qos_drops = 0; if (OVS_UNLIKELY(!(dev->flags & NETDEV_UP))) { return EAGAIN; @@ -2284,12 +2294,14 @@ netdev_dpdk_rxq_recv(struct netdev_rxq *rxq, struct dp_packet_batch *batch, (struct rte_mbuf **) batch->packets, nb_rx, true); dropped -= nb_rx; +qos_drops = dropped; } /* Update stats to reflect dropped packets */ if (OVS_UNLIKELY(dropped)) { rte_spinlock_lock(>stats_lock); dev->stats.rx_dropped += dropped; +dev->rx_qos_drops += qos_drops; rte_spinlock_unlock(>stats_lock); } @@ -2373,6 +2385,9 @@ __netdev_dpdk_vhost_send(struct netdev *netdev, int qid, struct rte_mbuf **cur_pkts = (struct rte_mbuf **) pkts; unsigned int total_pkts = cnt; unsigned int dropped = 0; +unsigned int tx_failure; +unsigned int mtu_drops; +unsigned int qos_drops; int i, retries = 0; int max_retries = VHOST_ENQ_RETRY_MIN; int vid = netdev_dpdk_get_vid(dev); @@ -2390,9 +2405,12 @@ __netdev_dpdk_vhost_send(struct netdev *netdev, int qid, rte_spinlock_lock(>tx_q[qid].tx_lock); cnt = netdev_dpdk_filter_packet_len(dev, cur_pkts, cnt); +mtu_drops = total_pkts - cnt; +qos_drops = cnt; /* Check has QoS has been configured for the netdev */ cnt 
= netdev_dpdk_qos_run(dev, cur_pkts, cnt, true); -dropped = total_pkts - cnt; +qos_drops -= cnt; +dropped = qos_drops + mtu_drops; do { int vhost_qid = qid * VIRTIO_QNUM + VIRTIO_RXQ; @@ -2417,12 +2435,16 @@ __netdev_dpdk_vhost_send(struct netdev *netdev, int qid, } } while (cnt && (retries++ < max_retries)); +tx_failure = cnt;