On Fri, 10 May 2024 03:35:51 +0000, Dan Jurgens <dani...@nvidia.com> wrote:
> > From: Xuan Zhuo <xuanz...@linux.alibaba.com>
> > Sent: Thursday, May 9, 2024 8:22 PM
> > To: Dan Jurgens <dani...@nvidia.com>
> > Cc: m...@redhat.com; jasow...@redhat.com; xuanz...@linux.alibaba.com;
> > virtualizat...@lists.linux.dev; da...@davemloft.net;
> > eduma...@google.com; k...@kernel.org; pab...@redhat.com; Jiri Pirko
> > <j...@nvidia.com>; Dan Jurgens <dani...@nvidia.com>;
> > netdev@vger.kernel.org
> > Subject: Re: [PATCH net-next 2/2] virtio_net: Add TX stopped and wake
> > counters
> >
> > On Thu, 9 May 2024 11:32:16 -0500, Daniel Jurgens <dani...@nvidia.com>
> > wrote:
> > > Add tx queue stop and wake counters; they are useful for debugging.
> > >
> > > $ ./tools/net/ynl/cli.py --spec netlink/specs/netdev.yaml \
> > >       --dump qstats-get --json '{"scope": "queue"}'
> > > ...
> > >  {'ifindex': 13,
> > >   'queue-id': 0,
> > >   'queue-type': 'tx',
> > >   'tx-bytes': 14756682850,
> > >   'tx-packets': 226465,
> > >   'tx-stop': 113208,
> > >   'tx-wake': 113208},
> > >  {'ifindex': 13,
> > >   'queue-id': 1,
> > >   'queue-type': 'tx',
> > >   'tx-bytes': 18167675008,
> > >   'tx-packets': 278660,
> > >   'tx-stop': 8632,
> > >   'tx-wake': 8632}]
> > >
> > > Signed-off-by: Daniel Jurgens <dani...@nvidia.com>
> > > Reviewed-by: Jiri Pirko <j...@nvidia.com>
> > > ---
> > >  drivers/net/virtio_net.c | 28 ++++++++++++++++++++++++++--
> > >  1 file changed, 26 insertions(+), 2 deletions(-)
> > >
> > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > index 218a446c4c27..df6121c38a1b 100644
> > > --- a/drivers/net/virtio_net.c
> > > +++ b/drivers/net/virtio_net.c
> > > @@ -95,6 +95,8 @@ struct virtnet_sq_stats {
> > >   u64_stats_t xdp_tx_drops;
> > >   u64_stats_t kicks;
> > >   u64_stats_t tx_timeouts;
> > > + u64_stats_t stop;
> > > + u64_stats_t wake;
> > >  };
> > >
> > >  struct virtnet_rq_stats {
> > > @@ -145,6 +147,8 @@ static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
> > >  static const struct virtnet_stat_desc virtnet_sq_stats_desc_qstat[] = {
> > >   VIRTNET_SQ_STAT_QSTAT("packets", packets),
> > >   VIRTNET_SQ_STAT_QSTAT("bytes",   bytes),
> > > + VIRTNET_SQ_STAT_QSTAT("stop",    stop),
> > > + VIRTNET_SQ_STAT_QSTAT("wake",    wake),
> > >  };
> > >
> > >  static const struct virtnet_stat_desc virtnet_rq_stats_desc_qstat[] = {
> > > @@ -1014,6 +1018,9 @@ static void check_sq_full_and_disable(struct virtnet_info *vi,
> > >    */
> > >   if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
> > >           netif_stop_subqueue(dev, qnum);
> > > +         u64_stats_update_begin(&sq->stats.syncp);
> > > +         u64_stats_inc(&sq->stats.stop);
> > > +         u64_stats_update_end(&sq->stats.syncp);
> >
> > How about introducing two helpers to wrap
> > netif_tx_queue_stopped and netif_start_subqueue?
> >
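To be clear, something like the below is what I have in mind (just a rough,
untested sketch; the helper names are placeholders):

static void virtnet_sq_stop(struct net_device *dev, struct send_queue *sq,
			    int qnum)
{
	netif_stop_subqueue(dev, qnum);

	u64_stats_update_begin(&sq->stats.syncp);
	u64_stats_inc(&sq->stats.stop);
	u64_stats_update_end(&sq->stats.syncp);
}

static void virtnet_sq_wake(struct net_device *dev, struct send_queue *sq,
			    int qnum)
{
	netif_start_subqueue(dev, qnum);

	u64_stats_update_begin(&sq->stats.syncp);
	u64_stats_inc(&sq->stats.wake);
	u64_stats_update_end(&sq->stats.syncp);
}

The poll paths wake with netif_tx_wake_queue(txq), so a txq-based variant
(or netdev_get_tx_queue()) would be needed there.
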
> > >           if (use_napi) {
> > >                   if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
> > >                           virtqueue_napi_schedule(&sq->napi, sq->vq);
> > > @@ -1022,6 +1029,9 @@ static void check_sq_full_and_disable(struct virtnet_info *vi,
> > >                   free_old_xmit(sq, false);
> > >                   if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
> > >                           netif_start_subqueue(dev, qnum);
> > > +                         u64_stats_update_begin(&sq->stats.syncp);
> > > +                         u64_stats_inc(&sq->stats.wake);
> > > +                         u64_stats_update_end(&sq->stats.syncp);
> >
> > If we start the queue immediately, should we update the counter?
>
> I intentionally only counted the wakes on restarts after stopping the queue.
> I don't think counting the initial wake adds any value since it always 
> happens.

Here, we start the queue again immediately after stopping it.
So from the upper layer's point of view, the queue's status "has not"
changed, and I think we do not need to update the wake counter.
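
Roughly, what I mean for that branch is (sketch only, based on the hunk above):

			/* non-napi path in check_sq_full_and_disable() */
			free_old_xmit(sq, false);
			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
				/* The queue was stopped and restarted within
				 * the same call, so the stack never saw it
				 * stopped; do not bump the wake counter here.
				 */
				netif_start_subqueue(dev, qnum);
				virtqueue_disable_cb(sq->vq);
			}

That is also consistent with the netif_tx_queue_stopped() checks in the
poll paths further down, which only count a wake when the queue was
actually observed stopped.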

Thanks.


>
> >
> > Thanks.
> >
> > >                           virtqueue_disable_cb(sq->vq);
> > >                   }
> > >           }
> > > @@ -2322,8 +2332,14 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
> > >                   free_old_xmit(sq, true);
> > >           } while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
> > >
> > > -         if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
> > > +         if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
> > > +                 if (netif_tx_queue_stopped(txq)) {
> > > +                         u64_stats_update_begin(&sq->stats.syncp);
> > > +                         u64_stats_inc(&sq->stats.wake);
> > > +                         u64_stats_update_end(&sq->stats.syncp);
> > > +                 }
> > >                   netif_tx_wake_queue(txq);
> > > +         }
> > >
> > >           __netif_tx_unlock(txq);
> > >   }
> > > @@ -2473,8 +2489,14 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
> > >   virtqueue_disable_cb(sq->vq);
> > >   free_old_xmit(sq, true);
> > >
> > > - if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
> > > + if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
> > > +         if (netif_tx_queue_stopped(txq)) {
> > > +                 u64_stats_update_begin(&sq->stats.syncp);
> > > +                 u64_stats_inc(&sq->stats.wake);
> > > +                 u64_stats_update_end(&sq->stats.syncp);
> > > +         }
> > >           netif_tx_wake_queue(txq);
> > > + }
> > >
> > >   opaque = virtqueue_enable_cb_prepare(sq->vq);
> > >
> > > @@ -4790,6 +4812,8 @@ static void virtnet_get_base_stats(struct net_device *dev,
> > >
> > >   tx->bytes = 0;
> > >   tx->packets = 0;
> > > + tx->stop = 0;
> > > + tx->wake = 0;
> > >
> > >   if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) {
> > >           tx->hw_drops = 0;
> > > --
> > > 2.44.0
> > >
>
