virtnet_free_old_xmit() distinguishes the three pointer types (skb,
xdp frame, xsk buffer) by the low bits of the pointer.
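
A minimal sketch of the tagging idea (a standalone illustration; the
enum values, the mask, and the helper names here are assumptions for
illustration, not the driver's actual definitions, which use the
VIRTNET_XMIT_TYPE_* enum visible in the diff below): allocated objects
are at least 4-byte aligned, so the two low bits of a valid pointer are
zero and are free to carry a small type tag.

    enum xmit_type {
            XMIT_TYPE_SKB = 0,
            XMIT_TYPE_XDP = 1,
            XMIT_TYPE_XSK = 2,
    };

    #define XMIT_TYPE_MASK 0x3UL

    /* Pack a type tag into the low bits of an aligned pointer. */
    static void *xmit_ptr_pack(void *ptr, enum xmit_type type)
    {
            return (void *)((unsigned long)ptr | type);
    }

    /* Read the type tag back out of the low bits. */
    static enum xmit_type xmit_ptr_type(void *ptr)
    {
            return (unsigned long)ptr & XMIT_TYPE_MASK;
    }

    /* Clear the tag bits to recover the original pointer. */
    static void *xmit_ptr_unpack(void *ptr)
    {
            return (void *)((unsigned long)ptr & ~XMIT_TYPE_MASK);
    }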

Signed-off-by: Xuan Zhuo <xuanz...@linux.alibaba.com>
---
 drivers/net/virtio_net.c | 85 ++++++++++++++++++++++++++++------------
 1 file changed, 59 insertions(+), 26 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index ce8ac9239158..700d080d8c5b 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -85,6 +85,7 @@ struct virtnet_sq_free_stats {
        u64 bytes;
        u64 napi_packets;
        u64 napi_bytes;
+       u64 xsk;
 };
 
 struct virtnet_sq_stats {
@@ -511,6 +512,7 @@ static struct sk_buff *virtnet_skb_append_frag(struct sk_buff *head_skb,
                                               struct sk_buff *curr_skb,
                                               struct page *page, void *buf,
                                               int len, int truesize);
+static void virtnet_xsk_completed(struct send_queue *sq, int num);
 
 enum virtnet_xmit_type {
        VIRTNET_XMIT_TYPE_SKB,
@@ -595,12 +597,24 @@ static void __free_old_xmit(struct send_queue *sq, struct netdev_queue *txq,
 
                case VIRTNET_XMIT_TYPE_XSK:
                        stats->bytes += virtnet_ptr_to_xsk(ptr);
+                       stats->xsk++;
                        break;
                }
        }
        netdev_tx_completed_queue(txq, stats->napi_packets, stats->napi_bytes);
 }
 
+static void virtnet_free_old_xmit(struct send_queue *sq,
+                                 struct netdev_queue *txq,
+                                 bool in_napi,
+                                 struct virtnet_sq_free_stats *stats)
+{
+       __free_old_xmit(sq, txq, in_napi, stats);
+
+       if (stats->xsk)
+               virtnet_xsk_completed(sq, stats->xsk);
+}
+
 /* Converting between virtqueue no. and kernel tx/rx queue no.
  * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
  */
@@ -1021,7 +1035,7 @@ static void free_old_xmit(struct send_queue *sq, struct netdev_queue *txq,
 {
        struct virtnet_sq_free_stats stats = {0};
 
-       __free_old_xmit(sq, txq, in_napi, &stats);
+       virtnet_free_old_xmit(sq, txq, in_napi, &stats);
 
        /* Avoid overhead when no packets have been processed
         * happens when called speculatively from start_xmit.
@@ -1382,29 +1396,6 @@ static int virtnet_add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue
        return err;
 }
 
-static int virtnet_xsk_wakeup(struct net_device *dev, u32 qid, u32 flag)
-{
-       struct virtnet_info *vi = netdev_priv(dev);
-       struct send_queue *sq;
-
-       if (!netif_running(dev))
-               return -ENETDOWN;
-
-       if (qid >= vi->curr_queue_pairs)
-               return -EINVAL;
-
-       sq = &vi->sq[qid];
-
-       if (napi_if_scheduled_mark_missed(&sq->napi))
-               return 0;
-
-       local_bh_disable();
-       virtqueue_napi_schedule(&sq->napi, sq->vq);
-       local_bh_enable();
-
-       return 0;
-}
-
 static void *virtnet_xsk_to_ptr(u32 len)
 {
        unsigned long p;
@@ -1476,8 +1467,12 @@ static bool virtnet_xsk_xmit(struct send_queue *sq, struct xsk_buff_pool *pool,
        u64 kicks = 0;
        int sent;
 
+       /* Avoid a needless tx napi wakeup: call __free_old_xmit directly. */
        __free_old_xmit(sq, netdev_get_tx_queue(dev, sq - vi->sq), true, &stats);
 
+       if (stats.xsk)
+               xsk_tx_completed(sq->xsk_pool, stats.xsk);
+
        sent = virtnet_xsk_xmit_batch(sq, pool, budget, &kicks);
 
        if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq))
@@ -1496,6 +1491,44 @@ static bool virtnet_xsk_xmit(struct send_queue *sq, struct xsk_buff_pool *pool,
        return sent == budget;
 }
 
+static void xsk_wakeup(struct send_queue *sq)
+{
+       if (napi_if_scheduled_mark_missed(&sq->napi))
+               return;
+
+       local_bh_disable();
+       virtqueue_napi_schedule(&sq->napi, sq->vq);
+       local_bh_enable();
+}
+
+static int virtnet_xsk_wakeup(struct net_device *dev, u32 qid, u32 flag)
+{
+       struct virtnet_info *vi = netdev_priv(dev);
+       struct send_queue *sq;
+
+       if (!netif_running(dev))
+               return -ENETDOWN;
+
+       if (qid >= vi->curr_queue_pairs)
+               return -EINVAL;
+
+       sq = &vi->sq[qid];
+
+       xsk_wakeup(sq);
+       return 0;
+}
+
+static void virtnet_xsk_completed(struct send_queue *sq, int num)
+{
+       xsk_tx_completed(sq->xsk_pool, num);
+
+       /* If this is called from rx poll, start_xmit or xdp xmit, wake
+        * up the tx napi to consume the xsk tx queue, because the tx
+        * interrupt may not be triggered.
+        */
+       xsk_wakeup(sq);
+}
+
 static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
                                   struct send_queue *sq,
                                   struct xdp_frame *xdpf)
@@ -1609,8 +1642,8 @@ static int virtnet_xdp_xmit(struct net_device *dev,
        }
 
        /* Free up any pending old buffers before queueing new ones. */
-       __free_old_xmit(sq, netdev_get_tx_queue(dev, sq - vi->sq),
-                       false, &stats);
+       virtnet_free_old_xmit(sq, netdev_get_tx_queue(dev, sq - vi->sq),
+                             false, &stats);
 
        for (i = 0; i < n; i++) {
                struct xdp_frame *xdpf = frames[i];
-- 
2.32.0.3.g01195cf9f