The xsk wakeup callback is used by the xsk framework or by the user to
trigger the xsk xmit logic.

Virtio-net does not support actively generating an interrupt, so it
tries to trigger the tx NAPI on the local CPU instead.

Signed-off-by: Xuan Zhuo <xuanz...@linux.alibaba.com>
---
 drivers/net/virtio/main.c       | 20 ++++++--------------
 drivers/net/virtio/virtio_net.h |  9 +++++++++
 drivers/net/virtio/xsk.c        | 23 +++++++++++++++++++++++
 drivers/net/virtio/xsk.h        |  1 +
 4 files changed, 39 insertions(+), 14 deletions(-)

diff --git a/drivers/net/virtio/main.c b/drivers/net/virtio/main.c
index cb6c8916f605..2c82418b0344 100644
--- a/drivers/net/virtio/main.c
+++ b/drivers/net/virtio/main.c
@@ -233,15 +233,6 @@ static void disable_delayed_refill(struct virtnet_info *vi)
        spin_unlock_bh(&vi->refill_lock);
 }
 
-static void virtqueue_napi_schedule(struct napi_struct *napi,
-                                   struct virtqueue *vq)
-{
-       if (napi_schedule_prep(napi)) {
-               virtqueue_disable_cb(vq);
-               __napi_schedule(napi);
-       }
-}
-
 static void virtqueue_napi_complete(struct napi_struct *napi,
                                    struct virtqueue *vq, int processed)
 {
@@ -250,7 +241,7 @@ static void virtqueue_napi_complete(struct napi_struct 
*napi,
        opaque = virtqueue_enable_cb_prepare(vq);
        if (napi_complete_done(napi, processed)) {
                if (unlikely(virtqueue_poll(vq, opaque)))
-                       virtqueue_napi_schedule(napi, vq);
+                       virtnet_vq_napi_schedule(napi, vq);
        } else {
                virtqueue_disable_cb(vq);
        }
@@ -265,7 +256,7 @@ static void skb_xmit_done(struct virtqueue *vq)
        virtqueue_disable_cb(vq);
 
        if (napi->weight)
-               virtqueue_napi_schedule(napi, vq);
+               virtnet_vq_napi_schedule(napi, vq);
        else
                /* We were probably waiting for more output buffers. */
                netif_wake_subqueue(vi->dev, vq2txq(vq));
@@ -635,7 +626,7 @@ void virtnet_check_sq_full_and_disable(struct virtnet_info 
*vi,
                netif_stop_subqueue(dev, qnum);
                if (use_napi) {
                        if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
-                               virtqueue_napi_schedule(&sq->napi, sq->vq);
+                               virtnet_vq_napi_schedule(&sq->napi, sq->vq);
                } else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
                        /* More just got used, free them then recheck. */
                        free_old_xmit(sq, false);
@@ -1802,7 +1793,7 @@ static void skb_recv_done(struct virtqueue *rvq)
        struct virtnet_info *vi = rvq->vdev->priv;
        struct virtnet_rq *rq = &vi->rq[vq2rxq(rvq)];
 
-       virtqueue_napi_schedule(&rq->napi, rvq);
+       virtnet_vq_napi_schedule(&rq->napi, rvq);
 }
 
 static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
@@ -1814,7 +1805,7 @@ static void virtnet_napi_enable(struct virtqueue *vq, 
struct napi_struct *napi)
         * Call local_bh_enable after to trigger softIRQ processing.
         */
        local_bh_disable();
-       virtqueue_napi_schedule(napi, vq);
+       virtnet_vq_napi_schedule(napi, vq);
        local_bh_enable();
 }
 
@@ -3785,6 +3776,7 @@ static const struct net_device_ops virtnet_netdev = {
        .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
        .ndo_bpf                = virtnet_xdp,
        .ndo_xdp_xmit           = virtnet_xdp_xmit,
+       .ndo_xsk_wakeup         = virtnet_xsk_wakeup,
        .ndo_features_check     = passthru_features_check,
        .ndo_get_phys_port_name = virtnet_get_phys_port_name,
        .ndo_set_features       = virtnet_set_features,
diff --git a/drivers/net/virtio/virtio_net.h b/drivers/net/virtio/virtio_net.h
index 7dcbd1d40fba..82a56d640b11 100644
--- a/drivers/net/virtio/virtio_net.h
+++ b/drivers/net/virtio/virtio_net.h
@@ -284,6 +284,15 @@ static inline bool virtnet_is_xdp_raw_buffer_queue(struct 
virtnet_info *vi, int
                return false;
 }
 
+static inline void virtnet_vq_napi_schedule(struct napi_struct *napi,
+                                           struct virtqueue *vq)
+{
+       if (napi_schedule_prep(napi)) {
+               virtqueue_disable_cb(vq);
+               __napi_schedule(napi);
+       }
+}
+
 void virtnet_rx_pause(struct virtnet_info *vi, struct virtnet_rq *rq);
 void virtnet_rx_resume(struct virtnet_info *vi, struct virtnet_rq *rq);
 void virtnet_tx_pause(struct virtnet_info *vi, struct virtnet_sq *sq);
diff --git a/drivers/net/virtio/xsk.c b/drivers/net/virtio/xsk.c
index d2a96424ade9..9e5523ff5707 100644
--- a/drivers/net/virtio/xsk.c
+++ b/drivers/net/virtio/xsk.c
@@ -95,6 +95,29 @@ bool virtnet_xsk_xmit(struct virtnet_sq *sq, struct 
xsk_buff_pool *pool,
        return sent == budget;
 }
 
+int virtnet_xsk_wakeup(struct net_device *dev, u32 qid, u32 flag)
+{
+       struct virtnet_info *vi = netdev_priv(dev);
+       struct virtnet_sq *sq;
+
+       if (!netif_running(dev))
+               return -ENETDOWN;
+
+       if (qid >= vi->curr_queue_pairs)
+               return -EINVAL;
+
+       sq = &vi->sq[qid];
+
+       if (napi_if_scheduled_mark_missed(&sq->napi))
+               return 0;
+
+       local_bh_disable();
+       virtnet_vq_napi_schedule(&sq->napi, sq->vq);
+       local_bh_enable();
+
+       return 0;
+}
+
 static int virtnet_rq_bind_xsk_pool(struct virtnet_info *vi, struct virtnet_rq 
*rq,
                                    struct xsk_buff_pool *pool)
 {
diff --git a/drivers/net/virtio/xsk.h b/drivers/net/virtio/xsk.h
index 73ca8cd5308b..1bd19dcda649 100644
--- a/drivers/net/virtio/xsk.h
+++ b/drivers/net/virtio/xsk.h
@@ -17,4 +17,5 @@ static inline void *virtnet_xsk_to_ptr(u32 len)
 int virtnet_xsk_pool_setup(struct net_device *dev, struct netdev_bpf *xdp);
 bool virtnet_xsk_xmit(struct virtnet_sq *sq, struct xsk_buff_pool *pool,
                      int budget);
+int virtnet_xsk_wakeup(struct net_device *dev, u32 qid, u32 flag);
 #endif
-- 
2.32.0.3.g01195cf9f


Reply via email to