Since we switched to retrying receive buffer refills directly in NAPI
poll instead of via the delayed worker, remove all of the now-unused
delayed refill worker code.
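
For context, a minimal sketch of the retry-in-NAPI-poll idea this
builds on, assuming the behaviour introduced earlier in this series
(the exact condition and variable names in virtnet_poll may differ):

	/* Sketch: if the rx ring runs low and the atomic refill fails,
	 * report the full budget so NAPI stays scheduled and the refill
	 * is retried on the next poll, instead of punting to a delayed
	 * workqueue.
	 */
	if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2 &&
	    !try_fill_recv(vi, rq, GFP_ATOMIC))
		received = budget; /* force a NAPI repoll */

With the retry handled inline, the refill delayed_work, the
refill_enabled flag, and refill_lock have no remaining users, so drop
them.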

Acked-by: Michael S. Tsirkin <[email protected]>
Signed-off-by: Bui Quang Minh <[email protected]>
---
 drivers/net/virtio_net.c | 86 ----------------------------------------
 1 file changed, 86 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index f986abf0c236..a4dbc958689b 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -441,9 +441,6 @@ struct virtnet_info {
        /* Packet virtio header size */
        u8 hdr_len;
 
-       /* Work struct for delayed refilling if we run low on memory. */
-       struct delayed_work refill;
-
        /* UDP tunnel support */
        bool tx_tnl;
 
@@ -451,12 +448,6 @@ struct virtnet_info {
 
        bool rx_tnl_csum;
 
-       /* Is delayed refill enabled? */
-       bool refill_enabled;
-
-       /* The lock to synchronize the access to refill_enabled */
-       spinlock_t refill_lock;
-
        /* Work struct for config space updates */
        struct work_struct config_work;
 
@@ -720,20 +711,6 @@ static void virtnet_rq_free_buf(struct virtnet_info *vi,
                put_page(virt_to_head_page(buf));
 }
 
-static void enable_delayed_refill(struct virtnet_info *vi)
-{
-       spin_lock_bh(&vi->refill_lock);
-       vi->refill_enabled = true;
-       spin_unlock_bh(&vi->refill_lock);
-}
-
-static void disable_delayed_refill(struct virtnet_info *vi)
-{
-       spin_lock_bh(&vi->refill_lock);
-       vi->refill_enabled = false;
-       spin_unlock_bh(&vi->refill_lock);
-}
-
 static void enable_rx_mode_work(struct virtnet_info *vi)
 {
        rtnl_lock();
@@ -2948,42 +2925,6 @@ static void virtnet_napi_disable(struct receive_queue *rq)
        napi_disable(napi);
 }
 
-static void refill_work(struct work_struct *work)
-{
-       struct virtnet_info *vi =
-               container_of(work, struct virtnet_info, refill.work);
-       bool still_empty;
-       int i;
-
-       for (i = 0; i < vi->curr_queue_pairs; i++) {
-               struct receive_queue *rq = &vi->rq[i];
-
-               /*
-                * When queue API support is added in the future and the call
-                * below becomes napi_disable_locked, this driver will need to
-                * be refactored.
-                *
-                * One possible solution would be to:
-                *   - cancel refill_work with cancel_delayed_work (note:
-                *     non-sync)
-                *   - cancel refill_work with cancel_delayed_work_sync in
-                *     virtnet_remove after the netdev is unregistered
-                *   - wrap all of the work in a lock (perhaps the netdev
-                *     instance lock)
-                *   - check netif_running() and return early to avoid a race
-                */
-               napi_disable(&rq->napi);
-               still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
-               virtnet_napi_do_enable(rq->vq, &rq->napi);
-
-               /* In theory, this can happen: if we don't get any buffers in
-                * we will *never* try to fill again.
-                */
-               if (still_empty)
-                       schedule_delayed_work(&vi->refill, HZ/2);
-       }
-}
-
 static int virtnet_receive_xsk_bufs(struct virtnet_info *vi,
                                    struct receive_queue *rq,
                                    int budget,
@@ -3226,8 +3167,6 @@ static int virtnet_open(struct net_device *dev)
        struct virtnet_info *vi = netdev_priv(dev);
        int i, err;
 
-       enable_delayed_refill(vi);
-
        for (i = 0; i < vi->max_queue_pairs; i++) {
                if (i < vi->curr_queue_pairs)
                        /* Pre-fill rq agressively, to make sure we are ready to
@@ -3252,9 +3191,6 @@ static int virtnet_open(struct net_device *dev)
        return 0;
 
 err_enable_qp:
-       disable_delayed_refill(vi);
-       cancel_delayed_work_sync(&vi->refill);
-
        for (i--; i >= 0; i--) {
                virtnet_disable_queue_pair(vi, i);
                virtnet_cancel_dim(vi, &vi->rq[i].dim);
@@ -3448,24 +3384,12 @@ static void virtnet_rx_pause_all(struct virtnet_info *vi)
 {
        int i;
 
-       /*
-        * Make sure refill_work does not run concurrently to
-        * avoid napi_disable race which leads to deadlock.
-        */
-       disable_delayed_refill(vi);
-       cancel_delayed_work_sync(&vi->refill);
        for (i = 0; i < vi->max_queue_pairs; i++)
                __virtnet_rx_pause(vi, &vi->rq[i]);
 }
 
 static void virtnet_rx_pause(struct virtnet_info *vi, struct receive_queue *rq)
 {
-       /*
-        * Make sure refill_work does not run concurrently to
-        * avoid napi_disable race which leads to deadlock.
-        */
-       disable_delayed_refill(vi);
-       cancel_delayed_work_sync(&vi->refill);
        __virtnet_rx_pause(vi, rq);
 }
 
@@ -3488,7 +3412,6 @@ static void virtnet_rx_resume_all(struct virtnet_info *vi)
 {
        int i;
 
-       enable_delayed_refill(vi);
        for (i = 0; i < vi->max_queue_pairs; i++) {
                if (i < vi->curr_queue_pairs)
                        __virtnet_rx_resume(vi, &vi->rq[i], true);
@@ -3499,7 +3422,6 @@ static void virtnet_rx_resume_all(struct virtnet_info *vi)
 
 static void virtnet_rx_resume(struct virtnet_info *vi, struct receive_queue *rq)
 {
-       enable_delayed_refill(vi);
        __virtnet_rx_resume(vi, rq, true);
 }
 
@@ -3845,10 +3767,6 @@ static int virtnet_close(struct net_device *dev)
        struct virtnet_info *vi = netdev_priv(dev);
        int i;
 
-       /* Make sure NAPI doesn't schedule refill work */
-       disable_delayed_refill(vi);
-       /* Make sure refill_work doesn't re-enable napi! */
-       cancel_delayed_work_sync(&vi->refill);
        /* Prevent the config change callback from changing carrier
         * after close
         */
@@ -5804,7 +5722,6 @@ static int virtnet_restore_up(struct virtio_device *vdev)
 
        virtio_device_ready(vdev);
 
-       enable_delayed_refill(vi);
        enable_rx_mode_work(vi);
 
        if (netif_running(vi->dev)) {
@@ -6561,7 +6478,6 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
        if (!vi->rq)
                goto err_rq;
 
-       INIT_DELAYED_WORK(&vi->refill, refill_work);
        for (i = 0; i < vi->max_queue_pairs; i++) {
                vi->rq[i].pages = NULL;
                netif_napi_add_config(vi->dev, &vi->rq[i].napi, virtnet_poll,
@@ -6903,7 +6819,6 @@ static int virtnet_probe(struct virtio_device *vdev)
 
        INIT_WORK(&vi->config_work, virtnet_config_changed_work);
        INIT_WORK(&vi->rx_mode_work, virtnet_rx_mode_work);
-       spin_lock_init(&vi->refill_lock);
 
        if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) {
                vi->mergeable_rx_bufs = true;
@@ -7167,7 +7082,6 @@ static int virtnet_probe(struct virtio_device *vdev)
        net_failover_destroy(vi->failover);
 free_vqs:
        virtio_reset_device(vdev);
-       cancel_delayed_work_sync(&vi->refill);
        free_receive_page_frags(vi);
        virtnet_del_vqs(vi);
 free:
-- 
2.43.0

