From: Stefano Garzarella <sgarz...@redhat.com>

[ Upstream commit 17dd1367389cfe7f150790c83247b68e0c19d106 ]

Before calling vdev->config->reset(vdev) we need to be sure that
no one is accessing the device. For this reason, we add new variables
to struct virtio_vsock to stop the workers during the .remove().

This patch also adds a few comments before vdev->config->reset(vdev)
and vdev->config->del_vqs(vdev).
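
For reference, a minimal userspace sketch of the same stop-flag pattern
(pthreads instead of the kernel mutex/workqueue API; fake_dev, worker()
and device_remove() are illustrative names, not taken from this patch):

/* Sketch: a worker only touches the device while dev->run is true,
 * checked under the same lock that teardown takes to clear the flag,
 * so once teardown drops the lock no worker can reach the device.
 * Build with: gcc -pthread sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_dev {
	pthread_mutex_t lock;
	bool run;		/* analogous to tx_run/rx_run/event_run */
};

static void worker(struct fake_dev *dev)
{
	pthread_mutex_lock(&dev->lock);
	if (!dev->run)
		goto out;	/* device is going away, do not touch it */

	printf("worker: accessing device\n");
out:
	pthread_mutex_unlock(&dev->lock);
}

static void device_remove(struct fake_dev *dev)
{
	/* Stop workers: any worker that runs after this sees run == false
	 * and bails out before touching the device, so reset is safe.
	 */
	pthread_mutex_lock(&dev->lock);
	dev->run = false;
	pthread_mutex_unlock(&dev->lock);

	printf("remove: device reset is now safe\n");
}

int main(void)
{
	struct fake_dev dev = { .lock = PTHREAD_MUTEX_INITIALIZER, .run = true };

	worker(&dev);		/* accesses the device */
	device_remove(&dev);	/* clears the flag under the lock */
	worker(&dev);		/* bails out without touching the device */
	return 0;
}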

Suggested-by: Stefan Hajnoczi <stefa...@redhat.com>
Suggested-by: Michael S. Tsirkin <m...@redhat.com>
Signed-off-by: Stefano Garzarella <sgarz...@redhat.com>
Signed-off-by: David S. Miller <da...@davemloft.net>
Signed-off-by: Sasha Levin <sas...@kernel.org>
---
 net/vmw_vsock/virtio_transport.c | 51 +++++++++++++++++++++++++++++++-
 1 file changed, 50 insertions(+), 1 deletion(-)

diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index 68186419c445f..4bc217ef56945 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -39,6 +39,7 @@ struct virtio_vsock {
         * must be accessed with tx_lock held.
         */
        struct mutex tx_lock;
+       bool tx_run;
 
        struct work_struct send_pkt_work;
        spinlock_t send_pkt_list_lock;
@@ -54,6 +55,7 @@ struct virtio_vsock {
         * must be accessed with rx_lock held.
         */
        struct mutex rx_lock;
+       bool rx_run;
        int rx_buf_nr;
        int rx_buf_max_nr;
 
@@ -61,6 +63,7 @@ struct virtio_vsock {
         * vqs[VSOCK_VQ_EVENT] must be accessed with event_lock held.
         */
        struct mutex event_lock;
+       bool event_run;
        struct virtio_vsock_event event_list[8];
 
        u32 guest_cid;
@@ -95,6 +98,10 @@ static void virtio_transport_loopback_work(struct work_struct *work)
        spin_unlock_bh(&vsock->loopback_list_lock);
 
        mutex_lock(&vsock->rx_lock);
+
+       if (!vsock->rx_run)
+               goto out;
+
        while (!list_empty(&pkts)) {
                struct virtio_vsock_pkt *pkt;
 
@@ -103,6 +110,7 @@ static void virtio_transport_loopback_work(struct work_struct *work)
 
                virtio_transport_recv_pkt(pkt);
        }
+out:
        mutex_unlock(&vsock->rx_lock);
 }
 
@@ -131,6 +139,9 @@ virtio_transport_send_pkt_work(struct work_struct *work)
 
        mutex_lock(&vsock->tx_lock);
 
+       if (!vsock->tx_run)
+               goto out;
+
        vq = vsock->vqs[VSOCK_VQ_TX];
 
        for (;;) {
@@ -189,6 +200,7 @@ virtio_transport_send_pkt_work(struct work_struct *work)
        if (added)
                virtqueue_kick(vq);
 
+out:
        mutex_unlock(&vsock->tx_lock);
 
        if (restart_rx)
@@ -324,6 +336,10 @@ static void virtio_transport_tx_work(struct work_struct *work)
 
        vq = vsock->vqs[VSOCK_VQ_TX];
        mutex_lock(&vsock->tx_lock);
+
+       if (!vsock->tx_run)
+               goto out;
+
        do {
                struct virtio_vsock_pkt *pkt;
                unsigned int len;
@@ -334,6 +350,8 @@ static void virtio_transport_tx_work(struct work_struct *work)
                        added = true;
                }
        } while (!virtqueue_enable_cb(vq));
+
+out:
        mutex_unlock(&vsock->tx_lock);
 
        if (added)
@@ -362,6 +380,9 @@ static void virtio_transport_rx_work(struct work_struct *work)
 
        mutex_lock(&vsock->rx_lock);
 
+       if (!vsock->rx_run)
+               goto out;
+
        do {
                virtqueue_disable_cb(vq);
                for (;;) {
@@ -471,6 +492,9 @@ static void virtio_transport_event_work(struct work_struct *work)
 
        mutex_lock(&vsock->event_lock);
 
+       if (!vsock->event_run)
+               goto out;
+
        do {
                struct virtio_vsock_event *event;
                unsigned int len;
@@ -485,7 +509,7 @@ static void virtio_transport_event_work(struct work_struct *work)
        } while (!virtqueue_enable_cb(vq));
 
        virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
-
+out:
        mutex_unlock(&vsock->event_lock);
 }
 
@@ -621,12 +645,18 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
        INIT_WORK(&vsock->send_pkt_work, virtio_transport_send_pkt_work);
        INIT_WORK(&vsock->loopback_work, virtio_transport_loopback_work);
 
+       mutex_lock(&vsock->tx_lock);
+       vsock->tx_run = true;
+       mutex_unlock(&vsock->tx_lock);
+
        mutex_lock(&vsock->rx_lock);
        virtio_vsock_rx_fill(vsock);
+       vsock->rx_run = true;
        mutex_unlock(&vsock->rx_lock);
 
        mutex_lock(&vsock->event_lock);
        virtio_vsock_event_fill(vsock);
+       vsock->event_run = true;
        mutex_unlock(&vsock->event_lock);
 
        vdev->priv = vsock;
@@ -661,6 +691,24 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
        /* Reset all connected sockets when the device disappear */
        vsock_for_each_connected_socket(virtio_vsock_reset_sock);
 
+       /* Stop all work handlers to make sure no one is accessing the device,
+        * so we can safely call vdev->config->reset().
+        */
+       mutex_lock(&vsock->rx_lock);
+       vsock->rx_run = false;
+       mutex_unlock(&vsock->rx_lock);
+
+       mutex_lock(&vsock->tx_lock);
+       vsock->tx_run = false;
+       mutex_unlock(&vsock->tx_lock);
+
+       mutex_lock(&vsock->event_lock);
+       vsock->event_run = false;
+       mutex_unlock(&vsock->event_lock);
+
+       /* Flush all device writes and interrupts, device will not use any
+        * more buffers.
+        */
        vdev->config->reset(vdev);
 
        mutex_lock(&vsock->rx_lock);
@@ -691,6 +739,7 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
        }
        spin_unlock_bh(&vsock->loopback_list_lock);
 
+       /* Delete virtqueues and flush outstanding callbacks if any */
        vdev->config->del_vqs(vdev);
 
        mutex_unlock(&the_virtio_vsock_mutex);
-- 
2.25.1


