Access to the vsock pointer is not protected by vhost_vsock_lock. This
may lead to a use-after-free, since vhost_vsock_dev_release() may free
the pointer at the same time.

Fix this by holding vhost_vsock_lock for the duration of the access.

Reported-by: syzbot+e3e074963495f92a8...@syzkaller.appspotmail.com
Fixes: 16320f363ae1 ("vhost-vsock: add pkt cancel capability")
Fixes: 433fc58e6bf2 ("VSOCK: Introduce vhost_vsock.ko")
Cc: Stefan Hajnoczi <stefa...@redhat.com>
Signed-off-by: Jason Wang <jasow...@redhat.com>
---
- V2: fix typos
- The patch is needed for -stable.
---
 drivers/vhost/vsock.c | 26 +++++++++++++++++++-------
 1 file changed, 19 insertions(+), 7 deletions(-)

diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 34bc3ab40c6d..7d0b292867fd 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -210,21 +210,27 @@ vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
        struct vhost_vsock *vsock;
        int len = pkt->len;
 
+       spin_lock_bh(&vhost_vsock_lock);
+
        /* Find the vhost_vsock according to guest context id  */
-       vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
+       vsock = __vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
        if (!vsock) {
                virtio_transport_free_pkt(pkt);
+               spin_unlock_bh(&vhost_vsock_lock);
                return -ENODEV;
        }
 
        if (pkt->reply)
                atomic_inc(&vsock->queued_replies);
 
-       spin_lock_bh(&vsock->send_pkt_list_lock);
+       spin_lock(&vsock->send_pkt_list_lock);
        list_add_tail(&pkt->list, &vsock->send_pkt_list);
-       spin_unlock_bh(&vsock->send_pkt_list_lock);
+       spin_unlock(&vsock->send_pkt_list_lock);
 
        vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);
+
+       spin_unlock_bh(&vhost_vsock_lock);
+
        return len;
 }
 
@@ -236,18 +242,22 @@ vhost_transport_cancel_pkt(struct vsock_sock *vsk)
        int cnt = 0;
        LIST_HEAD(freeme);
 
+       spin_lock_bh(&vhost_vsock_lock);
+
        /* Find the vhost_vsock according to guest context id  */
-       vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
-       if (!vsock)
+       vsock = __vhost_vsock_get(vsk->remote_addr.svm_cid);
+       if (!vsock) {
+               spin_unlock_bh(&vhost_vsock_lock);
                return -ENODEV;
+       }
 
-       spin_lock_bh(&vsock->send_pkt_list_lock);
+       spin_lock(&vsock->send_pkt_list_lock);
        list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
                if (pkt->vsk != vsk)
                        continue;
                list_move(&pkt->list, &freeme);
        }
-       spin_unlock_bh(&vsock->send_pkt_list_lock);
+       spin_unlock(&vsock->send_pkt_list_lock);
 
        list_for_each_entry_safe(pkt, n, &freeme, list) {
                if (pkt->reply)
@@ -265,6 +275,8 @@ vhost_transport_cancel_pkt(struct vsock_sock *vsk)
                        vhost_poll_queue(&tx_vq->poll);
        }
 
+       spin_unlock_bh(&vhost_vsock_lock);
+
        return 0;
 }
 
-- 
2.17.1

Reply via email to