We used to call irq_bypass_unregister_producer() in
vhost_vdpa_setup_vq_irq(), which is problematic as we don't know
whether the token pointer is still valid at that point.

Since we use the eventfd_ctx as the token, the life cycle of the token
should be bound to VHOST_SET_VRING_CALL rather than to
vhost_vdpa_setup_vq_irq(), which could be called from set_status().

Fix this by setting up the irq bypass producer's token when handling
VHOST_SET_VRING_CALL and unregistering the producer before calling
vhost_vring_ioctl(), to prevent a possible use-after-free since the
eventfd could have been released in vhost_vring_ioctl(). Such
registering and unregistering is only done when DRIVER_OK is set.

Reported-by: Dragos Tatulea <dtatu...@nvidia.com>
Tested-by: Dragos Tatulea <dtatu...@nvidia.com>
Reviewed-by: Dragos Tatulea <dtatu...@nvidia.com>
Fixes: 2cf1ba9a4d15 ("vhost_vdpa: implement IRQ offloading in vhost_vdpa")
Signed-off-by: Jason Wang <jasow...@redhat.com>
---
Changes since RFC:
- only do the reg/dereg of the producer when DRIVER_OK is set
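
For clarity, here is a condensed sketch of the resulting flow in
vhost_vdpa_vring_ioctl() after this patch (not the literal diff; the
hunk contexts below are authoritative):

	/* Before forwarding to vhost_vring_ioctl(): drop the old producer
	 * so its token (the old eventfd) cannot be used after free.
	 */
	case VHOST_SET_VRING_CALL:
		if (vq->call_ctx.ctx) {
			if (ops->get_status(vdpa) & VIRTIO_CONFIG_S_DRIVER_OK)
				vhost_vdpa_unsetup_vq_irq(v, idx);
			vq->call_ctx.producer.token = NULL;
		}
		break;

	/* ... vhost_vring_ioctl() may release the old eventfd here ... */

	/* After the new eventfd is installed: use it as the token and
	 * re-register the producer, but only while DRIVER_OK is set.
	 */
	if (vq->call_ctx.ctx) {
		vq->call_ctx.producer.token = vq->call_ctx.ctx;
		if (ops->get_status(vdpa) & VIRTIO_CONFIG_S_DRIVER_OK)
			vhost_vdpa_setup_vq_irq(v, idx);
	}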
---
 drivers/vhost/vdpa.c | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)

diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index e31ec9ebc4ce..ab441b8ccd2e 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -209,11 +209,9 @@ static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
        if (irq < 0)
                return;
 
-       irq_bypass_unregister_producer(&vq->call_ctx.producer);
        if (!vq->call_ctx.ctx)
                return;
 
-       vq->call_ctx.producer.token = vq->call_ctx.ctx;
        vq->call_ctx.producer.irq = irq;
        ret = irq_bypass_register_producer(&vq->call_ctx.producer);
        if (unlikely(ret))
@@ -709,6 +707,14 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
                        vq->last_avail_idx = vq_state.split.avail_index;
                }
                break;
+       case VHOST_SET_VRING_CALL:
+               if (vq->call_ctx.ctx) {
+                       if (ops->get_status(vdpa) &
+                           VIRTIO_CONFIG_S_DRIVER_OK)
+                               vhost_vdpa_unsetup_vq_irq(v, idx);
+                       vq->call_ctx.producer.token = NULL;
+               }
+               break;
        }
 
        r = vhost_vring_ioctl(&v->vdev, cmd, argp);
@@ -747,13 +753,16 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
                        cb.callback = vhost_vdpa_virtqueue_cb;
                        cb.private = vq;
                        cb.trigger = vq->call_ctx.ctx;
+                       vq->call_ctx.producer.token = vq->call_ctx.ctx;
+                       if (ops->get_status(vdpa) &
+                           VIRTIO_CONFIG_S_DRIVER_OK)
+                               vhost_vdpa_setup_vq_irq(v, idx);
                } else {
                        cb.callback = NULL;
                        cb.private = NULL;
                        cb.trigger = NULL;
                }
                ops->set_vq_cb(vdpa, idx, &cb);
-               vhost_vdpa_setup_vq_irq(v, idx);
                break;
 
        case VHOST_SET_VRING_NUM:
@@ -1419,6 +1428,7 @@ static int vhost_vdpa_open(struct inode *inode, struct file *filep)
        for (i = 0; i < nvqs; i++) {
                vqs[i] = &v->vqs[i];
                vqs[i]->handle_kick = handle_vq_kick;
+               vqs[i]->call_ctx.ctx = NULL;
        }
        vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false,
                       vhost_vdpa_process_iotlb_msg);
-- 
2.31.1

