On Tue, Feb 03, 2026 at 10:13:51AM +0800, Li Chen wrote:
> Under heavy concurrent flush traffic, virtio-pmem can overflow its request
> virtqueue (req_vq): virtqueue_add_sgs() starts returning -ENOSPC and the
> driver logs "no free slots in the virtqueue". Shortly after that the
> device enters VIRTIO_CONFIG_S_NEEDS_RESET and flush requests fail with
> "virtio pmem device needs a reset".
>
> Serialize virtio_pmem_flush() with a per-device mutex so only one flush
> request is in flight at a time. This prevents req_vq descriptor overflow
> under high concurrency.
>
> Reproducer (guest with virtio-pmem):
> - mkfs.ext4 -F /dev/pmem0
> - mount -t ext4 -o dax,noatime /dev/pmem0 /mnt/bench
> - fio: ioengine=io_uring rw=randwrite bs=4k iodepth=64 numjobs=64
> direct=1 fsync=1 runtime=30s time_based=1
> - dmesg: "no free slots in the virtqueue"
> "virtio pmem device needs a reset"
>
> Fixes: 6e84200c0a29 ("virtio-pmem: Add virtio pmem driver")
> Signed-off-by: Li Chen <[email protected]>
Thanks! The commit message looks good now and includes the
reproducer.

Acked-by: Michael S. Tsirkin <[email protected]>

Ira, are you picking this up?
> ---
> v2:
> - Use guard(mutex)() for flush_lock (as suggested by Ira Weiny).
> - Drop the might_sleep() made redundant by guard(mutex)() (as suggested
>   by Michael S. Tsirkin).
>
> drivers/nvdimm/nd_virtio.c | 3 ++-
> drivers/nvdimm/virtio_pmem.c | 1 +
> drivers/nvdimm/virtio_pmem.h | 4 ++++
> 3 files changed, 7 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/nvdimm/nd_virtio.c b/drivers/nvdimm/nd_virtio.c
> index c3f07be4aa22..af82385be7c6 100644
> --- a/drivers/nvdimm/nd_virtio.c
> +++ b/drivers/nvdimm/nd_virtio.c
> @@ -44,6 +44,8 @@ static int virtio_pmem_flush(struct nd_region *nd_region)
> unsigned long flags;
> int err, err1;
>
> + guard(mutex)(&vpmem->flush_lock);
> +
> /*
> * Don't bother to submit the request to the device if the device is
> * not activated.
> @@ -53,7 +55,6 @@ static int virtio_pmem_flush(struct nd_region *nd_region)
> return -EIO;
> }
>
> - might_sleep();
> req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
> if (!req_data)
> return -ENOMEM;
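As an aside for readers who have not used guard() yet: it is built on the
scope-based cleanup infrastructure in <linux/cleanup.h>, so the mutex taken
here is released automatically on every return path, and mutex_lock()
already carries a might_sleep() annotation, which is why dropping the
explicit one is fine. Below is a rough, self-contained userspace sketch of
the same pattern using the compiler's cleanup attribute; the names
(guard_mutex, serialized_flush, flush_lock, device_ready) are made up for
illustration and this is not the kernel implementation.

/*
 * Illustrative sketch only: a scope-based mutex guard in userspace,
 * mirroring what guard(mutex)() does for struct mutex in the kernel.
 */
#include <pthread.h>
#include <stdio.h>

static void mutex_unlocker(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);	/* runs automatically at scope exit */
}

/* Take the lock now; the cleanup attribute drops it when the scope ends. */
#define guard_mutex(m)							\
	pthread_mutex_t *_guard						\
		__attribute__((cleanup(mutex_unlocker))) = (m);		\
	pthread_mutex_lock(_guard)

static pthread_mutex_t flush_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for virtio_pmem_flush(): callers are fully serialized. */
static int serialized_flush(int device_ready)
{
	guard_mutex(&flush_lock);

	if (!device_ready)
		return -1;		/* early return still unlocks */

	printf("flush submitted\n");	/* pretend to talk to the device */
	return 0;			/* ... and so does this return */
}

int main(void)
{
	return serialized_flush(1);
}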
> diff --git a/drivers/nvdimm/virtio_pmem.c b/drivers/nvdimm/virtio_pmem.c
> index 2396d19ce549..77b196661905 100644
> --- a/drivers/nvdimm/virtio_pmem.c
> +++ b/drivers/nvdimm/virtio_pmem.c
> @@ -64,6 +64,7 @@ static int virtio_pmem_probe(struct virtio_device *vdev)
> goto out_err;
> }
>
> + mutex_init(&vpmem->flush_lock);
> vpmem->vdev = vdev;
> vdev->priv = vpmem;
> err = init_vq(vpmem);
> diff --git a/drivers/nvdimm/virtio_pmem.h b/drivers/nvdimm/virtio_pmem.h
> index 0dddefe594c4..f72cf17f9518 100644
> --- a/drivers/nvdimm/virtio_pmem.h
> +++ b/drivers/nvdimm/virtio_pmem.h
> @@ -13,6 +13,7 @@
> #include <linux/module.h>
> #include <uapi/linux/virtio_pmem.h>
> #include <linux/libnvdimm.h>
> +#include <linux/mutex.h>
> #include <linux/spinlock.h>
>
> struct virtio_pmem_request {
> @@ -35,6 +36,9 @@ struct virtio_pmem {
> /* Virtio pmem request queue */
> struct virtqueue *req_vq;
>
> + /* Serialize flush requests to the device. */
> + struct mutex flush_lock;
> +
> /* nvdimm bus registers virtio pmem device */
> struct nvdimm_bus *nvdimm_bus;
> struct nvdimm_bus_descriptor nd_desc;
> --
> 2.52.0