[PATCH v9 23/32] virtio_pci: queue_reset: support VIRTIO_F_RING_RESET

2022-04-05 Thread Xuan Zhuo
This patch implements virtio pci support for QUEUE RESET.

Performing reset on a queue is divided into these steps:

 1. notify the device to reset the queue
 2. recycle the buffer submitted
 3. reset the vring (may re-alloc)
 4. mmap vring to device, and enable the queue

This patch implements virtio_reset_vq(), virtio_enable_resetq() in the
pci scenario.

Signed-off-by: Xuan Zhuo 
---
 drivers/virtio/virtio_pci_common.c |  8 +--
 drivers/virtio/virtio_pci_modern.c | 84 ++
 drivers/virtio/virtio_ring.c   |  2 +
 include/linux/virtio.h |  1 +
 4 files changed, 92 insertions(+), 3 deletions(-)

diff --git a/drivers/virtio/virtio_pci_common.c 
b/drivers/virtio/virtio_pci_common.c
index fdbde1db5ec5..863d3a8a0956 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -248,9 +248,11 @@ static void vp_del_vq(struct virtqueue *vq)
struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
unsigned long flags;
 
-   spin_lock_irqsave(&vp_dev->lock, flags);
-   list_del(&info->node);
-   spin_unlock_irqrestore(&vp_dev->lock, flags);
+   if (!vq->reset) {
+   spin_lock_irqsave(&vp_dev->lock, flags);
+   list_del(&info->node);
+   spin_unlock_irqrestore(&vp_dev->lock, flags);
+   }
 
vp_dev->del_vq(info);
kfree(info);
diff --git a/drivers/virtio/virtio_pci_modern.c 
b/drivers/virtio/virtio_pci_modern.c
index 49a4493732cf..cb5d38f1c9c8 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -34,6 +34,9 @@ static void vp_transport_features(struct virtio_device *vdev, 
u64 features)
if ((features & BIT_ULL(VIRTIO_F_SR_IOV)) &&
pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV))
__virtio_set_bit(vdev, VIRTIO_F_SR_IOV);
+
+   if (features & BIT_ULL(VIRTIO_F_RING_RESET))
+   __virtio_set_bit(vdev, VIRTIO_F_RING_RESET);
 }
 
 /* virtio config->finalize_features() implementation */
@@ -199,6 +202,83 @@ static int vp_active_vq(struct virtqueue *vq, u16 msix_vec)
return 0;
 }
 
+static int vp_modern_reset_vq(struct virtqueue *vq)
+{
+   struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
+   struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+   struct virtio_pci_vq_info *info;
+   unsigned long flags;
+
+   if (!virtio_has_feature(vq->vdev, VIRTIO_F_RING_RESET))
+   return -ENOENT;
+
+   vp_modern_set_queue_reset(mdev, vq->index);
+
+   info = vp_dev->vqs[vq->index];
+
+   /* delete vq from irq handler */
+   spin_lock_irqsave(&vp_dev->lock, flags);
+   list_del(&info->node);
+   spin_unlock_irqrestore(&vp_dev->lock, flags);
+
+   INIT_LIST_HEAD(&info->node);
+
+   /* For the case where vq has an exclusive irq, to prevent the irq from
+* being received again and the pending irq, call disable_irq().
+*
+* In the scenario based on shared interrupts, vq will be searched from
+* the queue virtqueues. Since the previous list_del() has been deleted
+* from the queue, it is impossible for vq to be called in this case.
+* There is no need to close the corresponding interrupt.
+*/
+   if (vp_dev->per_vq_vectors && info->msix_vector != VIRTIO_MSI_NO_VECTOR)
+   disable_irq(pci_irq_vector(vp_dev->pci_dev, info->msix_vector));
+
+   vq->reset = true;
+
+   return 0;
+}
+
+static int vp_modern_enable_reset_vq(struct virtqueue *vq)
+{
+   struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
+   struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+   struct virtio_pci_vq_info *info;
+   unsigned long flags, index;
+   int err;
+
+   if (!vq->reset)
+   return -EBUSY;
+
+   index = vq->index;
+   info = vp_dev->vqs[index];
+
+   /* check queue reset status */
+   if (vp_modern_get_queue_reset(mdev, index) != 1)
+   return -EBUSY;
+
+   err = vp_active_vq(vq, info->msix_vector);
+   if (err)
+   return err;
+
+   if (vq->callback) {
+   spin_lock_irqsave(&vp_dev->lock, flags);
+   list_add(&info->node, &vp_dev->virtqueues);
+   spin_unlock_irqrestore(&vp_dev->lock, flags);
+   } else {
+   INIT_LIST_HEAD(&info->node);
+   }
+
+   vp_modern_set_queue_enable(&vp_dev->mdev, index, true);
+
+   if (vp_dev->per_vq_vectors && info->msix_vector != VIRTIO_MSI_NO_VECTOR)
+   enable_irq(pci_irq_vector(vp_dev->pci_dev, info->msix_vector));
+
+   vq->reset = false;
+
+   return 0;
+}
+
 static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
 {
return vp_modern_config_vector(&vp_dev->mdev, vector);
@@ -407,6 +487,8 @@ static const struct virtio_config_ops 
virtio_pci_config_nodev_ops = {
.set_vq_affinity = vp_set_vq

Re: [PATCH v9 23/32] virtio_pci: queue_reset: support VIRTIO_F_RING_RESET

2022-04-12 Thread Jason Wang


在 2022/4/6 上午11:43, Xuan Zhuo 写道:

This patch implements virtio pci support for QUEUE RESET.

Performing reset on a queue is divided into these steps:

  1. notify the device to reset the queue
  2. recycle the buffer submitted
  3. reset the vring (may re-alloc)
  4. mmap vring to device, and enable the queue

This patch implements virtio_reset_vq(), virtio_enable_resetq() in the
pci scenario.

Signed-off-by: Xuan Zhuo 
---
  drivers/virtio/virtio_pci_common.c |  8 +--
  drivers/virtio/virtio_pci_modern.c | 84 ++
  drivers/virtio/virtio_ring.c   |  2 +
  include/linux/virtio.h |  1 +
  4 files changed, 92 insertions(+), 3 deletions(-)

diff --git a/drivers/virtio/virtio_pci_common.c 
b/drivers/virtio/virtio_pci_common.c
index fdbde1db5ec5..863d3a8a0956 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -248,9 +248,11 @@ static void vp_del_vq(struct virtqueue *vq)
struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
unsigned long flags;
  
-	spin_lock_irqsave(&vp_dev->lock, flags);

-   list_del(&info->node);
-   spin_unlock_irqrestore(&vp_dev->lock, flags);
+   if (!vq->reset) {



On which condition that we may hit this path?



+   spin_lock_irqsave(&vp_dev->lock, flags);
+   list_del(&info->node);
+   spin_unlock_irqrestore(&vp_dev->lock, flags);
+   }
  
  	vp_dev->del_vq(info);

kfree(info);
diff --git a/drivers/virtio/virtio_pci_modern.c 
b/drivers/virtio/virtio_pci_modern.c
index 49a4493732cf..cb5d38f1c9c8 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -34,6 +34,9 @@ static void vp_transport_features(struct virtio_device *vdev, 
u64 features)
if ((features & BIT_ULL(VIRTIO_F_SR_IOV)) &&
pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV))
__virtio_set_bit(vdev, VIRTIO_F_SR_IOV);
+
+   if (features & BIT_ULL(VIRTIO_F_RING_RESET))
+   __virtio_set_bit(vdev, VIRTIO_F_RING_RESET);
  }
  
  /* virtio config->finalize_features() implementation */

@@ -199,6 +202,83 @@ static int vp_active_vq(struct virtqueue *vq, u16 msix_vec)
return 0;
  }
  
+static int vp_modern_reset_vq(struct virtqueue *vq)

+{
+   struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
+   struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+   struct virtio_pci_vq_info *info;
+   unsigned long flags;
+
+   if (!virtio_has_feature(vq->vdev, VIRTIO_F_RING_RESET))
+   return -ENOENT;
+
+   vp_modern_set_queue_reset(mdev, vq->index);
+
+   info = vp_dev->vqs[vq->index];
+
+   /* delete vq from irq handler */
+   spin_lock_irqsave(&vp_dev->lock, flags);
+   list_del(&info->node);
+   spin_unlock_irqrestore(&vp_dev->lock, flags);
+
+   INIT_LIST_HEAD(&info->node);
+
+   /* For the case where vq has an exclusive irq, to prevent the irq from
+* being received again and the pending irq, call disable_irq().
+*
+* In the scenario based on shared interrupts, vq will be searched from
+* the queue virtqueues. Since the previous list_del() has been deleted
+* from the queue, it is impossible for vq to be called in this case.
+* There is no need to close the corresponding interrupt.
+*/
+   if (vp_dev->per_vq_vectors && info->msix_vector != VIRTIO_MSI_NO_VECTOR)
+   disable_irq(pci_irq_vector(vp_dev->pci_dev, info->msix_vector));



See the previous discussion and the revert of the first try to harden 
the interrupt. We probably can't use disable_irq() since it conflicts 
with the affinity managed IRQ that is used by some drivers.


We need to use synchronize_irq() and a per-virtqueue flag instead. As 
mentioned in previous patches, this could be done on top of my rework on 
the IRQ hardening.




+
+   vq->reset = true;
+
+   return 0;
+}
+
+static int vp_modern_enable_reset_vq(struct virtqueue *vq)
+{
+   struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
+   struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+   struct virtio_pci_vq_info *info;
+   unsigned long flags, index;
+   int err;
+
+   if (!vq->reset)
+   return -EBUSY;
+
+   index = vq->index;
+   info = vp_dev->vqs[index];
+
+   /* check queue reset status */
+   if (vp_modern_get_queue_reset(mdev, index) != 1)
+   return -EBUSY;
+
+   err = vp_active_vq(vq, info->msix_vector);
+   if (err)
+   return err;
+
+   if (vq->callback) {
+   spin_lock_irqsave(&vp_dev->lock, flags);
+   list_add(&info->node, &vp_dev->virtqueues);
+   spin_unlock_irqrestore(&vp_dev->lock, flags);
+   } else {
+   INIT_LIST_HEAD(&info->node);
+   }
+
+   vp_modern_set_queue_enable(&vp_dev->mdev, index, true);
+
+   if (

Re: [PATCH v9 23/32] virtio_pci: queue_reset: support VIRTIO_F_RING_RESET

2022-04-12 Thread Xuan Zhuo
On Tue, 12 Apr 2022 15:07:58 +0800, Jason Wang  wrote:
>
> 在 2022/4/6 上午11:43, Xuan Zhuo 写道:
> > This patch implements virtio pci support for QUEUE RESET.
> >
> > Performing reset on a queue is divided into these steps:
> >
> >   1. notify the device to reset the queue
> >   2. recycle the buffer submitted
> >   3. reset the vring (may re-alloc)
> >   4. mmap vring to device, and enable the queue
> >
> > This patch implements virtio_reset_vq(), virtio_enable_resetq() in the
> > pci scenario.
> >
> > Signed-off-by: Xuan Zhuo 
> > ---
> >   drivers/virtio/virtio_pci_common.c |  8 +--
> >   drivers/virtio/virtio_pci_modern.c | 84 ++
> >   drivers/virtio/virtio_ring.c   |  2 +
> >   include/linux/virtio.h |  1 +
> >   4 files changed, 92 insertions(+), 3 deletions(-)
> >
> > diff --git a/drivers/virtio/virtio_pci_common.c 
> > b/drivers/virtio/virtio_pci_common.c
> > index fdbde1db5ec5..863d3a8a0956 100644
> > --- a/drivers/virtio/virtio_pci_common.c
> > +++ b/drivers/virtio/virtio_pci_common.c
> > @@ -248,9 +248,11 @@ static void vp_del_vq(struct virtqueue *vq)
> > struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
> > unsigned long flags;
> >
> > -   spin_lock_irqsave(&vp_dev->lock, flags);
> > -   list_del(&info->node);
> > -   spin_unlock_irqrestore(&vp_dev->lock, flags);
> > +   if (!vq->reset) {
>
>
> On which condition that we may hit this path?
>
>
> > +   spin_lock_irqsave(&vp_dev->lock, flags);
> > +   list_del(&info->node);
> > +   spin_unlock_irqrestore(&vp_dev->lock, flags);
> > +   }
> >
> > vp_dev->del_vq(info);
> > kfree(info);
> > diff --git a/drivers/virtio/virtio_pci_modern.c 
> > b/drivers/virtio/virtio_pci_modern.c
> > index 49a4493732cf..cb5d38f1c9c8 100644
> > --- a/drivers/virtio/virtio_pci_modern.c
> > +++ b/drivers/virtio/virtio_pci_modern.c
> > @@ -34,6 +34,9 @@ static void vp_transport_features(struct virtio_device 
> > *vdev, u64 features)
> > if ((features & BIT_ULL(VIRTIO_F_SR_IOV)) &&
> > pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV))
> > __virtio_set_bit(vdev, VIRTIO_F_SR_IOV);
> > +
> > +   if (features & BIT_ULL(VIRTIO_F_RING_RESET))
> > +   __virtio_set_bit(vdev, VIRTIO_F_RING_RESET);
> >   }
> >
> >   /* virtio config->finalize_features() implementation */
> > @@ -199,6 +202,83 @@ static int vp_active_vq(struct virtqueue *vq, u16 
> > msix_vec)
> > return 0;
> >   }
> >
> > +static int vp_modern_reset_vq(struct virtqueue *vq)
> > +{
> > +   struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
> > +   struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
> > +   struct virtio_pci_vq_info *info;
> > +   unsigned long flags;
> > +
> > +   if (!virtio_has_feature(vq->vdev, VIRTIO_F_RING_RESET))
> > +   return -ENOENT;
> > +
> > +   vp_modern_set_queue_reset(mdev, vq->index);
> > +
> > +   info = vp_dev->vqs[vq->index];
> > +
> > +   /* delete vq from irq handler */
> > +   spin_lock_irqsave(&vp_dev->lock, flags);
> > +   list_del(&info->node);
> > +   spin_unlock_irqrestore(&vp_dev->lock, flags);
> > +
> > +   INIT_LIST_HEAD(&info->node);
> > +
> > +   /* For the case where vq has an exclusive irq, to prevent the irq from
> > +* being received again and the pending irq, call disable_irq().
> > +*
> > +* In the scenario based on shared interrupts, vq will be searched from
> > +* the queue virtqueues. Since the previous list_del() has been deleted
> > +* from the queue, it is impossible for vq to be called in this case.
> > +* There is no need to close the corresponding interrupt.
> > +*/
> > +   if (vp_dev->per_vq_vectors && info->msix_vector != VIRTIO_MSI_NO_VECTOR)
> > +   disable_irq(pci_irq_vector(vp_dev->pci_dev, info->msix_vector));
>
>
> See the previous discussion and the revert of the first try to harden
> the interrupt. We probably can't use disable_irq() since it conflicts
> with the affinity managed IRQ that is used by some drivers.
>
> We need to use synchronize_irq() and a per-virtqueue flag instead. As
> mentioned in previous patches, this could be done on top of my rework on
> the IRQ hardening.

OK, the next version will contain hardened features by per virtqueue flag.

Thanks.

>
>
> > +
> > +   vq->reset = true;
> > +
> > +   return 0;
> > +}
> > +
> > +static int vp_modern_enable_reset_vq(struct virtqueue *vq)
> > +{
> > +   struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
> > +   struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
> > +   struct virtio_pci_vq_info *info;
> > +   unsigned long flags, index;
> > +   int err;
> > +
> > +   if (!vq->reset)
> > +   return -EBUSY;
> > +
> > +   index = vq->index;
> > +   info = vp_dev->vqs[index];
> > +
> > +   /* check queue reset status */
> > +   if (vp_modern_get_queue_reset(mdev, index) != 1)
> > +   return -EBUSY;
> > +
> > +   err = vp_active_vq(vq, info->msix_vector);
> > +   if (err)

Re: [PATCH v9 23/32] virtio_pci: queue_reset: support VIRTIO_F_RING_RESET

2022-04-13 Thread Xuan Zhuo
On Tue, 12 Apr 2022 15:07:58 +0800, Jason Wang  wrote:
>
> 在 2022/4/6 上午11:43, Xuan Zhuo 写道:
> > This patch implements virtio pci support for QUEUE RESET.
> >
> > Performing reset on a queue is divided into these steps:
> >
> >   1. notify the device to reset the queue
> >   2. recycle the buffer submitted
> >   3. reset the vring (may re-alloc)
> >   4. mmap vring to device, and enable the queue
> >
> > This patch implements virtio_reset_vq(), virtio_enable_resetq() in the
> > pci scenario.
> >
> > Signed-off-by: Xuan Zhuo 
> > ---
> >   drivers/virtio/virtio_pci_common.c |  8 +--
> >   drivers/virtio/virtio_pci_modern.c | 84 ++
> >   drivers/virtio/virtio_ring.c   |  2 +
> >   include/linux/virtio.h |  1 +
> >   4 files changed, 92 insertions(+), 3 deletions(-)
> >
> > diff --git a/drivers/virtio/virtio_pci_common.c 
> > b/drivers/virtio/virtio_pci_common.c
> > index fdbde1db5ec5..863d3a8a0956 100644
> > --- a/drivers/virtio/virtio_pci_common.c
> > +++ b/drivers/virtio/virtio_pci_common.c
> > @@ -248,9 +248,11 @@ static void vp_del_vq(struct virtqueue *vq)
> > struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
> > unsigned long flags;
> >
> > -   spin_lock_irqsave(&vp_dev->lock, flags);
> > -   list_del(&info->node);
> > -   spin_unlock_irqrestore(&vp_dev->lock, flags);
> > +   if (!vq->reset) {
>
>
> On which condition that we may hit this path?

As discussed in patch 31, it may fail when re-enabling the vq.

Thanks.

>
>
> > +   spin_lock_irqsave(&vp_dev->lock, flags);
> > +   list_del(&info->node);
> > +   spin_unlock_irqrestore(&vp_dev->lock, flags);
> > +   }
> >
> > vp_dev->del_vq(info);
> > kfree(info);
> > diff --git a/drivers/virtio/virtio_pci_modern.c 
> > b/drivers/virtio/virtio_pci_modern.c
> > index 49a4493732cf..cb5d38f1c9c8 100644
> > --- a/drivers/virtio/virtio_pci_modern.c
> > +++ b/drivers/virtio/virtio_pci_modern.c
> > @@ -34,6 +34,9 @@ static void vp_transport_features(struct virtio_device 
> > *vdev, u64 features)
> > if ((features & BIT_ULL(VIRTIO_F_SR_IOV)) &&
> > pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV))
> > __virtio_set_bit(vdev, VIRTIO_F_SR_IOV);
> > +
> > +   if (features & BIT_ULL(VIRTIO_F_RING_RESET))
> > +   __virtio_set_bit(vdev, VIRTIO_F_RING_RESET);
> >   }
> >
> >   /* virtio config->finalize_features() implementation */
> > @@ -199,6 +202,83 @@ static int vp_active_vq(struct virtqueue *vq, u16 
> > msix_vec)
> > return 0;
> >   }
> >
> > +static int vp_modern_reset_vq(struct virtqueue *vq)
> > +{
> > +   struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
> > +   struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
> > +   struct virtio_pci_vq_info *info;
> > +   unsigned long flags;
> > +
> > +   if (!virtio_has_feature(vq->vdev, VIRTIO_F_RING_RESET))
> > +   return -ENOENT;
> > +
> > +   vp_modern_set_queue_reset(mdev, vq->index);
> > +
> > +   info = vp_dev->vqs[vq->index];
> > +
> > +   /* delete vq from irq handler */
> > +   spin_lock_irqsave(&vp_dev->lock, flags);
> > +   list_del(&info->node);
> > +   spin_unlock_irqrestore(&vp_dev->lock, flags);
> > +
> > +   INIT_LIST_HEAD(&info->node);
> > +
> > +   /* For the case where vq has an exclusive irq, to prevent the irq from
> > +* being received again and the pending irq, call disable_irq().
> > +*
> > +* In the scenario based on shared interrupts, vq will be searched from
> > +* the queue virtqueues. Since the previous list_del() has been deleted
> > +* from the queue, it is impossible for vq to be called in this case.
> > +* There is no need to close the corresponding interrupt.
> > +*/
> > +   if (vp_dev->per_vq_vectors && info->msix_vector != VIRTIO_MSI_NO_VECTOR)
> > +   disable_irq(pci_irq_vector(vp_dev->pci_dev, info->msix_vector));
>
>
> See the previous discussion and the revert of the first try to harden
> the interrupt. We probably can't use disable_irq() since it conflicts
> with the affinity managed IRQ that is used by some drivers.
>
> We need to use synchronize_irq() and a per-virtqueue flag instead. As
> mentioned in previous patches, this could be done on top of my rework on
> the IRQ hardening.
>
>
> > +
> > +   vq->reset = true;
> > +
> > +   return 0;
> > +}
> > +
> > +static int vp_modern_enable_reset_vq(struct virtqueue *vq)
> > +{
> > +   struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
> > +   struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
> > +   struct virtio_pci_vq_info *info;
> > +   unsigned long flags, index;
> > +   int err;
> > +
> > +   if (!vq->reset)
> > +   return -EBUSY;
> > +
> > +   index = vq->index;
> > +   info = vp_dev->vqs[index];
> > +
> > +   /* check queue reset status */
> > +   if (vp_modern_get_queue_reset(mdev, index) != 1)
> > +   return -EBUSY;
> > +
> > +   err = vp_active_vq(vq, info->msix_vector);
> > +   if (err)
> > +   ret

Re: [PATCH v9 23/32] virtio_pci: queue_reset: support VIRTIO_F_RING_RESET

2022-04-14 Thread Jason Wang
On Wed, Apr 13, 2022 at 11:23 AM Xuan Zhuo  wrote:
>
> On Tue, 12 Apr 2022 15:07:58 +0800, Jason Wang  wrote:
> >
> > 在 2022/4/6 上午11:43, Xuan Zhuo 写道:
> > > This patch implements virtio pci support for QUEUE RESET.
> > >
> > > Performing reset on a queue is divided into these steps:
> > >
> > >   1. notify the device to reset the queue
> > >   2. recycle the buffer submitted
> > >   3. reset the vring (may re-alloc)
> > >   4. mmap vring to device, and enable the queue
> > >
> > > This patch implements virtio_reset_vq(), virtio_enable_resetq() in the
> > > pci scenario.
> > >
> > > Signed-off-by: Xuan Zhuo 
> > > ---
> > >   drivers/virtio/virtio_pci_common.c |  8 +--
> > >   drivers/virtio/virtio_pci_modern.c | 84 ++
> > >   drivers/virtio/virtio_ring.c   |  2 +
> > >   include/linux/virtio.h |  1 +
> > >   4 files changed, 92 insertions(+), 3 deletions(-)
> > >
> > > diff --git a/drivers/virtio/virtio_pci_common.c 
> > > b/drivers/virtio/virtio_pci_common.c
> > > index fdbde1db5ec5..863d3a8a0956 100644
> > > --- a/drivers/virtio/virtio_pci_common.c
> > > +++ b/drivers/virtio/virtio_pci_common.c
> > > @@ -248,9 +248,11 @@ static void vp_del_vq(struct virtqueue *vq)
> > > struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
> > > unsigned long flags;
> > >
> > > -   spin_lock_irqsave(&vp_dev->lock, flags);
> > > -   list_del(&info->node);
> > > -   spin_unlock_irqrestore(&vp_dev->lock, flags);
> > > +   if (!vq->reset) {
> >
> >
> > On which condition that we may hit this path?
> >
> >
> > > +   spin_lock_irqsave(&vp_dev->lock, flags);
> > > +   list_del(&info->node);
> > > +   spin_unlock_irqrestore(&vp_dev->lock, flags);
> > > +   }
> > >
> > > vp_dev->del_vq(info);
> > > kfree(info);
> > > diff --git a/drivers/virtio/virtio_pci_modern.c 
> > > b/drivers/virtio/virtio_pci_modern.c
> > > index 49a4493732cf..cb5d38f1c9c8 100644
> > > --- a/drivers/virtio/virtio_pci_modern.c
> > > +++ b/drivers/virtio/virtio_pci_modern.c
> > > @@ -34,6 +34,9 @@ static void vp_transport_features(struct virtio_device 
> > > *vdev, u64 features)
> > > if ((features & BIT_ULL(VIRTIO_F_SR_IOV)) &&
> > > pci_find_ext_capability(pci_dev, 
> > > PCI_EXT_CAP_ID_SRIOV))
> > > __virtio_set_bit(vdev, VIRTIO_F_SR_IOV);
> > > +
> > > +   if (features & BIT_ULL(VIRTIO_F_RING_RESET))
> > > +   __virtio_set_bit(vdev, VIRTIO_F_RING_RESET);
> > >   }
> > >
> > >   /* virtio config->finalize_features() implementation */
> > > @@ -199,6 +202,83 @@ static int vp_active_vq(struct virtqueue *vq, u16 
> > > msix_vec)
> > > return 0;
> > >   }
> > >
> > > +static int vp_modern_reset_vq(struct virtqueue *vq)
> > > +{
> > > +   struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
> > > +   struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
> > > +   struct virtio_pci_vq_info *info;
> > > +   unsigned long flags;
> > > +
> > > +   if (!virtio_has_feature(vq->vdev, VIRTIO_F_RING_RESET))
> > > +   return -ENOENT;
> > > +
> > > +   vp_modern_set_queue_reset(mdev, vq->index);
> > > +
> > > +   info = vp_dev->vqs[vq->index];
> > > +
> > > +   /* delete vq from irq handler */
> > > +   spin_lock_irqsave(&vp_dev->lock, flags);
> > > +   list_del(&info->node);
> > > +   spin_unlock_irqrestore(&vp_dev->lock, flags);
> > > +
> > > +   INIT_LIST_HEAD(&info->node);
> > > +
> > > +   /* For the case where vq has an exclusive irq, to prevent the irq from
> > > +* being received again and the pending irq, call disable_irq().
> > > +*
> > > +* In the scenario based on shared interrupts, vq will be searched 
> > > from
> > > +* the queue virtqueues. Since the previous list_del() has been 
> > > deleted
> > > +* from the queue, it is impossible for vq to be called in this case.
> > > +* There is no need to close the corresponding interrupt.
> > > +*/
> > > +   if (vp_dev->per_vq_vectors && info->msix_vector != 
> > > VIRTIO_MSI_NO_VECTOR)
> > > +   disable_irq(pci_irq_vector(vp_dev->pci_dev, 
> > > info->msix_vector));
> >
> >
> > See the previous discussion and the revert of the first try to harden
> > the interrupt. We probably can't use disable_irq() since it conflicts
> > with the affinity managed IRQ that is used by some drivers.
> >
> > We need to use synchronize_irq() and a per-virtqueue flag instead. As
> > mentioned in previous patches, this could be done on top of my rework on
> > the IRQ hardening.
>
> OK, the next version will contain hardened features by per virtqueue flag.

Actually, I'm working on a new version of hardening. I plan to switch
to the virtqueue flag so we will be fine here if we do the resize work
on top.

I will cc you in the new version.

Thanks

>
> Thanks.
>
> >
> >
> > > +
> > > +   vq->reset = true;
> > > +
> > > +   return 0;
> > > +}
> > > +
> > > +static int vp_modern_enable_reset_vq(struct virtqueue *vq)
> > > +{
> > > +   struct virtio_pci_devi