On Wed, 9 Mar 2022 16:54:10 +0800, Jason Wang <jasow...@redhat.com> wrote:
>
> On 2022/3/8 8:35 PM, Xuan Zhuo wrote:
> > This patch implements virtio pci support for QUEUE RESET.
> >
> > Performing reset on a queue is divided into these steps:
> >
> >   1. virtio_reset_vq()              - notify the device to reset the queue
> >   2. virtqueue_detach_unused_buf()  - recycle the buffer submitted
> >   3. virtqueue_reset_vring()        - reset the vring (may re-alloc)
> >   4. virtio_enable_resetq()         - mmap vring to device, and enable the queue
> >
> > This patch implements virtio_reset_vq(), virtio_enable_resetq() in the
> > pci scenario.
> >
> > Signed-off-by: Xuan Zhuo <xuanz...@linux.alibaba.com>
> > ---
> >   drivers/virtio/virtio_pci_common.c |  8 +--
> >   drivers/virtio/virtio_pci_modern.c | 83 ++++++++++++++++++++++++++++++
> >   2 files changed, 88 insertions(+), 3 deletions(-)
> >
> > diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
> > index fdbde1db5ec5..863d3a8a0956 100644
> > --- a/drivers/virtio/virtio_pci_common.c
> > +++ b/drivers/virtio/virtio_pci_common.c
> > @@ -248,9 +248,11 @@ static void vp_del_vq(struct virtqueue *vq)
> >     struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
> >     unsigned long flags;
> >
> > -   spin_lock_irqsave(&vp_dev->lock, flags);
> > -   list_del(&info->node);
> > -   spin_unlock_irqrestore(&vp_dev->lock, flags);
> > +   if (!vq->reset) {
> > +           spin_lock_irqsave(&vp_dev->lock, flags);
> > +           list_del(&info->node);
> > +           spin_unlock_irqrestore(&vp_dev->lock, flags);
> > +   }
> >
> >     vp_dev->del_vq(info);
> >     kfree(info);
> > diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
> > index 49a4493732cf..3c67d3607802 100644
> > --- a/drivers/virtio/virtio_pci_modern.c
> > +++ b/drivers/virtio/virtio_pci_modern.c
> > @@ -34,6 +34,9 @@ static void vp_transport_features(struct virtio_device *vdev, u64 features)
> >     if ((features & BIT_ULL(VIRTIO_F_SR_IOV)) &&
> >                     pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV))
> >             __virtio_set_bit(vdev, VIRTIO_F_SR_IOV);
> > +
> > +   if (features & BIT_ULL(VIRTIO_F_RING_RESET))
> > +           __virtio_set_bit(vdev, VIRTIO_F_RING_RESET);
> >   }
> >
> >   /* virtio config->finalize_features() implementation */
> > @@ -199,6 +202,82 @@ static int vp_active_vq(struct virtqueue *vq, u16 msix_vec)
> >     return 0;
> >   }
> >
> > +static int vp_modern_reset_vq(struct virtqueue *vq)
> > +{
> > +   struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
> > +   struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
> > +   struct virtio_pci_vq_info *info;
> > +   unsigned long flags;
> > +   unsigned int irq;
> > +
> > +   if (!virtio_has_feature(vq->vdev, VIRTIO_F_RING_RESET))
> > +           return -ENOENT;
> > +
> > +   vp_modern_set_queue_reset(mdev, vq->index);
> > +
> > +   info = vp_dev->vqs[vq->index];
> > +
> > +   /* delete vq from irq handler */
> > +   spin_lock_irqsave(&vp_dev->lock, flags);
> > +   list_del(&info->node);
> > +   spin_unlock_irqrestore(&vp_dev->lock, flags);
> > +
> > +   INIT_LIST_HEAD(&info->node);
> > +
> > +   vq->reset = VIRTIO_VQ_RESET_STEP_DEVICE;
> > +
> > +   /* sync irq callback. */
> > +   if (vp_dev->intx_enabled) {
> > +           irq = vp_dev->pci_dev->irq;
> > +
> > +   } else {
> > +           if (info->msix_vector == VIRTIO_MSI_NO_VECTOR)
> > +                   return 0;
> > +
> > +           irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector);
> > +   }
> > +
> > +   synchronize_irq(irq);
>
>
> synchronize_irq() is not sufficient here since it breaks the effort of
> the interrupt hardening which was done by commits:
>
> 080cd7c3ac87 virtio-pci: harden INTX interrupts
> 9e35276a5344 virtio_pci: harden MSI-X interrupts
>
> Unfortunately, 080cd7c3ac87 introduced an issue where disable_irq() was
> used for the affinity-managed irq, but we're discussing a fix.
>


OK, I think disable_irq() should still be used here.

I want to settle on the solution for this detail first, so I am posting the
code and hope Jason can help confirm this point.

There are three situations in which a vq is bound to an interrupt:

1. intx
2. msix: per-vq vectors
3. msix: shared irq

Essentially these fall into two categories: per-vq vectors and shared irq.

In the shared-irq case, the handler searches the virtqueues list to find the
vq, so I think it is safe as long as list_del() is executed under the
protection of the lock; see the sketch below.
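
For reference, the shared-irq handler in virtio_pci_common.c walks that list
under vp_dev->lock (abridged from the existing vp_vring_interrupt()):

static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
{
        struct virtio_pci_device *vp_dev = opaque;
        struct virtio_pci_vq_info *info;
        irqreturn_t ret = IRQ_NONE;
        unsigned long flags;

        spin_lock_irqsave(&vp_dev->lock, flags);
        list_for_each_entry(info, &vp_dev->virtqueues, node) {
                /* dispatches to the vq's callback */
                if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
                        ret = IRQ_HANDLED;
        }
        spin_unlock_irqrestore(&vp_dev->lock, flags);

        return ret;
}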

In the case of per-vq vectors, disable_irq() is used.

Thanks.

+static int vp_modern_reset_vq(struct virtqueue *vq)
+{
+       struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
+       struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+       struct virtio_pci_vq_info *info;
+       unsigned long flags;
+
+       if (!virtio_has_feature(vq->vdev, VIRTIO_F_RING_RESET))
+               return -ENOENT;
+
+       vp_modern_set_queue_reset(mdev, vq->index);
+
+       info = vp_dev->vqs[vq->index];
+
+       /* delete vq from irq handler */
+       spin_lock_irqsave(&vp_dev->lock, flags);
+       list_del(&info->node);
+       spin_unlock_irqrestore(&vp_dev->lock, flags);
+
+       INIT_LIST_HEAD(&info->node);
+
+       /* For the case where the vq has an exclusive irq, call disable_irq()
+        * to prevent the irq from being delivered again and to wait for any
+        * in-flight handler to finish.
+        *
+        * In the shared-interrupt case, the vq is looked up from the
+        * virtqueues list. Since the list_del() above removed it from that
+        * list, its callback can no longer be invoked, so there is no need
+        * to disable the corresponding interrupt.
+        */
+       if (vp_dev->per_vq_vectors && info->msix_vector != VIRTIO_MSI_NO_VECTOR)
+               disable_irq(pci_irq_vector(vp_dev->pci_dev, info->msix_vector));
+
+       vq->reset = true;
+
+       return 0;
+}
+
+static int vp_modern_enable_reset_vq(struct virtqueue *vq)
+{
+       struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
+       struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+       struct virtio_pci_vq_info *info;
+       unsigned long flags;
+       u16 index;
+       int err;
+
+       if (!vq->reset)
+               return -EBUSY;
+
+       index = vq->index;
+       info = vp_dev->vqs[index];
+
+       /* check queue reset status */
+       if (vp_modern_get_queue_reset(mdev, index) != 1)
+               return -EBUSY;
+
+       err = vp_active_vq(vq, info->msix_vector);
+       if (err)
+               return err;
+
+       if (vq->callback) {
+               spin_lock_irqsave(&vp_dev->lock, flags);
+               list_add(&info->node, &vp_dev->virtqueues);
+               spin_unlock_irqrestore(&vp_dev->lock, flags);
+       } else {
+               INIT_LIST_HEAD(&info->node);
+       }
+
+       vp_modern_set_queue_enable(&vp_dev->mdev, index, true);
+       vq->reset = false;
+
+       if (vp_dev->per_vq_vectors && info->msix_vector != VIRTIO_MSI_NO_VECTOR)
+               enable_irq(pci_irq_vector(vp_dev->pci_dev, info->msix_vector));
+
+       return 0;
+}
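
For context, here is a minimal sketch of the two queue_reset helpers used
above; the struct/field names (virtio_pci_modern_common_cfg, queue_reset)
are assumptions based on this series and the VIRTIO_F_RING_RESET spec
proposal, not mainline code:

static void vp_modern_set_queue_reset(struct virtio_pci_modern_device *mdev,
                                      u16 index)
{
        struct virtio_pci_modern_common_cfg __iomem *cfg;

        cfg = (struct virtio_pci_modern_common_cfg __iomem *)mdev->common;

        /* select the queue, then write 1 to the new queue_reset register */
        vp_iowrite16(index, &cfg->cfg.queue_select);
        vp_iowrite16(1, &cfg->queue_reset);
}

static u16 vp_modern_get_queue_reset(struct virtio_pci_modern_device *mdev,
                                     u16 index)
{
        struct virtio_pci_modern_common_cfg __iomem *cfg;

        cfg = (struct virtio_pci_modern_common_cfg __iomem *)mdev->common;

        /* the device reports 1 here once the queue reset has completed */
        vp_iowrite16(index, &cfg->cfg.queue_select);
        return vp_ioread16(&cfg->queue_reset);
}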

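To tie this back to the four steps in the commit message, a driver would use
these hooks roughly as follows. This is only an illustrative sketch: the
virtqueue_reset_vring() signature and free_one_buf() are placeholders, since
the cover letter fixes only the step names:

static int demo_reset_and_resize_vq(struct virtqueue *vq, u32 new_num)
{
        void *buf;
        int err;

        /* 1. notify the device to reset the queue */
        err = virtio_reset_vq(vq);
        if (err)
                return err;

        /* 2. recycle the buffers that were still submitted */
        while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
                free_one_buf(buf);  /* placeholder for driver-specific free */

        /* 3. reset the vring (may re-allocate it with new_num entries) */
        err = virtqueue_reset_vring(vq, new_num);
        if (err)
                return err;

        /* 4. map the vring back to the device and re-enable the queue */
        return virtio_enable_resetq(vq);
}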
