A VM has only 128 MSI-X vectors available, and the virtio
configuration interrupt carries very little load. This patch uses
one shared regular (INTx) interrupt for configuration changes
across virtio devices instead of a dedicated MSI-X vector per
device, freeing that vector for virtqueues.
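
For example (hypothetical numbers): a guest running 20 virtio
devices, each using 6 per-VQ vectors, needs 20 * (6 + 1) = 140
vectors when every device also takes a dedicated config vector,
more than the 128 available; with the config interrupt moved to a
shared INTx line, 20 * 6 = 120 vectors fit.

On a shared line, a handler must first check whether its own device
raised the interrupt. A minimal sketch of that pattern (standalone
illustration, not the exact handler wired up by this patch):

    /* Sketch: config handler on a shared line.  Reading the ISR
     * status byte acks the interrupt; if our device did not raise
     * it, return IRQ_NONE so the other handlers on the line run. */
    static irqreturn_t my_shared_config_irq(int irq, void *opaque)
    {
            struct virtio_pci_device *vp_dev = opaque;
            u8 isr = ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR);

            if (!(isr & VIRTIO_PCI_ISR_CONFIG))
                    return IRQ_NONE;

            return vp_config_changed(irq, opaque);
    }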

Signed-off-by: Amos Kong <ak...@redhat.com>
---
 drivers/virtio/virtio_pci.c | 41 ++++++++++++++++-------------------------
 1 file changed, 16 insertions(+), 25 deletions(-)

diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index 3d1463c..b1263b3 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -52,6 +52,7 @@ struct virtio_pci_device
        /* Name strings for interrupts. This size should be enough,
         * and I'm too lazy to allocate each name separately. */
        char (*msix_names)[256];
+       char config_msix_name[256];
        /* Number of available vectors */
        unsigned msix_vectors;
        /* Vectors allocated, excluding per-vq vectors if any */
@@ -282,12 +283,6 @@ static void vp_free_vectors(struct virtio_device *vdev)
                        free_cpumask_var(vp_dev->msix_affinity_masks[i]);
 
        if (vp_dev->msix_enabled) {
-               /* Disable the vector used for configuration */
-               iowrite16(VIRTIO_MSI_NO_VECTOR,
-                         vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
-               /* Flush the write out to device */
-               ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
-
                pci_disable_msix(vp_dev->pci_dev);
                vp_dev->msix_enabled = 0;
        }
@@ -339,24 +334,18 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
                goto error;
        vp_dev->msix_enabled = 1;
 
-       /* Set the vector used for configuration */
-       v = vp_dev->msix_used_vectors;
-       snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
+       /* Set shared IRQ for configuration */
+       snprintf(vp_dev->config_msix_name, sizeof(vp_dev->config_msix_name),
                 "%s-config", name);
-       err = request_irq(vp_dev->msix_entries[v].vector,
-                         vp_config_changed, 0, vp_dev->msix_names[v],
+       err = request_irq(vp_dev->pci_dev->irq,
+                         vp_config_changed,
+                         IRQF_SHARED,
+                         vp_dev->config_msix_name,
                          vp_dev);
-       if (err)
-               goto error;
-       ++vp_dev->msix_used_vectors;
-
-       iowrite16(v, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
-       /* Verify we had enough resources to assign the vector */
-       v = ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
-       if (v == VIRTIO_MSI_NO_VECTOR) {
-               err = -EBUSY;
+       if (!err)
+               vp_dev->intx_enabled = 1;
+       else
                goto error;
-       }
 
        if (!per_vq_vectors) {
                /* Shared vector for all VQs */
@@ -535,14 +524,16 @@ static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs,
                        goto error_request;
        } else {
                if (per_vq_vectors) {
-                       /* Best option: one for change interrupt, one per vq. */
-                       nvectors = 1;
+                       /* Best option: one normal interrupt for change,
+                        * one MSI-X vector per vq. */
+                       nvectors = 0;
                        for (i = 0; i < nvqs; ++i)
                                if (callbacks[i])
                                        ++nvectors;
                } else {
-                       /* Second best: one for change, shared for all vqs. */
-                       nvectors = 2;
+                       /* Second best: one normal interrupt for change,
+                        * one MSI-X vector shared by all vqs. */
+                       nvectors = 1;
                }
 
                err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors);
-- 
1.9.3
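
For context, the shared interrupt ultimately dispatches to the
driver's config_changed callback via vp_config_changed(). A minimal
sketch of a driver wiring one up (hypothetical driver name;
probe/remove/id_table omitted for brevity):

    #include <linux/virtio.h>
    #include <linux/virtio_config.h>

    /* Called (from the shared config interrupt, via the virtio-pci
     * transport) when the device signals a configuration change. */
    static void demo_config_changed(struct virtio_device *vdev)
    {
            dev_info(&vdev->dev, "config space changed, re-reading\n");
            /* Re-read the interesting fields with virtio_cread() here. */
    }

    static struct virtio_driver demo_virtio_driver = {
            .driver.name    = "demo-virtio",        /* hypothetical */
            .config_changed = demo_config_changed,
    };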
