Handle setup and teardown of the admin queue within a single
scope. This also fixes an IRQ leak in the case where
nvme_setup_io_queues() fails to allocate enough iomem and bails
out with -ENOMEM.

Signed-off-by: Alexander Gordeev <agord...@redhat.com>
---
 drivers/block/nvme-core.c |   46 +++++++++++++++++++++++++---------------------
 1 file changed, 25 insertions(+), 21 deletions(-)
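
Note for reviewers (illustration only, not part of the diff): with this
patch the admin queue IRQ is requested and torn down under control of a
single scope. A condensed sketch of the resulting nvme_dev_start() flow:

        result = set_queue_count(dev, num_online_cpus());
        if (result == -EBUSY)
                return -EBUSY;

        /* the IRQ requested in nvme_configure_admin_queue() */
        nvme_teardown_admin_queue(dev);

        if (result < 0)
                goto disable;

        /* result is the number of I/O queues the controller granted */
        result = nvme_setup_io_queues(dev, result);
        if (result)
                goto disable;

        return 0;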

diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 3e1ae55..e1e4ad4 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -1287,6 +1287,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
        if (result)
                return result;
 
+       dev->entry[0].vector = dev->pci_dev->irq;
        result = queue_request_irq(dev, nvmeq, "nvme admin");
        if (result)
                return result;
@@ -1297,6 +1298,12 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
        return result;
 }
 
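+/* Free the admin queue interrupt requested in nvme_configure_admin_queue() */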
+static void nvme_teardown_admin_queue(struct nvme_dev *dev)
+{
+       free_irq(dev->entry[0].vector, dev->queues[0]);
+}
+
 struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
                                unsigned long addr, unsigned length)
 {
@@ -1744,17 +1751,10 @@ static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
        return 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
 }
 
-static int nvme_setup_io_queues(struct nvme_dev *dev)
+static int nvme_setup_io_queues(struct nvme_dev *dev, int nr_io_queues)
 {
        struct pci_dev *pdev = dev->pci_dev;
-       int result, cpu, i, vecs, nr_io_queues, size, q_depth;
-
-       nr_io_queues = num_online_cpus();
-       result = set_queue_count(dev, nr_io_queues);
-       if (result < 0)
-               return result;
-       if (result < nr_io_queues)
-               nr_io_queues = result;
+       int result, cpu, i, vecs, size, q_depth;
 
        size = db_bar_size(dev, nr_io_queues);
        if (size > 8192) {
@@ -1771,20 +1771,15 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
                dev->queues[0]->q_db = dev->dbs;
        }
 
-       /* Deregister the admin queue's interrupt */
-       free_irq(dev->entry[0].vector, dev->queues[0]);
-
        for (i = 0; i < nr_io_queues; i++)
                dev->entry[i].entry = i;
        vecs = pci_enable_msix_range(pdev, dev->entry, 1, nr_io_queues);
        if (vecs < 0) {
                vecs = pci_enable_msi_range(pdev, 1, min(nr_io_queues, 32));
-               if (vecs < 0) {
+               if (vecs < 0)
                        vecs = 1;
-               } else {
-                       for (i = 0; i < vecs; i++)
-                               dev->entry[i].vector = i + pdev->irq;
-               }
+               for (i = 0; i < vecs; i++)
+                       dev->entry[i].vector = i + pdev->irq;
        }
 
        /*
@@ -1928,7 +1923,6 @@ static int nvme_dev_map(struct nvme_dev *dev)
        if (pci_enable_device_mem(pdev))
                return result;
 
-       dev->entry[0].vector = pdev->irq;
        pci_set_master(pdev);
        bars = pci_select_bars(pdev, IORESOURCE_MEM);
        if (pci_request_selected_regions(pdev, bars, "nvme"))
@@ -2116,11 +2110,21 @@ static int nvme_dev_start(struct nvme_dev *dev)
        list_add(&dev->node, &dev_list);
        spin_unlock(&dev_list_lock);
 
-       result = nvme_setup_io_queues(dev);
-       if (result && result != -EBUSY)
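+       /* set_queue_count() returns -errno or the number of queues granted */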
+       result = set_queue_count(dev, num_online_cpus());
+       if (result == -EBUSY)
+               return -EBUSY;
+
+       nvme_teardown_admin_queue(dev);
+
+       if (result < 0)
                goto disable;
 
-       return result;
+       result = nvme_setup_io_queues(dev, result);
+       if (result)
+               goto disable;
+
+       return 0;
 
  disable:
        spin_lock(&dev_list_lock);
-- 
1.7.7.6

-- 
Regards,
Alexander Gordeev
agord...@redhat.com