The driver abuses the MSI descriptors for internal purposes. Aside from core
code and MSI providers, nothing has to care about their existence. They have
been encapsulated with a lot of effort because this kind of abuse caused
all sorts of issues including a maintainability nightmare.

Rewrite the code so it uses dedicated storage to hand the required
information to the interrupt handler and use a custom cleanup function to
simplify the error path.

No functional change intended.

Signed-off-by: Thomas Gleixner <t...@linutronix.de>
Cc: Manivannan Sadhasivam <manivannan.sadhasi...@linaro.org>
Cc: "James E.J. Bottomley" <james.bottom...@hansenpartnership.com>
Cc: "Martin K. Petersen" <martin.peter...@oracle.com>
Cc: linux-s...@vger.kernel.org
---
V4: Fix inverse NULL pointer check and use __free() for error paths - James
---
 drivers/ufs/host/ufs-qcom.c |   85 ++++++++++++++++++++++++--------------------
 1 file changed, 48 insertions(+), 37 deletions(-)

--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -1782,25 +1782,38 @@ static void ufs_qcom_write_msi_msg(struc
        ufshcd_mcq_config_esi(hba, msg);
 }
 
+struct ufs_qcom_irq {
+       unsigned int            irq;
+       unsigned int            idx;
+       struct ufs_hba          *hba;
+};
+
 static irqreturn_t ufs_qcom_mcq_esi_handler(int irq, void *data)
 {
-       struct msi_desc *desc = data;
-       struct device *dev = msi_desc_to_dev(desc);
-       struct ufs_hba *hba = dev_get_drvdata(dev);
-       u32 id = desc->msi_index;
-       struct ufs_hw_queue *hwq = &hba->uhq[id];
+       struct ufs_qcom_irq *qi = data;
+       struct ufs_hba *hba = qi->hba;
+       struct ufs_hw_queue *hwq = &hba->uhq[qi->idx];
 
-       ufshcd_mcq_write_cqis(hba, 0x1, id);
+       ufshcd_mcq_write_cqis(hba, 0x1, qi->idx);
        ufshcd_mcq_poll_cqe_lock(hba, hwq);
 
        return IRQ_HANDLED;
 }
 
+static void ufs_qcom_irq_free(struct ufs_qcom_irq *uqi)
+{
+       for (struct ufs_qcom_irq *q = uqi; q->irq; q++)
+               devm_free_irq(q->hba->dev, q->irq, q->hba);
+
+       platform_device_msi_free_irqs_all(uqi->hba->dev);
+       devm_kfree(uqi->hba->dev, uqi);
+}
+
+DEFINE_FREE(ufs_qcom_irq, struct ufs_qcom_irq *, if (_T) ufs_qcom_irq_free(_T))
+
 static int ufs_qcom_config_esi(struct ufs_hba *hba)
 {
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-       struct msi_desc *desc;
-       struct msi_desc *failed_desc = NULL;
        int nr_irqs, ret;
 
        if (host->esi_enabled)
@@ -1811,6 +1824,14 @@ static int ufs_qcom_config_esi(struct uf
         * 2. Poll queues do not need ESI.
         */
        nr_irqs = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
+
+       struct ufs_qcom_irq *qi __free(ufs_qcom_irq) =
+               devm_kcalloc(hba->dev, nr_irqs, sizeof(*qi), GFP_KERNEL);
+       if (!qi)
+               return -ENOMEM;
+       /* Preset so __free() has a pointer to hba in all error paths */
+       qi[0].hba = hba;
+
        ret = platform_device_msi_init_and_alloc_irqs(hba->dev, nr_irqs,
                                                      ufs_qcom_write_msi_msg);
        if (ret) {
@@ -1818,41 +1839,31 @@ static int ufs_qcom_config_esi(struct uf
                return ret;
        }
 
-       msi_lock_descs(hba->dev);
-       msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) {
-               ret = devm_request_irq(hba->dev, desc->irq,
-                                      ufs_qcom_mcq_esi_handler,
-                                      IRQF_SHARED, "qcom-mcq-esi", desc);
+       for (int idx = 0; idx < nr_irqs; idx++) {
+               qi[idx].irq = msi_get_virq(hba->dev, idx);
+               qi[idx].idx = idx;
+               qi[idx].hba = hba;
+
+               ret = devm_request_irq(hba->dev, qi[idx].irq, ufs_qcom_mcq_esi_handler,
+                                      IRQF_SHARED, "qcom-mcq-esi", qi + idx);
                if (ret) {
                        dev_err(hba->dev, "%s: Fail to request IRQ for %d, err = %d\n",
-                               __func__, desc->irq, ret);
-                       failed_desc = desc;
-                       break;
+                               __func__, qi[idx].irq, ret);
+                       qi[idx].irq = 0;
+                       return ret;
                }
        }
-       msi_unlock_descs(hba->dev);
 
-       if (ret) {
-               /* Rewind */
-               msi_lock_descs(hba->dev);
-               msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) {
-                       if (desc == failed_desc)
-                               break;
-                       devm_free_irq(hba->dev, desc->irq, hba);
-               }
-               msi_unlock_descs(hba->dev);
-               platform_device_msi_free_irqs_all(hba->dev);
-       } else {
-               if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 &&
-                   host->hw_ver.step == 0)
-                       ufshcd_rmwl(hba, ESI_VEC_MASK,
-                                   FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1),
-                                   REG_UFS_CFG3);
-               ufshcd_mcq_enable_esi(hba);
-               host->esi_enabled = true;
-       }
+       retain_ptr(qi);
 
-       return ret;
+       if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 &&
+           host->hw_ver.step == 0) {
+               ufshcd_rmwl(hba, ESI_VEC_MASK, FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1),
+                           REG_UFS_CFG3);
+       }
+       ufshcd_mcq_enable_esi(hba);
+       host->esi_enabled = true;
+       return 0;
 }
 
 /*


Reply via email to