From: Chad Dupuis <cdup...@marvell.com>

- If a TMF and a cleanup are issued at the same time against the same
  xid, they can cause a call trace because the io_req->tm_done
  completion is used for both.
- Set and clear the cleanup bit in the cleanup routine.

Signed-off-by: Chad Dupuis <cdup...@marvell.com>
Signed-off-by: Saurav Kashyap <skash...@marvell.com>
---
 drivers/scsi/qedf/qedf.h    |  1 +
 drivers/scsi/qedf/qedf_io.c | 11 +++++++----
 2 files changed, 8 insertions(+), 4 deletions(-)

diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index 9e5e183..fb7d0d5 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -128,6 +128,7 @@ struct qedf_ioreq {
        struct delayed_work timeout_work;
        struct completion tm_done;
        struct completion abts_done;
+       struct completion cleanup_done;
        struct e4_fcoe_task_context *task;
        struct fcoe_task_params *task_params;
        struct scsi_sgl_task_params *sgl_task_params;
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index 044ef63..1a36ab5 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -2065,10 +2065,13 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
                          io_req->xid);
                return SUCCESS;
        }
+       set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
 
        /* Ensure room on SQ */
        if (!atomic_read(&fcport->free_sqes)) {
                QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
+               /* Need to make sure we clear the flag since it was set */
+               clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
                return FAILED;
        }
 
@@ -2094,7 +2097,7 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
 
        task = qedf_get_task_mem(&qedf->tasks, xid);
 
-       init_completion(&io_req->tm_done);
+       init_completion(&io_req->cleanup_done);
 
        spin_lock_irqsave(&fcport->rport_lock, flags);
 
@@ -2108,8 +2111,8 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
 
        spin_unlock_irqrestore(&fcport->rport_lock, flags);
 
-       tmo = wait_for_completion_timeout(&io_req->tm_done,
-           QEDF_CLEANUP_TIMEOUT * HZ);
+       tmo = wait_for_completion_timeout(&io_req->cleanup_done,
+                                         QEDF_CLEANUP_TIMEOUT * HZ);
 
        if (!tmo) {
                rc = FAILED;
@@ -2153,7 +2156,7 @@ void qedf_process_cleanup_compl(struct qedf_ctx *qedf, 
struct fcoe_cqe *cqe,
        clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
 
        /* Complete so we can finish cleaning up the I/O */
-       complete(&io_req->tm_done);
+       complete(&io_req->cleanup_done);
 }
 
 static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd 
*sc_cmd,
-- 
1.8.3.1

Reply via email to